aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--Documentation/connector/cn_test.c13
-rw-r--r--Documentation/devicetree/bindings/net/broadcom-bcm87xx.txt29
-rw-r--r--Documentation/devicetree/bindings/net/can/fsl-flexcan.txt3
-rw-r--r--Documentation/devicetree/bindings/net/fsl-fec.txt6
-rw-r--r--Documentation/devicetree/bindings/net/phy.txt12
-rw-r--r--Documentation/feature-removal-schedule.txt44
-rw-r--r--Documentation/networking/batman-adv.txt5
-rw-r--r--Documentation/networking/caif/Linux-CAIF.txt91
-rw-r--r--Documentation/networking/can.txt186
-rw-r--r--Documentation/networking/ip-sysctl.txt16
-rw-r--r--Documentation/networking/s2io.txt14
-rw-r--r--Documentation/networking/stmmac.txt36
-rw-r--r--Documentation/networking/vxge.txt7
-rw-r--r--MAINTAINERS21
-rw-r--r--arch/sparc/net/bpf_jit_comp.c4
-rw-r--r--arch/x86/net/bpf_jit_comp.c4
-rw-r--r--crypto/crypto_user.c7
-rw-r--r--drivers/bcma/scan.c6
-rw-r--r--drivers/bluetooth/bluecard_cs.c10
-rw-r--r--drivers/bluetooth/bpa10x.c2
-rw-r--r--drivers/bluetooth/bt3c_cs.c4
-rw-r--r--drivers/bluetooth/btmrvl_sdio.c3
-rw-r--r--drivers/bluetooth/btuart_cs.c4
-rw-r--r--drivers/bluetooth/btusb.c14
-rw-r--r--drivers/bluetooth/dtl1_cs.c4
-rw-r--r--drivers/bluetooth/hci_bcsp.c2
-rw-r--r--drivers/bluetooth/hci_h4.c2
-rw-r--r--drivers/bluetooth/hci_ldisc.c2
-rw-r--r--drivers/bluetooth/hci_ll.c6
-rw-r--r--drivers/connector/connector.c25
-rw-r--r--drivers/ieee802154/Kconfig6
-rw-r--r--drivers/ieee802154/Makefile1
-rw-r--r--drivers/ieee802154/at86rf230.c964
-rw-r--r--drivers/infiniband/core/netlink.c17
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_cm.c5
-rw-r--r--drivers/infiniband/hw/mlx4/main.c62
-rw-r--r--drivers/infiniband/hw/mlx4/mlx4_ib.h1
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c1
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c4
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_multicast.c35
-rw-r--r--drivers/net/appletalk/cops.c2
-rw-r--r--drivers/net/bonding/bond_3ad.c13
-rw-r--r--drivers/net/bonding/bond_3ad.h4
-rw-r--r--drivers/net/bonding/bond_alb.c26
-rw-r--r--drivers/net/bonding/bond_main.c55
-rw-r--r--drivers/net/bonding/bonding.h4
-rw-r--r--drivers/net/caif/caif_hsi.c548
-rw-r--r--drivers/net/can/bfin_can.c2
-rw-r--r--drivers/net/can/c_can/Kconfig20
-rw-r--r--drivers/net/can/c_can/Makefile1
-rw-r--r--drivers/net/can/c_can/c_can.c120
-rw-r--r--drivers/net/can/c_can/c_can.h163
-rw-r--r--drivers/net/can/c_can/c_can_pci.c221
-rw-r--r--drivers/net/can/c_can/c_can_platform.c76
-rw-r--r--drivers/net/can/cc770/cc770.c2
-rw-r--r--drivers/net/can/dev.c37
-rw-r--r--drivers/net/can/flexcan.c107
-rw-r--r--drivers/net/can/mcp251x.c3
-rw-r--r--drivers/net/can/vcan.c27
-rw-r--r--drivers/net/cris/eth_v10.c2
-rw-r--r--drivers/net/dummy.c15
-rw-r--r--drivers/net/ethernet/3com/3c501.c2
-rw-r--r--drivers/net/ethernet/8390/apne.c2
-rw-r--r--drivers/net/ethernet/aeroflex/greth.c8
-rw-r--r--drivers/net/ethernet/amd/declance.c4
-rw-r--r--drivers/net/ethernet/amd/lance.c5
-rw-r--r--drivers/net/ethernet/apple/macmace.c2
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_hw.c6
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_main.c77
-rw-r--r--drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c2
-rw-r--r--drivers/net/ethernet/atheros/atl1e/atl1e_main.c105
-rw-r--r--drivers/net/ethernet/atheros/atl1e/atl1e_param.c2
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl1.c43
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl2.c56
-rw-r--r--drivers/net/ethernet/atheros/atlx/atlx.c10
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.c93
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.h45
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x.h166
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c242
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h63
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c30
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c584
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h4
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h184
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c1230
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h53
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c279
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h168
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h128
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c8
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h42
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c68
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h2
-rw-r--r--drivers/net/ethernet/broadcom/cnic.c53
-rw-r--r--drivers/net/ethernet/broadcom/cnic_if.h13
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c2
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_cee.c97
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_cs.h34
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_defs.h63
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_defs_cna.h15
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_defs_mfg_comm.h35
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_defs_status.h3
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_ioc.c393
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_ioc.h43
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c48
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_msgq.c4
-rw-r--r--drivers/net/ethernet/brocade/bna/bfi.h81
-rw-r--r--drivers/net/ethernet/brocade/bna/bfi_cna.h42
-rw-r--r--drivers/net/ethernet/brocade/bna/bfi_enet.h107
-rw-r--r--drivers/net/ethernet/brocade/bna/bfi_reg.h4
-rw-r--r--drivers/net/ethernet/brocade/bna/bna.h51
-rw-r--r--drivers/net/ethernet/brocade/bna/bna_enet.c15
-rw-r--r--drivers/net/ethernet/brocade/bna/bna_hw_defs.h33
-rw-r--r--drivers/net/ethernet/brocade/bna/bna_tx_rx.c17
-rw-r--r--drivers/net/ethernet/brocade/bna/bna_types.h66
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.c12
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.h4
-rw-r--r--drivers/net/ethernet/brocade/bna/cna_fwimg.c4
-rw-r--r--drivers/net/ethernet/cadence/macb.c13
-rw-r--r--drivers/net/ethernet/calxeda/xgmac.c35
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c30
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/l2t.c6
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/l2t.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/sge.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/t3_hw.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/sge.c5
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_main.c2
-rw-r--r--drivers/net/ethernet/dec/tulip/de4x5.c2
-rw-r--r--drivers/net/ethernet/emulex/benet/be.h7
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.c21
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.h8
-rw-r--r--drivers/net/ethernet/emulex/benet/be_hw.h2
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c214
-rw-r--r--drivers/net/ethernet/ethoc.c4
-rw-r--r--drivers/net/ethernet/freescale/fec.c32
-rw-r--r--drivers/net/ethernet/freescale/fsl_pq_mdio.c29
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c491
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ethtool.c420
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth.c2
-rw-r--r--drivers/net/ethernet/hp/hp100.c6
-rw-r--r--drivers/net/ethernet/i825xx/lp486e.c8
-rw-r--r--drivers/net/ethernet/i825xx/sun3_82586.c4
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_qmr.c4
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_hw.c8
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c21
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c13
-rw-r--r--drivers/net/ethernet/intel/e1000e/param.c43
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_regs.h1
-rw-r--r--drivers/net/ethernet/intel/igb/igb.h25
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ethtool.c52
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c157
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ptp.c12
-rw-r--r--drivers/net/ethernet/intel/igbvf/netdev.c1
-rw-r--r--drivers/net/ethernet/intel/igbvf/vf.c5
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_main.c6
-rw-r--r--drivers/net/ethernet/intel/ixgbe/Makefile4
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c4
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.c3
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c147
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c10
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c61
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c23
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c187
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c10
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_type.h39
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c6
-rw-r--r--drivers/net/ethernet/lantiq_etop.c1
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c4
-rw-r--r--drivers/net/ethernet/marvell/pxa168_eth.c4
-rw-r--r--drivers/net/ethernet/marvell/sky2.c18
-rw-r--r--drivers/net/ethernet/marvell/sky2.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cmd.c19
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_ethtool.c382
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c316
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c30
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.c91
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c59
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mcg.c524
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4.h29
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h28
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/port.c107
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/profile.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/resource_tracker.c282
-rw-r--r--drivers/net/ethernet/micrel/ks8851_mll.c35
-rw-r--r--drivers/net/ethernet/micrel/ksz884x.c6
-rw-r--r--drivers/net/ethernet/myricom/myri10ge/myri10ge.c2
-rw-r--r--drivers/net/ethernet/neterion/s2io.c24
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-config.c8
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-config.h2
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-main.c15
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-main.h3
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-traffic.c5
-rw-r--r--drivers/net/ethernet/nvidia/forcedeth.c5
-rw-r--r--drivers/net/ethernet/nxp/lpc_eth.c17
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c12
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c10
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c42
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c4
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic.h4
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c21
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c4
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c2
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic.h15
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c37
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h1
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c5
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c10
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge.h13
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c315
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_main.c94
-rw-r--r--drivers/net/ethernet/rdc/r6040.c16
-rw-r--r--drivers/net/ethernet/realtek/r8169.c984
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c371
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.h77
-rw-r--r--drivers/net/ethernet/sfc/mcdi_pcol.h3
-rw-r--r--drivers/net/ethernet/sfc/net_driver.h2
-rw-r--r--drivers/net/ethernet/sfc/rx.c1
-rw-r--r--drivers/net/ethernet/sgi/ioc3-eth.c4
-rw-r--r--drivers/net/ethernet/smsc/smc911x.c6
-rw-r--r--drivers/net/ethernet/smsc/smc91x.c6
-rw-r--r--drivers/net/ethernet/smsc/smsc911x.c17
-rw-r--r--drivers/net/ethernet/smsc/smsc9420.c3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/common.h31
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000.h20
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c101
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h8
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c57
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c196
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c4
-rw-r--r--drivers/net/ethernet/sun/sunbmac.c2
-rw-r--r--drivers/net/ethernet/sun/sungem.c1
-rw-r--r--drivers/net/ethernet/sun/sunhme.c3
-rw-r--r--drivers/net/ethernet/sun/sunqe.c2
-rw-r--r--drivers/net/ethernet/tehuti/tehuti.c177
-rw-r--r--drivers/net/ethernet/ti/davinci_emac.c78
-rw-r--r--drivers/net/ethernet/toshiba/spider_net.c6
-rw-r--r--drivers/net/ethernet/via/via-velocity.c2
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_main.c3
-rw-r--r--drivers/net/fddi/defxx.c4
-rw-r--r--drivers/net/fddi/skfp/pmf.c8
-rw-r--r--drivers/net/hamradio/mkiss.c8
-rw-r--r--drivers/net/hyperv/netvsc.c2
-rw-r--r--drivers/net/irda/ali-ircc.c6
-rw-r--r--drivers/net/irda/au1k_ir.c2
-rw-r--r--drivers/net/macvtap.c8
-rw-r--r--drivers/net/phy/Kconfig5
-rw-r--r--drivers/net/phy/Makefile1
-rw-r--r--drivers/net/phy/amd.c8
-rw-r--r--drivers/net/phy/bcm63xx.c31
-rw-r--r--drivers/net/phy/bcm87xx.c231
-rw-r--r--drivers/net/phy/broadcom.c119
-rw-r--r--drivers/net/phy/cicada.c35
-rw-r--r--drivers/net/phy/davicom.c41
-rw-r--r--drivers/net/phy/dp83640.c23
-rw-r--r--drivers/net/phy/fixed.c4
-rw-r--r--drivers/net/phy/icplus.c31
-rw-r--r--drivers/net/phy/lxt.c47
-rw-r--r--drivers/net/phy/marvell.c22
-rw-r--r--drivers/net/phy/mdio_bus.c14
-rw-r--r--drivers/net/phy/micrel.c62
-rw-r--r--drivers/net/phy/national.c8
-rw-r--r--drivers/net/phy/phy.c316
-rw-r--r--drivers/net/phy/phy_device.c139
-rw-r--r--drivers/net/phy/realtek.c6
-rw-r--r--drivers/net/phy/smsc.c64
-rw-r--r--drivers/net/phy/spi_ks8995.c4
-rw-r--r--drivers/net/phy/ste10Xp.c21
-rw-r--r--drivers/net/phy/vitesse.c52
-rw-r--r--drivers/net/slip/slip.c4
-rw-r--r--drivers/net/team/team.c592
-rw-r--r--drivers/net/team/team_mode_activebackup.c14
-rw-r--r--drivers/net/team/team_mode_loadbalance.c543
-rw-r--r--drivers/net/team/team_mode_roundrobin.c4
-rw-r--r--drivers/net/usb/asix.c28
-rw-r--r--drivers/net/usb/cdc-phonet.c4
-rw-r--r--drivers/net/usb/pegasus.c4
-rw-r--r--drivers/net/usb/qmi_wwan.c324
-rw-r--r--drivers/net/usb/smsc95xx.c31
-rw-r--r--drivers/net/usb/usbnet.c75
-rw-r--r--drivers/net/virtio_net.c2
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c2
-rw-r--r--drivers/net/wan/x25_asy.c2
-rw-r--r--drivers/net/wimax/i2400m/fw.c2
-rw-r--r--drivers/net/wireless/adm8211.c3
-rw-r--r--drivers/net/wireless/airo.c4
-rw-r--r--drivers/net/wireless/ath/ath6kl/cfg80211.c228
-rw-r--r--drivers/net/wireless/ath/ath6kl/cfg80211.h2
-rw-r--r--drivers/net/wireless/ath/ath6kl/core.h41
-rw-r--r--drivers/net/wireless/ath/ath6kl/htc_mbox.c2
-rw-r--r--drivers/net/wireless/ath/ath6kl/init.c28
-rw-r--r--drivers/net/wireless/ath/ath6kl/main.c28
-rw-r--r--drivers/net/wireless/ath/ath6kl/target.h1
-rw-r--r--drivers/net/wireless/ath/ath6kl/txrx.c48
-rw-r--r--drivers/net/wireless/ath/ath6kl/wmi.c148
-rw-r--r--drivers/net/wireless/ath/ath6kl/wmi.h58
-rw-r--r--drivers/net/wireless/ath/ath9k/Makefile4
-rw-r--r--drivers/net/wireless/ath/ath9k/ahb.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/ani.c489
-rw-r--r--drivers/net/wireless/ath/ath9k/ani.h32
-rw-r--r--drivers/net/wireless/ath/ath9k/antenna.c776
-rw-r--r--drivers/net/wireless/ath/ath9k/ar5008_phy.c176
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_hw.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h1
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_calib.c20
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.c9
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mac.c7
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mci.c728
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mci.h40
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.c48
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.h32
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h53
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9330_1p2_initvals.h882
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9340_initvals.h755
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h12
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9485_initvals.h1404
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h772
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h51
-rw-r--r--drivers/net/wireless/ath/ath9k/beacon.c23
-rw-r--r--drivers/net/wireless/ath/ath9k/btcoex.c10
-rw-r--r--drivers/net/wireless/ath/ath9k/btcoex.h4
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.c11
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.h1
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_4k.c5
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_9287.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_def.c7
-rw-r--r--drivers/net/wireless/ath/ath9k/gpio.c46
-rw-r--r--drivers/net/wireless/ath/ath9k/htc.h4
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_beacon.c10
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_gpio.c122
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_init.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_main.c78
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_txrx.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c223
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h21
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c18
-rw-r--r--drivers/net/wireless/ath/ath9k/link.c510
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c739
-rw-r--r--drivers/net/wireless/ath/ath9k/mci.c242
-rw-r--r--drivers/net/wireless/ath/ath9k/mci.h11
-rw-r--r--drivers/net/wireless/ath/ath9k/pci.c7
-rw-r--r--drivers/net/wireless/ath/ath9k/rc.c17
-rw-r--r--drivers/net/wireless/ath/ath9k/recv.c768
-rw-r--r--drivers/net/wireless/ath/ath9k/reg.h10
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c63
-rw-r--r--drivers/net/wireless/ath/carl9170/cmd.c2
-rw-r--r--drivers/net/wireless/ath/carl9170/rx.c4
-rw-r--r--drivers/net/wireless/atmel.c4
-rw-r--r--drivers/net/wireless/b43legacy/dma.c2
-rw-r--r--drivers/net/wireless/b43legacy/main.c2
-rw-r--r--drivers/net/wireless/b43legacy/xmit.c6
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/Makefile2
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c2
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd.h3
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h8
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c29
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.c126
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h59
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c9
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c669
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c17
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/aiutils.c3
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/aiutils.h2
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/ampdu.c5
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/channel.c1226
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/channel.h4
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/dma.c2
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c16
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/main.c38
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c2
-rw-r--r--drivers/net/wireless/brcm80211/brcmutil/utils.c2
-rw-r--r--drivers/net/wireless/brcm80211/include/brcm_hw_ids.h1
-rw-r--r--drivers/net/wireless/hostap/hostap_proc.c3
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2200.c27
-rw-r--r--drivers/net/wireless/iwlegacy/4965-mac.c13
-rw-r--r--drivers/net/wireless/iwlegacy/common.c7
-rw-r--r--drivers/net/wireless/iwlwifi/Kconfig5
-rw-r--r--drivers/net/wireless/iwlwifi/Makefile32
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/Makefile13
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/agn.h (renamed from drivers/net/wireless/iwlwifi/iwl-agn.h)113
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/calib.c (renamed from drivers/net/wireless/iwlwifi/iwl-agn-calib.c)24
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/calib.h (renamed from drivers/net/wireless/iwlwifi/iwl-agn-calib.h)4
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/commands.h (renamed from drivers/net/wireless/iwlwifi/iwl-commands.h)7
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/debugfs.c (renamed from drivers/net/wireless/iwlwifi/iwl-debugfs.c)31
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/dev.h (renamed from drivers/net/wireless/iwlwifi/iwl-dev.h)176
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/devices.c (renamed from drivers/net/wireless/iwlwifi/iwl-agn-devices.c)191
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/led.c (renamed from drivers/net/wireless/iwlwifi/iwl-led.c)5
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/led.h (renamed from drivers/net/wireless/iwlwifi/iwl-led.h)0
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/lib.c (renamed from drivers/net/wireless/iwlwifi/iwl-agn-lib.c)22
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/mac80211.c (renamed from drivers/net/wireless/iwlwifi/iwl-mac80211.c)201
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/main.c (renamed from drivers/net/wireless/iwlwifi/iwl-agn.c)447
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/power.c (renamed from drivers/net/wireless/iwlwifi/iwl-power.c)11
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/power.h (renamed from drivers/net/wireless/iwlwifi/iwl-power.h)2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/rs.c (renamed from drivers/net/wireless/iwlwifi/iwl-agn-rs.c)50
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/rs.h (renamed from drivers/net/wireless/iwlwifi/iwl-agn-rs.h)3
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/rx.c (renamed from drivers/net/wireless/iwlwifi/iwl-agn-rx.c)34
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/rxon.c (renamed from drivers/net/wireless/iwlwifi/iwl-agn-rxon.c)52
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/scan.c (renamed from drivers/net/wireless/iwlwifi/iwl-scan.c)158
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/sta.c (renamed from drivers/net/wireless/iwlwifi/iwl-agn-sta.c)60
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/testmode.c471
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/tt.c (renamed from drivers/net/wireless/iwlwifi/iwl-agn-tt.c)13
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/tt.h (renamed from drivers/net/wireless/iwlwifi/iwl-agn-tt.h)2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/tx.c (renamed from drivers/net/wireless/iwlwifi/iwl-agn-tx.c)62
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/ucode.c (renamed from drivers/net/wireless/iwlwifi/iwl-ucode.c)34
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-config.h28
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-csr.h28
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debug.c6
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debug.h17
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-devtrace.c5
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-devtrace.h1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-drv.c151
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c903
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h138
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom-read.c463
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom-read.h70
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom.c1148
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom.h269
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-fh.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-io.c53
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-io.h16
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-notif-wait.c13
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-op-mode.h24
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-prph.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-test.c856
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-test.h161
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-testmode.c1114
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans.h56
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/1000.c (renamed from drivers/net/wireless/iwlwifi/iwl-1000.c)19
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/2000.c (renamed from drivers/net/wireless/iwlwifi/iwl-2000.c)22
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/5000.c (renamed from drivers/net/wireless/iwlwifi/iwl-5000.c)20
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/6000.c (renamed from drivers/net/wireless/iwlwifi/iwl-6000.c)25
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/cfg.h (renamed from drivers/net/wireless/iwlwifi/iwl-cfg.h)0
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/drv.c (renamed from drivers/net/wireless/iwlwifi/iwl-pci.c)5
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/internal.h (renamed from drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h)22
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/rx.c (renamed from drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c)105
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/trans.c (renamed from drivers/net/wireless/iwlwifi/iwl-trans-pcie.c)366
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/tx.c (renamed from drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c)200
-rw-r--r--drivers/net/wireless/libertas/cfg.c39
-rw-r--r--drivers/net/wireless/libertas/debugfs.c4
-rw-r--r--drivers/net/wireless/libertas/dev.h1
-rw-r--r--drivers/net/wireless/libertas/if_usb.c2
-rw-r--r--drivers/net/wireless/libertas/mesh.c7
-rw-r--r--drivers/net/wireless/libertas_tf/if_usb.c2
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c12
-rw-r--r--drivers/net/wireless/mwifiex/11n.c14
-rw-r--r--drivers/net/wireless/mwifiex/11n.h3
-rw-r--r--drivers/net/wireless/mwifiex/11n_rxreorder.c18
-rw-r--r--drivers/net/wireless/mwifiex/cfg80211.c67
-rw-r--r--drivers/net/wireless/mwifiex/cmdevt.c7
-rw-r--r--drivers/net/wireless/mwifiex/fw.h23
-rw-r--r--drivers/net/wireless/mwifiex/ie.c55
-rw-r--r--drivers/net/wireless/mwifiex/init.c66
-rw-r--r--drivers/net/wireless/mwifiex/ioctl.h10
-rw-r--r--drivers/net/wireless/mwifiex/join.c20
-rw-r--r--drivers/net/wireless/mwifiex/main.c7
-rw-r--r--drivers/net/wireless/mwifiex/main.h19
-rw-r--r--drivers/net/wireless/mwifiex/scan.c108
-rw-r--r--drivers/net/wireless/mwifiex/sta_cmd.c39
-rw-r--r--drivers/net/wireless/mwifiex/sta_cmdresp.c51
-rw-r--r--drivers/net/wireless/mwifiex/sta_event.c2
-rw-r--r--drivers/net/wireless/mwifiex/sta_ioctl.c3
-rw-r--r--drivers/net/wireless/mwifiex/uap_cmd.c303
-rw-r--r--drivers/net/wireless/orinoco/cfg.c9
-rw-r--r--drivers/net/wireless/p54/eeprom.c2
-rw-r--r--drivers/net/wireless/p54/fwio.c2
-rw-r--r--drivers/net/wireless/prism54/islpci_eth.c2
-rw-r--r--drivers/net/wireless/ray_cs.c2
-rw-r--r--drivers/net/wireless/rt2x00/Kconfig8
-rw-r--r--drivers/net/wireless/rt2x00/rt2800.h181
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.c377
-rw-r--r--drivers/net/wireless/rt2x00/rt2800pci.c83
-rw-r--r--drivers/net/wireless/rt2x00/rt2800pci.h1
-rw-r--r--drivers/net/wireless/rt2x00/rt2800usb.c4
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00.h1
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00dev.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00mac.c14
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00pci.c9
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180/dev.c2
-rw-r--r--drivers/net/wireless/rtlwifi/base.c2
-rw-r--r--drivers/net/wireless/rtlwifi/cam.c7
-rw-r--r--drivers/net/wireless/rtlwifi/core.c14
-rw-r--r--drivers/net/wireless/rtlwifi/efuse.c4
-rw-r--r--drivers/net/wireless/rtlwifi/pci.c31
-rw-r--r--drivers/net/wireless/rtlwifi/ps.c10
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c5
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c4
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/hw.c43
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/trx.c4
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/hw.c43
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/trx.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/dm.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/fw.c6
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/hw.c34
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/trx.c6
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/dm.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/hw.c46
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/phy.c3
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/sw.c1
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/trx.c2
-rw-r--r--drivers/net/wireless/ti/Kconfig1
-rw-r--r--drivers/net/wireless/ti/Makefile1
-rw-r--r--drivers/net/wireless/ti/wl1251/cmd.c9
-rw-r--r--drivers/net/wireless/ti/wl1251/main.c67
-rw-r--r--drivers/net/wireless/ti/wl1251/wl1251.h1
-rw-r--r--drivers/net/wireless/ti/wl12xx/Makefile2
-rw-r--r--drivers/net/wireless/ti/wl12xx/acx.h237
-rw-r--r--drivers/net/wireless/ti/wl12xx/cmd.c24
-rw-r--r--drivers/net/wireless/ti/wl12xx/debugfs.c243
-rw-r--r--drivers/net/wireless/ti/wl12xx/debugfs.h28
-rw-r--r--drivers/net/wireless/ti/wl12xx/main.c574
-rw-r--r--drivers/net/wireless/ti/wl12xx/wl12xx.h8
-rw-r--r--drivers/net/wireless/ti/wl18xx/Kconfig7
-rw-r--r--drivers/net/wireless/ti/wl18xx/Makefile3
-rw-r--r--drivers/net/wireless/ti/wl18xx/acx.c111
-rw-r--r--drivers/net/wireless/ti/wl18xx/acx.h287
-rw-r--r--drivers/net/wireless/ti/wl18xx/conf.h92
-rw-r--r--drivers/net/wireless/ti/wl18xx/debugfs.c403
-rw-r--r--drivers/net/wireless/ti/wl18xx/debugfs.h28
-rw-r--r--drivers/net/wireless/ti/wl18xx/io.c75
-rw-r--r--drivers/net/wireless/ti/wl18xx/io.h28
-rw-r--r--drivers/net/wireless/ti/wl18xx/main.c1542
-rw-r--r--drivers/net/wireless/ti/wl18xx/reg.h191
-rw-r--r--drivers/net/wireless/ti/wl18xx/tx.c127
-rw-r--r--drivers/net/wireless/ti/wl18xx/tx.h46
-rw-r--r--drivers/net/wireless/ti/wl18xx/wl18xx.h88
-rw-r--r--drivers/net/wireless/ti/wlcore/acx.c16
-rw-r--r--drivers/net/wireless/ti/wlcore/acx.h259
-rw-r--r--drivers/net/wireless/ti/wlcore/boot.c139
-rw-r--r--drivers/net/wireless/ti/wlcore/boot.h1
-rw-r--r--drivers/net/wireless/ti/wlcore/cmd.c139
-rw-r--r--drivers/net/wireless/ti/wlcore/cmd.h34
-rw-r--r--drivers/net/wireless/ti/wlcore/conf.h99
-rw-r--r--drivers/net/wireless/ti/wlcore/debugfs.c643
-rw-r--r--drivers/net/wireless/ti/wlcore/debugfs.h87
-rw-r--r--drivers/net/wireless/ti/wlcore/event.c39
-rw-r--r--drivers/net/wireless/ti/wlcore/hw_ops.h90
-rw-r--r--drivers/net/wireless/ti/wlcore/ini.h22
-rw-r--r--drivers/net/wireless/ti/wlcore/init.c46
-rw-r--r--drivers/net/wireless/ti/wlcore/io.c55
-rw-r--r--drivers/net/wireless/ti/wlcore/io.h144
-rw-r--r--drivers/net/wireless/ti/wlcore/main.c812
-rw-r--r--drivers/net/wireless/ti/wlcore/ps.c37
-rw-r--r--drivers/net/wireless/ti/wlcore/rx.c50
-rw-r--r--drivers/net/wireless/ti/wlcore/rx.h15
-rw-r--r--drivers/net/wireless/ti/wlcore/scan.c52
-rw-r--r--drivers/net/wireless/ti/wlcore/scan.h19
-rw-r--r--drivers/net/wireless/ti/wlcore/sdio.c85
-rw-r--r--drivers/net/wireless/ti/wlcore/spi.c14
-rw-r--r--drivers/net/wireless/ti/wlcore/testmode.c14
-rw-r--r--drivers/net/wireless/ti/wlcore/tx.c274
-rw-r--r--drivers/net/wireless/ti/wlcore/tx.h53
-rw-r--r--drivers/net/wireless/ti/wlcore/wlcore.h94
-rw-r--r--drivers/net/wireless/ti/wlcore/wlcore_i.h (renamed from drivers/net/wireless/ti/wlcore/wl12xx.h)53
-rw-r--r--drivers/net/wireless/zd1211rw/zd_chip.h2
-rw-r--r--drivers/net/wireless/zd1211rw/zd_usb.h2
-rw-r--r--drivers/net/xen-netback/netback.c4
-rw-r--r--drivers/nfc/pn533.c662
-rw-r--r--drivers/nfc/pn544_hci.c10
-rw-r--r--drivers/of/of_mdio.c16
-rw-r--r--drivers/s390/net/qeth_l3_main.c3
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc.h4
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_fcoe.c44
-rw-r--r--drivers/scsi/bnx2i/57xx_iscsi_hsi.h16
-rw-r--r--drivers/scsi/bnx2i/bnx2i.h58
-rw-r--r--drivers/scsi/bnx2i/bnx2i_hwi.c35
-rw-r--r--drivers/scsi/bnx2i/bnx2i_init.c40
-rw-r--r--drivers/scsi/bnx2i/bnx2i_iscsi.c11
-rw-r--r--drivers/scsi/cxgbi/cxgb3i/cxgb3i.c3
-rw-r--r--drivers/scsi/cxgbi/cxgb4i/cxgb4i.c5
-rw-r--r--drivers/scsi/cxgbi/libcxgbi.c12
-rw-r--r--drivers/scsi/scsi_netlink.c7
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c9
-rw-r--r--drivers/ssb/b43_pci_bridge.c1
-rw-r--r--drivers/ssb/scan.c2
-rw-r--r--drivers/staging/gdm72xx/netlink_k.c16
-rw-r--r--include/linux/bcma/bcma.h7
-rw-r--r--include/linux/can.h70
-rw-r--r--include/linux/can/core.h4
-rw-r--r--include/linux/can/dev.h33
-rw-r--r--include/linux/can/error.h4
-rw-r--r--include/linux/can/raw.h3
-rw-r--r--include/linux/etherdevice.h11
-rw-r--r--include/linux/ethtool.h43
-rw-r--r--include/linux/ieee80211.h52
-rw-r--r--include/linux/if.h2
-rw-r--r--include/linux/if_ether.h3
-rw-r--r--include/linux/if_team.h25
-rw-r--r--include/linux/inetdevice.h2
-rw-r--r--include/linux/ks8851_mll.h33
-rw-r--r--include/linux/mdio.h28
-rw-r--r--include/linux/mii.h9
-rw-r--r--include/linux/mlx4/cmd.h4
-rw-r--r--include/linux/mlx4/device.h135
-rw-r--r--include/linux/netdevice.h11
-rw-r--r--include/linux/netfilter.h26
-rw-r--r--include/linux/netfilter/Kbuild1
-rw-r--r--include/linux/netfilter/nf_conntrack_sip.h2
-rw-r--r--include/linux/netfilter/nfnetlink.h3
-rw-r--r--include/linux/netfilter/nfnetlink_conntrack.h39
-rw-r--r--include/linux/netfilter/nfnetlink_cthelper.h55
-rw-r--r--include/linux/netfilter/nfnetlink_queue.h9
-rw-r--r--include/linux/netfilter/xt_connlimit.h9
-rw-r--r--include/linux/netfilter/xt_recent.h10
-rw-r--r--include/linux/netfilter_ipv4.h1
-rw-r--r--include/linux/netfilter_ipv4/Kbuild1
-rw-r--r--include/linux/netfilter_ipv4/ipt_addrtype.h27
-rw-r--r--include/linux/netfilter_ipv6.h1
-rw-r--r--include/linux/netlink.h24
-rw-r--r--include/linux/nfc.h12
-rw-r--r--include/linux/nl80211.h164
-rw-r--r--include/linux/nl802154.h14
-rw-r--r--include/linux/phy.h31
-rw-r--r--include/linux/pkt_cls.h5
-rw-r--r--include/linux/rtnetlink.h132
-rw-r--r--include/linux/sock_diag.h1
-rw-r--r--include/linux/spi/at86rf230.h31
-rw-r--r--include/linux/ssb/ssb.h1
-rw-r--r--include/linux/tcp.h2
-rw-r--r--include/linux/usb/usbnet.h5
-rw-r--r--include/net/af_unix.h3
-rw-r--r--include/net/arp.h28
-rw-r--r--include/net/bluetooth/a2mp.h126
-rw-r--r--include/net/bluetooth/bluetooth.h39
-rw-r--r--include/net/bluetooth/hci.h99
-rw-r--r--include/net/bluetooth/hci_core.h29
-rw-r--r--include/net/bluetooth/l2cap.h205
-rw-r--r--include/net/caif/caif_hsi.h71
-rw-r--r--include/net/cfg80211.h139
-rw-r--r--include/net/dn_route.h2
-rw-r--r--include/net/dst.h77
-rw-r--r--include/net/dst_ops.h4
-rw-r--r--include/net/fib_rules.h1
-rw-r--r--include/net/flow.h4
-rw-r--r--include/net/genetlink.h2
-rw-r--r--include/net/inet6_connection_sock.h1
-rw-r--r--include/net/inet_connection_sock.h4
-rw-r--r--include/net/inet_hashtables.h4
-rw-r--r--include/net/inet_sock.h2
-rw-r--r--include/net/inetpeer.h90
-rw-r--r--include/net/ip.h8
-rw-r--r--include/net/ip6_fib.h35
-rw-r--r--include/net/ip6_route.h29
-rw-r--r--include/net/ip6_tunnel.h2
-rw-r--r--include/net/ip_fib.h53
-rw-r--r--include/net/ipv6.h24
-rw-r--r--include/net/mac80211.h25
-rw-r--r--include/net/mac802154.h8
-rw-r--r--include/net/neighbour.h26
-rw-r--r--include/net/netevent.h4
-rw-r--r--include/net/netfilter/nf_conntrack.h35
-rw-r--r--include/net/netfilter/nf_conntrack_core.h4
-rw-r--r--include/net/netfilter/nf_conntrack_expect.h4
-rw-r--r--include/net/netfilter/nf_conntrack_extend.h9
-rw-r--r--include/net/netfilter/nf_conntrack_helper.h29
-rw-r--r--include/net/netfilter/nf_conntrack_l3proto.h11
-rw-r--r--include/net/netfilter/nf_conntrack_l4proto.h33
-rw-r--r--include/net/netfilter/nf_nat_helper.h4
-rw-r--r--include/net/netfilter/nfnetlink_queue.h43
-rw-r--r--include/net/netns/conntrack.h55
-rw-r--r--include/net/netns/ipv4.h13
-rw-r--r--include/net/netns/ipv6.h1
-rw-r--r--include/net/nfc/hci.h3
-rw-r--r--include/net/nfc/nfc.h14
-rw-r--r--include/net/nfc/shdlc.h3
-rw-r--r--include/net/protocol.h8
-rw-r--r--include/net/route.h25
-rw-r--r--include/net/sock.h5
-rw-r--r--include/net/tcp.h13
-rw-r--r--include/net/timewait_sock.h8
-rw-r--r--include/net/xfrm.h10
-rw-r--r--kernel/audit.c30
-rw-r--r--lib/kobject_uevent.c5
-rw-r--r--net/9p/client.c2
-rw-r--r--net/9p/trans_virtio.c2
-rw-r--r--net/appletalk/ddp.c8
-rw-r--r--net/atm/lec.c8
-rw-r--r--net/atm/pppoatm.c2
-rw-r--r--net/ax25/ax25_addr.c6
-rw-r--r--net/ax25/ax25_out.c2
-rw-r--r--net/ax25/ax25_route.c2
-rw-r--r--net/batman-adv/Makefile4
-rw-r--r--net/batman-adv/bat_algo.h6
-rw-r--r--net/batman-adv/bat_debugfs.c388
-rw-r--r--net/batman-adv/bat_iv_ogm.c1050
-rw-r--r--net/batman-adv/bat_sysfs.c735
-rw-r--r--net/batman-adv/bitarray.c65
-rw-r--r--net/batman-adv/bitarray.h24
-rw-r--r--net/batman-adv/bridge_loop_avoidance.c799
-rw-r--r--net/batman-adv/bridge_loop_avoidance.h76
-rw-r--r--net/batman-adv/debugfs.c409
-rw-r--r--net/batman-adv/debugfs.h (renamed from net/batman-adv/bat_debugfs.h)15
-rw-r--r--net/batman-adv/gateway_client.c354
-rw-r--r--net/batman-adv/gateway_client.h32
-rw-r--r--net/batman-adv/gateway_common.c61
-rw-r--r--net/batman-adv/gateway_common.h23
-rw-r--r--net/batman-adv/hard-interface.c342
-rw-r--r--net/batman-adv/hard-interface.h51
-rw-r--r--net/batman-adv/hash.c25
-rw-r--r--net/batman-adv/hash.h78
-rw-r--r--net/batman-adv/icmp_socket.c180
-rw-r--r--net/batman-adv/icmp_socket.h14
-rw-r--r--net/batman-adv/main.c276
-rw-r--r--net/batman-adv/main.h257
-rw-r--r--net/batman-adv/originator.c337
-rw-r--r--net/batman-adv/originator.h57
-rw-r--r--net/batman-adv/packet.h181
-rw-r--r--net/batman-adv/ring_buffer.c13
-rw-r--r--net/batman-adv/ring_buffer.h9
-rw-r--r--net/batman-adv/routing.c689
-rw-r--r--net/batman-adv/routing.h64
-rw-r--r--net/batman-adv/send.c237
-rw-r--r--net/batman-adv/send.h23
-rw-r--r--net/batman-adv/soft-interface.c304
-rw-r--r--net/batman-adv/soft-interface.h17
-rw-r--r--net/batman-adv/sysfs.c787
-rw-r--r--net/batman-adv/sysfs.h (renamed from net/batman-adv/bat_sysfs.h)24
-rw-r--r--net/batman-adv/translation-table.c1659
-rw-r--r--net/batman-adv/translation-table.h75
-rw-r--r--net/batman-adv/types.h183
-rw-r--r--net/batman-adv/unicast.c179
-rw-r--r--net/batman-adv/unicast.h34
-rw-r--r--net/batman-adv/vis.c728
-rw-r--r--net/batman-adv/vis.h26
-rw-r--r--net/bluetooth/Makefile3
-rw-r--r--net/bluetooth/a2mp.c568
-rw-r--r--net/bluetooth/af_bluetooth.c14
-rw-r--r--net/bluetooth/bnep/core.c21
-rw-r--r--net/bluetooth/bnep/netdev.c16
-rw-r--r--net/bluetooth/bnep/sock.c18
-rw-r--r--net/bluetooth/hci_conn.c98
-rw-r--r--net/bluetooth/hci_core.c214
-rw-r--r--net/bluetooth/hci_event.c309
-rw-r--r--net/bluetooth/hci_sock.c59
-rw-r--r--net/bluetooth/hci_sysfs.c99
-rw-r--r--net/bluetooth/hidp/core.c26
-rw-r--r--net/bluetooth/hidp/sock.c16
-rw-r--r--net/bluetooth/l2cap_core.c2125
-rw-r--r--net/bluetooth/l2cap_sock.c130
-rw-r--r--net/bluetooth/lib.c7
-rw-r--r--net/bluetooth/mgmt.c71
-rw-r--r--net/bluetooth/rfcomm/core.c32
-rw-r--r--net/bluetooth/rfcomm/sock.c21
-rw-r--r--net/bluetooth/rfcomm/tty.c9
-rw-r--r--net/bluetooth/sco.c43
-rw-r--r--net/bluetooth/smp.c7
-rw-r--r--net/bridge/br_netfilter.c68
-rw-r--r--net/bridge/netfilter/ebt_ulog.c29
-rw-r--r--net/caif/caif_dev.c8
-rw-r--r--net/caif/cfctrl.c17
-rw-r--r--net/can/af_can.c126
-rw-r--r--net/can/af_can.h3
-rw-r--r--net/can/proc.c3
-rw-r--r--net/can/raw.c50
-rw-r--r--net/ceph/pagelist.c14
-rw-r--r--net/core/datagram.c1
-rw-r--r--net/core/dev.c34
-rw-r--r--net/core/dst.c21
-rw-r--r--net/core/ethtool.c45
-rw-r--r--net/core/fib_rules.c4
-rw-r--r--net/core/neighbour.c31
-rw-r--r--net/core/net-sysfs.c74
-rw-r--r--net/core/rtnetlink.c28
-rw-r--r--net/core/skbuff.c9
-rw-r--r--net/core/sock.c5
-rw-r--r--net/core/sock_diag.c21
-rw-r--r--net/dcb/dcbnl.c1168
-rw-r--r--net/dccp/ackvec.h7
-rw-r--r--net/dccp/ccid.c1
-rw-r--r--net/dccp/ccids/ccid3.c8
-rw-r--r--net/dccp/ccids/lib/loss_interval.c1
-rw-r--r--net/dccp/ccids/lib/packet_history.c3
-rw-r--r--net/dccp/ccids/lib/tfrc_equation.c2
-rw-r--r--net/dccp/dccp.h1
-rw-r--r--net/dccp/feat.c10
-rw-r--r--net/dccp/input.c1
-rw-r--r--net/dccp/ipv4.c2
-rw-r--r--net/dccp/ipv6.c23
-rw-r--r--net/dccp/options.c1
-rw-r--r--net/dccp/output.c1
-rw-r--r--net/decnet/dn_fib.c8
-rw-r--r--net/decnet/dn_neigh.c8
-rw-r--r--net/decnet/dn_nsp_out.c2
-rw-r--r--net/decnet/dn_route.c124
-rw-r--r--net/decnet/dn_table.c76
-rw-r--r--net/decnet/netfilter/dn_rtmsg.c30
-rw-r--r--net/ethernet/eth.c5
-rw-r--r--net/ieee802154/6lowpan.c181
-rw-r--r--net/ieee802154/netlink.c4
-rw-r--r--net/ieee802154/nl-mac.c2
-rw-r--r--net/ieee802154/nl-phy.c2
-rw-r--r--net/ipv4/Makefile2
-rw-r--r--net/ipv4/af_inet.c46
-rw-r--r--net/ipv4/ah4.c1
-rw-r--r--net/ipv4/arp.c3
-rw-r--r--net/ipv4/devinet.c5
-rw-r--r--net/ipv4/esp4.c1
-rw-r--r--net/ipv4/fib_frontend.c117
-rw-r--r--net/ipv4/fib_rules.c24
-rw-r--r--net/ipv4/fib_semantics.c19
-rw-r--r--net/ipv4/fib_trie.c16
-rw-r--r--net/ipv4/icmp.c25
-rw-r--r--net/ipv4/inet_connection_sock.c8
-rw-r--r--net/ipv4/inet_diag.c125
-rw-r--r--net/ipv4/inet_fragment.c2
-rw-r--r--net/ipv4/inetpeer.c99
-rw-r--r--net/ipv4/ip_fragment.c6
-rw-r--r--net/ipv4/ip_gre.c14
-rw-r--r--net/ipv4/ip_input.c28
-rw-r--r--net/ipv4/ip_options.c26
-rw-r--r--net/ipv4/ip_output.c44
-rw-r--r--net/ipv4/ip_sockglue.c7
-rw-r--r--net/ipv4/ipcomp.c1
-rw-r--r--net/ipv4/ipip.c15
-rw-r--r--net/ipv4/ipmr.c32
-rw-r--r--net/ipv4/netfilter/ipt_ULOG.c23
-rw-r--r--net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c172
-rw-r--r--net/ipv4/netfilter/nf_conntrack_proto_icmp.c81
-rw-r--r--net/ipv4/netfilter/nf_defrag_ipv4.c4
-rw-r--r--net/ipv4/netfilter/nf_nat_amanda.c4
-rw-r--r--net/ipv4/netfilter/nf_nat_core.c6
-rw-r--r--net/ipv4/netfilter/nf_nat_h323.c8
-rw-r--r--net/ipv4/netfilter/nf_nat_helper.c13
-rw-r--r--net/ipv4/netfilter/nf_nat_pptp.c6
-rw-r--r--net/ipv4/netfilter/nf_nat_snmp_basic.c4
-rw-r--r--net/ipv4/netfilter/nf_nat_tftp.c4
-rw-r--r--net/ipv4/ping.c1
-rw-r--r--net/ipv4/protocol.c8
-rw-r--r--net/ipv4/raw.c3
-rw-r--r--net/ipv4/route.c677
-rw-r--r--net/ipv4/sysctl_net_ipv4.c7
-rw-r--r--net/ipv4/tcp.c5
-rw-r--r--net/ipv4/tcp_input.c219
-rw-r--r--net/ipv4/tcp_ipv4.c127
-rw-r--r--net/ipv4/tcp_metrics.c697
-rw-r--r--net/ipv4/tcp_minisocks.c56
-rw-r--r--net/ipv4/tcp_output.c20
-rw-r--r--net/ipv4/udp.c6
-rw-r--r--net/ipv4/xfrm4_policy.c9
-rw-r--r--net/ipv6/ah6.c3
-rw-r--r--net/ipv6/esp6.c2
-rw-r--r--net/ipv6/exthdrs.c4
-rw-r--r--net/ipv6/icmp.c21
-rw-r--r--net/ipv6/inet6_connection_sock.c26
-rw-r--r--net/ipv6/ip6_fib.c5
-rw-r--r--net/ipv6/ip6_input.c9
-rw-r--r--net/ipv6/ip6_output.c40
-rw-r--r--net/ipv6/ip6_tunnel.c67
-rw-r--r--net/ipv6/ip6mr.c5
-rw-r--r--net/ipv6/ipcomp6.c2
-rw-r--r--net/ipv6/ndisc.c10
-rw-r--r--net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c131
-rw-r--r--net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c51
-rw-r--r--net/ipv6/protocol.c8
-rw-r--r--net/ipv6/raw.c9
-rw-r--r--net/ipv6/route.c308
-rw-r--r--net/ipv6/sit.c15
-rw-r--r--net/ipv6/syncookies.c3
-rw-r--r--net/ipv6/tcp_ipv6.c131
-rw-r--r--net/ipv6/udp.c11
-rw-r--r--net/ipv6/xfrm6_policy.c11
-rw-r--r--net/irda/irqueue.c6
-rw-r--r--net/l2tp/l2tp_core.c11
-rw-r--r--net/l2tp/l2tp_eth.c15
-rw-r--r--net/l2tp/l2tp_netlink.c6
-rw-r--r--net/l2tp/l2tp_ppp.c8
-rw-r--r--net/llc/af_llc.c2
-rw-r--r--net/llc/llc_station.c16
-rw-r--r--net/mac80211/Kconfig56
-rw-r--r--net/mac80211/Makefile7
-rw-r--r--net/mac80211/agg-rx.c38
-rw-r--r--net/mac80211/agg-tx.c115
-rw-r--r--net/mac80211/cfg.c674
-rw-r--r--net/mac80211/chan.c4
-rw-r--r--net/mac80211/debug.h170
-rw-r--r--net/mac80211/debugfs_netdev.c49
-rw-r--r--net/mac80211/driver-ops.h17
-rw-r--r--net/mac80211/driver-trace.c9
-rw-r--r--net/mac80211/ht.c10
-rw-r--r--net/mac80211/ibss.c127
-rw-r--r--net/mac80211/ieee80211_i.h112
-rw-r--r--net/mac80211/iface.c61
-rw-r--r--net/mac80211/key.c4
-rw-r--r--net/mac80211/main.c28
-rw-r--r--net/mac80211/mesh.c19
-rw-r--r--net/mac80211/mesh.h4
-rw-r--r--net/mac80211/mesh_hwmp.c173
-rw-r--r--net/mac80211/mesh_pathtbl.c34
-rw-r--r--net/mac80211/mesh_plink.c66
-rw-r--r--net/mac80211/mesh_sync.c47
-rw-r--r--net/mac80211/mlme.c292
-rw-r--r--net/mac80211/offchannel.c289
-rw-r--r--net/mac80211/pm.c11
-rw-r--r--net/mac80211/rx.c81
-rw-r--r--net/mac80211/scan.c7
-rw-r--r--net/mac80211/sta_info.c45
-rw-r--r--net/mac80211/status.c41
-rw-r--r--net/mac80211/tkip.c46
-rw-r--r--net/mac80211/trace.c75
-rw-r--r--net/mac80211/trace.h (renamed from net/mac80211/driver-trace.h)67
-rw-r--r--net/mac80211/tx.c49
-rw-r--r--net/mac80211/util.c120
-rw-r--r--net/mac80211/wme.c11
-rw-r--r--net/mac80211/wme.h2
-rw-r--r--net/mac80211/work.c370
-rw-r--r--net/mac802154/Makefile2
-rw-r--r--net/mac802154/ieee802154_dev.c4
-rw-r--r--net/mac802154/mac802154.h8
-rw-r--r--net/mac802154/mac_cmd.c29
-rw-r--r--net/mac802154/mib.c92
-rw-r--r--net/mac802154/rx.c1
-rw-r--r--net/mac802154/tx.c2
-rw-r--r--net/mac802154/wpan.c559
-rw-r--r--net/netfilter/Kconfig21
-rw-r--r--net/netfilter/Makefile3
-rw-r--r--net/netfilter/core.c7
-rw-r--r--net/netfilter/ipvs/ip_vs_core.c24
-rw-r--r--net/netfilter/ipvs/ip_vs_xmit.c4
-rw-r--r--net/netfilter/nf_conntrack_core.c22
-rw-r--r--net/netfilter/nf_conntrack_extend.c16
-rw-r--r--net/netfilter/nf_conntrack_ftp.c11
-rw-r--r--net/netfilter/nf_conntrack_h323_main.c16
-rw-r--r--net/netfilter/nf_conntrack_helper.c38
-rw-r--r--net/netfilter/nf_conntrack_irc.c8
-rw-r--r--net/netfilter/nf_conntrack_netlink.c407
-rw-r--r--net/netfilter/nf_conntrack_pptp.c17
-rw-r--r--net/netfilter/nf_conntrack_proto.c300
-rw-r--r--net/netfilter/nf_conntrack_proto_dccp.c143
-rw-r--r--net/netfilter/nf_conntrack_proto_generic.c81
-rw-r--r--net/netfilter/nf_conntrack_proto_gre.c79
-rw-r--r--net/netfilter/nf_conntrack_proto_sctp.c175
-rw-r--r--net/netfilter/nf_conntrack_proto_tcp.c163
-rw-r--r--net/netfilter/nf_conntrack_proto_udp.c111
-rw-r--r--net/netfilter/nf_conntrack_proto_udplite.c127
-rw-r--r--net/netfilter/nf_conntrack_sane.c12
-rw-r--r--net/netfilter/nf_conntrack_sip.c32
-rw-r--r--net/netfilter/nf_conntrack_tftp.c8
-rw-r--r--net/netfilter/nfnetlink.c40
-rw-r--r--net/netfilter/nfnetlink_cthelper.c672
-rw-r--r--net/netfilter/nfnetlink_cttimeout.c13
-rw-r--r--net/netfilter/nfnetlink_log.c29
-rw-r--r--net/netfilter/nfnetlink_queue_core.c (renamed from net/netfilter/nfnetlink_queue.c)95
-rw-r--r--net/netfilter/nfnetlink_queue_ct.c98
-rw-r--r--net/netfilter/xt_CT.c44
-rw-r--r--net/netfilter/xt_NFQUEUE.c28
-rw-r--r--net/netfilter/xt_TPROXY.c4
-rw-r--r--net/netfilter/xt_connlimit.c35
-rw-r--r--net/netfilter/xt_recent.c62
-rw-r--r--net/netlink/af_netlink.c35
-rw-r--r--net/netlink/genetlink.c12
-rw-r--r--net/nfc/core.c119
-rw-r--r--net/nfc/hci/core.c13
-rw-r--r--net/nfc/hci/shdlc.c6
-rw-r--r--net/nfc/llcp/commands.c54
-rw-r--r--net/nfc/llcp/llcp.c421
-rw-r--r--net/nfc/llcp/llcp.h26
-rw-r--r--net/nfc/llcp/sock.c47
-rw-r--r--net/nfc/nci/core.c15
-rw-r--r--net/nfc/netlink.c95
-rw-r--r--net/nfc/nfc.h12
-rw-r--r--net/packet/af_packet.c29
-rw-r--r--net/rds/page.c9
-rw-r--r--net/rfkill/core.c2
-rw-r--r--net/rxrpc/ar-error.c4
-rw-r--r--net/rxrpc/ar-output.c2
-rw-r--r--net/sched/Kconfig10
-rw-r--r--net/sched/Makefile1
-rw-r--r--net/sched/act_api.c59
-rw-r--r--net/sched/cls_api.c12
-rw-r--r--net/sched/em_canid.c240
-rw-r--r--net/sched/sch_api.c24
-rw-r--r--net/sched/sch_teql.c47
-rw-r--r--net/sctp/output.c79
-rw-r--r--net/sunrpc/backchannel_rqst.c9
-rw-r--r--net/sunrpc/clnt.c2
-rw-r--r--net/sunrpc/svcsock.c12
-rw-r--r--net/sunrpc/xdr.c12
-rw-r--r--net/sunrpc/xprt.c2
-rw-r--r--net/sunrpc/xprtsock.c3
-rw-r--r--net/tipc/bcast.c10
-rw-r--r--net/tipc/bearer.c7
-rw-r--r--net/tipc/bearer.h2
-rw-r--r--net/tipc/link.c22
-rw-r--r--net/tipc/name_table.c10
-rw-r--r--net/tipc/port.c11
-rw-r--r--net/tipc/port.h1
-rw-r--r--net/tipc/socket.c7
-rw-r--r--net/unix/af_unix.c110
-rw-r--r--net/unix/diag.c106
-rw-r--r--net/wireless/Kconfig14
-rw-r--r--net/wireless/chan.c55
-rw-r--r--net/wireless/core.c63
-rw-r--r--net/wireless/core.h38
-rw-r--r--net/wireless/mesh.c97
-rw-r--r--net/wireless/mlme.c2
-rw-r--r--net/wireless/nl80211.c435
-rw-r--r--net/wireless/wext-compat.c21
-rw-r--r--net/wireless/wext-sme.c10
-rw-r--r--net/x25/x25_route.c2
-rw-r--r--net/xfrm/xfrm_policy.c9
-rw-r--r--net/xfrm/xfrm_user.c401
-rw-r--r--security/selinux/hooks.c10
-rw-r--r--security/selinux/netlink.c17
1012 files changed, 49784 insertions, 32650 deletions
diff --git a/Documentation/connector/cn_test.c b/Documentation/connector/cn_test.c
index 7764594778d..adcca0368d6 100644
--- a/Documentation/connector/cn_test.c
+++ b/Documentation/connector/cn_test.c
@@ -69,9 +69,13 @@ static int cn_test_want_notify(void)
69 return -ENOMEM; 69 return -ENOMEM;
70 } 70 }
71 71
72 nlh = NLMSG_PUT(skb, 0, 0x123, NLMSG_DONE, size - sizeof(*nlh)); 72 nlh = nlmsg_put(skb, 0, 0x123, NLMSG_DONE, size - sizeof(*nlh), 0);
73 if (!nlh) {
74 kfree_skb(skb);
75 return -EMSGSIZE;
76 }
73 77
74 msg = (struct cn_msg *)NLMSG_DATA(nlh); 78 msg = nlmsg_data(nlh);
75 79
76 memset(msg, 0, size0); 80 memset(msg, 0, size0);
77 81
@@ -117,11 +121,6 @@ static int cn_test_want_notify(void)
117 pr_info("request was sent: group=0x%x\n", ctl->group); 121 pr_info("request was sent: group=0x%x\n", ctl->group);
118 122
119 return 0; 123 return 0;
120
121nlmsg_failure:
122 pr_err("failed to send %u.%u\n", msg->seq, msg->ack);
123 kfree_skb(skb);
124 return -EINVAL;
125} 124}
126#endif 125#endif
127 126
diff --git a/Documentation/devicetree/bindings/net/broadcom-bcm87xx.txt b/Documentation/devicetree/bindings/net/broadcom-bcm87xx.txt
new file mode 100644
index 00000000000..7c86d5e28a0
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/broadcom-bcm87xx.txt
@@ -0,0 +1,29 @@
1The Broadcom BCM87XX devices are a family of 10G Ethernet PHYs. They
2have these bindings in addition to the standard PHY bindings.
3
4Compatible: Should contain "broadcom,bcm8706" or "broadcom,bcm8727" and
5 "ethernet-phy-ieee802.3-c45"
6
7Optional Properties:
8
9- broadcom,c45-reg-init : one or more sets of 4 cells. The first cell
10 is the MDIO Manageable Device (MMD) address, the second a register
11 address within the MMD, the third cell contains a mask to be ANDed
12 with the existing register value, and the fourth cell is ORed with
13 the result to yield the new register value. If the third cell has a
14 value of zero, no read of the existing value is performed.
15
16Example:
17
18 ethernet-phy@5 {
19 reg = <5>;
20 compatible = "broadcom,bcm8706", "ethernet-phy-ieee802.3-c45";
21 interrupt-parent = <&gpio>;
22 interrupts = <12 8>; /* Pin 12, active low */
23 /*
24 * Set PMD Digital Control Register for
25 * GPIO[1] Tx/Rx
26 * GPIO[0] R64 Sync Acquired
27 */
28 broadcom,c45-reg-init = <1 0xc808 0xff8f 0x70>;
29 };
diff --git a/Documentation/devicetree/bindings/net/can/fsl-flexcan.txt b/Documentation/devicetree/bindings/net/can/fsl-flexcan.txt
index f31b686d455..8ff324eaa88 100644
--- a/Documentation/devicetree/bindings/net/can/fsl-flexcan.txt
+++ b/Documentation/devicetree/bindings/net/can/fsl-flexcan.txt
@@ -11,6 +11,9 @@ Required properties:
11 11
12- reg : Offset and length of the register set for this device 12- reg : Offset and length of the register set for this device
13- interrupts : Interrupt tuple for this device 13- interrupts : Interrupt tuple for this device
14
15Optional properties:
16
14- clock-frequency : The oscillator frequency driving the flexcan device 17- clock-frequency : The oscillator frequency driving the flexcan device
15 18
16Example: 19Example:
diff --git a/Documentation/devicetree/bindings/net/fsl-fec.txt b/Documentation/devicetree/bindings/net/fsl-fec.txt
index 7ab9e1a2d8b..f7a2fefc8ef 100644
--- a/Documentation/devicetree/bindings/net/fsl-fec.txt
+++ b/Documentation/devicetree/bindings/net/fsl-fec.txt
@@ -7,10 +7,14 @@ Required properties:
7- phy-mode : String, operation mode of the PHY interface. 7- phy-mode : String, operation mode of the PHY interface.
8 Supported values are: "mii", "gmii", "sgmii", "tbi", "rmii", 8 Supported values are: "mii", "gmii", "sgmii", "tbi", "rmii",
9 "rgmii", "rgmii-id", "rgmii-rxid", "rgmii-txid", "rtbi", "smii". 9 "rgmii", "rgmii-id", "rgmii-rxid", "rgmii-txid", "rtbi", "smii".
10- phy-reset-gpios : Should specify the gpio for phy reset
11 10
12Optional properties: 11Optional properties:
13- local-mac-address : 6 bytes, mac address 12- local-mac-address : 6 bytes, mac address
13- phy-reset-gpios : Should specify the gpio for phy reset
14- phy-reset-duration : Reset duration in milliseconds. Should present
15 only if property "phy-reset-gpios" is available. Missing the property
16 will have the duration be 1 millisecond. Numbers greater than 1000 are
17 invalid and 1 millisecond will be used instead.
14 18
15Example: 19Example:
16 20
diff --git a/Documentation/devicetree/bindings/net/phy.txt b/Documentation/devicetree/bindings/net/phy.txt
index bb8c742eb8c..7cd18fbfcf7 100644
--- a/Documentation/devicetree/bindings/net/phy.txt
+++ b/Documentation/devicetree/bindings/net/phy.txt
@@ -14,10 +14,20 @@ Required properties:
14 - linux,phandle : phandle for this node; likely referenced by an 14 - linux,phandle : phandle for this node; likely referenced by an
15 ethernet controller node. 15 ethernet controller node.
16 16
17Optional Properties:
18
19- compatible: Compatible list, may contain
20 "ethernet-phy-ieee802.3-c22" or "ethernet-phy-ieee802.3-c45" for
21 PHYs that implement IEEE802.3 clause 22 or IEEE802.3 clause 45
22 specifications. If neither of these are specified, the default is to
23 assume clause 22. The compatible list may also contain other
24 elements.
25
17Example: 26Example:
18 27
19ethernet-phy@0 { 28ethernet-phy@0 {
20 linux,phandle = <2452000> 29 compatible = "ethernet-phy-ieee802.3-c22";
30 linux,phandle = <2452000>;
21 interrupt-parent = <40000>; 31 interrupt-parent = <40000>;
22 interrupts = <35 1>; 32 interrupts = <35 1>;
23 reg = <0>; 33 reg = <0>;
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index 56000b33340..61d1a89baea 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -249,15 +249,6 @@ Who: Ravikiran Thirumalai <kiran@scalex86.org>
249 249
250--------------------------- 250---------------------------
251 251
252What: Code that is now under CONFIG_WIRELESS_EXT_SYSFS
253 (in net/core/net-sysfs.c)
254When: 3.5
255Why: Over 1K .text/.data size reduction, data is available in other
256 ways (ioctls)
257Who: Johannes Berg <johannes@sipsolutions.net>
258
259---------------------------
260
261What: sysfs ui for changing p4-clockmod parameters 252What: sysfs ui for changing p4-clockmod parameters
262When: September 2009 253When: September 2009
263Why: See commits 129f8ae9b1b5be94517da76009ea956e89104ce8 and 254Why: See commits 129f8ae9b1b5be94517da76009ea956e89104ce8 and
@@ -414,21 +405,6 @@ Who: Jean Delvare <khali@linux-fr.org>
414 405
415---------------------------- 406----------------------------
416 407
417What: xt_connlimit rev 0
418When: 2012
419Who: Jan Engelhardt <jengelh@medozas.de>
420Files: net/netfilter/xt_connlimit.c
421
422----------------------------
423
424What: ipt_addrtype match include file
425When: 2012
426Why: superseded by xt_addrtype
427Who: Florian Westphal <fw@strlen.de>
428Files: include/linux/netfilter_ipv4/ipt_addrtype.h
429
430----------------------------
431
432What: i2c_driver.attach_adapter 408What: i2c_driver.attach_adapter
433 i2c_driver.detach_adapter 409 i2c_driver.detach_adapter
434When: September 2011 410When: September 2011
@@ -449,6 +425,19 @@ Who: Hans Verkuil <hans.verkuil@cisco.com>
449 425
450---------------------------- 426----------------------------
451 427
428What: CONFIG_CFG80211_WEXT
429When: as soon as distributions ship new wireless tools, i.e. wpa_supplicant 1.0
430 and NetworkManager/connman/etc. that are able to use nl80211
431Why: Wireless extensions are deprecated, and userland tools are moving to
432 using nl80211. New drivers are no longer using wireless extensions,
433 and while there might still be old drivers, both new drivers and new
434 userland no longer need them and they can't be used for any feature
435 developed in the past couple of years. As such, compatibility with
436 wireless extensions in new drivers will be removed.
437Who: Johannes Berg <johannes@sipsolutions.net>
438
439----------------------------
440
452What: g_file_storage driver 441What: g_file_storage driver
453When: 3.8 442When: 3.8
454Why: This driver has been superseded by g_mass_storage. 443Why: This driver has been superseded by g_mass_storage.
@@ -589,6 +578,13 @@ Why: Remount currently allows changing bound subsystems and
589 578
590---------------------------- 579----------------------------
591 580
581What: xt_recent rev 0
582When: 2013
583Who: Pablo Neira Ayuso <pablo@netfilter.org>
584Files: net/netfilter/xt_recent.c
585
586----------------------------
587
592What: KVM debugfs statistics 588What: KVM debugfs statistics
593When: 2013 589When: 2013
594Why: KVM tracepoints provide mostly equivalent information in a much more 590Why: KVM tracepoints provide mostly equivalent information in a much more
diff --git a/Documentation/networking/batman-adv.txt b/Documentation/networking/batman-adv.txt
index 75a592365af..8f3ae4a6147 100644
--- a/Documentation/networking/batman-adv.txt
+++ b/Documentation/networking/batman-adv.txt
@@ -211,6 +211,11 @@ The debug output can be changed at runtime using the file
211 211
212will enable debug messages for when routes change. 212will enable debug messages for when routes change.
213 213
214Counters for different types of packets entering and leaving the
215batman-adv module are available through ethtool:
216
217# ethtool --statistics bat0
218
214 219
215BATCTL 220BATCTL
216------ 221------
diff --git a/Documentation/networking/caif/Linux-CAIF.txt b/Documentation/networking/caif/Linux-CAIF.txt
index e52fd62bef3..0aa4bd381be 100644
--- a/Documentation/networking/caif/Linux-CAIF.txt
+++ b/Documentation/networking/caif/Linux-CAIF.txt
@@ -19,60 +19,36 @@ and host. Currently, UART and Loopback are available for Linux.
19Architecture: 19Architecture:
20------------ 20------------
21The implementation of CAIF is divided into: 21The implementation of CAIF is divided into:
22* CAIF Socket Layer, Kernel API, and Net Device. 22* CAIF Socket Layer and GPRS IP Interface.
23* CAIF Core Protocol Implementation 23* CAIF Core Protocol Implementation
24* CAIF Link Layer, implemented as NET devices. 24* CAIF Link Layer, implemented as NET devices.
25 25
26 26
27 RTNL 27 RTNL
28 ! 28 !
29 ! +------+ +------+ +------+ 29 ! +------+ +------+
30 ! +------+! +------+! +------+! 30 ! +------+! +------+!
31 ! ! Sock !! !Kernel!! ! Net !! 31 ! ! IP !! !Socket!!
32 ! ! API !+ ! API !+ ! Dev !+ <- CAIF Client APIs 32 +-------> !interf!+ ! API !+ <- CAIF Client APIs
33 ! +------+ +------! +------+ 33 ! +------+ +------!
34 ! ! ! ! 34 ! ! !
35 ! +----------!----------+ 35 ! +-----------+
36 ! +------+ <- CAIF Protocol Implementation 36 ! !
37 +-------> ! CAIF ! 37 ! +------+ <- CAIF Core Protocol
38 ! Core ! 38 ! ! CAIF !
39 +------+ 39 ! ! Core !
40 +--------!--------+ 40 ! +------+
41 ! ! 41 ! +----------!---------+
42 +------+ +-----+ 42 ! ! ! !
43 ! ! ! TTY ! <- Link Layer (Net Devices) 43 ! +------+ +-----+ +------+
44 +------+ +-----+ 44 +--> ! HSI ! ! TTY ! ! USB ! <- Link Layer (Net Devices)
45 45 +------+ +-----+ +------+
46 46
47Using the Kernel API
48----------------------
49The Kernel API is used for accessing CAIF channels from the
50kernel.
51The user of the API has to implement two callbacks for receive
52and control.
53The receive callback gives a CAIF packet as a SKB. The control
54callback will
55notify of channel initialization complete, and flow-on/flow-
56off.
57
58
59 struct caif_device caif_dev = {
60 .caif_config = {
61 .name = "MYDEV"
62 .type = CAIF_CHTY_AT
63 }
64 .receive_cb = my_receive,
65 .control_cb = my_control,
66 };
67 caif_add_device(&caif_dev);
68 caif_transmit(&caif_dev, skb);
69
70See the caif_kernel.h for details about the CAIF kernel API.
71 47
72 48
73I M P L E M E N T A T I O N 49I M P L E M E N T A T I O N
74=========================== 50===========================
75=========================== 51
76 52
77CAIF Core Protocol Layer 53CAIF Core Protocol Layer
78========================================= 54=========================================
@@ -88,17 +64,13 @@ The Core CAIF implementation contains:
88 - Simple implementation of CAIF. 64 - Simple implementation of CAIF.
89 - Layered architecture (a la Streams), each layer in the CAIF 65 - Layered architecture (a la Streams), each layer in the CAIF
90 specification is implemented in a separate c-file. 66 specification is implemented in a separate c-file.
91 - Clients must implement PHY layer to access physical HW
92 with receive and transmit functions.
93 - Clients must call configuration function to add PHY layer. 67 - Clients must call configuration function to add PHY layer.
94 - Clients must implement CAIF layer to consume/produce 68 - Clients must implement CAIF layer to consume/produce
95 CAIF payload with receive and transmit functions. 69 CAIF payload with receive and transmit functions.
96 - Clients must call configuration function to add and connect the 70 - Clients must call configuration function to add and connect the
97 Client layer. 71 Client layer.
98 - When receiving / transmitting CAIF Packets (cfpkt), ownership is passed 72 - When receiving / transmitting CAIF Packets (cfpkt), ownership is passed
99 to the called function (except for framing layers' receive functions 73 to the called function (except for framing layers' receive function)
100 or if a transmit function returns an error, in which case the caller
101 must free the packet).
102 74
103Layered Architecture 75Layered Architecture
104-------------------- 76--------------------
@@ -109,11 +81,6 @@ Implementation. The support functions include:
109 CAIF Packet has functions for creating, destroying and adding content 81 CAIF Packet has functions for creating, destroying and adding content
110 and for adding/extracting header and trailers to protocol packets. 82 and for adding/extracting header and trailers to protocol packets.
111 83
112 - CFLST CAIF list implementation.
113
114 - CFGLUE CAIF Glue. Contains OS Specifics, such as memory
115 allocation, endianness, etc.
116
117The CAIF Protocol implementation contains: 84The CAIF Protocol implementation contains:
118 85
119 - CFCNFG CAIF Configuration layer. Configures the CAIF Protocol 86 - CFCNFG CAIF Configuration layer. Configures the CAIF Protocol
@@ -128,7 +95,7 @@ The CAIF Protocol implementation contains:
128 control and remote shutdown requests. 95 control and remote shutdown requests.
129 96
130 - CFVEI CAIF VEI layer. Handles CAIF AT Channels on VEI (Virtual 97 - CFVEI CAIF VEI layer. Handles CAIF AT Channels on VEI (Virtual
131 External Interface). This layer encodes/decodes VEI frames. 98 External Interface). This layer encodes/decodes VEI frames.
132 99
133 - CFDGML CAIF Datagram layer. Handles CAIF Datagram layer (IP 100 - CFDGML CAIF Datagram layer. Handles CAIF Datagram layer (IP
134 traffic), encodes/decodes Datagram frames. 101 traffic), encodes/decodes Datagram frames.
@@ -170,7 +137,7 @@ The CAIF Protocol implementation contains:
170 +---------+ +---------+ 137 +---------+ +---------+
171 ! ! 138 ! !
172 +---------+ +---------+ 139 +---------+ +---------+
173 | | | Serial | 140 | | | Serial |
174 | | | CFSERL | 141 | | | CFSERL |
175 +---------+ +---------+ 142 +---------+ +---------+
176 143
@@ -186,24 +153,20 @@ In this layered approach the following "rules" apply.
186 layer->dn->transmit(layer->dn, packet); 153 layer->dn->transmit(layer->dn, packet);
187 154
188 155
189Linux Driver Implementation 156CAIF Socket and IP interface
190=========================== 157===========================
191 158
192Linux GPRS Net Device and CAIF socket are implemented on top of the 159The IP interface and CAIF socket API are implemented on top of the
193CAIF Core protocol. The Net device and CAIF socket have an instance of 160CAIF Core protocol. The IP Interface and CAIF socket have an instance of
194'struct cflayer', just like the CAIF Core protocol stack. 161'struct cflayer', just like the CAIF Core protocol stack.
195Net device and Socket implement the 'receive()' function defined by 162Net device and Socket implement the 'receive()' function defined by
196'struct cflayer', just like the rest of the CAIF stack. In this way, transmit and 163'struct cflayer', just like the rest of the CAIF stack. In this way, transmit and
197receive of packets is handled as by the rest of the layers: the 'dn->transmit()' 164receive of packets is handled as by the rest of the layers: the 'dn->transmit()'
198function is called in order to transmit data. 165function is called in order to transmit data.
199 166
200The layer on top of the CAIF Core implementation is
201sometimes referred to as the "Client layer".
202
203
204Configuration of Link Layer 167Configuration of Link Layer
205--------------------------- 168---------------------------
206The Link Layer is implemented as Linux net devices (struct net_device). 169The Link Layer is implemented as Linux network devices (struct net_device).
207Payload handling and registration is done using standard Linux mechanisms. 170Payload handling and registration is done using standard Linux mechanisms.
208 171
209The CAIF Protocol relies on a loss-less link layer without implementing 172The CAIF Protocol relies on a loss-less link layer without implementing
diff --git a/Documentation/networking/can.txt b/Documentation/networking/can.txt
index ac295399f0d..820f55344ed 100644
--- a/Documentation/networking/can.txt
+++ b/Documentation/networking/can.txt
@@ -22,7 +22,8 @@ This file contains
22 4.1.2 RAW socket option CAN_RAW_ERR_FILTER 22 4.1.2 RAW socket option CAN_RAW_ERR_FILTER
23 4.1.3 RAW socket option CAN_RAW_LOOPBACK 23 4.1.3 RAW socket option CAN_RAW_LOOPBACK
24 4.1.4 RAW socket option CAN_RAW_RECV_OWN_MSGS 24 4.1.4 RAW socket option CAN_RAW_RECV_OWN_MSGS
25 4.1.5 RAW socket returned message flags 25 4.1.5 RAW socket option CAN_RAW_FD_FRAMES
26 4.1.6 RAW socket returned message flags
26 4.2 Broadcast Manager protocol sockets (SOCK_DGRAM) 27 4.2 Broadcast Manager protocol sockets (SOCK_DGRAM)
27 4.3 connected transport protocols (SOCK_SEQPACKET) 28 4.3 connected transport protocols (SOCK_SEQPACKET)
28 4.4 unconnected transport protocols (SOCK_DGRAM) 29 4.4 unconnected transport protocols (SOCK_DGRAM)
@@ -41,7 +42,8 @@ This file contains
41 6.5.1 Netlink interface to set/get devices properties 42 6.5.1 Netlink interface to set/get devices properties
42 6.5.2 Setting the CAN bit-timing 43 6.5.2 Setting the CAN bit-timing
43 6.5.3 Starting and stopping the CAN network device 44 6.5.3 Starting and stopping the CAN network device
44 6.6 supported CAN hardware 45 6.6 CAN FD (flexible data rate) driver support
46 6.7 supported CAN hardware
45 47
46 7 Socket CAN resources 48 7 Socket CAN resources
47 49
@@ -232,16 +234,16 @@ solution for a couple of reasons:
232 arbitration problems and error frames caused by the different 234 arbitration problems and error frames caused by the different
233 ECUs. The occurrence of detected errors are important for diagnosis 235 ECUs. The occurrence of detected errors are important for diagnosis
234 and have to be logged together with the exact timestamp. For this 236 and have to be logged together with the exact timestamp. For this
235 reason the CAN interface driver can generate so called Error Frames 237 reason the CAN interface driver can generate so called Error Message
236 that can optionally be passed to the user application in the same 238 Frames that can optionally be passed to the user application in the
237 way as other CAN frames. Whenever an error on the physical layer 239 same way as other CAN frames. Whenever an error on the physical layer
238 or the MAC layer is detected (e.g. by the CAN controller) the driver 240 or the MAC layer is detected (e.g. by the CAN controller) the driver
239 creates an appropriate error frame. Error frames can 241 creates an appropriate error message frame. Error message frames can
240 the user application using the common CAN filter mechanisms. Inside 242 be requested by the user application using the common CAN filter
241 this filter definition the (interested) type of errors may be 243 mechanisms. Inside this filter definition the (interested) type of
242 selected. The reception of error frames is disabled by default. 244 errors may be selected. The reception of error messages is disabled
243 The format of the CAN error frame is briefly described in the Linux 245 by default. The format of the CAN error message frame is briefly
244 header file "include/linux/can/error.h". 246 described in the Linux header file "include/linux/can/error.h".
245 247
2464. How to use Socket CAN 2484. How to use Socket CAN
247------------------------ 249------------------------
@@ -273,7 +275,7 @@ solution for a couple of reasons:
273 275
274 struct can_frame { 276 struct can_frame {
275 canid_t can_id; /* 32 bit CAN_ID + EFF/RTR/ERR flags */ 277 canid_t can_id; /* 32 bit CAN_ID + EFF/RTR/ERR flags */
276 __u8 can_dlc; /* data length code: 0 .. 8 */ 278 __u8 can_dlc; /* frame payload length in byte (0 .. 8) */
277 __u8 data[8] __attribute__((aligned(8))); 279 __u8 data[8] __attribute__((aligned(8)));
278 }; 280 };
279 281
@@ -375,6 +377,51 @@ solution for a couple of reasons:
375 nbytes = sendto(s, &frame, sizeof(struct can_frame), 377 nbytes = sendto(s, &frame, sizeof(struct can_frame),
376 0, (struct sockaddr*)&addr, sizeof(addr)); 378 0, (struct sockaddr*)&addr, sizeof(addr));
377 379
380 Remark about CAN FD (flexible data rate) support:
381
382 Generally the handling of CAN FD is very similar to the formerly described
383 examples. The new CAN FD capable CAN controllers support two different
384 bitrates for the arbitration phase and the payload phase of the CAN FD frame
385 and up to 64 bytes of payload. This extended payload length breaks all the
386 kernel interfaces (ABI) which heavily rely on the CAN frame with fixed eight
387 bytes of payload (struct can_frame) like the CAN_RAW socket. Therefore e.g.
388 the CAN_RAW socket supports a new socket option CAN_RAW_FD_FRAMES that
389 switches the socket into a mode that allows the handling of CAN FD frames
390 and (legacy) CAN frames simultaneously (see section 4.1.5).
391
392 The struct canfd_frame is defined in include/linux/can.h:
393
394 struct canfd_frame {
395 canid_t can_id; /* 32 bit CAN_ID + EFF/RTR/ERR flags */
396 __u8 len; /* frame payload length in byte (0 .. 64) */
397 __u8 flags; /* additional flags for CAN FD */
398 __u8 __res0; /* reserved / padding */
399 __u8 __res1; /* reserved / padding */
400 __u8 data[64] __attribute__((aligned(8)));
401 };
402
403 The struct canfd_frame and the existing struct can_frame have the can_id,
404 the payload length and the payload data at the same offset inside their
405 structures. This allows handling the different structures in a very similar way.
406 When the content of a struct can_frame is copied into a struct canfd_frame
407 all structure elements can be used as-is - only the data[] becomes extended.
408
409 When introducing the struct canfd_frame it turned out that the data length
410 code (DLC) of the struct can_frame was used as a length information as the
411 length and the DLC has a 1:1 mapping in the range of 0 .. 8. To preserve
412 the easy handling of the length information the canfd_frame.len element
413 contains a plain length value from 0 .. 64. So both canfd_frame.len and
414 can_frame.can_dlc are equal and contain a length information and no DLC.
415 For details about the distinction of CAN and CAN FD capable devices and
416 the mapping to the bus-relevant data length code (DLC), see chapter 6.6.
417
418 The length of the two CAN(FD) frame structures define the maximum transfer
419 unit (MTU) of the CAN(FD) network interface and skbuff data length. Two
420 definitions are specified for CAN specific MTUs in include/linux/can.h :
421
422 #define CAN_MTU (sizeof(struct can_frame)) == 16 => 'legacy' CAN frame
423 #define CANFD_MTU (sizeof(struct canfd_frame)) == 72 => CAN FD frame
424
378 4.1 RAW protocol sockets with can_filters (SOCK_RAW) 425 4.1 RAW protocol sockets with can_filters (SOCK_RAW)
379 426
380 Using CAN_RAW sockets is extensively comparable to the commonly 427 Using CAN_RAW sockets is extensively comparable to the commonly
@@ -383,7 +430,7 @@ solution for a couple of reasons:
383 defaults are set at RAW socket binding time: 430 defaults are set at RAW socket binding time:
384 431
385 - The filters are set to exactly one filter receiving everything 432 - The filters are set to exactly one filter receiving everything
386 - The socket only receives valid data frames (=> no error frames) 433 - The socket only receives valid data frames (=> no error message frames)
387 - The loopback of sent CAN frames is enabled (see chapter 3.2) 434 - The loopback of sent CAN frames is enabled (see chapter 3.2)
388 - The socket does not receive its own sent frames (in loopback mode) 435 - The socket does not receive its own sent frames (in loopback mode)
389 436
@@ -434,7 +481,7 @@ solution for a couple of reasons:
434 4.1.2 RAW socket option CAN_RAW_ERR_FILTER 481 4.1.2 RAW socket option CAN_RAW_ERR_FILTER
435 482
436 As described in chapter 3.4 the CAN interface driver can generate so 483 As described in chapter 3.4 the CAN interface driver can generate so
437 called Error Frames that can optionally be passed to the user 484 called Error Message Frames that can optionally be passed to the user
438 application in the same way as other CAN frames. The possible 485 application in the same way as other CAN frames. The possible
439 errors are divided into different error classes that may be filtered 486 errors are divided into different error classes that may be filtered
440 using the appropriate error mask. To register for every possible 487 using the appropriate error mask. To register for every possible
@@ -472,7 +519,69 @@ solution for a couple of reasons:
472 setsockopt(s, SOL_CAN_RAW, CAN_RAW_RECV_OWN_MSGS, 519 setsockopt(s, SOL_CAN_RAW, CAN_RAW_RECV_OWN_MSGS,
473 &recv_own_msgs, sizeof(recv_own_msgs)); 520 &recv_own_msgs, sizeof(recv_own_msgs));
474 521
475 4.1.5 RAW socket returned message flags 522 4.1.5 RAW socket option CAN_RAW_FD_FRAMES
523
524 CAN FD support in CAN_RAW sockets can be enabled with a new socket option
525 CAN_RAW_FD_FRAMES which is off by default. When the new socket option is
526 not supported by the CAN_RAW socket (e.g. on older kernels), switching the
527 CAN_RAW_FD_FRAMES option returns the error -ENOPROTOOPT.
528
529 Once CAN_RAW_FD_FRAMES is enabled the application can send both CAN frames
530 and CAN FD frames. OTOH the application has to handle CAN and CAN FD frames
531 when reading from the socket.
532
533 CAN_RAW_FD_FRAMES enabled: CAN_MTU and CANFD_MTU are allowed
534 CAN_RAW_FD_FRAMES disabled: only CAN_MTU is allowed (default)
535
536 Example:
537 [ remember: CANFD_MTU == sizeof(struct canfd_frame) ]
538
539 struct canfd_frame cfd;
540
541 nbytes = read(s, &cfd, CANFD_MTU);
542
543 if (nbytes == CANFD_MTU) {
544 printf("got CAN FD frame with length %d\n", cfd.len);
545 /* cfd.flags contains valid data */
546 } else if (nbytes == CAN_MTU) {
547 printf("got legacy CAN frame with length %d\n", cfd.len);
548 /* cfd.flags is undefined */
549 } else {
550 fprintf(stderr, "read: invalid CAN(FD) frame\n");
551 return 1;
552 }
553
554 /* the content can be handled independently from the received MTU size */
555
556 printf("can_id: %X data length: %d data: ", cfd.can_id, cfd.len);
557 for (i = 0; i < cfd.len; i++)
558 printf("%02X ", cfd.data[i]);
559
560 When reading with size CANFD_MTU only returns CAN_MTU bytes that have
561 been received from the socket, a legacy CAN frame has been read into the
562 provided CAN FD structure. Note that the canfd_frame.flags data field is
563 not specified in the struct can_frame and therefore it is only valid in
564 CANFD_MTU sized CAN FD frames.
565
566 As long as the payload length is <=8 the received CAN frames from CAN FD
567 capable CAN devices can be received and read by legacy sockets too. When
567 user-generated CAN FD frames have a payload length <=8 these can be sent
569 by legacy CAN network interfaces too. Sending CAN FD frames with payload
570 length > 8 to a legacy CAN network interface returns an -EMSGSIZE error.
571
572 Implementation hint for new CAN applications:
573
574 To build a CAN FD aware application use struct canfd_frame as basic CAN
575 data structure for CAN_RAW based applications. When the application is
576 executed on an older Linux kernel and switching the CAN_RAW_FD_FRAMES
577 socket option returns an error: No problem. You'll get legacy CAN frames
578 or CAN FD frames and can process them the same way.
579
580 When sending to CAN devices make sure that the device is capable to handle
581 CAN FD frames by checking if the device maximum transfer unit is CANFD_MTU.
582 The CAN device MTU can be retrieved e.g. with a SIOCGIFMTU ioctl() syscall.
583
584 4.1.6 RAW socket returned message flags
476 585
477 When using recvmsg() call, the msg->msg_flags may contain following flags: 586 When using recvmsg() call, the msg->msg_flags may contain following flags:
478 587
@@ -527,7 +636,7 @@ solution for a couple of reasons:
527 636
528 rcvlist_all - list for unfiltered entries (no filter operations) 637 rcvlist_all - list for unfiltered entries (no filter operations)
529 rcvlist_eff - list for single extended frame (EFF) entries 638 rcvlist_eff - list for single extended frame (EFF) entries
530 rcvlist_err - list for error frames masks 639 rcvlist_err - list for error message frames masks
531 rcvlist_fil - list for mask/value filters 640 rcvlist_fil - list for mask/value filters
532 rcvlist_inv - list for mask/value filters (inverse semantic) 641 rcvlist_inv - list for mask/value filters (inverse semantic)
533 rcvlist_sff - list for single standard frame (SFF) entries 642 rcvlist_sff - list for single standard frame (SFF) entries
@@ -573,10 +682,13 @@ solution for a couple of reasons:
573 dev->type = ARPHRD_CAN; /* the netdevice hardware type */ 682 dev->type = ARPHRD_CAN; /* the netdevice hardware type */
574 dev->flags = IFF_NOARP; /* CAN has no arp */ 683 dev->flags = IFF_NOARP; /* CAN has no arp */
575 684
576 dev->mtu = sizeof(struct can_frame); 685 dev->mtu = CAN_MTU; /* sizeof(struct can_frame) -> legacy CAN interface */
577 686
578 The struct can_frame is the payload of each socket 687 or alternatively, when the controller supports CAN with flexible data rate:
579 protocol family PF_CAN. 688 dev->mtu = CANFD_MTU; /* sizeof(struct canfd_frame) -> CAN FD interface */
689
690 The struct can_frame or struct canfd_frame is the payload of each socket
691 buffer (skbuff) in the protocol family PF_CAN.
580 692
581 6.2 local loopback of sent frames 693 6.2 local loopback of sent frames
582 694
@@ -784,15 +896,41 @@ solution for a couple of reasons:
784 $ ip link set canX type can restart-ms 100 896 $ ip link set canX type can restart-ms 100
785 897
786 Alternatively, the application may realize the "bus-off" condition 898 Alternatively, the application may realize the "bus-off" condition
787 by monitoring CAN error frames and do a restart when appropriate with 899 by monitoring CAN error message frames and do a restart when
788 the command: 900 appropriate with the command:
789 901
790 $ ip link set canX type can restart 902 $ ip link set canX type can restart
791 903
792 Note that a restart will also create a CAN error frame (see also 904 Note that a restart will also create a CAN error message frame (see
793 chapter 3.4). 905 also chapter 3.4).
906
907 6.6 CAN FD (flexible data rate) driver support
908
909 CAN FD capable CAN controllers support two different bitrates for the
910 arbitration phase and the payload phase of the CAN FD frame. Therefore a
911 second bittiming has to be specified in order to enable the CAN FD bitrate.
912
913 Additionally CAN FD capable CAN controllers support up to 64 bytes of
914 payload. The representation of this length in can_frame.can_dlc and
915 canfd_frame.len for userspace applications and inside the Linux network
916 layer is a plain value from 0 .. 64 instead of the CAN 'data length code'.
917 The data length code was a 1:1 mapping to the payload length in the legacy
918 CAN frames anyway. The payload length to the bus-relevant DLC mapping is
919 only performed inside the CAN drivers, preferably with the helper
920 functions can_dlc2len() and can_len2dlc().
921
922 The CAN netdevice driver capabilities can be distinguished by the network
923 devices maximum transfer unit (MTU):
924
925 MTU = 16 (CAN_MTU) => sizeof(struct can_frame) => 'legacy' CAN device
926 MTU = 72 (CANFD_MTU) => sizeof(struct canfd_frame) => CAN FD capable device
927
928 The CAN device MTU can be retrieved e.g. with a SIOCGIFMTU ioctl() syscall.
929 N.B. CAN FD capable devices can also handle and send legacy CAN frames.
930
931 FIXME: Add details about the CAN FD controller configuration when available.
794 932
795 6.6 Supported CAN hardware 933 6.7 Supported CAN hardware
796 934
797 Please check the "Kconfig" file in "drivers/net/can" to get an actual 935 Please check the "Kconfig" file in "drivers/net/can" to get an actual
798list of the supported CAN hardware. On the Socket CAN project website 936list of the supported CAN hardware. On the Socket CAN project website
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index 6f896b94abd..47b6c79e9b0 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -857,9 +857,19 @@ accept_source_route - BOOLEAN
857 FALSE (host) 857 FALSE (host)
858 858
859accept_local - BOOLEAN 859accept_local - BOOLEAN
860 Accept packets with local source addresses. In combination with 860 Accept packets with local source addresses. In combination
861 suitable routing, this can be used to direct packets between two 861 with suitable routing, this can be used to direct packets
862 local interfaces over the wire and have them accepted properly. 862 between two local interfaces over the wire and have them
863 accepted properly.
864
865 rp_filter must be set to a non-zero value in order for
866 accept_local to have an effect.
867
868 default FALSE
869
870route_localnet - BOOLEAN
871 Do not consider loopback addresses as martian source or destination
872 while routing. This enables the use of 127/8 for local routing purposes.
863 default FALSE 873 default FALSE
864 874
865rp_filter - INTEGER 875rp_filter - INTEGER
diff --git a/Documentation/networking/s2io.txt b/Documentation/networking/s2io.txt
index 4be0c039edb..d2a9f43b554 100644
--- a/Documentation/networking/s2io.txt
+++ b/Documentation/networking/s2io.txt
@@ -136,16 +136,6 @@ For more information, please review the AMD8131 errata at
136http://vip.amd.com/us-en/assets/content_type/white_papers_and_tech_docs/ 136http://vip.amd.com/us-en/assets/content_type/white_papers_and_tech_docs/
13726310_AMD-8131_HyperTransport_PCI-X_Tunnel_Revision_Guide_rev_3_18.pdf 13726310_AMD-8131_HyperTransport_PCI-X_Tunnel_Revision_Guide_rev_3_18.pdf
138 138
1396. Available Downloads 1396. Support
140Neterion "s2io" driver in Red Hat and Suse 2.6-based distributions is kept up
141to date, also the latest "s2io" code (including support for 2.4 kernels) is
142available via "Support" link on the Neterion site: http://www.neterion.com.
143
144For Xframe User Guide (Programming manual), visit ftp site ns1.s2io.com,
145user: linuxdocs password: HALdocs
146
1477. Support
148For further support please contact either your 10GbE Xframe NIC vendor (IBM, 140For further support please contact either your 10GbE Xframe NIC vendor (IBM,
149HP, SGI etc.) or click on the "Support" link on the Neterion site: 141HP, SGI etc.)
150http://www.neterion.com.
151
diff --git a/Documentation/networking/stmmac.txt b/Documentation/networking/stmmac.txt
index 5cb9a197246..c676b9cedbd 100644
--- a/Documentation/networking/stmmac.txt
+++ b/Documentation/networking/stmmac.txt
@@ -257,9 +257,11 @@ reset procedure etc).
257 o Makefile 257 o Makefile
258 o stmmac_main.c: main network device driver; 258 o stmmac_main.c: main network device driver;
259 o stmmac_mdio.c: mdio functions; 259 o stmmac_mdio.c: mdio functions;
260 o stmmac_pci: PCI driver;
261 o stmmac_platform.c: platform driver
260 o stmmac_ethtool.c: ethtool support; 262 o stmmac_ethtool.c: ethtool support;
261 o stmmac_timer.[ch]: timer code used for mitigating the driver dma interrupts 263 o stmmac_timer.[ch]: timer code used for mitigating the driver dma interrupts
262 Only tested on ST40 platforms based. 264 (only tested on ST40 platforms based);
263 o stmmac.h: private driver structure; 265 o stmmac.h: private driver structure;
264 o common.h: common definitions and VFTs; 266 o common.h: common definitions and VFTs;
265 o descs.h: descriptor structure definitions; 267 o descs.h: descriptor structure definitions;
@@ -269,9 +271,11 @@ reset procedure etc).
269 o dwmac100_core: MAC 100 core and dma code; 271 o dwmac100_core: MAC 100 core and dma code;
270 o dwmac100_dma.c: dma functions for the MAC chip; 272 o dwmac100_dma.c: dma functions for the MAC chip;
271 o dwmac1000.h: specific header file for the MAC; 273 o dwmac1000.h: specific header file for the MAC;
272 o dwmac_lib.c: generic DMA functions shared among chips 274 o dwmac_lib.c: generic DMA functions shared among chips;
273 o enh_desc.c: functions for handling enhanced descriptors 275 o enh_desc.c: functions for handling enhanced descriptors;
274 o norm_desc.c: functions for handling normal descriptors 276 o norm_desc.c: functions for handling normal descriptors;
277 o chain_mode.c/ring_mode.c:: functions to manage RING/CHAINED modes;
278 o mmc_core.c/mmc.h: Management MAC Counters;
275 279
2765) Debug Information 2805) Debug Information
277 281
@@ -304,7 +308,27 @@ All these are only useful during the developing stage
304and should never be enabled inside the code for general usage. 308and should never be enabled inside the code for general usage.
305In fact, these can generate a huge amount of debug messages. 309In fact, these can generate a huge amount of debug messages.
306 310
3076) TODO: 3116) Energy Efficient Ethernet
312
313Energy Efficient Ethernet(EEE) enables IEEE 802.3 MAC sublayer along
314with a family of Physical layer to operate in the Low power Idle(LPI)
315mode. The EEE mode supports the IEEE 802.3 MAC operation at 100Mbps,
3161000Mbps & 10Gbps.
317
318The LPI mode allows power saving by switching off parts of the
319communication device functionality when there is no data to be
320transmitted & received. The system on both the side of the link can
321disable some functionalities & save power during the period of low-link
322utilization. The MAC controls whether the system should enter or exit
323the LPI mode & communicate this to PHY.
324
325As soon as the interface is opened, the driver verifies if the EEE can
326be supported. This is done by looking at both the DMA HW capability
327register and the PHY devices MCD registers.
328To enter in Tx LPI mode the driver needs to have a software timer
329that enable and disable the LPI mode when there is nothing to be
330transmitted.
331
3327) TODO:
308 o XGMAC is not supported. 333 o XGMAC is not supported.
309 o Add the EEE - Energy Efficient Ethernet
310 o Add the PTP - precision time protocol 334 o Add the PTP - precision time protocol
diff --git a/Documentation/networking/vxge.txt b/Documentation/networking/vxge.txt
index d2e2997e6fa..bb76c667a47 100644
--- a/Documentation/networking/vxge.txt
+++ b/Documentation/networking/vxge.txt
@@ -91,10 +91,3 @@ v) addr_learn_en
91 virtualization environment. 91 virtualization environment.
92 Valid range: 0,1 (disabled, enabled respectively) 92 Valid range: 0,1 (disabled, enabled respectively)
93 Default: 0 93 Default: 0
94
954) Troubleshooting:
96-------------------
97
98To resolve an issue with the source code or X3100 series adapter, please collect
99the statistics, register dumps using ethool, relevant logs and email them to
100support@neterion.com.
diff --git a/MAINTAINERS b/MAINTAINERS
index 03df1d15ebf..ce7398e1e1e 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -329,7 +329,7 @@ F: drivers/hwmon/adm1029.c
329 329
330ADM8211 WIRELESS DRIVER 330ADM8211 WIRELESS DRIVER
331L: linux-wireless@vger.kernel.org 331L: linux-wireless@vger.kernel.org
332W: http://linuxwireless.org/ 332W: http://wireless.kernel.org/
333S: Orphan 333S: Orphan
334F: drivers/net/wireless/adm8211.* 334F: drivers/net/wireless/adm8211.*
335 335
@@ -1423,7 +1423,7 @@ B43 WIRELESS DRIVER
1423M: Stefano Brivio <stefano.brivio@polimi.it> 1423M: Stefano Brivio <stefano.brivio@polimi.it>
1424L: linux-wireless@vger.kernel.org 1424L: linux-wireless@vger.kernel.org
1425L: b43-dev@lists.infradead.org 1425L: b43-dev@lists.infradead.org
1426W: http://linuxwireless.org/en/users/Drivers/b43 1426W: http://wireless.kernel.org/en/users/Drivers/b43
1427S: Maintained 1427S: Maintained
1428F: drivers/net/wireless/b43/ 1428F: drivers/net/wireless/b43/
1429 1429
@@ -1432,7 +1432,7 @@ M: Larry Finger <Larry.Finger@lwfinger.net>
1432M: Stefano Brivio <stefano.brivio@polimi.it> 1432M: Stefano Brivio <stefano.brivio@polimi.it>
1433L: linux-wireless@vger.kernel.org 1433L: linux-wireless@vger.kernel.org
1434L: b43-dev@lists.infradead.org 1434L: b43-dev@lists.infradead.org
1435W: http://linuxwireless.org/en/users/Drivers/b43 1435W: http://wireless.kernel.org/en/users/Drivers/b43
1436S: Maintained 1436S: Maintained
1437F: drivers/net/wireless/b43legacy/ 1437F: drivers/net/wireless/b43legacy/
1438 1438
@@ -1595,6 +1595,7 @@ M: Arend van Spriel <arend@broadcom.com>
1595M: Franky (Zhenhui) Lin <frankyl@broadcom.com> 1595M: Franky (Zhenhui) Lin <frankyl@broadcom.com>
1596M: Kan Yan <kanyan@broadcom.com> 1596M: Kan Yan <kanyan@broadcom.com>
1597L: linux-wireless@vger.kernel.org 1597L: linux-wireless@vger.kernel.org
1598L: brcm80211-dev-list@broadcom.com
1598S: Supported 1599S: Supported
1599F: drivers/net/wireless/brcm80211/ 1600F: drivers/net/wireless/brcm80211/
1600 1601
@@ -4351,7 +4352,7 @@ F: arch/m68k/hp300/
4351MAC80211 4352MAC80211
4352M: Johannes Berg <johannes@sipsolutions.net> 4353M: Johannes Berg <johannes@sipsolutions.net>
4353L: linux-wireless@vger.kernel.org 4354L: linux-wireless@vger.kernel.org
4354W: http://linuxwireless.org/ 4355W: http://wireless.kernel.org/
4355T: git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211.git 4356T: git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211.git
4356T: git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211-next.git 4357T: git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211-next.git
4357S: Maintained 4358S: Maintained
@@ -4363,7 +4364,7 @@ MAC80211 PID RATE CONTROL
4363M: Stefano Brivio <stefano.brivio@polimi.it> 4364M: Stefano Brivio <stefano.brivio@polimi.it>
4364M: Mattias Nissler <mattias.nissler@gmx.de> 4365M: Mattias Nissler <mattias.nissler@gmx.de>
4365L: linux-wireless@vger.kernel.org 4366L: linux-wireless@vger.kernel.org
4366W: http://linuxwireless.org/en/developers/Documentation/mac80211/RateControl/PID 4367W: http://wireless.kernel.org/en/developers/Documentation/mac80211/RateControl/PID
4367T: git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211.git 4368T: git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211.git
4368T: git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211-next.git 4369T: git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211-next.git
4369S: Maintained 4370S: Maintained
@@ -4637,8 +4638,6 @@ F: net/sched/sch_netem.c
4637NETERION 10GbE DRIVERS (s2io/vxge) 4638NETERION 10GbE DRIVERS (s2io/vxge)
4638M: Jon Mason <jdmason@kudzu.us> 4639M: Jon Mason <jdmason@kudzu.us>
4639L: netdev@vger.kernel.org 4640L: netdev@vger.kernel.org
4640W: http://trac.neterion.com/cgi-bin/trac.cgi/wiki/Linux?Anonymous
4641W: http://trac.neterion.com/cgi-bin/trac.cgi/wiki/X3100Linux?Anonymous
4642S: Supported 4641S: Supported
4643F: Documentation/networking/s2io.txt 4642F: Documentation/networking/s2io.txt
4644F: Documentation/networking/vxge.txt 4643F: Documentation/networking/vxge.txt
@@ -5048,7 +5047,7 @@ F: fs/ocfs2/
5048 5047
5049ORINOCO DRIVER 5048ORINOCO DRIVER
5050L: linux-wireless@vger.kernel.org 5049L: linux-wireless@vger.kernel.org
5051W: http://linuxwireless.org/en/users/Drivers/orinoco 5050W: http://wireless.kernel.org/en/users/Drivers/orinoco
5052W: http://www.nongnu.org/orinoco/ 5051W: http://www.nongnu.org/orinoco/
5053S: Orphan 5052S: Orphan
5054F: drivers/net/wireless/orinoco/ 5053F: drivers/net/wireless/orinoco/
@@ -5753,7 +5752,7 @@ F: net/rose/
5753RTL8180 WIRELESS DRIVER 5752RTL8180 WIRELESS DRIVER
5754M: "John W. Linville" <linville@tuxdriver.com> 5753M: "John W. Linville" <linville@tuxdriver.com>
5755L: linux-wireless@vger.kernel.org 5754L: linux-wireless@vger.kernel.org
5756W: http://linuxwireless.org/ 5755W: http://wireless.kernel.org/
5757T: git git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-testing.git 5756T: git git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-testing.git
5758S: Maintained 5757S: Maintained
5759F: drivers/net/wireless/rtl818x/rtl8180/ 5758F: drivers/net/wireless/rtl818x/rtl8180/
@@ -5763,7 +5762,7 @@ M: Herton Ronaldo Krzesinski <herton@canonical.com>
5763M: Hin-Tak Leung <htl10@users.sourceforge.net> 5762M: Hin-Tak Leung <htl10@users.sourceforge.net>
5764M: Larry Finger <Larry.Finger@lwfinger.net> 5763M: Larry Finger <Larry.Finger@lwfinger.net>
5765L: linux-wireless@vger.kernel.org 5764L: linux-wireless@vger.kernel.org
5766W: http://linuxwireless.org/ 5765W: http://wireless.kernel.org/
5767T: git git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-testing.git 5766T: git git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-testing.git
5768S: Maintained 5767S: Maintained
5769F: drivers/net/wireless/rtl818x/rtl8187/ 5768F: drivers/net/wireless/rtl818x/rtl8187/
@@ -5772,7 +5771,7 @@ RTL8192CE WIRELESS DRIVER
5772M: Larry Finger <Larry.Finger@lwfinger.net> 5771M: Larry Finger <Larry.Finger@lwfinger.net>
5773M: Chaoming Li <chaoming_li@realsil.com.cn> 5772M: Chaoming Li <chaoming_li@realsil.com.cn>
5774L: linux-wireless@vger.kernel.org 5773L: linux-wireless@vger.kernel.org
5775W: http://linuxwireless.org/ 5774W: http://wireless.kernel.org/
5776T: git git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-testing.git 5775T: git git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-testing.git
5777S: Maintained 5776S: Maintained
5778F: drivers/net/wireless/rtlwifi/ 5777F: drivers/net/wireless/rtlwifi/
diff --git a/arch/sparc/net/bpf_jit_comp.c b/arch/sparc/net/bpf_jit_comp.c
index 1a69244e785..e9073e9501b 100644
--- a/arch/sparc/net/bpf_jit_comp.c
+++ b/arch/sparc/net/bpf_jit_comp.c
@@ -96,6 +96,7 @@ static void bpf_flush_icache(void *start_, void *end_)
96#define AND F3(2, 0x01) 96#define AND F3(2, 0x01)
97#define ANDCC F3(2, 0x11) 97#define ANDCC F3(2, 0x11)
98#define OR F3(2, 0x02) 98#define OR F3(2, 0x02)
99#define XOR F3(2, 0x03)
99#define SUB F3(2, 0x04) 100#define SUB F3(2, 0x04)
100#define SUBCC F3(2, 0x14) 101#define SUBCC F3(2, 0x14)
101#define MUL F3(2, 0x0a) /* umul */ 102#define MUL F3(2, 0x0a) /* umul */
@@ -462,6 +463,9 @@ void bpf_jit_compile(struct sk_filter *fp)
462 case BPF_S_ALU_OR_K: /* A |= K */ 463 case BPF_S_ALU_OR_K: /* A |= K */
463 emit_alu_K(OR, K); 464 emit_alu_K(OR, K);
464 break; 465 break;
466 case BPF_S_ANC_ALU_XOR_X: /* A ^= X; */
467 emit_alu_X(XOR);
468 break;
465 case BPF_S_ALU_LSH_X: /* A <<= X */ 469 case BPF_S_ALU_LSH_X: /* A <<= X */
466 emit_alu_X(SLL); 470 emit_alu_X(SLL);
467 break; 471 break;
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 0597f95b6da..33643a8bcbb 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -309,6 +309,10 @@ void bpf_jit_compile(struct sk_filter *fp)
309 else 309 else
310 EMIT1_off32(0x0d, K); /* or imm32,%eax */ 310 EMIT1_off32(0x0d, K); /* or imm32,%eax */
311 break; 311 break;
312 case BPF_S_ANC_ALU_XOR_X: /* A ^= X; */
313 seen |= SEEN_XREG;
314 EMIT2(0x31, 0xd8); /* xor %ebx,%eax */
315 break;
312 case BPF_S_ALU_LSH_X: /* A <<= X; */ 316 case BPF_S_ALU_LSH_X: /* A <<= X; */
313 seen |= SEEN_XREG; 317 seen |= SEEN_XREG;
314 EMIT4(0x89, 0xd9, 0xd3, 0xe0); /* mov %ebx,%ecx; shl %cl,%eax */ 318 EMIT4(0x89, 0xd9, 0xd3, 0xe0); /* mov %ebx,%ecx; shl %cl,%eax */
diff --git a/crypto/crypto_user.c b/crypto/crypto_user.c
index 5a37eadb4e5..ba2c611154a 100644
--- a/crypto/crypto_user.c
+++ b/crypto/crypto_user.c
@@ -496,9 +496,12 @@ static void crypto_netlink_rcv(struct sk_buff *skb)
496 496
497static int __init crypto_user_init(void) 497static int __init crypto_user_init(void)
498{ 498{
499 struct netlink_kernel_cfg cfg = {
500 .input = crypto_netlink_rcv,
501 };
502
499 crypto_nlsk = netlink_kernel_create(&init_net, NETLINK_CRYPTO, 503 crypto_nlsk = netlink_kernel_create(&init_net, NETLINK_CRYPTO,
500 0, crypto_netlink_rcv, 504 THIS_MODULE, &cfg);
501 NULL, THIS_MODULE);
502 if (!crypto_nlsk) 505 if (!crypto_nlsk)
503 return -ENOMEM; 506 return -ENOMEM;
504 507
diff --git a/drivers/bcma/scan.c b/drivers/bcma/scan.c
index 5ed0718fc66..a3420585d94 100644
--- a/drivers/bcma/scan.c
+++ b/drivers/bcma/scan.c
@@ -28,6 +28,12 @@ static const struct bcma_device_id_name bcma_arm_device_names[] = {
28 28
29static const struct bcma_device_id_name bcma_bcm_device_names[] = { 29static const struct bcma_device_id_name bcma_bcm_device_names[] = {
30 { BCMA_CORE_OOB_ROUTER, "OOB Router" }, 30 { BCMA_CORE_OOB_ROUTER, "OOB Router" },
31 { BCMA_CORE_4706_CHIPCOMMON, "BCM4706 ChipCommon" },
32 { BCMA_CORE_4706_SOC_RAM, "BCM4706 SOC RAM" },
33 { BCMA_CORE_4706_MAC_GBIT, "BCM4706 GBit MAC" },
34 { BCMA_CORE_AMEMC, "AMEMC (DDR)" },
35 { BCMA_CORE_ALTA, "ALTA (I2S)" },
36 { BCMA_CORE_4706_MAC_GBIT_COMMON, "BCM4706 GBit MAC Common" },
31 { BCMA_CORE_INVALID, "Invalid" }, 37 { BCMA_CORE_INVALID, "Invalid" },
32 { BCMA_CORE_CHIPCOMMON, "ChipCommon" }, 38 { BCMA_CORE_CHIPCOMMON, "ChipCommon" },
33 { BCMA_CORE_ILINE20, "ILine 20" }, 39 { BCMA_CORE_ILINE20, "ILine 20" },
diff --git a/drivers/bluetooth/bluecard_cs.c b/drivers/bluetooth/bluecard_cs.c
index 1fcd9238035..585c88e0189 100644
--- a/drivers/bluetooth/bluecard_cs.c
+++ b/drivers/bluetooth/bluecard_cs.c
@@ -231,12 +231,12 @@ static void bluecard_write_wakeup(bluecard_info_t *info)
231 } 231 }
232 232
233 do { 233 do {
234 register unsigned int iobase = info->p_dev->resource[0]->start; 234 unsigned int iobase = info->p_dev->resource[0]->start;
235 register unsigned int offset; 235 unsigned int offset;
236 register unsigned char command; 236 unsigned char command;
237 register unsigned long ready_bit; 237 unsigned long ready_bit;
238 register struct sk_buff *skb; 238 register struct sk_buff *skb;
239 register int len; 239 int len;
240 240
241 clear_bit(XMIT_WAKEUP, &(info->tx_state)); 241 clear_bit(XMIT_WAKEUP, &(info->tx_state));
242 242
diff --git a/drivers/bluetooth/bpa10x.c b/drivers/bluetooth/bpa10x.c
index 609861a53c2..29caaed2d71 100644
--- a/drivers/bluetooth/bpa10x.c
+++ b/drivers/bluetooth/bpa10x.c
@@ -470,7 +470,7 @@ static int bpa10x_probe(struct usb_interface *intf, const struct usb_device_id *
470 hdev->flush = bpa10x_flush; 470 hdev->flush = bpa10x_flush;
471 hdev->send = bpa10x_send_frame; 471 hdev->send = bpa10x_send_frame;
472 472
473 set_bit(HCI_QUIRK_NO_RESET, &hdev->quirks); 473 set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks);
474 474
475 err = hci_register_dev(hdev); 475 err = hci_register_dev(hdev);
476 if (err < 0) { 476 if (err < 0) {
diff --git a/drivers/bluetooth/bt3c_cs.c b/drivers/bluetooth/bt3c_cs.c
index 308c8599ab5..b2b0fbbb43b 100644
--- a/drivers/bluetooth/bt3c_cs.c
+++ b/drivers/bluetooth/bt3c_cs.c
@@ -186,9 +186,9 @@ static void bt3c_write_wakeup(bt3c_info_t *info)
186 return; 186 return;
187 187
188 do { 188 do {
189 register unsigned int iobase = info->p_dev->resource[0]->start; 189 unsigned int iobase = info->p_dev->resource[0]->start;
190 register struct sk_buff *skb; 190 register struct sk_buff *skb;
191 register int len; 191 int len;
192 192
193 if (!pcmcia_dev_present(info->p_dev)) 193 if (!pcmcia_dev_present(info->p_dev))
194 break; 194 break;
diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c
index 0cd61d9f07c..cf7588edba0 100644
--- a/drivers/bluetooth/btmrvl_sdio.c
+++ b/drivers/bluetooth/btmrvl_sdio.c
@@ -110,6 +110,9 @@ static const struct sdio_device_id btmrvl_sdio_ids[] = {
110 /* Marvell SD8787 Bluetooth device */ 110 /* Marvell SD8787 Bluetooth device */
111 { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x911A), 111 { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x911A),
112 .driver_data = (unsigned long) &btmrvl_sdio_sd8787 }, 112 .driver_data = (unsigned long) &btmrvl_sdio_sd8787 },
113 /* Marvell SD8787 Bluetooth AMP device */
114 { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x911B),
115 .driver_data = (unsigned long) &btmrvl_sdio_sd8787 },
113 /* Marvell SD8797 Bluetooth device */ 116 /* Marvell SD8797 Bluetooth device */
114 { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x912A), 117 { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x912A),
115 .driver_data = (unsigned long) &btmrvl_sdio_sd8797 }, 118 .driver_data = (unsigned long) &btmrvl_sdio_sd8797 },
diff --git a/drivers/bluetooth/btuart_cs.c b/drivers/bluetooth/btuart_cs.c
index c4fc2f3fc32..65b8d996840 100644
--- a/drivers/bluetooth/btuart_cs.c
+++ b/drivers/bluetooth/btuart_cs.c
@@ -140,9 +140,9 @@ static void btuart_write_wakeup(btuart_info_t *info)
140 } 140 }
141 141
142 do { 142 do {
143 register unsigned int iobase = info->p_dev->resource[0]->start; 143 unsigned int iobase = info->p_dev->resource[0]->start;
144 register struct sk_buff *skb; 144 register struct sk_buff *skb;
145 register int len; 145 int len;
146 146
147 clear_bit(XMIT_WAKEUP, &(info->tx_state)); 147 clear_bit(XMIT_WAKEUP, &(info->tx_state));
148 148
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 83ebb241bfc..e2722141103 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -21,15 +21,7 @@
21 * 21 *
22 */ 22 */
23 23
24#include <linux/kernel.h>
25#include <linux/module.h> 24#include <linux/module.h>
26#include <linux/init.h>
27#include <linux/slab.h>
28#include <linux/types.h>
29#include <linux/sched.h>
30#include <linux/errno.h>
31#include <linux/skbuff.h>
32
33#include <linux/usb.h> 25#include <linux/usb.h>
34 26
35#include <net/bluetooth/bluetooth.h> 27#include <net/bluetooth/bluetooth.h>
@@ -1028,7 +1020,7 @@ static int btusb_probe(struct usb_interface *intf,
1028 data->isoc = usb_ifnum_to_if(data->udev, 1); 1020 data->isoc = usb_ifnum_to_if(data->udev, 1);
1029 1021
1030 if (!reset) 1022 if (!reset)
1031 set_bit(HCI_QUIRK_NO_RESET, &hdev->quirks); 1023 set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks);
1032 1024
1033 if (force_scofix || id->driver_info & BTUSB_WRONG_SCO_MTU) { 1025 if (force_scofix || id->driver_info & BTUSB_WRONG_SCO_MTU) {
1034 if (!disable_scofix) 1026 if (!disable_scofix)
@@ -1040,7 +1032,7 @@ static int btusb_probe(struct usb_interface *intf,
1040 1032
1041 if (id->driver_info & BTUSB_DIGIANSWER) { 1033 if (id->driver_info & BTUSB_DIGIANSWER) {
1042 data->cmdreq_type = USB_TYPE_VENDOR; 1034 data->cmdreq_type = USB_TYPE_VENDOR;
1043 set_bit(HCI_QUIRK_NO_RESET, &hdev->quirks); 1035 set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks);
1044 } 1036 }
1045 1037
1046 if (id->driver_info & BTUSB_CSR) { 1038 if (id->driver_info & BTUSB_CSR) {
@@ -1048,7 +1040,7 @@ static int btusb_probe(struct usb_interface *intf,
1048 1040
1049 /* Old firmware would otherwise execute USB reset */ 1041 /* Old firmware would otherwise execute USB reset */
1050 if (le16_to_cpu(udev->descriptor.bcdDevice) < 0x117) 1042 if (le16_to_cpu(udev->descriptor.bcdDevice) < 0x117)
1051 set_bit(HCI_QUIRK_NO_RESET, &hdev->quirks); 1043 set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks);
1052 } 1044 }
1053 1045
1054 if (id->driver_info & BTUSB_SNIFFER) { 1046 if (id->driver_info & BTUSB_SNIFFER) {
diff --git a/drivers/bluetooth/dtl1_cs.c b/drivers/bluetooth/dtl1_cs.c
index 6e8d9618968..b1b37ccd3cd 100644
--- a/drivers/bluetooth/dtl1_cs.c
+++ b/drivers/bluetooth/dtl1_cs.c
@@ -144,9 +144,9 @@ static void dtl1_write_wakeup(dtl1_info_t *info)
144 } 144 }
145 145
146 do { 146 do {
147 register unsigned int iobase = info->p_dev->resource[0]->start; 147 unsigned int iobase = info->p_dev->resource[0]->start;
148 register struct sk_buff *skb; 148 register struct sk_buff *skb;
149 register int len; 149 int len;
150 150
151 clear_bit(XMIT_WAKEUP, &(info->tx_state)); 151 clear_bit(XMIT_WAKEUP, &(info->tx_state));
152 152
diff --git a/drivers/bluetooth/hci_bcsp.c b/drivers/bluetooth/hci_bcsp.c
index 661a8dc4d2f..57e502e0608 100644
--- a/drivers/bluetooth/hci_bcsp.c
+++ b/drivers/bluetooth/hci_bcsp.c
@@ -552,7 +552,7 @@ static u16 bscp_get_crc(struct bcsp_struct *bcsp)
552static int bcsp_recv(struct hci_uart *hu, void *data, int count) 552static int bcsp_recv(struct hci_uart *hu, void *data, int count)
553{ 553{
554 struct bcsp_struct *bcsp = hu->priv; 554 struct bcsp_struct *bcsp = hu->priv;
555 register unsigned char *ptr; 555 unsigned char *ptr;
556 556
557 BT_DBG("hu %p count %d rx_state %d rx_count %ld", 557 BT_DBG("hu %p count %d rx_state %d rx_count %ld",
558 hu, count, bcsp->rx_state, bcsp->rx_count); 558 hu, count, bcsp->rx_state, bcsp->rx_count);
diff --git a/drivers/bluetooth/hci_h4.c b/drivers/bluetooth/hci_h4.c
index 748329468d2..c60623f206d 100644
--- a/drivers/bluetooth/hci_h4.c
+++ b/drivers/bluetooth/hci_h4.c
@@ -126,7 +126,7 @@ static int h4_enqueue(struct hci_uart *hu, struct sk_buff *skb)
126 126
127static inline int h4_check_data_len(struct h4_struct *h4, int len) 127static inline int h4_check_data_len(struct h4_struct *h4, int len)
128{ 128{
129 register int room = skb_tailroom(h4->rx_skb); 129 int room = skb_tailroom(h4->rx_skb);
130 130
131 BT_DBG("len %d room %d", len, room); 131 BT_DBG("len %d room %d", len, room);
132 132
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index e564579a611..2f9b796e106 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -394,7 +394,7 @@ static int hci_uart_register_dev(struct hci_uart *hu)
394 set_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks); 394 set_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks);
395 395
396 if (!test_bit(HCI_UART_RESET_ON_INIT, &hu->hdev_flags)) 396 if (!test_bit(HCI_UART_RESET_ON_INIT, &hu->hdev_flags))
397 set_bit(HCI_QUIRK_NO_RESET, &hdev->quirks); 397 set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks);
398 398
399 if (test_bit(HCI_UART_CREATE_AMP, &hu->hdev_flags)) 399 if (test_bit(HCI_UART_CREATE_AMP, &hu->hdev_flags))
400 hdev->dev_type = HCI_AMP; 400 hdev->dev_type = HCI_AMP;
diff --git a/drivers/bluetooth/hci_ll.c b/drivers/bluetooth/hci_ll.c
index b874c0efde2..ff6d589c34a 100644
--- a/drivers/bluetooth/hci_ll.c
+++ b/drivers/bluetooth/hci_ll.c
@@ -348,7 +348,7 @@ static int ll_enqueue(struct hci_uart *hu, struct sk_buff *skb)
348 348
349static inline int ll_check_data_len(struct ll_struct *ll, int len) 349static inline int ll_check_data_len(struct ll_struct *ll, int len)
350{ 350{
351 register int room = skb_tailroom(ll->rx_skb); 351 int room = skb_tailroom(ll->rx_skb);
352 352
353 BT_DBG("len %d room %d", len, room); 353 BT_DBG("len %d room %d", len, room);
354 354
@@ -374,11 +374,11 @@ static inline int ll_check_data_len(struct ll_struct *ll, int len)
374static int ll_recv(struct hci_uart *hu, void *data, int count) 374static int ll_recv(struct hci_uart *hu, void *data, int count)
375{ 375{
376 struct ll_struct *ll = hu->priv; 376 struct ll_struct *ll = hu->priv;
377 register char *ptr; 377 char *ptr;
378 struct hci_event_hdr *eh; 378 struct hci_event_hdr *eh;
379 struct hci_acl_hdr *ah; 379 struct hci_acl_hdr *ah;
380 struct hci_sco_hdr *sh; 380 struct hci_sco_hdr *sh;
381 register int len, type, dlen; 381 int len, type, dlen;
382 382
383 BT_DBG("hu %p count %d rx_state %ld rx_count %ld", hu, count, ll->rx_state, ll->rx_count); 383 BT_DBG("hu %p count %d rx_state %ld rx_count %ld", hu, count, ll->rx_state, ll->rx_count);
384 384
diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c
index dde6a0fad40..116cf8d0283 100644
--- a/drivers/connector/connector.c
+++ b/drivers/connector/connector.c
@@ -101,19 +101,19 @@ int cn_netlink_send(struct cn_msg *msg, u32 __group, gfp_t gfp_mask)
101 if (!skb) 101 if (!skb)
102 return -ENOMEM; 102 return -ENOMEM;
103 103
104 nlh = NLMSG_PUT(skb, 0, msg->seq, NLMSG_DONE, size - sizeof(*nlh)); 104 nlh = nlmsg_put(skb, 0, msg->seq, NLMSG_DONE, size - sizeof(*nlh), 0);
105 if (!nlh) {
106 kfree_skb(skb);
107 return -EMSGSIZE;
108 }
105 109
106 data = NLMSG_DATA(nlh); 110 data = nlmsg_data(nlh);
107 111
108 memcpy(data, msg, sizeof(*data) + msg->len); 112 memcpy(data, msg, sizeof(*data) + msg->len);
109 113
110 NETLINK_CB(skb).dst_group = group; 114 NETLINK_CB(skb).dst_group = group;
111 115
112 return netlink_broadcast(dev->nls, skb, 0, group, gfp_mask); 116 return netlink_broadcast(dev->nls, skb, 0, group, gfp_mask);
113
114nlmsg_failure:
115 kfree_skb(skb);
116 return -EINVAL;
117} 117}
118EXPORT_SYMBOL_GPL(cn_netlink_send); 118EXPORT_SYMBOL_GPL(cn_netlink_send);
119 119
@@ -251,15 +251,20 @@ static const struct file_operations cn_file_ops = {
251 .release = single_release 251 .release = single_release
252}; 252};
253 253
254static struct cn_dev cdev = {
255 .input = cn_rx_skb,
256};
257
254static int __devinit cn_init(void) 258static int __devinit cn_init(void)
255{ 259{
256 struct cn_dev *dev = &cdev; 260 struct cn_dev *dev = &cdev;
257 261 struct netlink_kernel_cfg cfg = {
258 dev->input = cn_rx_skb; 262 .groups = CN_NETLINK_USERS + 0xf,
263 .input = dev->input,
264 };
259 265
260 dev->nls = netlink_kernel_create(&init_net, NETLINK_CONNECTOR, 266 dev->nls = netlink_kernel_create(&init_net, NETLINK_CONNECTOR,
261 CN_NETLINK_USERS + 0xf, 267 THIS_MODULE, &cfg);
262 dev->input, NULL, THIS_MODULE);
263 if (!dev->nls) 268 if (!dev->nls)
264 return -EIO; 269 return -EIO;
265 270
diff --git a/drivers/ieee802154/Kconfig b/drivers/ieee802154/Kconfig
index 15c06407370..1fc4eefc20e 100644
--- a/drivers/ieee802154/Kconfig
+++ b/drivers/ieee802154/Kconfig
@@ -19,6 +19,7 @@ config IEEE802154_FAKEHARD
19 19
20 This driver can also be built as a module. To do so say M here. 20 This driver can also be built as a module. To do so say M here.
21 The module will be called 'fakehard'. 21 The module will be called 'fakehard'.
22
22config IEEE802154_FAKELB 23config IEEE802154_FAKELB
23 depends on IEEE802154_DRIVERS && MAC802154 24 depends on IEEE802154_DRIVERS && MAC802154
24 tristate "IEEE 802.15.4 loopback driver" 25 tristate "IEEE 802.15.4 loopback driver"
@@ -28,3 +29,8 @@ config IEEE802154_FAKELB
28 29
29 This driver can also be built as a module. To do so say M here. 30 This driver can also be built as a module. To do so say M here.
30 The module will be called 'fakelb'. 31 The module will be called 'fakelb'.
32
33config IEEE802154_AT86RF230
34 depends on IEEE802154_DRIVERS && MAC802154
35 tristate "AT86RF230/231 transceiver driver"
36 depends on SPI
diff --git a/drivers/ieee802154/Makefile b/drivers/ieee802154/Makefile
index ea784ea6f0f..4f4371d3aa7 100644
--- a/drivers/ieee802154/Makefile
+++ b/drivers/ieee802154/Makefile
@@ -1,2 +1,3 @@
1obj-$(CONFIG_IEEE802154_FAKEHARD) += fakehard.o 1obj-$(CONFIG_IEEE802154_FAKEHARD) += fakehard.o
2obj-$(CONFIG_IEEE802154_FAKELB) += fakelb.o 2obj-$(CONFIG_IEEE802154_FAKELB) += fakelb.o
3obj-$(CONFIG_IEEE802154_AT86RF230) += at86rf230.o
diff --git a/drivers/ieee802154/at86rf230.c b/drivers/ieee802154/at86rf230.c
new file mode 100644
index 00000000000..902e38bb382
--- /dev/null
+++ b/drivers/ieee802154/at86rf230.c
@@ -0,0 +1,964 @@
1/*
2 * AT86RF230/RF231 driver
3 *
4 * Copyright (C) 2009-2012 Siemens AG
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2
8 * as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Written by:
20 * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
21 * Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
22 */
23#include <linux/kernel.h>
24#include <linux/module.h>
25#include <linux/interrupt.h>
26#include <linux/gpio.h>
27#include <linux/delay.h>
28#include <linux/mutex.h>
29#include <linux/workqueue.h>
30#include <linux/spinlock.h>
31#include <linux/spi/spi.h>
32#include <linux/spi/at86rf230.h>
33#include <linux/skbuff.h>
34
35#include <net/mac802154.h>
36#include <net/wpan-phy.h>
37
38struct at86rf230_local {
39 struct spi_device *spi;
40 int rstn, slp_tr, dig2;
41
42 u8 part;
43 u8 vers;
44
45 u8 buf[2];
46 struct mutex bmux;
47
48 struct work_struct irqwork;
49 struct completion tx_complete;
50
51 struct ieee802154_dev *dev;
52
53 spinlock_t lock;
54 bool irq_disabled;
55 bool is_tx;
56};
57
58#define RG_TRX_STATUS (0x01)
59#define SR_TRX_STATUS 0x01, 0x1f, 0
60#define SR_RESERVED_01_3 0x01, 0x20, 5
61#define SR_CCA_STATUS 0x01, 0x40, 6
62#define SR_CCA_DONE 0x01, 0x80, 7
63#define RG_TRX_STATE (0x02)
64#define SR_TRX_CMD 0x02, 0x1f, 0
65#define SR_TRAC_STATUS 0x02, 0xe0, 5
66#define RG_TRX_CTRL_0 (0x03)
67#define SR_CLKM_CTRL 0x03, 0x07, 0
68#define SR_CLKM_SHA_SEL 0x03, 0x08, 3
69#define SR_PAD_IO_CLKM 0x03, 0x30, 4
70#define SR_PAD_IO 0x03, 0xc0, 6
71#define RG_TRX_CTRL_1 (0x04)
72#define SR_IRQ_POLARITY 0x04, 0x01, 0
73#define SR_IRQ_MASK_MODE 0x04, 0x02, 1
74#define SR_SPI_CMD_MODE 0x04, 0x0c, 2
75#define SR_RX_BL_CTRL 0x04, 0x10, 4
76#define SR_TX_AUTO_CRC_ON 0x04, 0x20, 5
77#define SR_IRQ_2_EXT_EN 0x04, 0x40, 6
78#define SR_PA_EXT_EN 0x04, 0x80, 7
79#define RG_PHY_TX_PWR (0x05)
80#define SR_TX_PWR 0x05, 0x0f, 0
81#define SR_PA_LT 0x05, 0x30, 4
82#define SR_PA_BUF_LT 0x05, 0xc0, 6
83#define RG_PHY_RSSI (0x06)
84#define SR_RSSI 0x06, 0x1f, 0
85#define SR_RND_VALUE 0x06, 0x60, 5
86#define SR_RX_CRC_VALID 0x06, 0x80, 7
87#define RG_PHY_ED_LEVEL (0x07)
88#define SR_ED_LEVEL 0x07, 0xff, 0
89#define RG_PHY_CC_CCA (0x08)
90#define SR_CHANNEL 0x08, 0x1f, 0
91#define SR_CCA_MODE 0x08, 0x60, 5
92#define SR_CCA_REQUEST 0x08, 0x80, 7
93#define RG_CCA_THRES (0x09)
94#define SR_CCA_ED_THRES 0x09, 0x0f, 0
95#define SR_RESERVED_09_1 0x09, 0xf0, 4
96#define RG_RX_CTRL (0x0a)
97#define SR_PDT_THRES 0x0a, 0x0f, 0
98#define SR_RESERVED_0a_1 0x0a, 0xf0, 4
99#define RG_SFD_VALUE (0x0b)
100#define SR_SFD_VALUE 0x0b, 0xff, 0
101#define RG_TRX_CTRL_2 (0x0c)
102#define SR_OQPSK_DATA_RATE 0x0c, 0x03, 0
103#define SR_RESERVED_0c_2 0x0c, 0x7c, 2
104#define SR_RX_SAFE_MODE 0x0c, 0x80, 7
105#define RG_ANT_DIV (0x0d)
106#define SR_ANT_CTRL 0x0d, 0x03, 0
107#define SR_ANT_EXT_SW_EN 0x0d, 0x04, 2
108#define SR_ANT_DIV_EN 0x0d, 0x08, 3
109#define SR_RESERVED_0d_2 0x0d, 0x70, 4
110#define SR_ANT_SEL 0x0d, 0x80, 7
111#define RG_IRQ_MASK (0x0e)
112#define SR_IRQ_MASK 0x0e, 0xff, 0
113#define RG_IRQ_STATUS (0x0f)
114#define SR_IRQ_0_PLL_LOCK 0x0f, 0x01, 0
115#define SR_IRQ_1_PLL_UNLOCK 0x0f, 0x02, 1
116#define SR_IRQ_2_RX_START 0x0f, 0x04, 2
117#define SR_IRQ_3_TRX_END 0x0f, 0x08, 3
118#define SR_IRQ_4_CCA_ED_DONE 0x0f, 0x10, 4
119#define SR_IRQ_5_AMI 0x0f, 0x20, 5
120#define SR_IRQ_6_TRX_UR 0x0f, 0x40, 6
121#define SR_IRQ_7_BAT_LOW 0x0f, 0x80, 7
122#define RG_VREG_CTRL (0x10)
123#define SR_RESERVED_10_6 0x10, 0x03, 0
124#define SR_DVDD_OK 0x10, 0x04, 2
125#define SR_DVREG_EXT 0x10, 0x08, 3
126#define SR_RESERVED_10_3 0x10, 0x30, 4
127#define SR_AVDD_OK 0x10, 0x40, 6
128#define SR_AVREG_EXT 0x10, 0x80, 7
129#define RG_BATMON (0x11)
130#define SR_BATMON_VTH 0x11, 0x0f, 0
131#define SR_BATMON_HR 0x11, 0x10, 4
132#define SR_BATMON_OK 0x11, 0x20, 5
133#define SR_RESERVED_11_1 0x11, 0xc0, 6
134#define RG_XOSC_CTRL (0x12)
135#define SR_XTAL_TRIM 0x12, 0x0f, 0
136#define SR_XTAL_MODE 0x12, 0xf0, 4
137#define RG_RX_SYN (0x15)
138#define SR_RX_PDT_LEVEL 0x15, 0x0f, 0
139#define SR_RESERVED_15_2 0x15, 0x70, 4
140#define SR_RX_PDT_DIS 0x15, 0x80, 7
141#define RG_XAH_CTRL_1 (0x17)
142#define SR_RESERVED_17_8 0x17, 0x01, 0
143#define SR_AACK_PROM_MODE 0x17, 0x02, 1
144#define SR_AACK_ACK_TIME 0x17, 0x04, 2
145#define SR_RESERVED_17_5 0x17, 0x08, 3
146#define SR_AACK_UPLD_RES_FT 0x17, 0x10, 4
147#define SR_AACK_FLTR_RES_FT 0x17, 0x20, 5
148#define SR_RESERVED_17_2 0x17, 0x40, 6
149#define SR_RESERVED_17_1 0x17, 0x80, 7
150#define RG_FTN_CTRL (0x18)
151#define SR_RESERVED_18_2 0x18, 0x7f, 0
152#define SR_FTN_START 0x18, 0x80, 7
153#define RG_PLL_CF (0x1a)
154#define SR_RESERVED_1a_2 0x1a, 0x7f, 0
155#define SR_PLL_CF_START 0x1a, 0x80, 7
156#define RG_PLL_DCU (0x1b)
157#define SR_RESERVED_1b_3 0x1b, 0x3f, 0
158#define SR_RESERVED_1b_2 0x1b, 0x40, 6
159#define SR_PLL_DCU_START 0x1b, 0x80, 7
160#define RG_PART_NUM (0x1c)
161#define SR_PART_NUM 0x1c, 0xff, 0
162#define RG_VERSION_NUM (0x1d)
163#define SR_VERSION_NUM 0x1d, 0xff, 0
164#define RG_MAN_ID_0 (0x1e)
165#define SR_MAN_ID_0 0x1e, 0xff, 0
166#define RG_MAN_ID_1 (0x1f)
167#define SR_MAN_ID_1 0x1f, 0xff, 0
168#define RG_SHORT_ADDR_0 (0x20)
169#define SR_SHORT_ADDR_0 0x20, 0xff, 0
170#define RG_SHORT_ADDR_1 (0x21)
171#define SR_SHORT_ADDR_1 0x21, 0xff, 0
172#define RG_PAN_ID_0 (0x22)
173#define SR_PAN_ID_0 0x22, 0xff, 0
174#define RG_PAN_ID_1 (0x23)
175#define SR_PAN_ID_1 0x23, 0xff, 0
176#define RG_IEEE_ADDR_0 (0x24)
177#define SR_IEEE_ADDR_0 0x24, 0xff, 0
178#define RG_IEEE_ADDR_1 (0x25)
179#define SR_IEEE_ADDR_1 0x25, 0xff, 0
180#define RG_IEEE_ADDR_2 (0x26)
181#define SR_IEEE_ADDR_2 0x26, 0xff, 0
182#define RG_IEEE_ADDR_3 (0x27)
183#define SR_IEEE_ADDR_3 0x27, 0xff, 0
184#define RG_IEEE_ADDR_4 (0x28)
185#define SR_IEEE_ADDR_4 0x28, 0xff, 0
186#define RG_IEEE_ADDR_5 (0x29)
187#define SR_IEEE_ADDR_5 0x29, 0xff, 0
188#define RG_IEEE_ADDR_6 (0x2a)
189#define SR_IEEE_ADDR_6 0x2a, 0xff, 0
190#define RG_IEEE_ADDR_7 (0x2b)
191#define SR_IEEE_ADDR_7 0x2b, 0xff, 0
192#define RG_XAH_CTRL_0 (0x2c)
193#define SR_SLOTTED_OPERATION 0x2c, 0x01, 0
194#define SR_MAX_CSMA_RETRIES 0x2c, 0x0e, 1
195#define SR_MAX_FRAME_RETRIES 0x2c, 0xf0, 4
196#define RG_CSMA_SEED_0 (0x2d)
197#define SR_CSMA_SEED_0 0x2d, 0xff, 0
198#define RG_CSMA_SEED_1 (0x2e)
199#define SR_CSMA_SEED_1 0x2e, 0x07, 0
200#define SR_AACK_I_AM_COORD 0x2e, 0x08, 3
201#define SR_AACK_DIS_ACK 0x2e, 0x10, 4
202#define SR_AACK_SET_PD 0x2e, 0x20, 5
203#define SR_AACK_FVN_MODE 0x2e, 0xc0, 6
204#define RG_CSMA_BE (0x2f)
205#define SR_MIN_BE 0x2f, 0x0f, 0
206#define SR_MAX_BE 0x2f, 0xf0, 4
207
208#define CMD_REG 0x80
209#define CMD_REG_MASK 0x3f
210#define CMD_WRITE 0x40
211#define CMD_FB 0x20
212
213#define IRQ_BAT_LOW (1 << 7)
214#define IRQ_TRX_UR (1 << 6)
215#define IRQ_AMI (1 << 5)
216#define IRQ_CCA_ED (1 << 4)
217#define IRQ_TRX_END (1 << 3)
218#define IRQ_RX_START (1 << 2)
219#define IRQ_PLL_UNL (1 << 1)
220#define IRQ_PLL_LOCK (1 << 0)
221
222#define STATE_P_ON 0x00 /* BUSY */
223#define STATE_BUSY_RX 0x01
224#define STATE_BUSY_TX 0x02
225#define STATE_FORCE_TRX_OFF 0x03
226#define STATE_FORCE_TX_ON 0x04 /* IDLE */
227/* 0x05 */ /* INVALID_PARAMETER */
228#define STATE_RX_ON 0x06
229/* 0x07 */ /* SUCCESS */
230#define STATE_TRX_OFF 0x08
231#define STATE_TX_ON 0x09
232/* 0x0a - 0x0e */ /* 0x0a - UNSUPPORTED_ATTRIBUTE */
233#define STATE_SLEEP 0x0F
234#define STATE_BUSY_RX_AACK 0x11
235#define STATE_BUSY_TX_ARET 0x12
236#define STATE_BUSY_RX_AACK_ON 0x16
237#define STATE_BUSY_TX_ARET_ON 0x19
238#define STATE_RX_ON_NOCLK 0x1C
239#define STATE_RX_AACK_ON_NOCLK 0x1D
240#define STATE_BUSY_RX_AACK_NOCLK 0x1E
241#define STATE_TRANSITION_IN_PROGRESS 0x1F
242
243static int
244__at86rf230_write(struct at86rf230_local *lp, u8 addr, u8 data)
245{
246 u8 *buf = lp->buf;
247 int status;
248 struct spi_message msg;
249 struct spi_transfer xfer = {
250 .len = 2,
251 .tx_buf = buf,
252 };
253
254 buf[0] = (addr & CMD_REG_MASK) | CMD_REG | CMD_WRITE;
255 buf[1] = data;
256 dev_vdbg(&lp->spi->dev, "buf[0] = %02x\n", buf[0]);
257 dev_vdbg(&lp->spi->dev, "buf[1] = %02x\n", buf[1]);
258 spi_message_init(&msg);
259 spi_message_add_tail(&xfer, &msg);
260
261 status = spi_sync(lp->spi, &msg);
262 dev_vdbg(&lp->spi->dev, "status = %d\n", status);
263 if (msg.status)
264 status = msg.status;
265
266 dev_vdbg(&lp->spi->dev, "status = %d\n", status);
267 dev_vdbg(&lp->spi->dev, "buf[0] = %02x\n", buf[0]);
268 dev_vdbg(&lp->spi->dev, "buf[1] = %02x\n", buf[1]);
269
270 return status;
271}
272
273static int
274__at86rf230_read_subreg(struct at86rf230_local *lp,
275 u8 addr, u8 mask, int shift, u8 *data)
276{
277 u8 *buf = lp->buf;
278 int status;
279 struct spi_message msg;
280 struct spi_transfer xfer = {
281 .len = 2,
282 .tx_buf = buf,
283 .rx_buf = buf,
284 };
285
286 buf[0] = (addr & CMD_REG_MASK) | CMD_REG;
287 buf[1] = 0xff;
288 dev_vdbg(&lp->spi->dev, "buf[0] = %02x\n", buf[0]);
289 spi_message_init(&msg);
290 spi_message_add_tail(&xfer, &msg);
291
292 status = spi_sync(lp->spi, &msg);
293 dev_vdbg(&lp->spi->dev, "status = %d\n", status);
294 if (msg.status)
295 status = msg.status;
296
297 dev_vdbg(&lp->spi->dev, "status = %d\n", status);
298 dev_vdbg(&lp->spi->dev, "buf[0] = %02x\n", buf[0]);
299 dev_vdbg(&lp->spi->dev, "buf[1] = %02x\n", buf[1]);
300
301 if (status == 0)
302 *data = buf[1];
303
304 return status;
305}
306
307static int
308at86rf230_read_subreg(struct at86rf230_local *lp,
309 u8 addr, u8 mask, int shift, u8 *data)
310{
311 int status;
312
313 mutex_lock(&lp->bmux);
314 status = __at86rf230_read_subreg(lp, addr, mask, shift, data);
315 mutex_unlock(&lp->bmux);
316
317 return status;
318}
319
320static int
321at86rf230_write_subreg(struct at86rf230_local *lp,
322 u8 addr, u8 mask, int shift, u8 data)
323{
324 int status;
325 u8 val;
326
327 mutex_lock(&lp->bmux);
328 status = __at86rf230_read_subreg(lp, addr, 0xff, 0, &val);
329 if (status)
330 goto out;
331
332 val &= ~mask;
333 val |= (data << shift) & mask;
334
335 status = __at86rf230_write(lp, addr, val);
336out:
337 mutex_unlock(&lp->bmux);
338
339 return status;
340}
341
342static int
343at86rf230_write_fbuf(struct at86rf230_local *lp, u8 *data, u8 len)
344{
345 u8 *buf = lp->buf;
346 int status;
347 struct spi_message msg;
348 struct spi_transfer xfer_head = {
349 .len = 2,
350 .tx_buf = buf,
351
352 };
353 struct spi_transfer xfer_buf = {
354 .len = len,
355 .tx_buf = data,
356 };
357
358 mutex_lock(&lp->bmux);
359 buf[0] = CMD_WRITE | CMD_FB;
360 buf[1] = len + 2; /* 2 bytes for CRC that isn't written */
361
362 dev_vdbg(&lp->spi->dev, "buf[0] = %02x\n", buf[0]);
363 dev_vdbg(&lp->spi->dev, "buf[1] = %02x\n", buf[1]);
364
365 spi_message_init(&msg);
366 spi_message_add_tail(&xfer_head, &msg);
367 spi_message_add_tail(&xfer_buf, &msg);
368
369 status = spi_sync(lp->spi, &msg);
370 dev_vdbg(&lp->spi->dev, "status = %d\n", status);
371 if (msg.status)
372 status = msg.status;
373
374 dev_vdbg(&lp->spi->dev, "status = %d\n", status);
375 dev_vdbg(&lp->spi->dev, "buf[0] = %02x\n", buf[0]);
376 dev_vdbg(&lp->spi->dev, "buf[1] = %02x\n", buf[1]);
377
378 mutex_unlock(&lp->bmux);
379 return status;
380}
381
382static int
383at86rf230_read_fbuf(struct at86rf230_local *lp, u8 *data, u8 *len, u8 *lqi)
384{
385 u8 *buf = lp->buf;
386 int status;
387 struct spi_message msg;
388 struct spi_transfer xfer_head = {
389 .len = 2,
390 .tx_buf = buf,
391 .rx_buf = buf,
392 };
393 struct spi_transfer xfer_head1 = {
394 .len = 2,
395 .tx_buf = buf,
396 .rx_buf = buf,
397 };
398 struct spi_transfer xfer_buf = {
399 .len = 0,
400 .rx_buf = data,
401 };
402
403 mutex_lock(&lp->bmux);
404
405 buf[0] = CMD_FB;
406 buf[1] = 0x00;
407
408 spi_message_init(&msg);
409 spi_message_add_tail(&xfer_head, &msg);
410
411 status = spi_sync(lp->spi, &msg);
412 dev_vdbg(&lp->spi->dev, "status = %d\n", status);
413
414 xfer_buf.len = *(buf + 1) + 1;
415 *len = buf[1];
416
417 buf[0] = CMD_FB;
418 buf[1] = 0x00;
419
420 spi_message_init(&msg);
421 spi_message_add_tail(&xfer_head1, &msg);
422 spi_message_add_tail(&xfer_buf, &msg);
423
424 status = spi_sync(lp->spi, &msg);
425
426 if (msg.status)
427 status = msg.status;
428
429 dev_vdbg(&lp->spi->dev, "status = %d\n", status);
430 dev_vdbg(&lp->spi->dev, "buf[0] = %02x\n", buf[0]);
431 dev_vdbg(&lp->spi->dev, "buf[1] = %02x\n", buf[1]);
432
433 if (status) {
434 if (lqi && (*len > lp->buf[1]))
435 *lqi = data[lp->buf[1]];
436 }
437 mutex_unlock(&lp->bmux);
438
439 return status;
440}
441
442static int
443at86rf230_ed(struct ieee802154_dev *dev, u8 *level)
444{
445 might_sleep();
446 BUG_ON(!level);
447 *level = 0xbe;
448 return 0;
449}
450
451static int
452at86rf230_state(struct ieee802154_dev *dev, int state)
453{
454 struct at86rf230_local *lp = dev->priv;
455 int rc;
456 u8 val;
457 u8 desired_status;
458
459 might_sleep();
460
461 if (state == STATE_FORCE_TX_ON)
462 desired_status = STATE_TX_ON;
463 else if (state == STATE_FORCE_TRX_OFF)
464 desired_status = STATE_TRX_OFF;
465 else
466 desired_status = state;
467
468 do {
469 rc = at86rf230_read_subreg(lp, SR_TRX_STATUS, &val);
470 if (rc)
471 goto err;
472 } while (val == STATE_TRANSITION_IN_PROGRESS);
473
474 if (val == desired_status)
475 return 0;
476
477 /* state is equal to phy states */
478 rc = at86rf230_write_subreg(lp, SR_TRX_CMD, state);
479 if (rc)
480 goto err;
481
482 do {
483 rc = at86rf230_read_subreg(lp, SR_TRX_STATUS, &val);
484 if (rc)
485 goto err;
486 } while (val == STATE_TRANSITION_IN_PROGRESS);
487
488
489 if (val == desired_status)
490 return 0;
491
492 pr_err("unexpected state change: %d, asked for %d\n", val, state);
493 return -EBUSY;
494
495err:
496 pr_err("error: %d\n", rc);
497 return rc;
498}
499
500static int
501at86rf230_start(struct ieee802154_dev *dev)
502{
503 struct at86rf230_local *lp = dev->priv;
504 u8 rc;
505
506 rc = at86rf230_write_subreg(lp, SR_RX_SAFE_MODE, 1);
507 if (rc)
508 return rc;
509
510 return at86rf230_state(dev, STATE_RX_ON);
511}
512
513static void
514at86rf230_stop(struct ieee802154_dev *dev)
515{
516 at86rf230_state(dev, STATE_FORCE_TRX_OFF);
517}
518
519static int
520at86rf230_channel(struct ieee802154_dev *dev, int page, int channel)
521{
522 struct at86rf230_local *lp = dev->priv;
523 int rc;
524
525 might_sleep();
526
527 if (page != 0 || channel < 11 || channel > 26) {
528 WARN_ON(1);
529 return -EINVAL;
530 }
531
532 rc = at86rf230_write_subreg(lp, SR_CHANNEL, channel);
533 msleep(1); /* Wait for PLL */
534 dev->phy->current_channel = channel;
535
536 return 0;
537}
538
/*
 * ieee802154 .xmit callback: transmit one frame synchronously.
 *
 * Sequence: force the radio into TX_ON, mark a TX in flight and re-arm
 * the tx_complete completion, upload the frame to the frame buffer,
 * trigger the transmission and wait (interruptibly) for the IRQ bottom
 * half to signal completion.  Every exit path turns the receiver back
 * on via at86rf230_start().
 *
 * Returns 0 on success or a negative errno (including -ERESTARTSYS if
 * the wait was interrupted by a signal).
 */
static int
at86rf230_xmit(struct ieee802154_dev *dev, struct sk_buff *skb)
{
	struct at86rf230_local *lp = dev->priv;
	int rc;
	unsigned long flags;

	might_sleep();

	rc = at86rf230_state(dev, STATE_FORCE_TX_ON);
	if (rc)
		goto err;

	/* Set is_tx before kicking the radio so the IRQ worker treats the
	 * next TRX_END as a TX-done event rather than a received frame. */
	spin_lock_irqsave(&lp->lock, flags);
	lp->is_tx = 1;
	INIT_COMPLETION(lp->tx_complete);
	spin_unlock_irqrestore(&lp->lock, flags);

	rc = at86rf230_write_fbuf(lp, skb->data, skb->len);
	if (rc)
		goto err_rx;

	rc = at86rf230_write_subreg(lp, SR_TRX_CMD, STATE_BUSY_TX);
	if (rc)
		goto err_rx;

	rc = wait_for_completion_interruptible(&lp->tx_complete);
	if (rc < 0)
		goto err_rx;

	rc = at86rf230_start(dev);

	return rc;

err_rx:
	/* Best-effort: put the receiver back on even though TX failed. */
	at86rf230_start(dev);
err:
	pr_err("error: %d\n", rc);

	spin_lock_irqsave(&lp->lock, flags);
	lp->is_tx = 0;
	spin_unlock_irqrestore(&lp->lock, flags);

	return rc;
}
584
585static int at86rf230_rx(struct at86rf230_local *lp)
586{
587 u8 len = 128, lqi = 0;
588 struct sk_buff *skb;
589
590 skb = alloc_skb(len, GFP_KERNEL);
591
592 if (!skb)
593 return -ENOMEM;
594
595 if (at86rf230_write_subreg(lp, SR_RX_PDT_DIS, 1) ||
596 at86rf230_read_fbuf(lp, skb_put(skb, len), &len, &lqi) ||
597 at86rf230_write_subreg(lp, SR_RX_SAFE_MODE, 1) ||
598 at86rf230_write_subreg(lp, SR_RX_PDT_DIS, 0)) {
599 goto err;
600 }
601
602 if (len < 2)
603 goto err;
604
605 skb_trim(skb, len - 2); /* We do not put CRC into the frame */
606
607 ieee802154_rx_irqsafe(lp->dev, skb, lqi);
608
609 dev_dbg(&lp->spi->dev, "READ_FBUF: %d %x\n", len, lqi);
610
611 return 0;
612err:
613 pr_debug("received frame is too small\n");
614
615 kfree_skb(skb);
616 return -EINVAL;
617}
618
/* Operations exported to the ieee802154 MAC layer. */
static struct ieee802154_ops at86rf230_ops = {
	.owner = THIS_MODULE,
	.xmit = at86rf230_xmit,
	.ed = at86rf230_ed,
	.set_channel = at86rf230_channel,
	.start = at86rf230_start,
	.stop = at86rf230_stop,
};
627
628static void at86rf230_irqwork(struct work_struct *work)
629{
630 struct at86rf230_local *lp =
631 container_of(work, struct at86rf230_local, irqwork);
632 u8 status = 0, val;
633 int rc;
634 unsigned long flags;
635
636 spin_lock_irqsave(&lp->lock, flags);
637 rc = at86rf230_read_subreg(lp, RG_IRQ_STATUS, 0xff, 0, &val);
638 status |= val;
639
640 status &= ~IRQ_PLL_LOCK; /* ignore */
641 status &= ~IRQ_RX_START; /* ignore */
642 status &= ~IRQ_AMI; /* ignore */
643 status &= ~IRQ_TRX_UR; /* FIXME: possibly handle ???*/
644
645 if (status & IRQ_TRX_END) {
646 status &= ~IRQ_TRX_END;
647 if (lp->is_tx) {
648 lp->is_tx = 0;
649 complete(&lp->tx_complete);
650 } else {
651 at86rf230_rx(lp);
652 }
653 }
654
655 if (lp->irq_disabled) {
656 lp->irq_disabled = 0;
657 enable_irq(lp->spi->irq);
658 }
659 spin_unlock_irqrestore(&lp->lock, flags);
660}
661
662static irqreturn_t at86rf230_isr(int irq, void *data)
663{
664 struct at86rf230_local *lp = data;
665
666 spin_lock(&lp->lock);
667 if (!lp->irq_disabled) {
668 disable_irq_nosync(irq);
669 lp->irq_disabled = 1;
670 }
671 spin_unlock(&lp->lock);
672
673 schedule_work(&lp->irqwork);
674
675 return IRQ_HANDLED;
676}
677
678
/*
 * One-time radio bring-up, called from probe after the reset pulse.
 *
 * Steps, in order: leave the P_ON power-on state for TRX_OFF if
 * necessary, unmask all interrupts, switch the CLKM clock output off,
 * command TX_ON, and finally check the SR_DVDD_OK / SR_AVDD_OK supply
 * status bits.  Returns 0 on success or a negative errno.
 */
static int at86rf230_hw_init(struct at86rf230_local *lp)
{
	u8 status;
	int rc;

	rc = at86rf230_read_subreg(lp, SR_TRX_STATUS, &status);
	if (rc)
		return rc;

	dev_info(&lp->spi->dev, "Status: %02x\n", status);
	if (status == STATE_P_ON) {
		rc = at86rf230_write_subreg(lp, SR_TRX_CMD, STATE_TRX_OFF);
		if (rc)
			return rc;
		msleep(1);
		rc = at86rf230_read_subreg(lp, SR_TRX_STATUS, &status);
		if (rc)
			return rc;
		dev_info(&lp->spi->dev, "Status: %02x\n", status);
	}

	rc = at86rf230_write_subreg(lp, SR_IRQ_MASK, 0xff); /* IRQ_TRX_UR |
							     * IRQ_CCA_ED |
							     * IRQ_TRX_END |
							     * IRQ_PLL_UNL |
							     * IRQ_PLL_LOCK
							     */
	if (rc)
		return rc;

	/* CLKM changes are applied immediately */
	rc = at86rf230_write_subreg(lp, SR_CLKM_SHA_SEL, 0x00);
	if (rc)
		return rc;

	/* Turn CLKM Off */
	rc = at86rf230_write_subreg(lp, SR_CLKM_CTRL, 0x00);
	if (rc)
		return rc;
	/* Wait the next SLEEP cycle */
	msleep(100);

	rc = at86rf230_write_subreg(lp, SR_TRX_CMD, STATE_TX_ON);
	if (rc)
		return rc;
	msleep(1);

	rc = at86rf230_read_subreg(lp, SR_TRX_STATUS, &status);
	if (rc)
		return rc;
	dev_info(&lp->spi->dev, "Status: %02x\n", status);

	/* Digital supply rail status; 0 means the rail is not OK. */
	rc = at86rf230_read_subreg(lp, SR_DVDD_OK, &status);
	if (rc)
		return rc;
	if (!status) {
		dev_err(&lp->spi->dev, "DVDD error\n");
		return -EINVAL;
	}

	/* Analog supply rail status; 0 means the rail is not OK. */
	rc = at86rf230_read_subreg(lp, SR_AVDD_OK, &status);
	if (rc)
		return rc;
	if (!status) {
		dev_err(&lp->spi->dev, "AVDD error\n");
		return -EINVAL;
	}

	return 0;
}
749
/* PM suspend hook: intentionally a no-op — the chip state is left untouched. */
static int at86rf230_suspend(struct spi_device *spi, pm_message_t message)
{
	return 0;
}
754
/* PM resume hook: intentionally a no-op — nothing was saved at suspend. */
static int at86rf230_resume(struct spi_device *spi)
{
	return 0;
}
759
760static int at86rf230_fill_data(struct spi_device *spi)
761{
762 struct at86rf230_local *lp = spi_get_drvdata(spi);
763 struct at86rf230_platform_data *pdata = spi->dev.platform_data;
764
765 if (!pdata) {
766 dev_err(&spi->dev, "no platform_data\n");
767 return -EINVAL;
768 }
769
770 lp->rstn = pdata->rstn;
771 lp->slp_tr = pdata->slp_tr;
772 lp->dig2 = pdata->dig2;
773
774 return 0;
775}
776
777static int __devinit at86rf230_probe(struct spi_device *spi)
778{
779 struct ieee802154_dev *dev;
780 struct at86rf230_local *lp;
781 u8 man_id_0, man_id_1;
782 int rc;
783 const char *chip;
784 int supported = 0;
785
786 if (!spi->irq) {
787 dev_err(&spi->dev, "no IRQ specified\n");
788 return -EINVAL;
789 }
790
791 dev = ieee802154_alloc_device(sizeof(*lp), &at86rf230_ops);
792 if (!dev)
793 return -ENOMEM;
794
795 lp = dev->priv;
796 lp->dev = dev;
797
798 lp->spi = spi;
799
800 dev->priv = lp;
801 dev->parent = &spi->dev;
802 dev->extra_tx_headroom = 0;
803 /* We do support only 2.4 Ghz */
804 dev->phy->channels_supported[0] = 0x7FFF800;
805 dev->flags = IEEE802154_HW_OMIT_CKSUM;
806
807 mutex_init(&lp->bmux);
808 INIT_WORK(&lp->irqwork, at86rf230_irqwork);
809 spin_lock_init(&lp->lock);
810 init_completion(&lp->tx_complete);
811
812 spi_set_drvdata(spi, lp);
813
814 rc = at86rf230_fill_data(spi);
815 if (rc)
816 goto err_fill;
817
818 rc = gpio_request(lp->rstn, "rstn");
819 if (rc)
820 goto err_rstn;
821
822 if (gpio_is_valid(lp->slp_tr)) {
823 rc = gpio_request(lp->slp_tr, "slp_tr");
824 if (rc)
825 goto err_slp_tr;
826 }
827
828 rc = gpio_direction_output(lp->rstn, 1);
829 if (rc)
830 goto err_gpio_dir;
831
832 if (gpio_is_valid(lp->slp_tr)) {
833 rc = gpio_direction_output(lp->slp_tr, 0);
834 if (rc)
835 goto err_gpio_dir;
836 }
837
838 /* Reset */
839 msleep(1);
840 gpio_set_value(lp->rstn, 0);
841 msleep(1);
842 gpio_set_value(lp->rstn, 1);
843 msleep(1);
844
845 rc = at86rf230_read_subreg(lp, SR_MAN_ID_0, &man_id_0);
846 if (rc)
847 goto err_gpio_dir;
848 rc = at86rf230_read_subreg(lp, SR_MAN_ID_1, &man_id_1);
849 if (rc)
850 goto err_gpio_dir;
851
852 if (man_id_1 != 0x00 || man_id_0 != 0x1f) {
853 dev_err(&spi->dev, "Non-Atmel dev found (MAN_ID %02x %02x)\n",
854 man_id_1, man_id_0);
855 rc = -EINVAL;
856 goto err_gpio_dir;
857 }
858
859 rc = at86rf230_read_subreg(lp, SR_PART_NUM, &lp->part);
860 if (rc)
861 goto err_gpio_dir;
862
863 rc = at86rf230_read_subreg(lp, SR_VERSION_NUM, &lp->vers);
864 if (rc)
865 goto err_gpio_dir;
866
867 switch (lp->part) {
868 case 2:
869 chip = "at86rf230";
870 /* supported = 1; FIXME: should be easy to support; */
871 break;
872 case 3:
873 chip = "at86rf231";
874 supported = 1;
875 break;
876 default:
877 chip = "UNKNOWN";
878 break;
879 }
880
881 dev_info(&spi->dev, "Detected %s chip version %d\n", chip, lp->vers);
882 if (!supported) {
883 rc = -ENOTSUPP;
884 goto err_gpio_dir;
885 }
886
887 rc = at86rf230_hw_init(lp);
888 if (rc)
889 goto err_gpio_dir;
890
891 rc = request_irq(spi->irq, at86rf230_isr, IRQF_SHARED,
892 dev_name(&spi->dev), lp);
893 if (rc)
894 goto err_gpio_dir;
895
896 rc = ieee802154_register_device(lp->dev);
897 if (rc)
898 goto err_irq;
899
900 return rc;
901
902 ieee802154_unregister_device(lp->dev);
903err_irq:
904 free_irq(spi->irq, lp);
905 flush_work(&lp->irqwork);
906err_gpio_dir:
907 if (gpio_is_valid(lp->slp_tr))
908 gpio_free(lp->slp_tr);
909err_slp_tr:
910 gpio_free(lp->rstn);
911err_rstn:
912err_fill:
913 spi_set_drvdata(spi, NULL);
914 mutex_destroy(&lp->bmux);
915 ieee802154_free_device(lp->dev);
916 return rc;
917}
918
/*
 * Teardown, strictly the inverse of probe: unregister from the stack
 * first (no new xmit/rx after this), release the IRQ and flush the
 * pending bottom-half work, free the GPIOs, then drop the drvdata and
 * free the device itself.  The statement order is load-bearing.
 */
static int __devexit at86rf230_remove(struct spi_device *spi)
{
	struct at86rf230_local *lp = spi_get_drvdata(spi);

	ieee802154_unregister_device(lp->dev);

	free_irq(spi->irq, lp);
	flush_work(&lp->irqwork);

	if (gpio_is_valid(lp->slp_tr))
		gpio_free(lp->slp_tr);
	gpio_free(lp->rstn);

	spi_set_drvdata(spi, NULL);
	mutex_destroy(&lp->bmux);
	ieee802154_free_device(lp->dev);

	dev_dbg(&spi->dev, "unregistered at86rf230\n");
	return 0;
}
939
/* SPI driver registration data; bound by device name "at86rf230". */
static struct spi_driver at86rf230_driver = {
	.driver = {
		.name = "at86rf230",
		.owner = THIS_MODULE,
	},
	.probe = at86rf230_probe,
	.remove = __devexit_p(at86rf230_remove),
	.suspend = at86rf230_suspend,
	.resume = at86rf230_resume,
};
950
/* Module entry point: register the SPI driver with the core. */
static int __init at86rf230_init(void)
{
	return spi_register_driver(&at86rf230_driver);
}
module_init(at86rf230_init);
956
/* Module exit point: unregister the SPI driver. */
static void __exit at86rf230_exit(void)
{
	spi_unregister_driver(&at86rf230_driver);
}
module_exit(at86rf230_exit);

MODULE_DESCRIPTION("AT86RF230 Transceiver Driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/infiniband/core/netlink.c b/drivers/infiniband/core/netlink.c
index e497dfbee43..3ae2bfd3101 100644
--- a/drivers/infiniband/core/netlink.c
+++ b/drivers/infiniband/core/netlink.c
@@ -108,12 +108,14 @@ void *ibnl_put_msg(struct sk_buff *skb, struct nlmsghdr **nlh, int seq,
108 unsigned char *prev_tail; 108 unsigned char *prev_tail;
109 109
110 prev_tail = skb_tail_pointer(skb); 110 prev_tail = skb_tail_pointer(skb);
111 *nlh = NLMSG_NEW(skb, 0, seq, RDMA_NL_GET_TYPE(client, op), 111 *nlh = nlmsg_put(skb, 0, seq, RDMA_NL_GET_TYPE(client, op),
112 len, NLM_F_MULTI); 112 len, NLM_F_MULTI);
113 if (!*nlh)
114 goto out_nlmsg_trim;
113 (*nlh)->nlmsg_len = skb_tail_pointer(skb) - prev_tail; 115 (*nlh)->nlmsg_len = skb_tail_pointer(skb) - prev_tail;
114 return NLMSG_DATA(*nlh); 116 return nlmsg_data(*nlh);
115 117
116nlmsg_failure: 118out_nlmsg_trim:
117 nlmsg_trim(skb, prev_tail); 119 nlmsg_trim(skb, prev_tail);
118 return NULL; 120 return NULL;
119} 121}
@@ -171,8 +173,11 @@ static void ibnl_rcv(struct sk_buff *skb)
171 173
172int __init ibnl_init(void) 174int __init ibnl_init(void)
173{ 175{
174 nls = netlink_kernel_create(&init_net, NETLINK_RDMA, 0, ibnl_rcv, 176 struct netlink_kernel_cfg cfg = {
175 NULL, THIS_MODULE); 177 .input = ibnl_rcv,
178 };
179
180 nls = netlink_kernel_create(&init_net, NETLINK_RDMA, THIS_MODULE, &cfg);
176 if (!nls) { 181 if (!nls) {
177 pr_warn("Failed to create netlink socket\n"); 182 pr_warn("Failed to create netlink socket\n");
178 return -ENOMEM; 183 return -ENOMEM;
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
index 740dcc065cf..77b6b182778 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -1374,7 +1374,7 @@ static int pass_accept_req(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
1374 goto reject; 1374 goto reject;
1375 } 1375 }
1376 dst = &rt->dst; 1376 dst = &rt->dst;
1377 l2t = t3_l2t_get(tdev, dst, NULL); 1377 l2t = t3_l2t_get(tdev, dst, NULL, &req->peer_ip);
1378 if (!l2t) { 1378 if (!l2t) {
1379 printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n", 1379 printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n",
1380 __func__); 1380 __func__);
@@ -1942,7 +1942,8 @@ int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
1942 goto fail3; 1942 goto fail3;
1943 } 1943 }
1944 ep->dst = &rt->dst; 1944 ep->dst = &rt->dst;
1945 ep->l2t = t3_l2t_get(ep->com.tdev, ep->dst, NULL); 1945 ep->l2t = t3_l2t_get(ep->com.tdev, ep->dst, NULL,
1946 &cm_id->remote_addr.sin_addr.s_addr);
1946 if (!ep->l2t) { 1947 if (!ep->l2t) {
1947 printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__); 1948 printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
1948 err = -ENOMEM; 1949 err = -ENOMEM;
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 3530c41fcd1..8a3a2037b00 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -718,26 +718,53 @@ int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
718 return ret; 718 return ret;
719} 719}
720 720
721struct mlx4_ib_steering {
722 struct list_head list;
723 u64 reg_id;
724 union ib_gid gid;
725};
726
721static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) 727static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
722{ 728{
723 int err; 729 int err;
724 struct mlx4_ib_dev *mdev = to_mdev(ibqp->device); 730 struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
725 struct mlx4_ib_qp *mqp = to_mqp(ibqp); 731 struct mlx4_ib_qp *mqp = to_mqp(ibqp);
732 u64 reg_id;
733 struct mlx4_ib_steering *ib_steering = NULL;
734
735 if (mdev->dev->caps.steering_mode ==
736 MLX4_STEERING_MODE_DEVICE_MANAGED) {
737 ib_steering = kmalloc(sizeof(*ib_steering), GFP_KERNEL);
738 if (!ib_steering)
739 return -ENOMEM;
740 }
726 741
727 err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw, 742 err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw, mqp->port,
728 !!(mqp->flags & MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK), 743 !!(mqp->flags &
729 MLX4_PROT_IB_IPV6); 744 MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
745 MLX4_PROT_IB_IPV6, &reg_id);
730 if (err) 746 if (err)
731 return err; 747 goto err_malloc;
732 748
733 err = add_gid_entry(ibqp, gid); 749 err = add_gid_entry(ibqp, gid);
734 if (err) 750 if (err)
735 goto err_add; 751 goto err_add;
736 752
753 if (ib_steering) {
754 memcpy(ib_steering->gid.raw, gid->raw, 16);
755 ib_steering->reg_id = reg_id;
756 mutex_lock(&mqp->mutex);
757 list_add(&ib_steering->list, &mqp->steering_rules);
758 mutex_unlock(&mqp->mutex);
759 }
737 return 0; 760 return 0;
738 761
739err_add: 762err_add:
740 mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw, MLX4_PROT_IB_IPV6); 763 mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
764 MLX4_PROT_IB_IPV6, reg_id);
765err_malloc:
766 kfree(ib_steering);
767
741 return err; 768 return err;
742} 769}
743 770
@@ -765,9 +792,30 @@ static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
765 u8 mac[6]; 792 u8 mac[6];
766 struct net_device *ndev; 793 struct net_device *ndev;
767 struct mlx4_ib_gid_entry *ge; 794 struct mlx4_ib_gid_entry *ge;
795 u64 reg_id = 0;
796
797 if (mdev->dev->caps.steering_mode ==
798 MLX4_STEERING_MODE_DEVICE_MANAGED) {
799 struct mlx4_ib_steering *ib_steering;
800
801 mutex_lock(&mqp->mutex);
802 list_for_each_entry(ib_steering, &mqp->steering_rules, list) {
803 if (!memcmp(ib_steering->gid.raw, gid->raw, 16)) {
804 list_del(&ib_steering->list);
805 break;
806 }
807 }
808 mutex_unlock(&mqp->mutex);
809 if (&ib_steering->list == &mqp->steering_rules) {
810 pr_err("Couldn't find reg_id for mgid. Steering rule is left attached\n");
811 return -EINVAL;
812 }
813 reg_id = ib_steering->reg_id;
814 kfree(ib_steering);
815 }
768 816
769 err = mlx4_multicast_detach(mdev->dev, 817 err = mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
770 &mqp->mqp, gid->raw, MLX4_PROT_IB_IPV6); 818 MLX4_PROT_IB_IPV6, reg_id);
771 if (err) 819 if (err)
772 return err; 820 return err;
773 821
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index ff36655d23d..42df4f7a6a5 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -163,6 +163,7 @@ struct mlx4_ib_qp {
163 u8 state; 163 u8 state;
164 int mlx_type; 164 int mlx_type;
165 struct list_head gid_list; 165 struct list_head gid_list;
166 struct list_head steering_rules;
166}; 167};
167 168
168struct mlx4_ib_srq { 169struct mlx4_ib_srq {
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 8d4ed24aef9..6af19f6c2b1 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -495,6 +495,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
495 spin_lock_init(&qp->sq.lock); 495 spin_lock_init(&qp->sq.lock);
496 spin_lock_init(&qp->rq.lock); 496 spin_lock_init(&qp->rq.lock);
497 INIT_LIST_HEAD(&qp->gid_list); 497 INIT_LIST_HEAD(&qp->gid_list);
498 INIT_LIST_HEAD(&qp->steering_rules);
498 499
499 qp->state = IB_QPS_RESET; 500 qp->state = IB_QPS_RESET;
500 if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) 501 if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 3974c290b66..bbee4b2d7a1 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -715,7 +715,7 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
715 715
716 rcu_read_lock(); 716 rcu_read_lock();
717 if (likely(skb_dst(skb))) { 717 if (likely(skb_dst(skb))) {
718 n = dst_get_neighbour_noref(skb_dst(skb)); 718 n = dst_neigh_lookup_skb(skb_dst(skb), skb);
719 if (!n) { 719 if (!n) {
720 ++dev->stats.tx_dropped; 720 ++dev->stats.tx_dropped;
721 dev_kfree_skb_any(skb); 721 dev_kfree_skb_any(skb);
@@ -797,6 +797,8 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
797 } 797 }
798 } 798 }
799unlock: 799unlock:
800 if (n)
801 neigh_release(n);
800 rcu_read_unlock(); 802 rcu_read_unlock();
801 return NETDEV_TX_OK; 803 return NETDEV_TX_OK;
802} 804}
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index 20ebc6fd1bb..7cecb16d3d4 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -658,9 +658,15 @@ static int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast)
658void ipoib_mcast_send(struct net_device *dev, void *mgid, struct sk_buff *skb) 658void ipoib_mcast_send(struct net_device *dev, void *mgid, struct sk_buff *skb)
659{ 659{
660 struct ipoib_dev_priv *priv = netdev_priv(dev); 660 struct ipoib_dev_priv *priv = netdev_priv(dev);
661 struct dst_entry *dst = skb_dst(skb);
661 struct ipoib_mcast *mcast; 662 struct ipoib_mcast *mcast;
663 struct neighbour *n;
662 unsigned long flags; 664 unsigned long flags;
663 665
666 n = NULL;
667 if (dst)
668 n = dst_neigh_lookup_skb(dst, skb);
669
664 spin_lock_irqsave(&priv->lock, flags); 670 spin_lock_irqsave(&priv->lock, flags);
665 671
666 if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags) || 672 if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags) ||
@@ -715,29 +721,28 @@ void ipoib_mcast_send(struct net_device *dev, void *mgid, struct sk_buff *skb)
715 721
716out: 722out:
717 if (mcast && mcast->ah) { 723 if (mcast && mcast->ah) {
718 struct dst_entry *dst = skb_dst(skb); 724 if (n) {
719 struct neighbour *n = NULL; 725 if (!*to_ipoib_neigh(n)) {
720 726 struct ipoib_neigh *neigh;
721 rcu_read_lock(); 727
722 if (dst) 728 neigh = ipoib_neigh_alloc(n, skb->dev);
723 n = dst_get_neighbour_noref(dst); 729 if (neigh) {
724 if (n && !*to_ipoib_neigh(n)) { 730 kref_get(&mcast->ah->ref);
725 struct ipoib_neigh *neigh = ipoib_neigh_alloc(n, 731 neigh->ah = mcast->ah;
726 skb->dev); 732 list_add_tail(&neigh->list,
727 733 &mcast->neigh_list);
728 if (neigh) { 734 }
729 kref_get(&mcast->ah->ref);
730 neigh->ah = mcast->ah;
731 list_add_tail(&neigh->list, &mcast->neigh_list);
732 } 735 }
736 neigh_release(n);
733 } 737 }
734 rcu_read_unlock();
735 spin_unlock_irqrestore(&priv->lock, flags); 738 spin_unlock_irqrestore(&priv->lock, flags);
736 ipoib_send(dev, skb, mcast->ah, IB_MULTICAST_QPN); 739 ipoib_send(dev, skb, mcast->ah, IB_MULTICAST_QPN);
737 return; 740 return;
738 } 741 }
739 742
740unlock: 743unlock:
744 if (n)
745 neigh_release(n);
741 spin_unlock_irqrestore(&priv->lock, flags); 746 spin_unlock_irqrestore(&priv->lock, flags);
742} 747}
743 748
diff --git a/drivers/net/appletalk/cops.c b/drivers/net/appletalk/cops.c
index dd5e04813b7..545c09ed907 100644
--- a/drivers/net/appletalk/cops.c
+++ b/drivers/net/appletalk/cops.c
@@ -936,7 +936,7 @@ static int cops_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
936{ 936{
937 struct cops_local *lp = netdev_priv(dev); 937 struct cops_local *lp = netdev_priv(dev);
938 struct sockaddr_at *sa = (struct sockaddr_at *)&ifr->ifr_addr; 938 struct sockaddr_at *sa = (struct sockaddr_at *)&ifr->ifr_addr;
939 struct atalk_addr *aa = (struct atalk_addr *)&lp->node_addr; 939 struct atalk_addr *aa = &lp->node_addr;
940 940
941 switch(cmd) 941 switch(cmd)
942 { 942 {
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index 3463b469e65..a030e635f00 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -2454,24 +2454,27 @@ int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev)
2454out: 2454out:
2455 if (res) { 2455 if (res) {
2456 /* no suitable interface, frame not sent */ 2456 /* no suitable interface, frame not sent */
2457 dev_kfree_skb(skb); 2457 kfree_skb(skb);
2458 } 2458 }
2459 2459
2460 return NETDEV_TX_OK; 2460 return NETDEV_TX_OK;
2461} 2461}
2462 2462
2463int bond_3ad_lacpdu_recv(struct sk_buff *skb, struct bonding *bond, 2463int bond_3ad_lacpdu_recv(const struct sk_buff *skb, struct bonding *bond,
2464 struct slave *slave) 2464 struct slave *slave)
2465{ 2465{
2466 int ret = RX_HANDLER_ANOTHER; 2466 int ret = RX_HANDLER_ANOTHER;
2467 struct lacpdu *lacpdu, _lacpdu;
2468
2467 if (skb->protocol != PKT_TYPE_LACPDU) 2469 if (skb->protocol != PKT_TYPE_LACPDU)
2468 return ret; 2470 return ret;
2469 2471
2470 if (!pskb_may_pull(skb, sizeof(struct lacpdu))) 2472 lacpdu = skb_header_pointer(skb, 0, sizeof(_lacpdu), &_lacpdu);
2473 if (!lacpdu)
2471 return ret; 2474 return ret;
2472 2475
2473 read_lock(&bond->lock); 2476 read_lock(&bond->lock);
2474 ret = bond_3ad_rx_indication((struct lacpdu *) skb->data, slave, skb->len); 2477 ret = bond_3ad_rx_indication(lacpdu, slave, skb->len);
2475 read_unlock(&bond->lock); 2478 read_unlock(&bond->lock);
2476 return ret; 2479 return ret;
2477} 2480}
diff --git a/drivers/net/bonding/bond_3ad.h b/drivers/net/bonding/bond_3ad.h
index 5ee7e3c45db..0cfaa4afdec 100644
--- a/drivers/net/bonding/bond_3ad.h
+++ b/drivers/net/bonding/bond_3ad.h
@@ -274,8 +274,8 @@ void bond_3ad_adapter_duplex_changed(struct slave *slave);
274void bond_3ad_handle_link_change(struct slave *slave, char link); 274void bond_3ad_handle_link_change(struct slave *slave, char link);
275int bond_3ad_get_active_agg_info(struct bonding *bond, struct ad_info *ad_info); 275int bond_3ad_get_active_agg_info(struct bonding *bond, struct ad_info *ad_info);
276int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev); 276int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev);
277int bond_3ad_lacpdu_recv(struct sk_buff *skb, struct bonding *bond, 277int bond_3ad_lacpdu_recv(const struct sk_buff *skb, struct bonding *bond,
278 struct slave *slave); 278 struct slave *slave);
279int bond_3ad_set_carrier(struct bonding *bond); 279int bond_3ad_set_carrier(struct bonding *bond);
280void bond_3ad_update_lacp_rate(struct bonding *bond); 280void bond_3ad_update_lacp_rate(struct bonding *bond);
281#endif //__BOND_3AD_H__ 281#endif //__BOND_3AD_H__
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 0f59c1564e5..e15cc11edbb 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -342,27 +342,17 @@ static void rlb_update_entry_from_arp(struct bonding *bond, struct arp_pkt *arp)
342 _unlock_rx_hashtbl_bh(bond); 342 _unlock_rx_hashtbl_bh(bond);
343} 343}
344 344
345static int rlb_arp_recv(struct sk_buff *skb, struct bonding *bond, 345static int rlb_arp_recv(const struct sk_buff *skb, struct bonding *bond,
346 struct slave *slave) 346 struct slave *slave)
347{ 347{
348 struct arp_pkt *arp; 348 struct arp_pkt *arp, _arp;
349 349
350 if (skb->protocol != cpu_to_be16(ETH_P_ARP)) 350 if (skb->protocol != cpu_to_be16(ETH_P_ARP))
351 goto out; 351 goto out;
352 352
353 arp = (struct arp_pkt *) skb->data; 353 arp = skb_header_pointer(skb, 0, sizeof(_arp), &_arp);
354 if (!arp) { 354 if (!arp)
355 pr_debug("Packet has no ARP data\n");
356 goto out; 355 goto out;
357 }
358
359 if (!pskb_may_pull(skb, arp_hdr_len(bond->dev)))
360 goto out;
361
362 if (skb->len < sizeof(struct arp_pkt)) {
363 pr_debug("Packet is too small to be an ARP\n");
364 goto out;
365 }
366 356
367 if (arp->op_code == htons(ARPOP_REPLY)) { 357 if (arp->op_code == htons(ARPOP_REPLY)) {
368 /* update rx hash table for this ARP */ 358 /* update rx hash table for this ARP */
@@ -1356,12 +1346,12 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
1356 } 1346 }
1357 } 1347 }
1358 1348
1349 read_unlock(&bond->curr_slave_lock);
1350
1359 if (res) { 1351 if (res) {
1360 /* no suitable interface, frame not sent */ 1352 /* no suitable interface, frame not sent */
1361 dev_kfree_skb(skb); 1353 kfree_skb(skb);
1362 } 1354 }
1363 read_unlock(&bond->curr_slave_lock);
1364
1365 return NETDEV_TX_OK; 1355 return NETDEV_TX_OK;
1366} 1356}
1367 1357
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 2ee76993f05..4ddcc3e41da 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1445,8 +1445,8 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
1445 struct sk_buff *skb = *pskb; 1445 struct sk_buff *skb = *pskb;
1446 struct slave *slave; 1446 struct slave *slave;
1447 struct bonding *bond; 1447 struct bonding *bond;
1448 int (*recv_probe)(struct sk_buff *, struct bonding *, 1448 int (*recv_probe)(const struct sk_buff *, struct bonding *,
1449 struct slave *); 1449 struct slave *);
1450 int ret = RX_HANDLER_ANOTHER; 1450 int ret = RX_HANDLER_ANOTHER;
1451 1451
1452 skb = skb_share_check(skb, GFP_ATOMIC); 1452 skb = skb_share_check(skb, GFP_ATOMIC);
@@ -1463,15 +1463,10 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
1463 1463
1464 recv_probe = ACCESS_ONCE(bond->recv_probe); 1464 recv_probe = ACCESS_ONCE(bond->recv_probe);
1465 if (recv_probe) { 1465 if (recv_probe) {
1466 struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC); 1466 ret = recv_probe(skb, bond, slave);
1467 1467 if (ret == RX_HANDLER_CONSUMED) {
1468 if (likely(nskb)) { 1468 consume_skb(skb);
1469 ret = recv_probe(nskb, bond, slave); 1469 return ret;
1470 dev_kfree_skb(nskb);
1471 if (ret == RX_HANDLER_CONSUMED) {
1472 consume_skb(skb);
1473 return ret;
1474 }
1475 } 1470 }
1476 } 1471 }
1477 1472
@@ -2738,25 +2733,31 @@ static void bond_validate_arp(struct bonding *bond, struct slave *slave, __be32
2738 } 2733 }
2739} 2734}
2740 2735
2741static int bond_arp_rcv(struct sk_buff *skb, struct bonding *bond, 2736static int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond,
2742 struct slave *slave) 2737 struct slave *slave)
2743{ 2738{
2744 struct arphdr *arp; 2739 struct arphdr *arp = (struct arphdr *)skb->data;
2745 unsigned char *arp_ptr; 2740 unsigned char *arp_ptr;
2746 __be32 sip, tip; 2741 __be32 sip, tip;
2742 int alen;
2747 2743
2748 if (skb->protocol != __cpu_to_be16(ETH_P_ARP)) 2744 if (skb->protocol != __cpu_to_be16(ETH_P_ARP))
2749 return RX_HANDLER_ANOTHER; 2745 return RX_HANDLER_ANOTHER;
2750 2746
2751 read_lock(&bond->lock); 2747 read_lock(&bond->lock);
2748 alen = arp_hdr_len(bond->dev);
2752 2749
2753 pr_debug("bond_arp_rcv: bond %s skb->dev %s\n", 2750 pr_debug("bond_arp_rcv: bond %s skb->dev %s\n",
2754 bond->dev->name, skb->dev->name); 2751 bond->dev->name, skb->dev->name);
2755 2752
2756 if (!pskb_may_pull(skb, arp_hdr_len(bond->dev))) 2753 if (alen > skb_headlen(skb)) {
2757 goto out_unlock; 2754 arp = kmalloc(alen, GFP_ATOMIC);
2755 if (!arp)
2756 goto out_unlock;
2757 if (skb_copy_bits(skb, 0, arp, alen) < 0)
2758 goto out_unlock;
2759 }
2758 2760
2759 arp = arp_hdr(skb);
2760 if (arp->ar_hln != bond->dev->addr_len || 2761 if (arp->ar_hln != bond->dev->addr_len ||
2761 skb->pkt_type == PACKET_OTHERHOST || 2762 skb->pkt_type == PACKET_OTHERHOST ||
2762 skb->pkt_type == PACKET_LOOPBACK || 2763 skb->pkt_type == PACKET_LOOPBACK ||
@@ -2791,6 +2792,8 @@ static int bond_arp_rcv(struct sk_buff *skb, struct bonding *bond,
2791 2792
2792out_unlock: 2793out_unlock:
2793 read_unlock(&bond->lock); 2794 read_unlock(&bond->lock);
2795 if (arp != (struct arphdr *)skb->data)
2796 kfree(arp);
2794 return RX_HANDLER_ANOTHER; 2797 return RX_HANDLER_ANOTHER;
2795} 2798}
2796 2799
@@ -3993,7 +3996,7 @@ static int bond_xmit_roundrobin(struct sk_buff *skb, struct net_device *bond_dev
3993out: 3996out:
3994 if (res) { 3997 if (res) {
3995 /* no suitable interface, frame not sent */ 3998 /* no suitable interface, frame not sent */
3996 dev_kfree_skb(skb); 3999 kfree_skb(skb);
3997 } 4000 }
3998 4001
3999 return NETDEV_TX_OK; 4002 return NETDEV_TX_OK;
@@ -4015,11 +4018,11 @@ static int bond_xmit_activebackup(struct sk_buff *skb, struct net_device *bond_d
4015 res = bond_dev_queue_xmit(bond, skb, 4018 res = bond_dev_queue_xmit(bond, skb,
4016 bond->curr_active_slave->dev); 4019 bond->curr_active_slave->dev);
4017 4020
4021 read_unlock(&bond->curr_slave_lock);
4022
4018 if (res) 4023 if (res)
4019 /* no suitable interface, frame not sent */ 4024 /* no suitable interface, frame not sent */
4020 dev_kfree_skb(skb); 4025 kfree_skb(skb);
4021
4022 read_unlock(&bond->curr_slave_lock);
4023 4026
4024 return NETDEV_TX_OK; 4027 return NETDEV_TX_OK;
4025} 4028}
@@ -4058,7 +4061,7 @@ static int bond_xmit_xor(struct sk_buff *skb, struct net_device *bond_dev)
4058 4061
4059 if (res) { 4062 if (res) {
4060 /* no suitable interface, frame not sent */ 4063 /* no suitable interface, frame not sent */
4061 dev_kfree_skb(skb); 4064 kfree_skb(skb);
4062 } 4065 }
4063 4066
4064 return NETDEV_TX_OK; 4067 return NETDEV_TX_OK;
@@ -4096,7 +4099,7 @@ static int bond_xmit_broadcast(struct sk_buff *skb, struct net_device *bond_dev)
4096 4099
4097 res = bond_dev_queue_xmit(bond, skb2, tx_dev); 4100 res = bond_dev_queue_xmit(bond, skb2, tx_dev);
4098 if (res) { 4101 if (res) {
4099 dev_kfree_skb(skb2); 4102 kfree_skb(skb2);
4100 continue; 4103 continue;
4101 } 4104 }
4102 } 4105 }
@@ -4110,7 +4113,7 @@ static int bond_xmit_broadcast(struct sk_buff *skb, struct net_device *bond_dev)
4110out: 4113out:
4111 if (res) 4114 if (res)
4112 /* no suitable interface, frame not sent */ 4115 /* no suitable interface, frame not sent */
4113 dev_kfree_skb(skb); 4116 kfree_skb(skb);
4114 4117
4115 /* frame sent to all suitable interfaces */ 4118 /* frame sent to all suitable interfaces */
4116 return NETDEV_TX_OK; 4119 return NETDEV_TX_OK;
@@ -4216,7 +4219,7 @@ static netdev_tx_t __bond_start_xmit(struct sk_buff *skb, struct net_device *dev
4216 pr_err("%s: Error: Unknown bonding mode %d\n", 4219 pr_err("%s: Error: Unknown bonding mode %d\n",
4217 dev->name, bond->params.mode); 4220 dev->name, bond->params.mode);
4218 WARN_ON_ONCE(1); 4221 WARN_ON_ONCE(1);
4219 dev_kfree_skb(skb); 4222 kfree_skb(skb);
4220 return NETDEV_TX_OK; 4223 return NETDEV_TX_OK;
4221 } 4224 }
4222} 4225}
@@ -4238,7 +4241,7 @@ static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
4238 if (bond->slave_cnt) 4241 if (bond->slave_cnt)
4239 ret = __bond_start_xmit(skb, dev); 4242 ret = __bond_start_xmit(skb, dev);
4240 else 4243 else
4241 dev_kfree_skb(skb); 4244 kfree_skb(skb);
4242 4245
4243 read_unlock(&bond->lock); 4246 read_unlock(&bond->lock);
4244 4247
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index 4581aa5ccab..f8af2fcd3d1 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -218,8 +218,8 @@ struct bonding {
218 struct slave *primary_slave; 218 struct slave *primary_slave;
219 bool force_primary; 219 bool force_primary;
220 s32 slave_cnt; /* never change this value outside the attach/detach wrappers */ 220 s32 slave_cnt; /* never change this value outside the attach/detach wrappers */
221 int (*recv_probe)(struct sk_buff *, struct bonding *, 221 int (*recv_probe)(const struct sk_buff *, struct bonding *,
222 struct slave *); 222 struct slave *);
223 rwlock_t lock; 223 rwlock_t lock;
224 rwlock_t curr_slave_lock; 224 rwlock_t curr_slave_lock;
225 u8 send_peer_notif; 225 u8 send_peer_notif;
diff --git a/drivers/net/caif/caif_hsi.c b/drivers/net/caif/caif_hsi.c
index 4a27adb7ae6..0def8b3106f 100644
--- a/drivers/net/caif/caif_hsi.c
+++ b/drivers/net/caif/caif_hsi.c
@@ -11,7 +11,6 @@
11#include <linux/init.h> 11#include <linux/init.h>
12#include <linux/module.h> 12#include <linux/module.h>
13#include <linux/device.h> 13#include <linux/device.h>
14#include <linux/platform_device.h>
15#include <linux/netdevice.h> 14#include <linux/netdevice.h>
16#include <linux/string.h> 15#include <linux/string.h>
17#include <linux/list.h> 16#include <linux/list.h>
@@ -20,7 +19,7 @@
20#include <linux/sched.h> 19#include <linux/sched.h>
21#include <linux/if_arp.h> 20#include <linux/if_arp.h>
22#include <linux/timer.h> 21#include <linux/timer.h>
23#include <linux/rtnetlink.h> 22#include <net/rtnetlink.h>
24#include <linux/pkt_sched.h> 23#include <linux/pkt_sched.h>
25#include <net/caif/caif_layer.h> 24#include <net/caif/caif_layer.h>
26#include <net/caif/caif_hsi.h> 25#include <net/caif/caif_hsi.h>
@@ -33,59 +32,46 @@ MODULE_DESCRIPTION("CAIF HSI driver");
33#define PAD_POW2(x, pow) ((((x)&((pow)-1)) == 0) ? 0 :\ 32#define PAD_POW2(x, pow) ((((x)&((pow)-1)) == 0) ? 0 :\
34 (((pow)-((x)&((pow)-1))))) 33 (((pow)-((x)&((pow)-1)))))
35 34
36static int inactivity_timeout = 1000; 35static const struct cfhsi_config hsi_default_config = {
37module_param(inactivity_timeout, int, S_IRUGO | S_IWUSR);
38MODULE_PARM_DESC(inactivity_timeout, "Inactivity timeout on HSI, ms.");
39 36
40static int aggregation_timeout = 1; 37 /* Inactivity timeout on HSI, ms */
41module_param(aggregation_timeout, int, S_IRUGO | S_IWUSR); 38 .inactivity_timeout = HZ,
42MODULE_PARM_DESC(aggregation_timeout, "Aggregation timeout on HSI, ms.");
43 39
44/* 40 /* Aggregation timeout (ms) of zero means no aggregation is done*/
45 * HSI padding options. 41 .aggregation_timeout = 1,
46 * Warning: must be a base of 2 (& operation used) and can not be zero !
47 */
48static int hsi_head_align = 4;
49module_param(hsi_head_align, int, S_IRUGO);
50MODULE_PARM_DESC(hsi_head_align, "HSI head alignment.");
51 42
52static int hsi_tail_align = 4; 43 /*
53module_param(hsi_tail_align, int, S_IRUGO); 44 * HSI link layer flow-control thresholds.
54MODULE_PARM_DESC(hsi_tail_align, "HSI tail alignment."); 45 * Threshold values for the HSI packet queue. Flow-control will be
55 46 * asserted when the number of packets exceeds q_high_mark. It will
56/* 47 * not be de-asserted before the number of packets drops below
57 * HSI link layer flowcontrol thresholds. 48 * q_low_mark.
58 * Warning: A high threshold value migth increase throughput but it will at 49 * Warning: A high threshold value might increase throughput but it
59 * the same time prevent channel prioritization and increase the risk of 50 * will at the same time prevent channel prioritization and increase
60 * flooding the modem. The high threshold should be above the low. 51 * the risk of flooding the modem. The high threshold should be above
61 */ 52 * the low.
62static int hsi_high_threshold = 100; 53 */
63module_param(hsi_high_threshold, int, S_IRUGO); 54 .q_high_mark = 100,
64MODULE_PARM_DESC(hsi_high_threshold, "HSI high threshold (FLOW OFF)."); 55 .q_low_mark = 50,
65 56
66static int hsi_low_threshold = 50; 57 /*
67module_param(hsi_low_threshold, int, S_IRUGO); 58 * HSI padding options.
68MODULE_PARM_DESC(hsi_low_threshold, "HSI high threshold (FLOW ON)."); 59 * Warning: must be a base of 2 (& operation used) and can not be zero !
60 */
61 .head_align = 4,
62 .tail_align = 4,
63};
69 64
70#define ON 1 65#define ON 1
71#define OFF 0 66#define OFF 0
72 67
73/*
74 * Threshold values for the HSI packet queue. Flowcontrol will be asserted
75 * when the number of packets exceeds HIGH_WATER_MARK. It will not be
76 * de-asserted before the number of packets drops below LOW_WATER_MARK.
77 */
78#define LOW_WATER_MARK hsi_low_threshold
79#define HIGH_WATER_MARK hsi_high_threshold
80
81static LIST_HEAD(cfhsi_list); 68static LIST_HEAD(cfhsi_list);
82static spinlock_t cfhsi_list_lock;
83 69
84static void cfhsi_inactivity_tout(unsigned long arg) 70static void cfhsi_inactivity_tout(unsigned long arg)
85{ 71{
86 struct cfhsi *cfhsi = (struct cfhsi *)arg; 72 struct cfhsi *cfhsi = (struct cfhsi *)arg;
87 73
88 dev_dbg(&cfhsi->ndev->dev, "%s.\n", 74 netdev_dbg(cfhsi->ndev, "%s.\n",
89 __func__); 75 __func__);
90 76
91 /* Schedule power down work queue. */ 77 /* Schedule power down work queue. */
@@ -101,8 +87,8 @@ static void cfhsi_update_aggregation_stats(struct cfhsi *cfhsi,
101 int hpad, tpad, len; 87 int hpad, tpad, len;
102 88
103 info = (struct caif_payload_info *)&skb->cb; 89 info = (struct caif_payload_info *)&skb->cb;
104 hpad = 1 + PAD_POW2((info->hdr_len + 1), hsi_head_align); 90 hpad = 1 + PAD_POW2((info->hdr_len + 1), cfhsi->cfg.head_align);
105 tpad = PAD_POW2((skb->len + hpad), hsi_tail_align); 91 tpad = PAD_POW2((skb->len + hpad), cfhsi->cfg.tail_align);
106 len = skb->len + hpad + tpad; 92 len = skb->len + hpad + tpad;
107 93
108 if (direction > 0) 94 if (direction > 0)
@@ -115,7 +101,7 @@ static bool cfhsi_can_send_aggregate(struct cfhsi *cfhsi)
115{ 101{
116 int i; 102 int i;
117 103
118 if (cfhsi->aggregation_timeout < 0) 104 if (cfhsi->cfg.aggregation_timeout == 0)
119 return true; 105 return true;
120 106
121 for (i = 0; i < CFHSI_PRIO_BEBK; ++i) { 107 for (i = 0; i < CFHSI_PRIO_BEBK; ++i) {
@@ -171,7 +157,7 @@ static void cfhsi_abort_tx(struct cfhsi *cfhsi)
171 cfhsi->tx_state = CFHSI_TX_STATE_IDLE; 157 cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
172 if (!test_bit(CFHSI_SHUTDOWN, &cfhsi->bits)) 158 if (!test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
173 mod_timer(&cfhsi->inactivity_timer, 159 mod_timer(&cfhsi->inactivity_timer,
174 jiffies + cfhsi->inactivity_timeout); 160 jiffies + cfhsi->cfg.inactivity_timeout);
175 spin_unlock_bh(&cfhsi->lock); 161 spin_unlock_bh(&cfhsi->lock);
176} 162}
177 163
@@ -181,14 +167,14 @@ static int cfhsi_flush_fifo(struct cfhsi *cfhsi)
181 size_t fifo_occupancy; 167 size_t fifo_occupancy;
182 int ret; 168 int ret;
183 169
184 dev_dbg(&cfhsi->ndev->dev, "%s.\n", 170 netdev_dbg(cfhsi->ndev, "%s.\n",
185 __func__); 171 __func__);
186 172
187 do { 173 do {
188 ret = cfhsi->dev->cfhsi_fifo_occupancy(cfhsi->dev, 174 ret = cfhsi->ops->cfhsi_fifo_occupancy(cfhsi->ops,
189 &fifo_occupancy); 175 &fifo_occupancy);
190 if (ret) { 176 if (ret) {
191 dev_warn(&cfhsi->ndev->dev, 177 netdev_warn(cfhsi->ndev,
192 "%s: can't get FIFO occupancy: %d.\n", 178 "%s: can't get FIFO occupancy: %d.\n",
193 __func__, ret); 179 __func__, ret);
194 break; 180 break;
@@ -198,11 +184,11 @@ static int cfhsi_flush_fifo(struct cfhsi *cfhsi)
198 184
199 fifo_occupancy = min(sizeof(buffer), fifo_occupancy); 185 fifo_occupancy = min(sizeof(buffer), fifo_occupancy);
200 set_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits); 186 set_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits);
201 ret = cfhsi->dev->cfhsi_rx(buffer, fifo_occupancy, 187 ret = cfhsi->ops->cfhsi_rx(buffer, fifo_occupancy,
202 cfhsi->dev); 188 cfhsi->ops);
203 if (ret) { 189 if (ret) {
204 clear_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits); 190 clear_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits);
205 dev_warn(&cfhsi->ndev->dev, 191 netdev_warn(cfhsi->ndev,
206 "%s: can't read data: %d.\n", 192 "%s: can't read data: %d.\n",
207 __func__, ret); 193 __func__, ret);
208 break; 194 break;
@@ -213,13 +199,13 @@ static int cfhsi_flush_fifo(struct cfhsi *cfhsi)
213 !test_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits), ret); 199 !test_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits), ret);
214 200
215 if (ret < 0) { 201 if (ret < 0) {
216 dev_warn(&cfhsi->ndev->dev, 202 netdev_warn(cfhsi->ndev,
217 "%s: can't wait for flush complete: %d.\n", 203 "%s: can't wait for flush complete: %d.\n",
218 __func__, ret); 204 __func__, ret);
219 break; 205 break;
220 } else if (!ret) { 206 } else if (!ret) {
221 ret = -ETIMEDOUT; 207 ret = -ETIMEDOUT;
222 dev_warn(&cfhsi->ndev->dev, 208 netdev_warn(cfhsi->ndev,
223 "%s: timeout waiting for flush complete.\n", 209 "%s: timeout waiting for flush complete.\n",
224 __func__); 210 __func__);
225 break; 211 break;
@@ -246,14 +232,14 @@ static int cfhsi_tx_frm(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
246 /* Check if we can embed a CAIF frame. */ 232 /* Check if we can embed a CAIF frame. */
247 if (skb->len < CFHSI_MAX_EMB_FRM_SZ) { 233 if (skb->len < CFHSI_MAX_EMB_FRM_SZ) {
248 struct caif_payload_info *info; 234 struct caif_payload_info *info;
249 int hpad = 0; 235 int hpad;
250 int tpad = 0; 236 int tpad;
251 237
252 /* Calculate needed head alignment and tail alignment. */ 238 /* Calculate needed head alignment and tail alignment. */
253 info = (struct caif_payload_info *)&skb->cb; 239 info = (struct caif_payload_info *)&skb->cb;
254 240
255 hpad = 1 + PAD_POW2((info->hdr_len + 1), hsi_head_align); 241 hpad = 1 + PAD_POW2((info->hdr_len + 1), cfhsi->cfg.head_align);
256 tpad = PAD_POW2((skb->len + hpad), hsi_tail_align); 242 tpad = PAD_POW2((skb->len + hpad), cfhsi->cfg.tail_align);
257 243
258 /* Check if frame still fits with added alignment. */ 244 /* Check if frame still fits with added alignment. */
259 if ((skb->len + hpad + tpad) <= CFHSI_MAX_EMB_FRM_SZ) { 245 if ((skb->len + hpad + tpad) <= CFHSI_MAX_EMB_FRM_SZ) {
@@ -282,8 +268,8 @@ static int cfhsi_tx_frm(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
282 pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ; 268 pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ;
283 while (nfrms < CFHSI_MAX_PKTS) { 269 while (nfrms < CFHSI_MAX_PKTS) {
284 struct caif_payload_info *info; 270 struct caif_payload_info *info;
285 int hpad = 0; 271 int hpad;
286 int tpad = 0; 272 int tpad;
287 273
288 if (!skb) 274 if (!skb)
289 skb = cfhsi_dequeue(cfhsi); 275 skb = cfhsi_dequeue(cfhsi);
@@ -294,8 +280,8 @@ static int cfhsi_tx_frm(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
294 /* Calculate needed head alignment and tail alignment. */ 280 /* Calculate needed head alignment and tail alignment. */
295 info = (struct caif_payload_info *)&skb->cb; 281 info = (struct caif_payload_info *)&skb->cb;
296 282
297 hpad = 1 + PAD_POW2((info->hdr_len + 1), hsi_head_align); 283 hpad = 1 + PAD_POW2((info->hdr_len + 1), cfhsi->cfg.head_align);
298 tpad = PAD_POW2((skb->len + hpad), hsi_tail_align); 284 tpad = PAD_POW2((skb->len + hpad), cfhsi->cfg.tail_align);
299 285
300 /* Fill in CAIF frame length in descriptor. */ 286 /* Fill in CAIF frame length in descriptor. */
301 desc->cffrm_len[nfrms] = hpad + skb->len + tpad; 287 desc->cffrm_len[nfrms] = hpad + skb->len + tpad;
@@ -348,7 +334,7 @@ static void cfhsi_start_tx(struct cfhsi *cfhsi)
348 struct cfhsi_desc *desc = (struct cfhsi_desc *)cfhsi->tx_buf; 334 struct cfhsi_desc *desc = (struct cfhsi_desc *)cfhsi->tx_buf;
349 int len, res; 335 int len, res;
350 336
351 dev_dbg(&cfhsi->ndev->dev, "%s.\n", __func__); 337 netdev_dbg(cfhsi->ndev, "%s.\n", __func__);
352 338
353 if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits)) 339 if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
354 return; 340 return;
@@ -366,22 +352,22 @@ static void cfhsi_start_tx(struct cfhsi *cfhsi)
366 cfhsi->tx_state = CFHSI_TX_STATE_IDLE; 352 cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
367 /* Start inactivity timer. */ 353 /* Start inactivity timer. */
368 mod_timer(&cfhsi->inactivity_timer, 354 mod_timer(&cfhsi->inactivity_timer,
369 jiffies + cfhsi->inactivity_timeout); 355 jiffies + cfhsi->cfg.inactivity_timeout);
370 spin_unlock_bh(&cfhsi->lock); 356 spin_unlock_bh(&cfhsi->lock);
371 break; 357 break;
372 } 358 }
373 359
374 /* Set up new transfer. */ 360 /* Set up new transfer. */
375 res = cfhsi->dev->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->dev); 361 res = cfhsi->ops->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->ops);
376 if (WARN_ON(res < 0)) 362 if (WARN_ON(res < 0))
377 dev_err(&cfhsi->ndev->dev, "%s: TX error %d.\n", 363 netdev_err(cfhsi->ndev, "%s: TX error %d.\n",
378 __func__, res); 364 __func__, res);
379 } while (res < 0); 365 } while (res < 0);
380} 366}
381 367
382static void cfhsi_tx_done(struct cfhsi *cfhsi) 368static void cfhsi_tx_done(struct cfhsi *cfhsi)
383{ 369{
384 dev_dbg(&cfhsi->ndev->dev, "%s.\n", __func__); 370 netdev_dbg(cfhsi->ndev, "%s.\n", __func__);
385 371
386 if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits)) 372 if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
387 return; 373 return;
@@ -392,7 +378,7 @@ static void cfhsi_tx_done(struct cfhsi *cfhsi)
392 */ 378 */
393 spin_lock_bh(&cfhsi->lock); 379 spin_lock_bh(&cfhsi->lock);
394 if (cfhsi->flow_off_sent && 380 if (cfhsi->flow_off_sent &&
395 cfhsi_tx_queue_len(cfhsi) <= cfhsi->q_low_mark && 381 cfhsi_tx_queue_len(cfhsi) <= cfhsi->cfg.q_low_mark &&
396 cfhsi->cfdev.flowctrl) { 382 cfhsi->cfdev.flowctrl) {
397 383
398 cfhsi->flow_off_sent = 0; 384 cfhsi->flow_off_sent = 0;
@@ -404,19 +390,19 @@ static void cfhsi_tx_done(struct cfhsi *cfhsi)
404 cfhsi_start_tx(cfhsi); 390 cfhsi_start_tx(cfhsi);
405 } else { 391 } else {
406 mod_timer(&cfhsi->aggregation_timer, 392 mod_timer(&cfhsi->aggregation_timer,
407 jiffies + cfhsi->aggregation_timeout); 393 jiffies + cfhsi->cfg.aggregation_timeout);
408 spin_unlock_bh(&cfhsi->lock); 394 spin_unlock_bh(&cfhsi->lock);
409 } 395 }
410 396
411 return; 397 return;
412} 398}
413 399
414static void cfhsi_tx_done_cb(struct cfhsi_drv *drv) 400static void cfhsi_tx_done_cb(struct cfhsi_cb_ops *cb_ops)
415{ 401{
416 struct cfhsi *cfhsi; 402 struct cfhsi *cfhsi;
417 403
418 cfhsi = container_of(drv, struct cfhsi, drv); 404 cfhsi = container_of(cb_ops, struct cfhsi, cb_ops);
419 dev_dbg(&cfhsi->ndev->dev, "%s.\n", 405 netdev_dbg(cfhsi->ndev, "%s.\n",
420 __func__); 406 __func__);
421 407
422 if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits)) 408 if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
@@ -433,7 +419,7 @@ static int cfhsi_rx_desc(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
433 419
434 if ((desc->header & ~CFHSI_PIGGY_DESC) || 420 if ((desc->header & ~CFHSI_PIGGY_DESC) ||
435 (desc->offset > CFHSI_MAX_EMB_FRM_SZ)) { 421 (desc->offset > CFHSI_MAX_EMB_FRM_SZ)) {
436 dev_err(&cfhsi->ndev->dev, "%s: Invalid descriptor.\n", 422 netdev_err(cfhsi->ndev, "%s: Invalid descriptor.\n",
437 __func__); 423 __func__);
438 return -EPROTO; 424 return -EPROTO;
439 } 425 }
@@ -455,7 +441,7 @@ static int cfhsi_rx_desc(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
455 441
456 /* Sanity check length of CAIF frame. */ 442 /* Sanity check length of CAIF frame. */
457 if (unlikely(len > CFHSI_MAX_CAIF_FRAME_SZ)) { 443 if (unlikely(len > CFHSI_MAX_CAIF_FRAME_SZ)) {
458 dev_err(&cfhsi->ndev->dev, "%s: Invalid length.\n", 444 netdev_err(cfhsi->ndev, "%s: Invalid length.\n",
459 __func__); 445 __func__);
460 return -EPROTO; 446 return -EPROTO;
461 } 447 }
@@ -463,7 +449,7 @@ static int cfhsi_rx_desc(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
463 /* Allocate SKB (OK even in IRQ context). */ 449 /* Allocate SKB (OK even in IRQ context). */
464 skb = alloc_skb(len + 1, GFP_ATOMIC); 450 skb = alloc_skb(len + 1, GFP_ATOMIC);
465 if (!skb) { 451 if (!skb) {
466 dev_err(&cfhsi->ndev->dev, "%s: Out of memory !\n", 452 netdev_err(cfhsi->ndev, "%s: Out of memory !\n",
467 __func__); 453 __func__);
468 return -ENOMEM; 454 return -ENOMEM;
469 } 455 }
@@ -477,8 +463,8 @@ static int cfhsi_rx_desc(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
477 skb->dev = cfhsi->ndev; 463 skb->dev = cfhsi->ndev;
478 464
479 /* 465 /*
480 * We are called from a arch specific platform device. 466 * We are in a callback handler and
481 * Unfortunately we don't know what context we're 467 * unfortunately we don't know what context we're
482 * running in. 468 * running in.
483 */ 469 */
484 if (in_interrupt()) 470 if (in_interrupt())
@@ -504,7 +490,7 @@ static int cfhsi_rx_desc(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
504 xfer_sz += CFHSI_DESC_SZ; 490 xfer_sz += CFHSI_DESC_SZ;
505 491
506 if ((xfer_sz % 4) || (xfer_sz > (CFHSI_BUF_SZ_RX - CFHSI_DESC_SZ))) { 492 if ((xfer_sz % 4) || (xfer_sz > (CFHSI_BUF_SZ_RX - CFHSI_DESC_SZ))) {
507 dev_err(&cfhsi->ndev->dev, 493 netdev_err(cfhsi->ndev,
508 "%s: Invalid payload len: %d, ignored.\n", 494 "%s: Invalid payload len: %d, ignored.\n",
509 __func__, xfer_sz); 495 __func__, xfer_sz);
510 return -EPROTO; 496 return -EPROTO;
@@ -551,7 +537,7 @@ static int cfhsi_rx_pld(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
551 /* Sanity check header and offset. */ 537 /* Sanity check header and offset. */
552 if (WARN_ON((desc->header & ~CFHSI_PIGGY_DESC) || 538 if (WARN_ON((desc->header & ~CFHSI_PIGGY_DESC) ||
553 (desc->offset > CFHSI_MAX_EMB_FRM_SZ))) { 539 (desc->offset > CFHSI_MAX_EMB_FRM_SZ))) {
554 dev_err(&cfhsi->ndev->dev, "%s: Invalid descriptor.\n", 540 netdev_err(cfhsi->ndev, "%s: Invalid descriptor.\n",
555 __func__); 541 __func__);
556 return -EPROTO; 542 return -EPROTO;
557 } 543 }
@@ -573,7 +559,7 @@ static int cfhsi_rx_pld(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
573 struct sk_buff *skb; 559 struct sk_buff *skb;
574 u8 *dst = NULL; 560 u8 *dst = NULL;
575 u8 *pcffrm = NULL; 561 u8 *pcffrm = NULL;
576 int len = 0; 562 int len;
577 563
578 /* CAIF frame starts after head padding. */ 564 /* CAIF frame starts after head padding. */
579 pcffrm = pfrm + *pfrm + 1; 565 pcffrm = pfrm + *pfrm + 1;
@@ -585,7 +571,7 @@ static int cfhsi_rx_pld(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
585 571
586 /* Sanity check length of CAIF frames. */ 572 /* Sanity check length of CAIF frames. */
587 if (unlikely(len > CFHSI_MAX_CAIF_FRAME_SZ)) { 573 if (unlikely(len > CFHSI_MAX_CAIF_FRAME_SZ)) {
588 dev_err(&cfhsi->ndev->dev, "%s: Invalid length.\n", 574 netdev_err(cfhsi->ndev, "%s: Invalid length.\n",
589 __func__); 575 __func__);
590 return -EPROTO; 576 return -EPROTO;
591 } 577 }
@@ -593,7 +579,7 @@ static int cfhsi_rx_pld(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
593 /* Allocate SKB (OK even in IRQ context). */ 579 /* Allocate SKB (OK even in IRQ context). */
594 skb = alloc_skb(len + 1, GFP_ATOMIC); 580 skb = alloc_skb(len + 1, GFP_ATOMIC);
595 if (!skb) { 581 if (!skb) {
596 dev_err(&cfhsi->ndev->dev, "%s: Out of memory !\n", 582 netdev_err(cfhsi->ndev, "%s: Out of memory !\n",
597 __func__); 583 __func__);
598 cfhsi->rx_state.nfrms = nfrms; 584 cfhsi->rx_state.nfrms = nfrms;
599 return -ENOMEM; 585 return -ENOMEM;
@@ -608,7 +594,7 @@ static int cfhsi_rx_pld(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
608 skb->dev = cfhsi->ndev; 594 skb->dev = cfhsi->ndev;
609 595
610 /* 596 /*
611 * We're called from a platform device, 597 * We're called in callback from HSI
612 * and don't know the context we're running in. 598 * and don't know the context we're running in.
613 */ 599 */
614 if (in_interrupt()) 600 if (in_interrupt())
@@ -639,7 +625,7 @@ static void cfhsi_rx_done(struct cfhsi *cfhsi)
639 625
640 desc = (struct cfhsi_desc *)cfhsi->rx_buf; 626 desc = (struct cfhsi_desc *)cfhsi->rx_buf;
641 627
642 dev_dbg(&cfhsi->ndev->dev, "%s\n", __func__); 628 netdev_dbg(cfhsi->ndev, "%s\n", __func__);
643 629
644 if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits)) 630 if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
645 return; 631 return;
@@ -647,7 +633,7 @@ static void cfhsi_rx_done(struct cfhsi *cfhsi)
647 /* Update inactivity timer if pending. */ 633 /* Update inactivity timer if pending. */
648 spin_lock_bh(&cfhsi->lock); 634 spin_lock_bh(&cfhsi->lock);
649 mod_timer_pending(&cfhsi->inactivity_timer, 635 mod_timer_pending(&cfhsi->inactivity_timer,
650 jiffies + cfhsi->inactivity_timeout); 636 jiffies + cfhsi->cfg.inactivity_timeout);
651 spin_unlock_bh(&cfhsi->lock); 637 spin_unlock_bh(&cfhsi->lock);
652 638
653 if (cfhsi->rx_state.state == CFHSI_RX_STATE_DESC) { 639 if (cfhsi->rx_state.state == CFHSI_RX_STATE_DESC) {
@@ -680,12 +666,11 @@ static void cfhsi_rx_done(struct cfhsi *cfhsi)
680 if (desc_pld_len < 0) 666 if (desc_pld_len < 0)
681 goto out_of_sync; 667 goto out_of_sync;
682 668
683 if (desc_pld_len > 0) 669 if (desc_pld_len > 0) {
684 rx_len = desc_pld_len; 670 rx_len = desc_pld_len;
685 671 if (piggy_desc->header & CFHSI_PIGGY_DESC)
686 if (desc_pld_len > 0 && 672 rx_len += CFHSI_DESC_SZ;
687 (piggy_desc->header & CFHSI_PIGGY_DESC)) 673 }
688 rx_len += CFHSI_DESC_SZ;
689 674
690 /* 675 /*
691 * Copy needed information from the piggy-backed 676 * Copy needed information from the piggy-backed
@@ -693,8 +678,6 @@ static void cfhsi_rx_done(struct cfhsi *cfhsi)
693 */ 678 */
694 memcpy(rx_buf, (u8 *)piggy_desc, 679 memcpy(rx_buf, (u8 *)piggy_desc,
695 CFHSI_DESC_SHORT_SZ); 680 CFHSI_DESC_SHORT_SZ);
696 if (desc_pld_len == -EPROTO)
697 goto out_of_sync;
698 } 681 }
699 } 682 }
700 683
@@ -710,13 +693,13 @@ static void cfhsi_rx_done(struct cfhsi *cfhsi)
710 /* Initiate next read */ 693 /* Initiate next read */
711 if (test_bit(CFHSI_AWAKE, &cfhsi->bits)) { 694 if (test_bit(CFHSI_AWAKE, &cfhsi->bits)) {
712 /* Set up new transfer. */ 695 /* Set up new transfer. */
713 dev_dbg(&cfhsi->ndev->dev, "%s: Start RX.\n", 696 netdev_dbg(cfhsi->ndev, "%s: Start RX.\n",
714 __func__); 697 __func__);
715 698
716 res = cfhsi->dev->cfhsi_rx(rx_ptr, rx_len, 699 res = cfhsi->ops->cfhsi_rx(rx_ptr, rx_len,
717 cfhsi->dev); 700 cfhsi->ops);
718 if (WARN_ON(res < 0)) { 701 if (WARN_ON(res < 0)) {
719 dev_err(&cfhsi->ndev->dev, "%s: RX error %d.\n", 702 netdev_err(cfhsi->ndev, "%s: RX error %d.\n",
720 __func__, res); 703 __func__, res);
721 cfhsi->ndev->stats.rx_errors++; 704 cfhsi->ndev->stats.rx_errors++;
722 cfhsi->ndev->stats.rx_dropped++; 705 cfhsi->ndev->stats.rx_dropped++;
@@ -753,7 +736,7 @@ static void cfhsi_rx_done(struct cfhsi *cfhsi)
753 return; 736 return;
754 737
755out_of_sync: 738out_of_sync:
756 dev_err(&cfhsi->ndev->dev, "%s: Out of sync.\n", __func__); 739 netdev_err(cfhsi->ndev, "%s: Out of sync.\n", __func__);
757 print_hex_dump_bytes("--> ", DUMP_PREFIX_NONE, 740 print_hex_dump_bytes("--> ", DUMP_PREFIX_NONE,
758 cfhsi->rx_buf, CFHSI_DESC_SZ); 741 cfhsi->rx_buf, CFHSI_DESC_SZ);
759 schedule_work(&cfhsi->out_of_sync_work); 742 schedule_work(&cfhsi->out_of_sync_work);
@@ -763,18 +746,18 @@ static void cfhsi_rx_slowpath(unsigned long arg)
763{ 746{
764 struct cfhsi *cfhsi = (struct cfhsi *)arg; 747 struct cfhsi *cfhsi = (struct cfhsi *)arg;
765 748
766 dev_dbg(&cfhsi->ndev->dev, "%s.\n", 749 netdev_dbg(cfhsi->ndev, "%s.\n",
767 __func__); 750 __func__);
768 751
769 cfhsi_rx_done(cfhsi); 752 cfhsi_rx_done(cfhsi);
770} 753}
771 754
772static void cfhsi_rx_done_cb(struct cfhsi_drv *drv) 755static void cfhsi_rx_done_cb(struct cfhsi_cb_ops *cb_ops)
773{ 756{
774 struct cfhsi *cfhsi; 757 struct cfhsi *cfhsi;
775 758
776 cfhsi = container_of(drv, struct cfhsi, drv); 759 cfhsi = container_of(cb_ops, struct cfhsi, cb_ops);
777 dev_dbg(&cfhsi->ndev->dev, "%s.\n", 760 netdev_dbg(cfhsi->ndev, "%s.\n",
778 __func__); 761 __func__);
779 762
780 if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits)) 763 if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
@@ -807,9 +790,9 @@ static void cfhsi_wake_up(struct work_struct *work)
807 } 790 }
808 791
809 /* Activate wake line. */ 792 /* Activate wake line. */
810 cfhsi->dev->cfhsi_wake_up(cfhsi->dev); 793 cfhsi->ops->cfhsi_wake_up(cfhsi->ops);
811 794
812 dev_dbg(&cfhsi->ndev->dev, "%s: Start waiting.\n", 795 netdev_dbg(cfhsi->ndev, "%s: Start waiting.\n",
813 __func__); 796 __func__);
814 797
815 /* Wait for acknowledge. */ 798 /* Wait for acknowledge. */
@@ -819,33 +802,33 @@ static void cfhsi_wake_up(struct work_struct *work)
819 &cfhsi->bits), ret); 802 &cfhsi->bits), ret);
820 if (unlikely(ret < 0)) { 803 if (unlikely(ret < 0)) {
821 /* Interrupted by signal. */ 804 /* Interrupted by signal. */
822 dev_err(&cfhsi->ndev->dev, "%s: Signalled: %ld.\n", 805 netdev_err(cfhsi->ndev, "%s: Signalled: %ld.\n",
823 __func__, ret); 806 __func__, ret);
824 807
825 clear_bit(CFHSI_WAKE_UP, &cfhsi->bits); 808 clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
826 cfhsi->dev->cfhsi_wake_down(cfhsi->dev); 809 cfhsi->ops->cfhsi_wake_down(cfhsi->ops);
827 return; 810 return;
828 } else if (!ret) { 811 } else if (!ret) {
829 bool ca_wake = false; 812 bool ca_wake = false;
830 size_t fifo_occupancy = 0; 813 size_t fifo_occupancy = 0;
831 814
832 /* Wakeup timeout */ 815 /* Wakeup timeout */
833 dev_dbg(&cfhsi->ndev->dev, "%s: Timeout.\n", 816 netdev_dbg(cfhsi->ndev, "%s: Timeout.\n",
834 __func__); 817 __func__);
835 818
836 /* Check FIFO to check if modem has sent something. */ 819 /* Check FIFO to check if modem has sent something. */
837 WARN_ON(cfhsi->dev->cfhsi_fifo_occupancy(cfhsi->dev, 820 WARN_ON(cfhsi->ops->cfhsi_fifo_occupancy(cfhsi->ops,
838 &fifo_occupancy)); 821 &fifo_occupancy));
839 822
840 dev_dbg(&cfhsi->ndev->dev, "%s: Bytes in FIFO: %u.\n", 823 netdev_dbg(cfhsi->ndev, "%s: Bytes in FIFO: %u.\n",
841 __func__, (unsigned) fifo_occupancy); 824 __func__, (unsigned) fifo_occupancy);
842 825
843 /* Check if we misssed the interrupt. */ 826 /* Check if we misssed the interrupt. */
844 WARN_ON(cfhsi->dev->cfhsi_get_peer_wake(cfhsi->dev, 827 WARN_ON(cfhsi->ops->cfhsi_get_peer_wake(cfhsi->ops,
845 &ca_wake)); 828 &ca_wake));
846 829
847 if (ca_wake) { 830 if (ca_wake) {
848 dev_err(&cfhsi->ndev->dev, "%s: CA Wake missed !.\n", 831 netdev_err(cfhsi->ndev, "%s: CA Wake missed !.\n",
849 __func__); 832 __func__);
850 833
851 /* Clear the CFHSI_WAKE_UP_ACK bit to prevent race. */ 834 /* Clear the CFHSI_WAKE_UP_ACK bit to prevent race. */
@@ -856,11 +839,11 @@ static void cfhsi_wake_up(struct work_struct *work)
856 } 839 }
857 840
858 clear_bit(CFHSI_WAKE_UP, &cfhsi->bits); 841 clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
859 cfhsi->dev->cfhsi_wake_down(cfhsi->dev); 842 cfhsi->ops->cfhsi_wake_down(cfhsi->ops);
860 return; 843 return;
861 } 844 }
862wake_ack: 845wake_ack:
863 dev_dbg(&cfhsi->ndev->dev, "%s: Woken.\n", 846 netdev_dbg(cfhsi->ndev, "%s: Woken.\n",
864 __func__); 847 __func__);
865 848
866 /* Clear power up bit. */ 849 /* Clear power up bit. */
@@ -868,11 +851,11 @@ wake_ack:
868 clear_bit(CFHSI_WAKE_UP, &cfhsi->bits); 851 clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
869 852
870 /* Resume read operation. */ 853 /* Resume read operation. */
871 dev_dbg(&cfhsi->ndev->dev, "%s: Start RX.\n", __func__); 854 netdev_dbg(cfhsi->ndev, "%s: Start RX.\n", __func__);
872 res = cfhsi->dev->cfhsi_rx(cfhsi->rx_ptr, cfhsi->rx_len, cfhsi->dev); 855 res = cfhsi->ops->cfhsi_rx(cfhsi->rx_ptr, cfhsi->rx_len, cfhsi->ops);
873 856
874 if (WARN_ON(res < 0)) 857 if (WARN_ON(res < 0))
875 dev_err(&cfhsi->ndev->dev, "%s: RX err %d.\n", __func__, res); 858 netdev_err(cfhsi->ndev, "%s: RX err %d.\n", __func__, res);
876 859
877 /* Clear power up acknowledment. */ 860 /* Clear power up acknowledment. */
878 clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits); 861 clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
@@ -881,16 +864,16 @@ wake_ack:
881 864
882 /* Resume transmit if queues are not empty. */ 865 /* Resume transmit if queues are not empty. */
883 if (!cfhsi_tx_queue_len(cfhsi)) { 866 if (!cfhsi_tx_queue_len(cfhsi)) {
884 dev_dbg(&cfhsi->ndev->dev, "%s: Peer wake, start timer.\n", 867 netdev_dbg(cfhsi->ndev, "%s: Peer wake, start timer.\n",
885 __func__); 868 __func__);
886 /* Start inactivity timer. */ 869 /* Start inactivity timer. */
887 mod_timer(&cfhsi->inactivity_timer, 870 mod_timer(&cfhsi->inactivity_timer,
888 jiffies + cfhsi->inactivity_timeout); 871 jiffies + cfhsi->cfg.inactivity_timeout);
889 spin_unlock_bh(&cfhsi->lock); 872 spin_unlock_bh(&cfhsi->lock);
890 return; 873 return;
891 } 874 }
892 875
893 dev_dbg(&cfhsi->ndev->dev, "%s: Host wake.\n", 876 netdev_dbg(cfhsi->ndev, "%s: Host wake.\n",
894 __func__); 877 __func__);
895 878
896 spin_unlock_bh(&cfhsi->lock); 879 spin_unlock_bh(&cfhsi->lock);
@@ -900,14 +883,14 @@ wake_ack:
900 883
901 if (likely(len > 0)) { 884 if (likely(len > 0)) {
902 /* Set up new transfer. */ 885 /* Set up new transfer. */
903 res = cfhsi->dev->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->dev); 886 res = cfhsi->ops->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->ops);
904 if (WARN_ON(res < 0)) { 887 if (WARN_ON(res < 0)) {
905 dev_err(&cfhsi->ndev->dev, "%s: TX error %d.\n", 888 netdev_err(cfhsi->ndev, "%s: TX error %d.\n",
906 __func__, res); 889 __func__, res);
907 cfhsi_abort_tx(cfhsi); 890 cfhsi_abort_tx(cfhsi);
908 } 891 }
909 } else { 892 } else {
910 dev_err(&cfhsi->ndev->dev, 893 netdev_err(cfhsi->ndev,
911 "%s: Failed to create HSI frame: %d.\n", 894 "%s: Failed to create HSI frame: %d.\n",
912 __func__, len); 895 __func__, len);
913 } 896 }
@@ -921,13 +904,13 @@ static void cfhsi_wake_down(struct work_struct *work)
921 int retry = CFHSI_WAKE_TOUT; 904 int retry = CFHSI_WAKE_TOUT;
922 905
923 cfhsi = container_of(work, struct cfhsi, wake_down_work); 906 cfhsi = container_of(work, struct cfhsi, wake_down_work);
924 dev_dbg(&cfhsi->ndev->dev, "%s.\n", __func__); 907 netdev_dbg(cfhsi->ndev, "%s.\n", __func__);
925 908
926 if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits)) 909 if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
927 return; 910 return;
928 911
929 /* Deactivate wake line. */ 912 /* Deactivate wake line. */
930 cfhsi->dev->cfhsi_wake_down(cfhsi->dev); 913 cfhsi->ops->cfhsi_wake_down(cfhsi->ops);
931 914
932 /* Wait for acknowledge. */ 915 /* Wait for acknowledge. */
933 ret = CFHSI_WAKE_TOUT; 916 ret = CFHSI_WAKE_TOUT;
@@ -936,26 +919,26 @@ static void cfhsi_wake_down(struct work_struct *work)
936 &cfhsi->bits), ret); 919 &cfhsi->bits), ret);
937 if (ret < 0) { 920 if (ret < 0) {
938 /* Interrupted by signal. */ 921 /* Interrupted by signal. */
939 dev_err(&cfhsi->ndev->dev, "%s: Signalled: %ld.\n", 922 netdev_err(cfhsi->ndev, "%s: Signalled: %ld.\n",
940 __func__, ret); 923 __func__, ret);
941 return; 924 return;
942 } else if (!ret) { 925 } else if (!ret) {
943 bool ca_wake = true; 926 bool ca_wake = true;
944 927
945 /* Timeout */ 928 /* Timeout */
946 dev_err(&cfhsi->ndev->dev, "%s: Timeout.\n", __func__); 929 netdev_err(cfhsi->ndev, "%s: Timeout.\n", __func__);
947 930
948 /* Check if we misssed the interrupt. */ 931 /* Check if we misssed the interrupt. */
949 WARN_ON(cfhsi->dev->cfhsi_get_peer_wake(cfhsi->dev, 932 WARN_ON(cfhsi->ops->cfhsi_get_peer_wake(cfhsi->ops,
950 &ca_wake)); 933 &ca_wake));
951 if (!ca_wake) 934 if (!ca_wake)
952 dev_err(&cfhsi->ndev->dev, "%s: CA Wake missed !.\n", 935 netdev_err(cfhsi->ndev, "%s: CA Wake missed !.\n",
953 __func__); 936 __func__);
954 } 937 }
955 938
956 /* Check FIFO occupancy. */ 939 /* Check FIFO occupancy. */
957 while (retry) { 940 while (retry) {
958 WARN_ON(cfhsi->dev->cfhsi_fifo_occupancy(cfhsi->dev, 941 WARN_ON(cfhsi->ops->cfhsi_fifo_occupancy(cfhsi->ops,
959 &fifo_occupancy)); 942 &fifo_occupancy));
960 943
961 if (!fifo_occupancy) 944 if (!fifo_occupancy)
@@ -967,14 +950,13 @@ static void cfhsi_wake_down(struct work_struct *work)
967 } 950 }
968 951
969 if (!retry) 952 if (!retry)
970 dev_err(&cfhsi->ndev->dev, "%s: FIFO Timeout.\n", __func__); 953 netdev_err(cfhsi->ndev, "%s: FIFO Timeout.\n", __func__);
971 954
972 /* Clear AWAKE condition. */ 955 /* Clear AWAKE condition. */
973 clear_bit(CFHSI_AWAKE, &cfhsi->bits); 956 clear_bit(CFHSI_AWAKE, &cfhsi->bits);
974 957
975 /* Cancel pending RX requests. */ 958 /* Cancel pending RX requests. */
976 cfhsi->dev->cfhsi_rx_cancel(cfhsi->dev); 959 cfhsi->ops->cfhsi_rx_cancel(cfhsi->ops);
977
978} 960}
979 961
980static void cfhsi_out_of_sync(struct work_struct *work) 962static void cfhsi_out_of_sync(struct work_struct *work)
@@ -988,12 +970,12 @@ static void cfhsi_out_of_sync(struct work_struct *work)
988 rtnl_unlock(); 970 rtnl_unlock();
989} 971}
990 972
991static void cfhsi_wake_up_cb(struct cfhsi_drv *drv) 973static void cfhsi_wake_up_cb(struct cfhsi_cb_ops *cb_ops)
992{ 974{
993 struct cfhsi *cfhsi = NULL; 975 struct cfhsi *cfhsi = NULL;
994 976
995 cfhsi = container_of(drv, struct cfhsi, drv); 977 cfhsi = container_of(cb_ops, struct cfhsi, cb_ops);
996 dev_dbg(&cfhsi->ndev->dev, "%s.\n", 978 netdev_dbg(cfhsi->ndev, "%s.\n",
997 __func__); 979 __func__);
998 980
999 set_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits); 981 set_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
@@ -1007,12 +989,12 @@ static void cfhsi_wake_up_cb(struct cfhsi_drv *drv)
1007 queue_work(cfhsi->wq, &cfhsi->wake_up_work); 989 queue_work(cfhsi->wq, &cfhsi->wake_up_work);
1008} 990}
1009 991
1010static void cfhsi_wake_down_cb(struct cfhsi_drv *drv) 992static void cfhsi_wake_down_cb(struct cfhsi_cb_ops *cb_ops)
1011{ 993{
1012 struct cfhsi *cfhsi = NULL; 994 struct cfhsi *cfhsi = NULL;
1013 995
1014 cfhsi = container_of(drv, struct cfhsi, drv); 996 cfhsi = container_of(cb_ops, struct cfhsi, cb_ops);
1015 dev_dbg(&cfhsi->ndev->dev, "%s.\n", 997 netdev_dbg(cfhsi->ndev, "%s.\n",
1016 __func__); 998 __func__);
1017 999
1018 /* Initiating low power is only permitted by the host (us). */ 1000 /* Initiating low power is only permitted by the host (us). */
@@ -1024,7 +1006,7 @@ static void cfhsi_aggregation_tout(unsigned long arg)
1024{ 1006{
1025 struct cfhsi *cfhsi = (struct cfhsi *)arg; 1007 struct cfhsi *cfhsi = (struct cfhsi *)arg;
1026 1008
1027 dev_dbg(&cfhsi->ndev->dev, "%s.\n", 1009 netdev_dbg(cfhsi->ndev, "%s.\n",
1028 __func__); 1010 __func__);
1029 1011
1030 cfhsi_start_tx(cfhsi); 1012 cfhsi_start_tx(cfhsi);
@@ -1077,7 +1059,7 @@ static int cfhsi_xmit(struct sk_buff *skb, struct net_device *dev)
1077 1059
1078 /* Send flow off if number of packets is above high water mark. */ 1060 /* Send flow off if number of packets is above high water mark. */
1079 if (!cfhsi->flow_off_sent && 1061 if (!cfhsi->flow_off_sent &&
1080 cfhsi_tx_queue_len(cfhsi) > cfhsi->q_high_mark && 1062 cfhsi_tx_queue_len(cfhsi) > cfhsi->cfg.q_high_mark &&
1081 cfhsi->cfdev.flowctrl) { 1063 cfhsi->cfdev.flowctrl) {
1082 cfhsi->flow_off_sent = 1; 1064 cfhsi->flow_off_sent = 1;
1083 cfhsi->cfdev.flowctrl(cfhsi->ndev, OFF); 1065 cfhsi->cfdev.flowctrl(cfhsi->ndev, OFF);
@@ -1114,9 +1096,9 @@ static int cfhsi_xmit(struct sk_buff *skb, struct net_device *dev)
1114 WARN_ON(!len); 1096 WARN_ON(!len);
1115 1097
1116 /* Set up new transfer. */ 1098 /* Set up new transfer. */
1117 res = cfhsi->dev->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->dev); 1099 res = cfhsi->ops->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->ops);
1118 if (WARN_ON(res < 0)) { 1100 if (WARN_ON(res < 0)) {
1119 dev_err(&cfhsi->ndev->dev, "%s: TX error %d.\n", 1101 netdev_err(cfhsi->ndev, "%s: TX error %d.\n",
1120 __func__, res); 1102 __func__, res);
1121 cfhsi_abort_tx(cfhsi); 1103 cfhsi_abort_tx(cfhsi);
1122 } 1104 }
@@ -1129,19 +1111,19 @@ static int cfhsi_xmit(struct sk_buff *skb, struct net_device *dev)
1129 return 0; 1111 return 0;
1130} 1112}
1131 1113
1132static const struct net_device_ops cfhsi_ops; 1114static const struct net_device_ops cfhsi_netdevops;
1133 1115
1134static void cfhsi_setup(struct net_device *dev) 1116static void cfhsi_setup(struct net_device *dev)
1135{ 1117{
1136 int i; 1118 int i;
1137 struct cfhsi *cfhsi = netdev_priv(dev); 1119 struct cfhsi *cfhsi = netdev_priv(dev);
1138 dev->features = 0; 1120 dev->features = 0;
1139 dev->netdev_ops = &cfhsi_ops;
1140 dev->type = ARPHRD_CAIF; 1121 dev->type = ARPHRD_CAIF;
1141 dev->flags = IFF_POINTOPOINT | IFF_NOARP; 1122 dev->flags = IFF_POINTOPOINT | IFF_NOARP;
1142 dev->mtu = CFHSI_MAX_CAIF_FRAME_SZ; 1123 dev->mtu = CFHSI_MAX_CAIF_FRAME_SZ;
1143 dev->tx_queue_len = 0; 1124 dev->tx_queue_len = 0;
1144 dev->destructor = free_netdev; 1125 dev->destructor = free_netdev;
1126 dev->netdev_ops = &cfhsi_netdevops;
1145 for (i = 0; i < CFHSI_PRIO_LAST; ++i) 1127 for (i = 0; i < CFHSI_PRIO_LAST; ++i)
1146 skb_queue_head_init(&cfhsi->qhead[i]); 1128 skb_queue_head_init(&cfhsi->qhead[i]);
1147 cfhsi->cfdev.link_select = CAIF_LINK_HIGH_BANDW; 1129 cfhsi->cfdev.link_select = CAIF_LINK_HIGH_BANDW;
@@ -1149,43 +1131,7 @@ static void cfhsi_setup(struct net_device *dev)
1149 cfhsi->cfdev.use_stx = false; 1131 cfhsi->cfdev.use_stx = false;
1150 cfhsi->cfdev.use_fcs = false; 1132 cfhsi->cfdev.use_fcs = false;
1151 cfhsi->ndev = dev; 1133 cfhsi->ndev = dev;
1152} 1134 cfhsi->cfg = hsi_default_config;
1153
1154int cfhsi_probe(struct platform_device *pdev)
1155{
1156 struct cfhsi *cfhsi = NULL;
1157 struct net_device *ndev;
1158
1159 int res;
1160
1161 ndev = alloc_netdev(sizeof(struct cfhsi), "cfhsi%d", cfhsi_setup);
1162 if (!ndev)
1163 return -ENODEV;
1164
1165 cfhsi = netdev_priv(ndev);
1166 cfhsi->ndev = ndev;
1167 cfhsi->pdev = pdev;
1168
1169 /* Assign the HSI device. */
1170 cfhsi->dev = pdev->dev.platform_data;
1171
1172 /* Assign the driver to this HSI device. */
1173 cfhsi->dev->drv = &cfhsi->drv;
1174
1175 /* Register network device. */
1176 res = register_netdev(ndev);
1177 if (res) {
1178 dev_err(&ndev->dev, "%s: Registration error: %d.\n",
1179 __func__, res);
1180 free_netdev(ndev);
1181 return -ENODEV;
1182 }
1183 /* Add CAIF HSI device to list. */
1184 spin_lock(&cfhsi_list_lock);
1185 list_add_tail(&cfhsi->list, &cfhsi_list);
1186 spin_unlock(&cfhsi_list_lock);
1187
1188 return res;
1189} 1135}
1190 1136
1191static int cfhsi_open(struct net_device *ndev) 1137static int cfhsi_open(struct net_device *ndev)
@@ -1201,9 +1147,6 @@ static int cfhsi_open(struct net_device *ndev)
1201 1147
1202 /* Set flow info */ 1148 /* Set flow info */
1203 cfhsi->flow_off_sent = 0; 1149 cfhsi->flow_off_sent = 0;
1204 cfhsi->q_low_mark = LOW_WATER_MARK;
1205 cfhsi->q_high_mark = HIGH_WATER_MARK;
1206
1207 1150
1208 /* 1151 /*
1209 * Allocate a TX buffer with the size of a HSI packet descriptors 1152 * Allocate a TX buffer with the size of a HSI packet descriptors
@@ -1231,20 +1174,8 @@ static int cfhsi_open(struct net_device *ndev)
1231 goto err_alloc_rx_flip; 1174 goto err_alloc_rx_flip;
1232 } 1175 }
1233 1176
1234 /* Pre-calculate inactivity timeout. */
1235 if (inactivity_timeout != -1) {
1236 cfhsi->inactivity_timeout =
1237 inactivity_timeout * HZ / 1000;
1238 if (!cfhsi->inactivity_timeout)
1239 cfhsi->inactivity_timeout = 1;
1240 else if (cfhsi->inactivity_timeout > NEXT_TIMER_MAX_DELTA)
1241 cfhsi->inactivity_timeout = NEXT_TIMER_MAX_DELTA;
1242 } else {
1243 cfhsi->inactivity_timeout = NEXT_TIMER_MAX_DELTA;
1244 }
1245
1246 /* Initialize aggregation timeout */ 1177 /* Initialize aggregation timeout */
1247 cfhsi->aggregation_timeout = aggregation_timeout; 1178 cfhsi->cfg.aggregation_timeout = hsi_default_config.aggregation_timeout;
1248 1179
1249 /* Initialize recieve vaiables. */ 1180 /* Initialize recieve vaiables. */
1250 cfhsi->rx_ptr = cfhsi->rx_buf; 1181 cfhsi->rx_ptr = cfhsi->rx_buf;
@@ -1254,10 +1185,10 @@ static int cfhsi_open(struct net_device *ndev)
1254 spin_lock_init(&cfhsi->lock); 1185 spin_lock_init(&cfhsi->lock);
1255 1186
1256 /* Set up the driver. */ 1187 /* Set up the driver. */
1257 cfhsi->drv.tx_done_cb = cfhsi_tx_done_cb; 1188 cfhsi->cb_ops.tx_done_cb = cfhsi_tx_done_cb;
1258 cfhsi->drv.rx_done_cb = cfhsi_rx_done_cb; 1189 cfhsi->cb_ops.rx_done_cb = cfhsi_rx_done_cb;
1259 cfhsi->drv.wake_up_cb = cfhsi_wake_up_cb; 1190 cfhsi->cb_ops.wake_up_cb = cfhsi_wake_up_cb;
1260 cfhsi->drv.wake_down_cb = cfhsi_wake_down_cb; 1191 cfhsi->cb_ops.wake_down_cb = cfhsi_wake_down_cb;
1261 1192
1262 /* Initialize the work queues. */ 1193 /* Initialize the work queues. */
1263 INIT_WORK(&cfhsi->wake_up_work, cfhsi_wake_up); 1194 INIT_WORK(&cfhsi->wake_up_work, cfhsi_wake_up);
@@ -1271,9 +1202,9 @@ static int cfhsi_open(struct net_device *ndev)
1271 clear_bit(CFHSI_AWAKE, &cfhsi->bits); 1202 clear_bit(CFHSI_AWAKE, &cfhsi->bits);
1272 1203
1273 /* Create work thread. */ 1204 /* Create work thread. */
1274 cfhsi->wq = create_singlethread_workqueue(cfhsi->pdev->name); 1205 cfhsi->wq = create_singlethread_workqueue(cfhsi->ndev->name);
1275 if (!cfhsi->wq) { 1206 if (!cfhsi->wq) {
1276 dev_err(&cfhsi->ndev->dev, "%s: Failed to create work queue.\n", 1207 netdev_err(cfhsi->ndev, "%s: Failed to create work queue.\n",
1277 __func__); 1208 __func__);
1278 res = -ENODEV; 1209 res = -ENODEV;
1279 goto err_create_wq; 1210 goto err_create_wq;
@@ -1298,9 +1229,9 @@ static int cfhsi_open(struct net_device *ndev)
1298 cfhsi->aggregation_timer.function = cfhsi_aggregation_tout; 1229 cfhsi->aggregation_timer.function = cfhsi_aggregation_tout;
1299 1230
1300 /* Activate HSI interface. */ 1231 /* Activate HSI interface. */
1301 res = cfhsi->dev->cfhsi_up(cfhsi->dev); 1232 res = cfhsi->ops->cfhsi_up(cfhsi->ops);
1302 if (res) { 1233 if (res) {
1303 dev_err(&cfhsi->ndev->dev, 1234 netdev_err(cfhsi->ndev,
1304 "%s: can't activate HSI interface: %d.\n", 1235 "%s: can't activate HSI interface: %d.\n",
1305 __func__, res); 1236 __func__, res);
1306 goto err_activate; 1237 goto err_activate;
@@ -1309,14 +1240,14 @@ static int cfhsi_open(struct net_device *ndev)
1309 /* Flush FIFO */ 1240 /* Flush FIFO */
1310 res = cfhsi_flush_fifo(cfhsi); 1241 res = cfhsi_flush_fifo(cfhsi);
1311 if (res) { 1242 if (res) {
1312 dev_err(&cfhsi->ndev->dev, "%s: Can't flush FIFO: %d.\n", 1243 netdev_err(cfhsi->ndev, "%s: Can't flush FIFO: %d.\n",
1313 __func__, res); 1244 __func__, res);
1314 goto err_net_reg; 1245 goto err_net_reg;
1315 } 1246 }
1316 return res; 1247 return res;
1317 1248
1318 err_net_reg: 1249 err_net_reg:
1319 cfhsi->dev->cfhsi_down(cfhsi->dev); 1250 cfhsi->ops->cfhsi_down(cfhsi->ops);
1320 err_activate: 1251 err_activate:
1321 destroy_workqueue(cfhsi->wq); 1252 destroy_workqueue(cfhsi->wq);
1322 err_create_wq: 1253 err_create_wq:
@@ -1346,7 +1277,7 @@ static int cfhsi_close(struct net_device *ndev)
1346 del_timer_sync(&cfhsi->aggregation_timer); 1277 del_timer_sync(&cfhsi->aggregation_timer);
1347 1278
1348 /* Cancel pending RX request (if any) */ 1279 /* Cancel pending RX request (if any) */
1349 cfhsi->dev->cfhsi_rx_cancel(cfhsi->dev); 1280 cfhsi->ops->cfhsi_rx_cancel(cfhsi->ops);
1350 1281
1351 /* Destroy workqueue */ 1282 /* Destroy workqueue */
1352 destroy_workqueue(cfhsi->wq); 1283 destroy_workqueue(cfhsi->wq);
@@ -1359,7 +1290,7 @@ static int cfhsi_close(struct net_device *ndev)
1359 cfhsi_abort_tx(cfhsi); 1290 cfhsi_abort_tx(cfhsi);
1360 1291
1361 /* Deactivate interface */ 1292 /* Deactivate interface */
1362 cfhsi->dev->cfhsi_down(cfhsi->dev); 1293 cfhsi->ops->cfhsi_down(cfhsi->ops);
1363 1294
1364 /* Free buffers. */ 1295 /* Free buffers. */
1365 kfree(tx_buf); 1296 kfree(tx_buf);
@@ -1368,85 +1299,184 @@ static int cfhsi_close(struct net_device *ndev)
1368 return 0; 1299 return 0;
1369} 1300}
1370 1301
1371static const struct net_device_ops cfhsi_ops = { 1302static void cfhsi_uninit(struct net_device *dev)
1303{
1304 struct cfhsi *cfhsi = netdev_priv(dev);
1305 ASSERT_RTNL();
1306 symbol_put(cfhsi_get_device);
1307 list_del(&cfhsi->list);
1308}
1309
1310static const struct net_device_ops cfhsi_netdevops = {
1311 .ndo_uninit = cfhsi_uninit,
1372 .ndo_open = cfhsi_open, 1312 .ndo_open = cfhsi_open,
1373 .ndo_stop = cfhsi_close, 1313 .ndo_stop = cfhsi_close,
1374 .ndo_start_xmit = cfhsi_xmit 1314 .ndo_start_xmit = cfhsi_xmit
1375}; 1315};
1376 1316
1377int cfhsi_remove(struct platform_device *pdev) 1317static void cfhsi_netlink_parms(struct nlattr *data[], struct cfhsi *cfhsi)
1378{ 1318{
1379 struct list_head *list_node; 1319 int i;
1380 struct list_head *n;
1381 struct cfhsi *cfhsi = NULL;
1382 struct cfhsi_dev *dev;
1383 1320
1384 dev = (struct cfhsi_dev *)pdev->dev.platform_data; 1321 if (!data) {
1385 spin_lock(&cfhsi_list_lock); 1322 pr_debug("no params data found\n");
1386 list_for_each_safe(list_node, n, &cfhsi_list) { 1323 return;
1387 cfhsi = list_entry(list_node, struct cfhsi, list);
1388 /* Find the corresponding device. */
1389 if (cfhsi->dev == dev) {
1390 /* Remove from list. */
1391 list_del(list_node);
1392 spin_unlock(&cfhsi_list_lock);
1393 return 0;
1394 }
1395 } 1324 }
1396 spin_unlock(&cfhsi_list_lock); 1325
1397 return -ENODEV; 1326 i = __IFLA_CAIF_HSI_INACTIVITY_TOUT;
1327 /*
1328 * Inactivity timeout in millisecs. Lowest possible value is 1,
1329 * and highest possible is NEXT_TIMER_MAX_DELTA.
1330 */
1331 if (data[i]) {
1332 u32 inactivity_timeout = nla_get_u32(data[i]);
1333 /* Pre-calculate inactivity timeout. */
1334 cfhsi->cfg.inactivity_timeout = inactivity_timeout * HZ / 1000;
1335 if (cfhsi->cfg.inactivity_timeout == 0)
1336 cfhsi->cfg.inactivity_timeout = 1;
1337 else if (cfhsi->cfg.inactivity_timeout > NEXT_TIMER_MAX_DELTA)
1338 cfhsi->cfg.inactivity_timeout = NEXT_TIMER_MAX_DELTA;
1339 }
1340
1341 i = __IFLA_CAIF_HSI_AGGREGATION_TOUT;
1342 if (data[i])
1343 cfhsi->cfg.aggregation_timeout = nla_get_u32(data[i]);
1344
1345 i = __IFLA_CAIF_HSI_HEAD_ALIGN;
1346 if (data[i])
1347 cfhsi->cfg.head_align = nla_get_u32(data[i]);
1348
1349 i = __IFLA_CAIF_HSI_TAIL_ALIGN;
1350 if (data[i])
1351 cfhsi->cfg.tail_align = nla_get_u32(data[i]);
1352
1353 i = __IFLA_CAIF_HSI_QHIGH_WATERMARK;
1354 if (data[i])
1355 cfhsi->cfg.q_high_mark = nla_get_u32(data[i]);
1356
1357 i = __IFLA_CAIF_HSI_QLOW_WATERMARK;
1358 if (data[i])
1359 cfhsi->cfg.q_low_mark = nla_get_u32(data[i]);
1360}
1361
1362static int caif_hsi_changelink(struct net_device *dev, struct nlattr *tb[],
1363 struct nlattr *data[])
1364{
1365 cfhsi_netlink_parms(data, netdev_priv(dev));
1366 netdev_state_change(dev);
1367 return 0;
1398} 1368}
1399 1369
1400struct platform_driver cfhsi_plat_drv = { 1370static const struct nla_policy caif_hsi_policy[__IFLA_CAIF_HSI_MAX + 1] = {
1401 .probe = cfhsi_probe, 1371 [__IFLA_CAIF_HSI_INACTIVITY_TOUT] = { .type = NLA_U32, .len = 4 },
1402 .remove = cfhsi_remove, 1372 [__IFLA_CAIF_HSI_AGGREGATION_TOUT] = { .type = NLA_U32, .len = 4 },
1403 .driver = { 1373 [__IFLA_CAIF_HSI_HEAD_ALIGN] = { .type = NLA_U32, .len = 4 },
1404 .name = "cfhsi", 1374 [__IFLA_CAIF_HSI_TAIL_ALIGN] = { .type = NLA_U32, .len = 4 },
1405 .owner = THIS_MODULE, 1375 [__IFLA_CAIF_HSI_QHIGH_WATERMARK] = { .type = NLA_U32, .len = 4 },
1406 }, 1376 [__IFLA_CAIF_HSI_QLOW_WATERMARK] = { .type = NLA_U32, .len = 4 },
1407}; 1377};
1408 1378
1409static void __exit cfhsi_exit_module(void) 1379static size_t caif_hsi_get_size(const struct net_device *dev)
1380{
1381 int i;
1382 size_t s = 0;
1383 for (i = __IFLA_CAIF_HSI_UNSPEC + 1; i < __IFLA_CAIF_HSI_MAX; i++)
1384 s += nla_total_size(caif_hsi_policy[i].len);
1385 return s;
1386}
1387
1388static int caif_hsi_fill_info(struct sk_buff *skb, const struct net_device *dev)
1389{
1390 struct cfhsi *cfhsi = netdev_priv(dev);
1391
1392 if (nla_put_u32(skb, __IFLA_CAIF_HSI_INACTIVITY_TOUT,
1393 cfhsi->cfg.inactivity_timeout) ||
1394 nla_put_u32(skb, __IFLA_CAIF_HSI_AGGREGATION_TOUT,
1395 cfhsi->cfg.aggregation_timeout) ||
1396 nla_put_u32(skb, __IFLA_CAIF_HSI_HEAD_ALIGN,
1397 cfhsi->cfg.head_align) ||
1398 nla_put_u32(skb, __IFLA_CAIF_HSI_TAIL_ALIGN,
1399 cfhsi->cfg.tail_align) ||
1400 nla_put_u32(skb, __IFLA_CAIF_HSI_QHIGH_WATERMARK,
1401 cfhsi->cfg.q_high_mark) ||
1402 nla_put_u32(skb, __IFLA_CAIF_HSI_QLOW_WATERMARK,
1403 cfhsi->cfg.q_low_mark))
1404 return -EMSGSIZE;
1405
1406 return 0;
1407}
1408
1409static int caif_hsi_newlink(struct net *src_net, struct net_device *dev,
1410 struct nlattr *tb[], struct nlattr *data[])
1410{ 1411{
1411 struct list_head *list_node;
1412 struct list_head *n;
1413 struct cfhsi *cfhsi = NULL; 1412 struct cfhsi *cfhsi = NULL;
1413 struct cfhsi_ops *(*get_ops)(void);
1414 1414
1415 spin_lock(&cfhsi_list_lock); 1415 ASSERT_RTNL();
1416 list_for_each_safe(list_node, n, &cfhsi_list) {
1417 cfhsi = list_entry(list_node, struct cfhsi, list);
1418 1416
1419 /* Remove from list. */ 1417 cfhsi = netdev_priv(dev);
1420 list_del(list_node); 1418 cfhsi_netlink_parms(data, cfhsi);
1421 spin_unlock(&cfhsi_list_lock); 1419 dev_net_set(cfhsi->ndev, src_net);
1420
1421 get_ops = symbol_get(cfhsi_get_ops);
1422 if (!get_ops) {
1423 pr_err("%s: failed to get the cfhsi_ops\n", __func__);
1424 return -ENODEV;
1425 }
1422 1426
1423 unregister_netdevice(cfhsi->ndev); 1427 /* Assign the HSI device. */
1428 cfhsi->ops = (*get_ops)();
1429 if (!cfhsi->ops) {
1430 pr_err("%s: failed to get the cfhsi_ops\n", __func__);
1431 goto err;
1432 }
1424 1433
1425 spin_lock(&cfhsi_list_lock); 1434 /* Assign the driver to this HSI device. */
1435 cfhsi->ops->cb_ops = &cfhsi->cb_ops;
1436 if (register_netdevice(dev)) {
1437 pr_warn("%s: caif_hsi device registration failed\n", __func__);
1438 goto err;
1426 } 1439 }
1427 spin_unlock(&cfhsi_list_lock); 1440 /* Add CAIF HSI device to list. */
1441 list_add_tail(&cfhsi->list, &cfhsi_list);
1428 1442
1429 /* Unregister platform driver. */ 1443 return 0;
1430 platform_driver_unregister(&cfhsi_plat_drv); 1444err:
1445 symbol_put(cfhsi_get_ops);
1446 return -ENODEV;
1431} 1447}
1432 1448
1433static int __init cfhsi_init_module(void) 1449static struct rtnl_link_ops caif_hsi_link_ops __read_mostly = {
1450 .kind = "cfhsi",
1451 .priv_size = sizeof(struct cfhsi),
1452 .setup = cfhsi_setup,
1453 .maxtype = __IFLA_CAIF_HSI_MAX,
1454 .policy = caif_hsi_policy,
1455 .newlink = caif_hsi_newlink,
1456 .changelink = caif_hsi_changelink,
1457 .get_size = caif_hsi_get_size,
1458 .fill_info = caif_hsi_fill_info,
1459};
1460
1461static void __exit cfhsi_exit_module(void)
1434{ 1462{
1435 int result; 1463 struct list_head *list_node;
1464 struct list_head *n;
1465 struct cfhsi *cfhsi;
1436 1466
1437 /* Initialize spin lock. */ 1467 rtnl_link_unregister(&caif_hsi_link_ops);
1438 spin_lock_init(&cfhsi_list_lock);
1439 1468
1440 /* Register platform driver. */ 1469 rtnl_lock();
1441 result = platform_driver_register(&cfhsi_plat_drv); 1470 list_for_each_safe(list_node, n, &cfhsi_list) {
1442 if (result) { 1471 cfhsi = list_entry(list_node, struct cfhsi, list);
1443 printk(KERN_ERR "Could not register platform HSI driver: %d.\n", 1472 unregister_netdev(cfhsi->ndev);
1444 result);
1445 goto err_dev_register;
1446 } 1473 }
1474 rtnl_unlock();
1475}
1447 1476
1448 err_dev_register: 1477static int __init cfhsi_init_module(void)
1449 return result; 1478{
1479 return rtnl_link_register(&caif_hsi_link_ops);
1450} 1480}
1451 1481
1452module_init(cfhsi_init_module); 1482module_init(cfhsi_init_module);
diff --git a/drivers/net/can/bfin_can.c b/drivers/net/can/bfin_can.c
index 3f88473423e..ea3143895e6 100644
--- a/drivers/net/can/bfin_can.c
+++ b/drivers/net/can/bfin_can.c
@@ -597,7 +597,7 @@ static int __devinit bfin_can_probe(struct platform_device *pdev)
597 dev_info(&pdev->dev, 597 dev_info(&pdev->dev,
598 "%s device registered" 598 "%s device registered"
599 "(&reg_base=%p, rx_irq=%d, tx_irq=%d, err_irq=%d, sclk=%d)\n", 599 "(&reg_base=%p, rx_irq=%d, tx_irq=%d, err_irq=%d, sclk=%d)\n",
600 DRV_NAME, (void *)priv->membase, priv->rx_irq, 600 DRV_NAME, priv->membase, priv->rx_irq,
601 priv->tx_irq, priv->err_irq, priv->can.clock.freq); 601 priv->tx_irq, priv->err_irq, priv->can.clock.freq);
602 return 0; 602 return 0;
603 603
diff --git a/drivers/net/can/c_can/Kconfig b/drivers/net/can/c_can/Kconfig
index ffb9773d102..3b83bafcd94 100644
--- a/drivers/net/can/c_can/Kconfig
+++ b/drivers/net/can/c_can/Kconfig
@@ -1,15 +1,23 @@
1menuconfig CAN_C_CAN 1menuconfig CAN_C_CAN
2 tristate "Bosch C_CAN devices" 2 tristate "Bosch C_CAN/D_CAN devices"
3 depends on CAN_DEV && HAS_IOMEM 3 depends on CAN_DEV && HAS_IOMEM
4 4
5if CAN_C_CAN 5if CAN_C_CAN
6 6
7config CAN_C_CAN_PLATFORM 7config CAN_C_CAN_PLATFORM
8 tristate "Generic Platform Bus based C_CAN driver" 8 tristate "Generic Platform Bus based C_CAN/D_CAN driver"
9 ---help--- 9 ---help---
10 This driver adds support for the C_CAN chips connected to 10 This driver adds support for the C_CAN/D_CAN chips connected
11 the "platform bus" (Linux abstraction for directly to the 11 to the "platform bus" (Linux abstraction for directly to the
12 processor attached devices) which can be found on various 12 processor attached devices) which can be found on various
13 boards from ST Microelectronics (http://www.st.com) 13 boards from ST Microelectronics (http://www.st.com) like the
14 like the SPEAr1310 and SPEAr320 evaluation boards. 14 SPEAr1310 and SPEAr320 evaluation boards & TI (www.ti.com)
15 boards like am335x, dm814x, dm813x and dm811x.
16
17config CAN_C_CAN_PCI
18 tristate "Generic PCI Bus based C_CAN/D_CAN driver"
19 depends on PCI
20 ---help---
21 This driver adds support for the C_CAN/D_CAN chips connected
22 to the PCI bus.
15endif 23endif
diff --git a/drivers/net/can/c_can/Makefile b/drivers/net/can/c_can/Makefile
index 9273f6d5c4b..ad1cc842170 100644
--- a/drivers/net/can/c_can/Makefile
+++ b/drivers/net/can/c_can/Makefile
@@ -4,5 +4,6 @@
4 4
5obj-$(CONFIG_CAN_C_CAN) += c_can.o 5obj-$(CONFIG_CAN_C_CAN) += c_can.o
6obj-$(CONFIG_CAN_C_CAN_PLATFORM) += c_can_platform.o 6obj-$(CONFIG_CAN_C_CAN_PLATFORM) += c_can_platform.o
7obj-$(CONFIG_CAN_C_CAN_PCI) += c_can_pci.o
7 8
8ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG 9ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
index 86cd532c78f..eea660800a0 100644
--- a/drivers/net/can/c_can/c_can.c
+++ b/drivers/net/can/c_can/c_can.c
@@ -41,6 +41,10 @@
41 41
42#include "c_can.h" 42#include "c_can.h"
43 43
44/* Number of interface registers */
45#define IF_ENUM_REG_LEN 11
46#define C_CAN_IFACE(reg, iface) (C_CAN_IF1_##reg + (iface) * IF_ENUM_REG_LEN)
47
44/* control register */ 48/* control register */
45#define CONTROL_TEST BIT(7) 49#define CONTROL_TEST BIT(7)
46#define CONTROL_CCE BIT(6) 50#define CONTROL_CCE BIT(6)
@@ -209,10 +213,10 @@ static inline int get_tx_echo_msg_obj(const struct c_can_priv *priv)
209 C_CAN_MSG_OBJ_TX_FIRST; 213 C_CAN_MSG_OBJ_TX_FIRST;
210} 214}
211 215
212static u32 c_can_read_reg32(struct c_can_priv *priv, void *reg) 216static u32 c_can_read_reg32(struct c_can_priv *priv, enum reg index)
213{ 217{
214 u32 val = priv->read_reg(priv, reg); 218 u32 val = priv->read_reg(priv, index);
215 val |= ((u32) priv->read_reg(priv, reg + 2)) << 16; 219 val |= ((u32) priv->read_reg(priv, index + 1)) << 16;
216 return val; 220 return val;
217} 221}
218 222
@@ -220,14 +224,14 @@ static void c_can_enable_all_interrupts(struct c_can_priv *priv,
220 int enable) 224 int enable)
221{ 225{
222 unsigned int cntrl_save = priv->read_reg(priv, 226 unsigned int cntrl_save = priv->read_reg(priv,
223 &priv->regs->control); 227 C_CAN_CTRL_REG);
224 228
225 if (enable) 229 if (enable)
226 cntrl_save |= (CONTROL_SIE | CONTROL_EIE | CONTROL_IE); 230 cntrl_save |= (CONTROL_SIE | CONTROL_EIE | CONTROL_IE);
227 else 231 else
228 cntrl_save &= ~(CONTROL_EIE | CONTROL_IE | CONTROL_SIE); 232 cntrl_save &= ~(CONTROL_EIE | CONTROL_IE | CONTROL_SIE);
229 233
230 priv->write_reg(priv, &priv->regs->control, cntrl_save); 234 priv->write_reg(priv, C_CAN_CTRL_REG, cntrl_save);
231} 235}
232 236
233static inline int c_can_msg_obj_is_busy(struct c_can_priv *priv, int iface) 237static inline int c_can_msg_obj_is_busy(struct c_can_priv *priv, int iface)
@@ -235,7 +239,7 @@ static inline int c_can_msg_obj_is_busy(struct c_can_priv *priv, int iface)
235 int count = MIN_TIMEOUT_VALUE; 239 int count = MIN_TIMEOUT_VALUE;
236 240
237 while (count && priv->read_reg(priv, 241 while (count && priv->read_reg(priv,
238 &priv->regs->ifregs[iface].com_req) & 242 C_CAN_IFACE(COMREQ_REG, iface)) &
239 IF_COMR_BUSY) { 243 IF_COMR_BUSY) {
240 count--; 244 count--;
241 udelay(1); 245 udelay(1);
@@ -258,9 +262,9 @@ static inline void c_can_object_get(struct net_device *dev,
258 * register and message RAM must be complete in 6 CAN-CLK 262 * register and message RAM must be complete in 6 CAN-CLK
259 * period. 263 * period.
260 */ 264 */
261 priv->write_reg(priv, &priv->regs->ifregs[iface].com_mask, 265 priv->write_reg(priv, C_CAN_IFACE(COMMSK_REG, iface),
262 IFX_WRITE_LOW_16BIT(mask)); 266 IFX_WRITE_LOW_16BIT(mask));
263 priv->write_reg(priv, &priv->regs->ifregs[iface].com_req, 267 priv->write_reg(priv, C_CAN_IFACE(COMREQ_REG, iface),
264 IFX_WRITE_LOW_16BIT(objno)); 268 IFX_WRITE_LOW_16BIT(objno));
265 269
266 if (c_can_msg_obj_is_busy(priv, iface)) 270 if (c_can_msg_obj_is_busy(priv, iface))
@@ -278,9 +282,9 @@ static inline void c_can_object_put(struct net_device *dev,
278 * register and message RAM must be complete in 6 CAN-CLK 282 * register and message RAM must be complete in 6 CAN-CLK
279 * period. 283 * period.
280 */ 284 */
281 priv->write_reg(priv, &priv->regs->ifregs[iface].com_mask, 285 priv->write_reg(priv, C_CAN_IFACE(COMMSK_REG, iface),
282 (IF_COMM_WR | IFX_WRITE_LOW_16BIT(mask))); 286 (IF_COMM_WR | IFX_WRITE_LOW_16BIT(mask)));
283 priv->write_reg(priv, &priv->regs->ifregs[iface].com_req, 287 priv->write_reg(priv, C_CAN_IFACE(COMREQ_REG, iface),
284 IFX_WRITE_LOW_16BIT(objno)); 288 IFX_WRITE_LOW_16BIT(objno));
285 289
286 if (c_can_msg_obj_is_busy(priv, iface)) 290 if (c_can_msg_obj_is_busy(priv, iface))
@@ -306,18 +310,18 @@ static void c_can_write_msg_object(struct net_device *dev,
306 310
307 flags |= IF_ARB_MSGVAL; 311 flags |= IF_ARB_MSGVAL;
308 312
309 priv->write_reg(priv, &priv->regs->ifregs[iface].arb1, 313 priv->write_reg(priv, C_CAN_IFACE(ARB1_REG, iface),
310 IFX_WRITE_LOW_16BIT(id)); 314 IFX_WRITE_LOW_16BIT(id));
311 priv->write_reg(priv, &priv->regs->ifregs[iface].arb2, flags | 315 priv->write_reg(priv, C_CAN_IFACE(ARB2_REG, iface), flags |
312 IFX_WRITE_HIGH_16BIT(id)); 316 IFX_WRITE_HIGH_16BIT(id));
313 317
314 for (i = 0; i < frame->can_dlc; i += 2) { 318 for (i = 0; i < frame->can_dlc; i += 2) {
315 priv->write_reg(priv, &priv->regs->ifregs[iface].data[i / 2], 319 priv->write_reg(priv, C_CAN_IFACE(DATA1_REG, iface) + i / 2,
316 frame->data[i] | (frame->data[i + 1] << 8)); 320 frame->data[i] | (frame->data[i + 1] << 8));
317 } 321 }
318 322
319 /* enable interrupt for this message object */ 323 /* enable interrupt for this message object */
320 priv->write_reg(priv, &priv->regs->ifregs[iface].msg_cntrl, 324 priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface),
321 IF_MCONT_TXIE | IF_MCONT_TXRQST | IF_MCONT_EOB | 325 IF_MCONT_TXIE | IF_MCONT_TXRQST | IF_MCONT_EOB |
322 frame->can_dlc); 326 frame->can_dlc);
323 c_can_object_put(dev, iface, objno, IF_COMM_ALL); 327 c_can_object_put(dev, iface, objno, IF_COMM_ALL);
@@ -329,7 +333,7 @@ static inline void c_can_mark_rx_msg_obj(struct net_device *dev,
329{ 333{
330 struct c_can_priv *priv = netdev_priv(dev); 334 struct c_can_priv *priv = netdev_priv(dev);
331 335
332 priv->write_reg(priv, &priv->regs->ifregs[iface].msg_cntrl, 336 priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface),
333 ctrl_mask & ~(IF_MCONT_MSGLST | IF_MCONT_INTPND)); 337 ctrl_mask & ~(IF_MCONT_MSGLST | IF_MCONT_INTPND));
334 c_can_object_put(dev, iface, obj, IF_COMM_CONTROL); 338 c_can_object_put(dev, iface, obj, IF_COMM_CONTROL);
335 339
@@ -343,7 +347,7 @@ static inline void c_can_activate_all_lower_rx_msg_obj(struct net_device *dev,
343 struct c_can_priv *priv = netdev_priv(dev); 347 struct c_can_priv *priv = netdev_priv(dev);
344 348
345 for (i = C_CAN_MSG_OBJ_RX_FIRST; i <= C_CAN_MSG_RX_LOW_LAST; i++) { 349 for (i = C_CAN_MSG_OBJ_RX_FIRST; i <= C_CAN_MSG_RX_LOW_LAST; i++) {
346 priv->write_reg(priv, &priv->regs->ifregs[iface].msg_cntrl, 350 priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface),
347 ctrl_mask & ~(IF_MCONT_MSGLST | 351 ctrl_mask & ~(IF_MCONT_MSGLST |
348 IF_MCONT_INTPND | IF_MCONT_NEWDAT)); 352 IF_MCONT_INTPND | IF_MCONT_NEWDAT));
349 c_can_object_put(dev, iface, i, IF_COMM_CONTROL); 353 c_can_object_put(dev, iface, i, IF_COMM_CONTROL);
@@ -356,7 +360,7 @@ static inline void c_can_activate_rx_msg_obj(struct net_device *dev,
356{ 360{
357 struct c_can_priv *priv = netdev_priv(dev); 361 struct c_can_priv *priv = netdev_priv(dev);
358 362
359 priv->write_reg(priv, &priv->regs->ifregs[iface].msg_cntrl, 363 priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface),
360 ctrl_mask & ~(IF_MCONT_MSGLST | 364 ctrl_mask & ~(IF_MCONT_MSGLST |
361 IF_MCONT_INTPND | IF_MCONT_NEWDAT)); 365 IF_MCONT_INTPND | IF_MCONT_NEWDAT));
362 c_can_object_put(dev, iface, obj, IF_COMM_CONTROL); 366 c_can_object_put(dev, iface, obj, IF_COMM_CONTROL);
@@ -374,7 +378,7 @@ static void c_can_handle_lost_msg_obj(struct net_device *dev,
374 378
375 c_can_object_get(dev, iface, objno, IF_COMM_ALL & ~IF_COMM_TXRQST); 379 c_can_object_get(dev, iface, objno, IF_COMM_ALL & ~IF_COMM_TXRQST);
376 380
377 priv->write_reg(priv, &priv->regs->ifregs[iface].msg_cntrl, 381 priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface),
378 IF_MCONT_CLR_MSGLST); 382 IF_MCONT_CLR_MSGLST);
379 383
380 c_can_object_put(dev, 0, objno, IF_COMM_CONTROL); 384 c_can_object_put(dev, 0, objno, IF_COMM_CONTROL);
@@ -410,8 +414,8 @@ static int c_can_read_msg_object(struct net_device *dev, int iface, int ctrl)
410 414
411 frame->can_dlc = get_can_dlc(ctrl & 0x0F); 415 frame->can_dlc = get_can_dlc(ctrl & 0x0F);
412 416
413 flags = priv->read_reg(priv, &priv->regs->ifregs[iface].arb2); 417 flags = priv->read_reg(priv, C_CAN_IFACE(ARB2_REG, iface));
414 val = priv->read_reg(priv, &priv->regs->ifregs[iface].arb1) | 418 val = priv->read_reg(priv, C_CAN_IFACE(ARB1_REG, iface)) |
415 (flags << 16); 419 (flags << 16);
416 420
417 if (flags & IF_ARB_MSGXTD) 421 if (flags & IF_ARB_MSGXTD)
@@ -424,7 +428,7 @@ static int c_can_read_msg_object(struct net_device *dev, int iface, int ctrl)
424 else { 428 else {
425 for (i = 0; i < frame->can_dlc; i += 2) { 429 for (i = 0; i < frame->can_dlc; i += 2) {
426 data = priv->read_reg(priv, 430 data = priv->read_reg(priv,
427 &priv->regs->ifregs[iface].data[i / 2]); 431 C_CAN_IFACE(DATA1_REG, iface) + i / 2);
428 frame->data[i] = data; 432 frame->data[i] = data;
429 frame->data[i + 1] = data >> 8; 433 frame->data[i + 1] = data >> 8;
430 } 434 }
@@ -444,40 +448,40 @@ static void c_can_setup_receive_object(struct net_device *dev, int iface,
444{ 448{
445 struct c_can_priv *priv = netdev_priv(dev); 449 struct c_can_priv *priv = netdev_priv(dev);
446 450
447 priv->write_reg(priv, &priv->regs->ifregs[iface].mask1, 451 priv->write_reg(priv, C_CAN_IFACE(MASK1_REG, iface),
448 IFX_WRITE_LOW_16BIT(mask)); 452 IFX_WRITE_LOW_16BIT(mask));
449 priv->write_reg(priv, &priv->regs->ifregs[iface].mask2, 453 priv->write_reg(priv, C_CAN_IFACE(MASK2_REG, iface),
450 IFX_WRITE_HIGH_16BIT(mask)); 454 IFX_WRITE_HIGH_16BIT(mask));
451 455
452 priv->write_reg(priv, &priv->regs->ifregs[iface].arb1, 456 priv->write_reg(priv, C_CAN_IFACE(ARB1_REG, iface),
453 IFX_WRITE_LOW_16BIT(id)); 457 IFX_WRITE_LOW_16BIT(id));
454 priv->write_reg(priv, &priv->regs->ifregs[iface].arb2, 458 priv->write_reg(priv, C_CAN_IFACE(ARB2_REG, iface),
455 (IF_ARB_MSGVAL | IFX_WRITE_HIGH_16BIT(id))); 459 (IF_ARB_MSGVAL | IFX_WRITE_HIGH_16BIT(id)));
456 460
457 priv->write_reg(priv, &priv->regs->ifregs[iface].msg_cntrl, mcont); 461 priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), mcont);
458 c_can_object_put(dev, iface, objno, IF_COMM_ALL & ~IF_COMM_TXRQST); 462 c_can_object_put(dev, iface, objno, IF_COMM_ALL & ~IF_COMM_TXRQST);
459 463
460 netdev_dbg(dev, "obj no:%d, msgval:0x%08x\n", objno, 464 netdev_dbg(dev, "obj no:%d, msgval:0x%08x\n", objno,
461 c_can_read_reg32(priv, &priv->regs->msgval1)); 465 c_can_read_reg32(priv, C_CAN_MSGVAL1_REG));
462} 466}
463 467
464static void c_can_inval_msg_object(struct net_device *dev, int iface, int objno) 468static void c_can_inval_msg_object(struct net_device *dev, int iface, int objno)
465{ 469{
466 struct c_can_priv *priv = netdev_priv(dev); 470 struct c_can_priv *priv = netdev_priv(dev);
467 471
468 priv->write_reg(priv, &priv->regs->ifregs[iface].arb1, 0); 472 priv->write_reg(priv, C_CAN_IFACE(ARB1_REG, iface), 0);
469 priv->write_reg(priv, &priv->regs->ifregs[iface].arb2, 0); 473 priv->write_reg(priv, C_CAN_IFACE(ARB2_REG, iface), 0);
470 priv->write_reg(priv, &priv->regs->ifregs[iface].msg_cntrl, 0); 474 priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), 0);
471 475
472 c_can_object_put(dev, iface, objno, IF_COMM_ARB | IF_COMM_CONTROL); 476 c_can_object_put(dev, iface, objno, IF_COMM_ARB | IF_COMM_CONTROL);
473 477
474 netdev_dbg(dev, "obj no:%d, msgval:0x%08x\n", objno, 478 netdev_dbg(dev, "obj no:%d, msgval:0x%08x\n", objno,
475 c_can_read_reg32(priv, &priv->regs->msgval1)); 479 c_can_read_reg32(priv, C_CAN_MSGVAL1_REG));
476} 480}
477 481
478static inline int c_can_is_next_tx_obj_busy(struct c_can_priv *priv, int objno) 482static inline int c_can_is_next_tx_obj_busy(struct c_can_priv *priv, int objno)
479{ 483{
480 int val = c_can_read_reg32(priv, &priv->regs->txrqst1); 484 int val = c_can_read_reg32(priv, C_CAN_TXRQST1_REG);
481 485
482 /* 486 /*
483 * as transmission request register's bit n-1 corresponds to 487 * as transmission request register's bit n-1 corresponds to
@@ -540,12 +544,12 @@ static int c_can_set_bittiming(struct net_device *dev)
540 netdev_info(dev, 544 netdev_info(dev,
541 "setting BTR=%04x BRPE=%04x\n", reg_btr, reg_brpe); 545 "setting BTR=%04x BRPE=%04x\n", reg_btr, reg_brpe);
542 546
543 ctrl_save = priv->read_reg(priv, &priv->regs->control); 547 ctrl_save = priv->read_reg(priv, C_CAN_CTRL_REG);
544 priv->write_reg(priv, &priv->regs->control, 548 priv->write_reg(priv, C_CAN_CTRL_REG,
545 ctrl_save | CONTROL_CCE | CONTROL_INIT); 549 ctrl_save | CONTROL_CCE | CONTROL_INIT);
546 priv->write_reg(priv, &priv->regs->btr, reg_btr); 550 priv->write_reg(priv, C_CAN_BTR_REG, reg_btr);
547 priv->write_reg(priv, &priv->regs->brp_ext, reg_brpe); 551 priv->write_reg(priv, C_CAN_BRPEXT_REG, reg_brpe);
548 priv->write_reg(priv, &priv->regs->control, ctrl_save); 552 priv->write_reg(priv, C_CAN_CTRL_REG, ctrl_save);
549 553
550 return 0; 554 return 0;
551} 555}
@@ -587,36 +591,36 @@ static void c_can_chip_config(struct net_device *dev)
587 struct c_can_priv *priv = netdev_priv(dev); 591 struct c_can_priv *priv = netdev_priv(dev);
588 592
589 /* enable automatic retransmission */ 593 /* enable automatic retransmission */
590 priv->write_reg(priv, &priv->regs->control, 594 priv->write_reg(priv, C_CAN_CTRL_REG,
591 CONTROL_ENABLE_AR); 595 CONTROL_ENABLE_AR);
592 596
593 if ((priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) && 597 if ((priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) &&
594 (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)) { 598 (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)) {
595 /* loopback + silent mode : useful for hot self-test */ 599 /* loopback + silent mode : useful for hot self-test */
596 priv->write_reg(priv, &priv->regs->control, CONTROL_EIE | 600 priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_EIE |
597 CONTROL_SIE | CONTROL_IE | CONTROL_TEST); 601 CONTROL_SIE | CONTROL_IE | CONTROL_TEST);
598 priv->write_reg(priv, &priv->regs->test, 602 priv->write_reg(priv, C_CAN_TEST_REG,
599 TEST_LBACK | TEST_SILENT); 603 TEST_LBACK | TEST_SILENT);
600 } else if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) { 604 } else if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) {
601 /* loopback mode : useful for self-test function */ 605 /* loopback mode : useful for self-test function */
602 priv->write_reg(priv, &priv->regs->control, CONTROL_EIE | 606 priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_EIE |
603 CONTROL_SIE | CONTROL_IE | CONTROL_TEST); 607 CONTROL_SIE | CONTROL_IE | CONTROL_TEST);
604 priv->write_reg(priv, &priv->regs->test, TEST_LBACK); 608 priv->write_reg(priv, C_CAN_TEST_REG, TEST_LBACK);
605 } else if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) { 609 } else if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) {
606 /* silent mode : bus-monitoring mode */ 610 /* silent mode : bus-monitoring mode */
607 priv->write_reg(priv, &priv->regs->control, CONTROL_EIE | 611 priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_EIE |
608 CONTROL_SIE | CONTROL_IE | CONTROL_TEST); 612 CONTROL_SIE | CONTROL_IE | CONTROL_TEST);
609 priv->write_reg(priv, &priv->regs->test, TEST_SILENT); 613 priv->write_reg(priv, C_CAN_TEST_REG, TEST_SILENT);
610 } else 614 } else
611 /* normal mode*/ 615 /* normal mode*/
612 priv->write_reg(priv, &priv->regs->control, 616 priv->write_reg(priv, C_CAN_CTRL_REG,
613 CONTROL_EIE | CONTROL_SIE | CONTROL_IE); 617 CONTROL_EIE | CONTROL_SIE | CONTROL_IE);
614 618
615 /* configure message objects */ 619 /* configure message objects */
616 c_can_configure_msg_objects(dev); 620 c_can_configure_msg_objects(dev);
617 621
618 /* set a `lec` value so that we can check for updates later */ 622 /* set a `lec` value so that we can check for updates later */
619 priv->write_reg(priv, &priv->regs->status, LEC_UNUSED); 623 priv->write_reg(priv, C_CAN_STS_REG, LEC_UNUSED);
620 624
621 /* set bittiming params */ 625 /* set bittiming params */
622 c_can_set_bittiming(dev); 626 c_can_set_bittiming(dev);
@@ -669,7 +673,7 @@ static int c_can_get_berr_counter(const struct net_device *dev,
669 unsigned int reg_err_counter; 673 unsigned int reg_err_counter;
670 struct c_can_priv *priv = netdev_priv(dev); 674 struct c_can_priv *priv = netdev_priv(dev);
671 675
672 reg_err_counter = priv->read_reg(priv, &priv->regs->err_cnt); 676 reg_err_counter = priv->read_reg(priv, C_CAN_ERR_CNT_REG);
673 bec->rxerr = (reg_err_counter & ERR_CNT_REC_MASK) >> 677 bec->rxerr = (reg_err_counter & ERR_CNT_REC_MASK) >>
674 ERR_CNT_REC_SHIFT; 678 ERR_CNT_REC_SHIFT;
675 bec->txerr = reg_err_counter & ERR_CNT_TEC_MASK; 679 bec->txerr = reg_err_counter & ERR_CNT_TEC_MASK;
@@ -697,12 +701,12 @@ static void c_can_do_tx(struct net_device *dev)
697 701
698 for (/* nix */; (priv->tx_next - priv->tx_echo) > 0; priv->tx_echo++) { 702 for (/* nix */; (priv->tx_next - priv->tx_echo) > 0; priv->tx_echo++) {
699 msg_obj_no = get_tx_echo_msg_obj(priv); 703 msg_obj_no = get_tx_echo_msg_obj(priv);
700 val = c_can_read_reg32(priv, &priv->regs->txrqst1); 704 val = c_can_read_reg32(priv, C_CAN_TXRQST1_REG);
701 if (!(val & (1 << (msg_obj_no - 1)))) { 705 if (!(val & (1 << (msg_obj_no - 1)))) {
702 can_get_echo_skb(dev, 706 can_get_echo_skb(dev,
703 msg_obj_no - C_CAN_MSG_OBJ_TX_FIRST); 707 msg_obj_no - C_CAN_MSG_OBJ_TX_FIRST);
704 stats->tx_bytes += priv->read_reg(priv, 708 stats->tx_bytes += priv->read_reg(priv,
705 &priv->regs->ifregs[0].msg_cntrl) 709 C_CAN_IFACE(MSGCTRL_REG, 0))
706 & IF_MCONT_DLC_MASK; 710 & IF_MCONT_DLC_MASK;
707 stats->tx_packets++; 711 stats->tx_packets++;
708 c_can_inval_msg_object(dev, 0, msg_obj_no); 712 c_can_inval_msg_object(dev, 0, msg_obj_no);
@@ -744,11 +748,11 @@ static int c_can_do_rx_poll(struct net_device *dev, int quota)
744 u32 num_rx_pkts = 0; 748 u32 num_rx_pkts = 0;
745 unsigned int msg_obj, msg_ctrl_save; 749 unsigned int msg_obj, msg_ctrl_save;
746 struct c_can_priv *priv = netdev_priv(dev); 750 struct c_can_priv *priv = netdev_priv(dev);
747 u32 val = c_can_read_reg32(priv, &priv->regs->intpnd1); 751 u32 val = c_can_read_reg32(priv, C_CAN_INTPND1_REG);
748 752
749 for (msg_obj = C_CAN_MSG_OBJ_RX_FIRST; 753 for (msg_obj = C_CAN_MSG_OBJ_RX_FIRST;
750 msg_obj <= C_CAN_MSG_OBJ_RX_LAST && quota > 0; 754 msg_obj <= C_CAN_MSG_OBJ_RX_LAST && quota > 0;
751 val = c_can_read_reg32(priv, &priv->regs->intpnd1), 755 val = c_can_read_reg32(priv, C_CAN_INTPND1_REG),
752 msg_obj++) { 756 msg_obj++) {
753 /* 757 /*
754 * as interrupt pending register's bit n-1 corresponds to 758 * as interrupt pending register's bit n-1 corresponds to
@@ -758,7 +762,7 @@ static int c_can_do_rx_poll(struct net_device *dev, int quota)
758 c_can_object_get(dev, 0, msg_obj, IF_COMM_ALL & 762 c_can_object_get(dev, 0, msg_obj, IF_COMM_ALL &
759 ~IF_COMM_TXRQST); 763 ~IF_COMM_TXRQST);
760 msg_ctrl_save = priv->read_reg(priv, 764 msg_ctrl_save = priv->read_reg(priv,
761 &priv->regs->ifregs[0].msg_cntrl); 765 C_CAN_IFACE(MSGCTRL_REG, 0));
762 766
763 if (msg_ctrl_save & IF_MCONT_EOB) 767 if (msg_ctrl_save & IF_MCONT_EOB)
764 return num_rx_pkts; 768 return num_rx_pkts;
@@ -819,7 +823,7 @@ static int c_can_handle_state_change(struct net_device *dev,
819 return 0; 823 return 0;
820 824
821 c_can_get_berr_counter(dev, &bec); 825 c_can_get_berr_counter(dev, &bec);
822 reg_err_counter = priv->read_reg(priv, &priv->regs->err_cnt); 826 reg_err_counter = priv->read_reg(priv, C_CAN_ERR_CNT_REG);
823 rx_err_passive = (reg_err_counter & ERR_CNT_RP_MASK) >> 827 rx_err_passive = (reg_err_counter & ERR_CNT_RP_MASK) >>
824 ERR_CNT_RP_SHIFT; 828 ERR_CNT_RP_SHIFT;
825 829
@@ -935,7 +939,7 @@ static int c_can_handle_bus_err(struct net_device *dev,
935 } 939 }
936 940
937 /* set a `lec` value so that we can check for updates later */ 941 /* set a `lec` value so that we can check for updates later */
938 priv->write_reg(priv, &priv->regs->status, LEC_UNUSED); 942 priv->write_reg(priv, C_CAN_STS_REG, LEC_UNUSED);
939 943
940 netif_receive_skb(skb); 944 netif_receive_skb(skb);
941 stats->rx_packets++; 945 stats->rx_packets++;
@@ -959,15 +963,15 @@ static int c_can_poll(struct napi_struct *napi, int quota)
959 /* status events have the highest priority */ 963 /* status events have the highest priority */
960 if (irqstatus == STATUS_INTERRUPT) { 964 if (irqstatus == STATUS_INTERRUPT) {
961 priv->current_status = priv->read_reg(priv, 965 priv->current_status = priv->read_reg(priv,
962 &priv->regs->status); 966 C_CAN_STS_REG);
963 967
964 /* handle Tx/Rx events */ 968 /* handle Tx/Rx events */
965 if (priv->current_status & STATUS_TXOK) 969 if (priv->current_status & STATUS_TXOK)
966 priv->write_reg(priv, &priv->regs->status, 970 priv->write_reg(priv, C_CAN_STS_REG,
967 priv->current_status & ~STATUS_TXOK); 971 priv->current_status & ~STATUS_TXOK);
968 972
969 if (priv->current_status & STATUS_RXOK) 973 if (priv->current_status & STATUS_RXOK)
970 priv->write_reg(priv, &priv->regs->status, 974 priv->write_reg(priv, C_CAN_STS_REG,
971 priv->current_status & ~STATUS_RXOK); 975 priv->current_status & ~STATUS_RXOK);
972 976
973 /* handle state changes */ 977 /* handle state changes */
@@ -1033,7 +1037,7 @@ static irqreturn_t c_can_isr(int irq, void *dev_id)
1033 struct net_device *dev = (struct net_device *)dev_id; 1037 struct net_device *dev = (struct net_device *)dev_id;
1034 struct c_can_priv *priv = netdev_priv(dev); 1038 struct c_can_priv *priv = netdev_priv(dev);
1035 1039
1036 priv->irqstatus = priv->read_reg(priv, &priv->regs->interrupt); 1040 priv->irqstatus = priv->read_reg(priv, C_CAN_INT_REG);
1037 if (!priv->irqstatus) 1041 if (!priv->irqstatus)
1038 return IRQ_NONE; 1042 return IRQ_NONE;
1039 1043
diff --git a/drivers/net/can/c_can/c_can.h b/drivers/net/can/c_can/c_can.h
index 5f32d34af50..01a7049ab99 100644
--- a/drivers/net/can/c_can/c_can.h
+++ b/drivers/net/can/c_can/c_can.h
@@ -22,43 +22,129 @@
22#ifndef C_CAN_H 22#ifndef C_CAN_H
23#define C_CAN_H 23#define C_CAN_H
24 24
25/* c_can IF registers */ 25enum reg {
26struct c_can_if_regs { 26 C_CAN_CTRL_REG = 0,
27 u16 com_req; 27 C_CAN_STS_REG,
28 u16 com_mask; 28 C_CAN_ERR_CNT_REG,
29 u16 mask1; 29 C_CAN_BTR_REG,
30 u16 mask2; 30 C_CAN_INT_REG,
31 u16 arb1; 31 C_CAN_TEST_REG,
32 u16 arb2; 32 C_CAN_BRPEXT_REG,
33 u16 msg_cntrl; 33 C_CAN_IF1_COMREQ_REG,
34 u16 data[4]; 34 C_CAN_IF1_COMMSK_REG,
35 u16 _reserved[13]; 35 C_CAN_IF1_MASK1_REG,
36 C_CAN_IF1_MASK2_REG,
37 C_CAN_IF1_ARB1_REG,
38 C_CAN_IF1_ARB2_REG,
39 C_CAN_IF1_MSGCTRL_REG,
40 C_CAN_IF1_DATA1_REG,
41 C_CAN_IF1_DATA2_REG,
42 C_CAN_IF1_DATA3_REG,
43 C_CAN_IF1_DATA4_REG,
44 C_CAN_IF2_COMREQ_REG,
45 C_CAN_IF2_COMMSK_REG,
46 C_CAN_IF2_MASK1_REG,
47 C_CAN_IF2_MASK2_REG,
48 C_CAN_IF2_ARB1_REG,
49 C_CAN_IF2_ARB2_REG,
50 C_CAN_IF2_MSGCTRL_REG,
51 C_CAN_IF2_DATA1_REG,
52 C_CAN_IF2_DATA2_REG,
53 C_CAN_IF2_DATA3_REG,
54 C_CAN_IF2_DATA4_REG,
55 C_CAN_TXRQST1_REG,
56 C_CAN_TXRQST2_REG,
57 C_CAN_NEWDAT1_REG,
58 C_CAN_NEWDAT2_REG,
59 C_CAN_INTPND1_REG,
60 C_CAN_INTPND2_REG,
61 C_CAN_MSGVAL1_REG,
62 C_CAN_MSGVAL2_REG,
36}; 63};
37 64
38/* c_can hardware registers */ 65static const u16 reg_map_c_can[] = {
39struct c_can_regs { 66 [C_CAN_CTRL_REG] = 0x00,
40 u16 control; 67 [C_CAN_STS_REG] = 0x02,
41 u16 status; 68 [C_CAN_ERR_CNT_REG] = 0x04,
42 u16 err_cnt; 69 [C_CAN_BTR_REG] = 0x06,
43 u16 btr; 70 [C_CAN_INT_REG] = 0x08,
44 u16 interrupt; 71 [C_CAN_TEST_REG] = 0x0A,
45 u16 test; 72 [C_CAN_BRPEXT_REG] = 0x0C,
46 u16 brp_ext; 73 [C_CAN_IF1_COMREQ_REG] = 0x10,
47 u16 _reserved1; 74 [C_CAN_IF1_COMMSK_REG] = 0x12,
48 struct c_can_if_regs ifregs[2]; /* [0] = IF1 and [1] = IF2 */ 75 [C_CAN_IF1_MASK1_REG] = 0x14,
49 u16 _reserved2[8]; 76 [C_CAN_IF1_MASK2_REG] = 0x16,
50 u16 txrqst1; 77 [C_CAN_IF1_ARB1_REG] = 0x18,
51 u16 txrqst2; 78 [C_CAN_IF1_ARB2_REG] = 0x1A,
52 u16 _reserved3[6]; 79 [C_CAN_IF1_MSGCTRL_REG] = 0x1C,
53 u16 newdat1; 80 [C_CAN_IF1_DATA1_REG] = 0x1E,
54 u16 newdat2; 81 [C_CAN_IF1_DATA2_REG] = 0x20,
55 u16 _reserved4[6]; 82 [C_CAN_IF1_DATA3_REG] = 0x22,
56 u16 intpnd1; 83 [C_CAN_IF1_DATA4_REG] = 0x24,
57 u16 intpnd2; 84 [C_CAN_IF2_COMREQ_REG] = 0x40,
58 u16 _reserved5[6]; 85 [C_CAN_IF2_COMMSK_REG] = 0x42,
59 u16 msgval1; 86 [C_CAN_IF2_MASK1_REG] = 0x44,
60 u16 msgval2; 87 [C_CAN_IF2_MASK2_REG] = 0x46,
61 u16 _reserved6[6]; 88 [C_CAN_IF2_ARB1_REG] = 0x48,
89 [C_CAN_IF2_ARB2_REG] = 0x4A,
90 [C_CAN_IF2_MSGCTRL_REG] = 0x4C,
91 [C_CAN_IF2_DATA1_REG] = 0x4E,
92 [C_CAN_IF2_DATA2_REG] = 0x50,
93 [C_CAN_IF2_DATA3_REG] = 0x52,
94 [C_CAN_IF2_DATA4_REG] = 0x54,
95 [C_CAN_TXRQST1_REG] = 0x80,
96 [C_CAN_TXRQST2_REG] = 0x82,
97 [C_CAN_NEWDAT1_REG] = 0x90,
98 [C_CAN_NEWDAT2_REG] = 0x92,
99 [C_CAN_INTPND1_REG] = 0xA0,
100 [C_CAN_INTPND2_REG] = 0xA2,
101 [C_CAN_MSGVAL1_REG] = 0xB0,
102 [C_CAN_MSGVAL2_REG] = 0xB2,
103};
104
105static const u16 reg_map_d_can[] = {
106 [C_CAN_CTRL_REG] = 0x00,
107 [C_CAN_STS_REG] = 0x04,
108 [C_CAN_ERR_CNT_REG] = 0x08,
109 [C_CAN_BTR_REG] = 0x0C,
110 [C_CAN_BRPEXT_REG] = 0x0E,
111 [C_CAN_INT_REG] = 0x10,
112 [C_CAN_TEST_REG] = 0x14,
113 [C_CAN_TXRQST1_REG] = 0x88,
114 [C_CAN_TXRQST2_REG] = 0x8A,
115 [C_CAN_NEWDAT1_REG] = 0x9C,
116 [C_CAN_NEWDAT2_REG] = 0x9E,
117 [C_CAN_INTPND1_REG] = 0xB0,
118 [C_CAN_INTPND2_REG] = 0xB2,
119 [C_CAN_MSGVAL1_REG] = 0xC4,
120 [C_CAN_MSGVAL2_REG] = 0xC6,
121 [C_CAN_IF1_COMREQ_REG] = 0x100,
122 [C_CAN_IF1_COMMSK_REG] = 0x102,
123 [C_CAN_IF1_MASK1_REG] = 0x104,
124 [C_CAN_IF1_MASK2_REG] = 0x106,
125 [C_CAN_IF1_ARB1_REG] = 0x108,
126 [C_CAN_IF1_ARB2_REG] = 0x10A,
127 [C_CAN_IF1_MSGCTRL_REG] = 0x10C,
128 [C_CAN_IF1_DATA1_REG] = 0x110,
129 [C_CAN_IF1_DATA2_REG] = 0x112,
130 [C_CAN_IF1_DATA3_REG] = 0x114,
131 [C_CAN_IF1_DATA4_REG] = 0x116,
132 [C_CAN_IF2_COMREQ_REG] = 0x120,
133 [C_CAN_IF2_COMMSK_REG] = 0x122,
134 [C_CAN_IF2_MASK1_REG] = 0x124,
135 [C_CAN_IF2_MASK2_REG] = 0x126,
136 [C_CAN_IF2_ARB1_REG] = 0x128,
137 [C_CAN_IF2_ARB2_REG] = 0x12A,
138 [C_CAN_IF2_MSGCTRL_REG] = 0x12C,
139 [C_CAN_IF2_DATA1_REG] = 0x130,
140 [C_CAN_IF2_DATA2_REG] = 0x132,
141 [C_CAN_IF2_DATA3_REG] = 0x134,
142 [C_CAN_IF2_DATA4_REG] = 0x136,
143};
144
145enum c_can_dev_id {
146 C_CAN_DEVTYPE,
147 D_CAN_DEVTYPE,
62}; 148};
63 149
64/* c_can private data structure */ 150/* c_can private data structure */
@@ -69,9 +155,10 @@ struct c_can_priv {
69 int tx_object; 155 int tx_object;
70 int current_status; 156 int current_status;
71 int last_status; 157 int last_status;
72 u16 (*read_reg) (struct c_can_priv *priv, void *reg); 158 u16 (*read_reg) (struct c_can_priv *priv, enum reg index);
73 void (*write_reg) (struct c_can_priv *priv, void *reg, u16 val); 159 void (*write_reg) (struct c_can_priv *priv, enum reg index, u16 val);
74 struct c_can_regs __iomem *regs; 160 void __iomem *base;
161 const u16 *regs;
75 unsigned long irq_flags; /* for request_irq() */ 162 unsigned long irq_flags; /* for request_irq() */
76 unsigned int tx_next; 163 unsigned int tx_next;
77 unsigned int tx_echo; 164 unsigned int tx_echo;
diff --git a/drivers/net/can/c_can/c_can_pci.c b/drivers/net/can/c_can/c_can_pci.c
new file mode 100644
index 00000000000..1011146ea51
--- /dev/null
+++ b/drivers/net/can/c_can/c_can_pci.c
@@ -0,0 +1,221 @@
1/*
2 * PCI bus driver for Bosch C_CAN/D_CAN controller
3 *
4 * Copyright (C) 2012 Federico Vaga <federico.vaga@gmail.com>
5 *
6 * Borrowed from c_can_platform.c
7 *
8 * This file is licensed under the terms of the GNU General Public
9 * License version 2. This program is licensed "as is" without any
10 * warranty of any kind, whether express or implied.
11 */
12
13#include <linux/kernel.h>
14#include <linux/module.h>
15#include <linux/netdevice.h>
16#include <linux/pci.h>
17
18#include <linux/can/dev.h>
19
20#include "c_can.h"
21
22enum c_can_pci_reg_align {
23 C_CAN_REG_ALIGN_16,
24 C_CAN_REG_ALIGN_32,
25};
26
27struct c_can_pci_data {
28 /* Specify if is C_CAN or D_CAN */
29 enum c_can_dev_id type;
30 /* Set the register alignment in the memory */
31 enum c_can_pci_reg_align reg_align;
32 /* Set the frequency */
33 unsigned int freq;
34};
35
36/*
37 * 16-bit c_can registers can be arranged differently in the memory
38 * architecture of different implementations. For example: 16-bit
39 * registers can be aligned to a 16-bit boundary or 32-bit boundary etc.
40 * Handle the same by providing a common read/write interface.
41 */
42static u16 c_can_pci_read_reg_aligned_to_16bit(struct c_can_priv *priv,
43 enum reg index)
44{
45 return readw(priv->base + priv->regs[index]);
46}
47
48static void c_can_pci_write_reg_aligned_to_16bit(struct c_can_priv *priv,
49 enum reg index, u16 val)
50{
51 writew(val, priv->base + priv->regs[index]);
52}
53
54static u16 c_can_pci_read_reg_aligned_to_32bit(struct c_can_priv *priv,
55 enum reg index)
56{
57 return readw(priv->base + 2 * priv->regs[index]);
58}
59
60static void c_can_pci_write_reg_aligned_to_32bit(struct c_can_priv *priv,
61 enum reg index, u16 val)
62{
63 writew(val, priv->base + 2 * priv->regs[index]);
64}
65
66static int __devinit c_can_pci_probe(struct pci_dev *pdev,
67 const struct pci_device_id *ent)
68{
69 struct c_can_pci_data *c_can_pci_data = (void *)ent->driver_data;
70 struct c_can_priv *priv;
71 struct net_device *dev;
72 void __iomem *addr;
73 int ret;
74
75 ret = pci_enable_device(pdev);
76 if (ret) {
77 dev_err(&pdev->dev, "pci_enable_device FAILED\n");
78 goto out;
79 }
80
81 ret = pci_request_regions(pdev, KBUILD_MODNAME);
82 if (ret) {
83 dev_err(&pdev->dev, "pci_request_regions FAILED\n");
84 goto out_disable_device;
85 }
86
87 pci_set_master(pdev);
88 pci_enable_msi(pdev);
89
90 addr = pci_iomap(pdev, 0, pci_resource_len(pdev, 0));
91 if (!addr) {
92 dev_err(&pdev->dev,
93 "device has no PCI memory resources, "
94 "failing adapter\n");
95 ret = -ENOMEM;
96 goto out_release_regions;
97 }
98
99 /* allocate the c_can device */
100 dev = alloc_c_can_dev();
101 if (!dev) {
102 ret = -ENOMEM;
103 goto out_iounmap;
104 }
105
106 priv = netdev_priv(dev);
107 pci_set_drvdata(pdev, dev);
108 SET_NETDEV_DEV(dev, &pdev->dev);
109
110 dev->irq = pdev->irq;
111 priv->base = addr;
112
113 if (!c_can_pci_data->freq) {
114 dev_err(&pdev->dev, "no clock frequency defined\n");
115 ret = -ENODEV;
116 goto out_free_c_can;
117 } else {
118 priv->can.clock.freq = c_can_pci_data->freq;
119 }
120
121 /* Configure CAN type */
122 switch (c_can_pci_data->type) {
123 case C_CAN_DEVTYPE:
124 priv->regs = reg_map_c_can;
125 break;
126 case D_CAN_DEVTYPE:
127 priv->regs = reg_map_d_can;
128 priv->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES;
129 break;
130 default:
131 ret = -EINVAL;
132 goto out_free_c_can;
133 }
134
135 /* Configure access to registers */
136 switch (c_can_pci_data->reg_align) {
137 case C_CAN_REG_ALIGN_32:
138 priv->read_reg = c_can_pci_read_reg_aligned_to_32bit;
139 priv->write_reg = c_can_pci_write_reg_aligned_to_32bit;
140 break;
141 case C_CAN_REG_ALIGN_16:
142 priv->read_reg = c_can_pci_read_reg_aligned_to_16bit;
143 priv->write_reg = c_can_pci_write_reg_aligned_to_16bit;
144 break;
145 default:
146 ret = -EINVAL;
147 goto out_free_c_can;
148 }
149
150 ret = register_c_can_dev(dev);
151 if (ret) {
152 dev_err(&pdev->dev, "registering %s failed (err=%d)\n",
153 KBUILD_MODNAME, ret);
154 goto out_free_c_can;
155 }
156
157 dev_dbg(&pdev->dev, "%s device registered (regs=%p, irq=%d)\n",
158 KBUILD_MODNAME, priv->regs, dev->irq);
159
160 return 0;
161
162out_free_c_can:
163 pci_set_drvdata(pdev, NULL);
164 free_c_can_dev(dev);
165out_iounmap:
166 pci_iounmap(pdev, addr);
167out_release_regions:
168 pci_disable_msi(pdev);
169 pci_clear_master(pdev);
170 pci_release_regions(pdev);
171out_disable_device:
172 pci_disable_device(pdev);
173out:
174 return ret;
175}
176
177static void __devexit c_can_pci_remove(struct pci_dev *pdev)
178{
179 struct net_device *dev = pci_get_drvdata(pdev);
180 struct c_can_priv *priv = netdev_priv(dev);
181
182 unregister_c_can_dev(dev);
183
184 pci_set_drvdata(pdev, NULL);
185 free_c_can_dev(dev);
186
187 pci_iounmap(pdev, priv->base);
188 pci_disable_msi(pdev);
189 pci_clear_master(pdev);
190 pci_release_regions(pdev);
191 pci_disable_device(pdev);
192}
193
194static struct c_can_pci_data c_can_sta2x11= {
195 .type = C_CAN_DEVTYPE,
196 .reg_align = C_CAN_REG_ALIGN_32,
197 .freq = 52000000, /* 52 Mhz */
198};
199
200#define C_CAN_ID(_vend, _dev, _driverdata) { \
201 PCI_DEVICE(_vend, _dev), \
202 .driver_data = (unsigned long)&_driverdata, \
203}
204static DEFINE_PCI_DEVICE_TABLE(c_can_pci_tbl) = {
205 C_CAN_ID(PCI_VENDOR_ID_STMICRO, PCI_DEVICE_ID_STMICRO_CAN,
206 c_can_sta2x11),
207 {},
208};
209static struct pci_driver c_can_pci_driver = {
210 .name = KBUILD_MODNAME,
211 .id_table = c_can_pci_tbl,
212 .probe = c_can_pci_probe,
213 .remove = __devexit_p(c_can_pci_remove),
214};
215
216module_pci_driver(c_can_pci_driver);
217
218MODULE_AUTHOR("Federico Vaga <federico.vaga@gmail.com>");
219MODULE_LICENSE("GPL v2");
220MODULE_DESCRIPTION("PCI CAN bus driver for Bosch C_CAN/D_CAN controller");
221MODULE_DEVICE_TABLE(pci, c_can_pci_tbl);
diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c
index 5e1a5ff6476..f0921d16f0a 100644
--- a/drivers/net/can/c_can/c_can_platform.c
+++ b/drivers/net/can/c_can/c_can_platform.c
@@ -42,27 +42,27 @@
42 * Handle the same by providing a common read/write interface. 42 * Handle the same by providing a common read/write interface.
43 */ 43 */
44static u16 c_can_plat_read_reg_aligned_to_16bit(struct c_can_priv *priv, 44static u16 c_can_plat_read_reg_aligned_to_16bit(struct c_can_priv *priv,
45 void *reg) 45 enum reg index)
46{ 46{
47 return readw(reg); 47 return readw(priv->base + priv->regs[index]);
48} 48}
49 49
50static void c_can_plat_write_reg_aligned_to_16bit(struct c_can_priv *priv, 50static void c_can_plat_write_reg_aligned_to_16bit(struct c_can_priv *priv,
51 void *reg, u16 val) 51 enum reg index, u16 val)
52{ 52{
53 writew(val, reg); 53 writew(val, priv->base + priv->regs[index]);
54} 54}
55 55
56static u16 c_can_plat_read_reg_aligned_to_32bit(struct c_can_priv *priv, 56static u16 c_can_plat_read_reg_aligned_to_32bit(struct c_can_priv *priv,
57 void *reg) 57 enum reg index)
58{ 58{
59 return readw(reg + (long)reg - (long)priv->regs); 59 return readw(priv->base + 2 * priv->regs[index]);
60} 60}
61 61
62static void c_can_plat_write_reg_aligned_to_32bit(struct c_can_priv *priv, 62static void c_can_plat_write_reg_aligned_to_32bit(struct c_can_priv *priv,
63 void *reg, u16 val) 63 enum reg index, u16 val)
64{ 64{
65 writew(val, reg + (long)reg - (long)priv->regs); 65 writew(val, priv->base + 2 * priv->regs[index]);
66} 66}
67 67
68static int __devinit c_can_plat_probe(struct platform_device *pdev) 68static int __devinit c_can_plat_probe(struct platform_device *pdev)
@@ -71,6 +71,7 @@ static int __devinit c_can_plat_probe(struct platform_device *pdev)
71 void __iomem *addr; 71 void __iomem *addr;
72 struct net_device *dev; 72 struct net_device *dev;
73 struct c_can_priv *priv; 73 struct c_can_priv *priv;
74 const struct platform_device_id *id;
74 struct resource *mem; 75 struct resource *mem;
75 int irq; 76 int irq;
76#ifdef CONFIG_HAVE_CLK 77#ifdef CONFIG_HAVE_CLK
@@ -115,26 +116,40 @@ static int __devinit c_can_plat_probe(struct platform_device *pdev)
115 } 116 }
116 117
117 priv = netdev_priv(dev); 118 priv = netdev_priv(dev);
119 id = platform_get_device_id(pdev);
120 switch (id->driver_data) {
121 case C_CAN_DEVTYPE:
122 priv->regs = reg_map_c_can;
123 switch (mem->flags & IORESOURCE_MEM_TYPE_MASK) {
124 case IORESOURCE_MEM_32BIT:
125 priv->read_reg = c_can_plat_read_reg_aligned_to_32bit;
126 priv->write_reg = c_can_plat_write_reg_aligned_to_32bit;
127 break;
128 case IORESOURCE_MEM_16BIT:
129 default:
130 priv->read_reg = c_can_plat_read_reg_aligned_to_16bit;
131 priv->write_reg = c_can_plat_write_reg_aligned_to_16bit;
132 break;
133 }
134 break;
135 case D_CAN_DEVTYPE:
136 priv->regs = reg_map_d_can;
137 priv->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES;
138 priv->read_reg = c_can_plat_read_reg_aligned_to_16bit;
139 priv->write_reg = c_can_plat_write_reg_aligned_to_16bit;
140 break;
141 default:
142 ret = -EINVAL;
143 goto exit_free_device;
144 }
118 145
119 dev->irq = irq; 146 dev->irq = irq;
120 priv->regs = addr; 147 priv->base = addr;
121#ifdef CONFIG_HAVE_CLK 148#ifdef CONFIG_HAVE_CLK
122 priv->can.clock.freq = clk_get_rate(clk); 149 priv->can.clock.freq = clk_get_rate(clk);
123 priv->priv = clk; 150 priv->priv = clk;
124#endif 151#endif
125 152
126 switch (mem->flags & IORESOURCE_MEM_TYPE_MASK) {
127 case IORESOURCE_MEM_32BIT:
128 priv->read_reg = c_can_plat_read_reg_aligned_to_32bit;
129 priv->write_reg = c_can_plat_write_reg_aligned_to_32bit;
130 break;
131 case IORESOURCE_MEM_16BIT:
132 default:
133 priv->read_reg = c_can_plat_read_reg_aligned_to_16bit;
134 priv->write_reg = c_can_plat_write_reg_aligned_to_16bit;
135 break;
136 }
137
138 platform_set_drvdata(pdev, dev); 153 platform_set_drvdata(pdev, dev);
139 SET_NETDEV_DEV(dev, &pdev->dev); 154 SET_NETDEV_DEV(dev, &pdev->dev);
140 155
@@ -146,7 +161,7 @@ static int __devinit c_can_plat_probe(struct platform_device *pdev)
146 } 161 }
147 162
148 dev_info(&pdev->dev, "%s device registered (regs=%p, irq=%d)\n", 163 dev_info(&pdev->dev, "%s device registered (regs=%p, irq=%d)\n",
149 KBUILD_MODNAME, priv->regs, dev->irq); 164 KBUILD_MODNAME, priv->base, dev->irq);
150 return 0; 165 return 0;
151 166
152exit_free_device: 167exit_free_device:
@@ -176,7 +191,7 @@ static int __devexit c_can_plat_remove(struct platform_device *pdev)
176 platform_set_drvdata(pdev, NULL); 191 platform_set_drvdata(pdev, NULL);
177 192
178 free_c_can_dev(dev); 193 free_c_can_dev(dev);
179 iounmap(priv->regs); 194 iounmap(priv->base);
180 195
181 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 196 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
182 release_mem_region(mem->start, resource_size(mem)); 197 release_mem_region(mem->start, resource_size(mem));
@@ -188,6 +203,20 @@ static int __devexit c_can_plat_remove(struct platform_device *pdev)
188 return 0; 203 return 0;
189} 204}
190 205
206static const struct platform_device_id c_can_id_table[] = {
207 {
208 .name = KBUILD_MODNAME,
209 .driver_data = C_CAN_DEVTYPE,
210 }, {
211 .name = "c_can",
212 .driver_data = C_CAN_DEVTYPE,
213 }, {
214 .name = "d_can",
215 .driver_data = D_CAN_DEVTYPE,
216 }, {
217 }
218};
219
191static struct platform_driver c_can_plat_driver = { 220static struct platform_driver c_can_plat_driver = {
192 .driver = { 221 .driver = {
193 .name = KBUILD_MODNAME, 222 .name = KBUILD_MODNAME,
@@ -195,6 +224,7 @@ static struct platform_driver c_can_plat_driver = {
195 }, 224 },
196 .probe = c_can_plat_probe, 225 .probe = c_can_plat_probe,
197 .remove = __devexit_p(c_can_plat_remove), 226 .remove = __devexit_p(c_can_plat_remove),
227 .id_table = c_can_id_table,
198}; 228};
199 229
200module_platform_driver(c_can_plat_driver); 230module_platform_driver(c_can_plat_driver);
diff --git a/drivers/net/can/cc770/cc770.c b/drivers/net/can/cc770/cc770.c
index d42a6a7396f..a138db11cbf 100644
--- a/drivers/net/can/cc770/cc770.c
+++ b/drivers/net/can/cc770/cc770.c
@@ -695,7 +695,7 @@ static void cc770_tx_interrupt(struct net_device *dev, unsigned int o)
695 netif_wake_queue(dev); 695 netif_wake_queue(dev);
696} 696}
697 697
698irqreturn_t cc770_interrupt(int irq, void *dev_id) 698static irqreturn_t cc770_interrupt(int irq, void *dev_id)
699{ 699{
700 struct net_device *dev = (struct net_device *)dev_id; 700 struct net_device *dev = (struct net_device *)dev_id;
701 struct cc770_priv *priv = netdev_priv(dev); 701 struct cc770_priv *priv = netdev_priv(dev);
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index f03d7a481a8..963e2ccd10d 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -33,6 +33,39 @@ MODULE_DESCRIPTION(MOD_DESC);
33MODULE_LICENSE("GPL v2"); 33MODULE_LICENSE("GPL v2");
34MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>"); 34MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>");
35 35
36/* CAN DLC to real data length conversion helpers */
37
38static const u8 dlc2len[] = {0, 1, 2, 3, 4, 5, 6, 7,
39 8, 12, 16, 20, 24, 32, 48, 64};
40
41/* get data length from can_dlc with sanitized can_dlc */
42u8 can_dlc2len(u8 can_dlc)
43{
44 return dlc2len[can_dlc & 0x0F];
45}
46EXPORT_SYMBOL_GPL(can_dlc2len);
47
48static const u8 len2dlc[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, /* 0 - 8 */
49 9, 9, 9, 9, /* 9 - 12 */
50 10, 10, 10, 10, /* 13 - 16 */
51 11, 11, 11, 11, /* 17 - 20 */
52 12, 12, 12, 12, /* 21 - 24 */
53 13, 13, 13, 13, 13, 13, 13, 13, /* 25 - 32 */
54 14, 14, 14, 14, 14, 14, 14, 14, /* 33 - 40 */
55 14, 14, 14, 14, 14, 14, 14, 14, /* 41 - 48 */
56 15, 15, 15, 15, 15, 15, 15, 15, /* 49 - 56 */
57 15, 15, 15, 15, 15, 15, 15, 15}; /* 57 - 64 */
58
59/* map the sanitized data length to an appropriate data length code */
60u8 can_len2dlc(u8 len)
61{
62 if (unlikely(len > 64))
63 return 0xF;
64
65 return len2dlc[len];
66}
67EXPORT_SYMBOL_GPL(can_len2dlc);
68
36#ifdef CONFIG_CAN_CALC_BITTIMING 69#ifdef CONFIG_CAN_CALC_BITTIMING
37#define CAN_CALC_MAX_ERROR 50 /* in one-tenth of a percent */ 70#define CAN_CALC_MAX_ERROR 50 /* in one-tenth of a percent */
38 71
@@ -368,7 +401,7 @@ EXPORT_SYMBOL_GPL(can_free_echo_skb);
368/* 401/*
369 * CAN device restart for bus-off recovery 402 * CAN device restart for bus-off recovery
370 */ 403 */
371void can_restart(unsigned long data) 404static void can_restart(unsigned long data)
372{ 405{
373 struct net_device *dev = (struct net_device *)data; 406 struct net_device *dev = (struct net_device *)data;
374 struct can_priv *priv = netdev_priv(dev); 407 struct can_priv *priv = netdev_priv(dev);
@@ -454,7 +487,7 @@ EXPORT_SYMBOL_GPL(can_bus_off);
454static void can_setup(struct net_device *dev) 487static void can_setup(struct net_device *dev)
455{ 488{
456 dev->type = ARPHRD_CAN; 489 dev->type = ARPHRD_CAN;
457 dev->mtu = sizeof(struct can_frame); 490 dev->mtu = CAN_MTU;
458 dev->hard_header_len = 0; 491 dev->hard_header_len = 0;
459 dev->addr_len = 0; 492 dev->addr_len = 0;
460 dev->tx_queue_len = 10; 493 dev->tx_queue_len = 10;
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index 81d47410237..81324a11a50 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -34,6 +34,7 @@
34#include <linux/list.h> 34#include <linux/list.h>
35#include <linux/module.h> 35#include <linux/module.h>
36#include <linux/of.h> 36#include <linux/of.h>
37#include <linux/of_device.h>
37#include <linux/platform_device.h> 38#include <linux/platform_device.h>
38#include <linux/pinctrl/consumer.h> 39#include <linux/pinctrl/consumer.h>
39 40
@@ -165,10 +166,21 @@ struct flexcan_regs {
165 u32 imask1; /* 0x28 */ 166 u32 imask1; /* 0x28 */
166 u32 iflag2; /* 0x2c */ 167 u32 iflag2; /* 0x2c */
167 u32 iflag1; /* 0x30 */ 168 u32 iflag1; /* 0x30 */
168 u32 _reserved2[19]; 169 u32 crl2; /* 0x34 */
170 u32 esr2; /* 0x38 */
171 u32 imeur; /* 0x3c */
172 u32 lrfr; /* 0x40 */
173 u32 crcr; /* 0x44 */
174 u32 rxfgmask; /* 0x48 */
175 u32 rxfir; /* 0x4c */
176 u32 _reserved3[12];
169 struct flexcan_mb cantxfg[64]; 177 struct flexcan_mb cantxfg[64];
170}; 178};
171 179
180struct flexcan_devtype_data {
181 u32 hw_ver; /* hardware controller version */
182};
183
172struct flexcan_priv { 184struct flexcan_priv {
173 struct can_priv can; 185 struct can_priv can;
174 struct net_device *dev; 186 struct net_device *dev;
@@ -180,6 +192,15 @@ struct flexcan_priv {
180 192
181 struct clk *clk; 193 struct clk *clk;
182 struct flexcan_platform_data *pdata; 194 struct flexcan_platform_data *pdata;
195 struct flexcan_devtype_data *devtype_data;
196};
197
198static struct flexcan_devtype_data fsl_p1010_devtype_data = {
199 .hw_ver = 3,
200};
201
202static struct flexcan_devtype_data fsl_imx6q_devtype_data = {
203 .hw_ver = 10,
183}; 204};
184 205
185static struct can_bittiming_const flexcan_bittiming_const = { 206static struct can_bittiming_const flexcan_bittiming_const = {
@@ -750,6 +771,9 @@ static int flexcan_chip_start(struct net_device *dev)
750 flexcan_write(0x0, &regs->rx14mask); 771 flexcan_write(0x0, &regs->rx14mask);
751 flexcan_write(0x0, &regs->rx15mask); 772 flexcan_write(0x0, &regs->rx15mask);
752 773
774 if (priv->devtype_data->hw_ver >= 10)
775 flexcan_write(0x0, &regs->rxfgmask);
776
753 flexcan_transceiver_switch(priv, 1); 777 flexcan_transceiver_switch(priv, 1);
754 778
755 /* synchronize with the can bus */ 779 /* synchronize with the can bus */
@@ -922,8 +946,21 @@ static void __devexit unregister_flexcandev(struct net_device *dev)
922 unregister_candev(dev); 946 unregister_candev(dev);
923} 947}
924 948
949static const struct of_device_id flexcan_of_match[] = {
950 { .compatible = "fsl,p1010-flexcan", .data = &fsl_p1010_devtype_data, },
951 { .compatible = "fsl,imx6q-flexcan", .data = &fsl_imx6q_devtype_data, },
952 { /* sentinel */ },
953};
954
955static const struct platform_device_id flexcan_id_table[] = {
956 { .name = "flexcan", .driver_data = (kernel_ulong_t)&fsl_p1010_devtype_data, },
957 { /* sentinel */ },
958};
959
925static int __devinit flexcan_probe(struct platform_device *pdev) 960static int __devinit flexcan_probe(struct platform_device *pdev)
926{ 961{
962 const struct of_device_id *of_id;
963 struct flexcan_devtype_data *devtype_data;
927 struct net_device *dev; 964 struct net_device *dev;
928 struct flexcan_priv *priv; 965 struct flexcan_priv *priv;
929 struct resource *mem; 966 struct resource *mem;
@@ -938,14 +975,9 @@ static int __devinit flexcan_probe(struct platform_device *pdev)
938 if (IS_ERR(pinctrl)) 975 if (IS_ERR(pinctrl))
939 return PTR_ERR(pinctrl); 976 return PTR_ERR(pinctrl);
940 977
941 if (pdev->dev.of_node) { 978 if (pdev->dev.of_node)
942 const __be32 *clock_freq_p; 979 of_property_read_u32(pdev->dev.of_node,
943 980 "clock-frequency", &clock_freq);
944 clock_freq_p = of_get_property(pdev->dev.of_node,
945 "clock-frequency", NULL);
946 if (clock_freq_p)
947 clock_freq = be32_to_cpup(clock_freq_p);
948 }
949 981
950 if (!clock_freq) { 982 if (!clock_freq) {
951 clk = clk_get(&pdev->dev, NULL); 983 clk = clk_get(&pdev->dev, NULL);
@@ -982,6 +1014,17 @@ static int __devinit flexcan_probe(struct platform_device *pdev)
982 goto failed_alloc; 1014 goto failed_alloc;
983 } 1015 }
984 1016
1017 of_id = of_match_device(flexcan_of_match, &pdev->dev);
1018 if (of_id) {
1019 devtype_data = of_id->data;
1020 } else if (pdev->id_entry->driver_data) {
1021 devtype_data = (struct flexcan_devtype_data *)
1022 pdev->id_entry->driver_data;
1023 } else {
1024 err = -ENODEV;
1025 goto failed_devtype;
1026 }
1027
985 dev->netdev_ops = &flexcan_netdev_ops; 1028 dev->netdev_ops = &flexcan_netdev_ops;
986 dev->irq = irq; 1029 dev->irq = irq;
987 dev->flags |= IFF_ECHO; 1030 dev->flags |= IFF_ECHO;
@@ -998,6 +1041,7 @@ static int __devinit flexcan_probe(struct platform_device *pdev)
998 priv->dev = dev; 1041 priv->dev = dev;
999 priv->clk = clk; 1042 priv->clk = clk;
1000 priv->pdata = pdev->dev.platform_data; 1043 priv->pdata = pdev->dev.platform_data;
1044 priv->devtype_data = devtype_data;
1001 1045
1002 netif_napi_add(dev, &priv->napi, flexcan_poll, FLEXCAN_NAPI_WEIGHT); 1046 netif_napi_add(dev, &priv->napi, flexcan_poll, FLEXCAN_NAPI_WEIGHT);
1003 1047
@@ -1016,6 +1060,7 @@ static int __devinit flexcan_probe(struct platform_device *pdev)
1016 return 0; 1060 return 0;
1017 1061
1018 failed_register: 1062 failed_register:
1063 failed_devtype:
1019 free_candev(dev); 1064 free_candev(dev);
1020 failed_alloc: 1065 failed_alloc:
1021 iounmap(base); 1066 iounmap(base);
@@ -1049,12 +1094,41 @@ static int __devexit flexcan_remove(struct platform_device *pdev)
1049 return 0; 1094 return 0;
1050} 1095}
1051 1096
1052static struct of_device_id flexcan_of_match[] = { 1097#ifdef CONFIG_PM
1053 { 1098static int flexcan_suspend(struct platform_device *pdev, pm_message_t state)
1054 .compatible = "fsl,p1010-flexcan", 1099{
1055 }, 1100 struct net_device *dev = platform_get_drvdata(pdev);
1056 {}, 1101 struct flexcan_priv *priv = netdev_priv(dev);
1057}; 1102
1103 flexcan_chip_disable(priv);
1104
1105 if (netif_running(dev)) {
1106 netif_stop_queue(dev);
1107 netif_device_detach(dev);
1108 }
1109 priv->can.state = CAN_STATE_SLEEPING;
1110
1111 return 0;
1112}
1113
1114static int flexcan_resume(struct platform_device *pdev)
1115{
1116 struct net_device *dev = platform_get_drvdata(pdev);
1117 struct flexcan_priv *priv = netdev_priv(dev);
1118
1119 priv->can.state = CAN_STATE_ERROR_ACTIVE;
1120 if (netif_running(dev)) {
1121 netif_device_attach(dev);
1122 netif_start_queue(dev);
1123 }
1124 flexcan_chip_enable(priv);
1125
1126 return 0;
1127}
1128#else
1129#define flexcan_suspend NULL
1130#define flexcan_resume NULL
1131#endif
1058 1132
1059static struct platform_driver flexcan_driver = { 1133static struct platform_driver flexcan_driver = {
1060 .driver = { 1134 .driver = {
@@ -1064,6 +1138,9 @@ static struct platform_driver flexcan_driver = {
1064 }, 1138 },
1065 .probe = flexcan_probe, 1139 .probe = flexcan_probe,
1066 .remove = __devexit_p(flexcan_remove), 1140 .remove = __devexit_p(flexcan_remove),
1141 .suspend = flexcan_suspend,
1142 .resume = flexcan_resume,
1143 .id_table = flexcan_id_table,
1067}; 1144};
1068 1145
1069module_platform_driver(flexcan_driver); 1146module_platform_driver(flexcan_driver);
diff --git a/drivers/net/can/mcp251x.c b/drivers/net/can/mcp251x.c
index 346785c56a2..9120a36ec70 100644
--- a/drivers/net/can/mcp251x.c
+++ b/drivers/net/can/mcp251x.c
@@ -1020,8 +1020,7 @@ static int __devinit mcp251x_can_probe(struct spi_device *spi)
1020 GFP_DMA); 1020 GFP_DMA);
1021 1021
1022 if (priv->spi_tx_buf) { 1022 if (priv->spi_tx_buf) {
1023 priv->spi_rx_buf = (u8 *)(priv->spi_tx_buf + 1023 priv->spi_rx_buf = (priv->spi_tx_buf + (PAGE_SIZE / 2));
1024 (PAGE_SIZE / 2));
1025 priv->spi_rx_dma = (dma_addr_t)(priv->spi_tx_dma + 1024 priv->spi_rx_dma = (dma_addr_t)(priv->spi_tx_dma +
1026 (PAGE_SIZE / 2)); 1025 (PAGE_SIZE / 2));
1027 } else { 1026 } else {
diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c
index ea2d9428593..4f93c0be005 100644
--- a/drivers/net/can/vcan.c
+++ b/drivers/net/can/vcan.c
@@ -70,13 +70,12 @@ MODULE_PARM_DESC(echo, "Echo sent frames (for testing). Default: 0 (Off)");
70 70
71static void vcan_rx(struct sk_buff *skb, struct net_device *dev) 71static void vcan_rx(struct sk_buff *skb, struct net_device *dev)
72{ 72{
73 struct can_frame *cf = (struct can_frame *)skb->data; 73 struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
74 struct net_device_stats *stats = &dev->stats; 74 struct net_device_stats *stats = &dev->stats;
75 75
76 stats->rx_packets++; 76 stats->rx_packets++;
77 stats->rx_bytes += cf->can_dlc; 77 stats->rx_bytes += cfd->len;
78 78
79 skb->protocol = htons(ETH_P_CAN);
80 skb->pkt_type = PACKET_BROADCAST; 79 skb->pkt_type = PACKET_BROADCAST;
81 skb->dev = dev; 80 skb->dev = dev;
82 skb->ip_summed = CHECKSUM_UNNECESSARY; 81 skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -86,7 +85,7 @@ static void vcan_rx(struct sk_buff *skb, struct net_device *dev)
86 85
87static netdev_tx_t vcan_tx(struct sk_buff *skb, struct net_device *dev) 86static netdev_tx_t vcan_tx(struct sk_buff *skb, struct net_device *dev)
88{ 87{
89 struct can_frame *cf = (struct can_frame *)skb->data; 88 struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
90 struct net_device_stats *stats = &dev->stats; 89 struct net_device_stats *stats = &dev->stats;
91 int loop; 90 int loop;
92 91
@@ -94,7 +93,7 @@ static netdev_tx_t vcan_tx(struct sk_buff *skb, struct net_device *dev)
94 return NETDEV_TX_OK; 93 return NETDEV_TX_OK;
95 94
96 stats->tx_packets++; 95 stats->tx_packets++;
97 stats->tx_bytes += cf->can_dlc; 96 stats->tx_bytes += cfd->len;
98 97
99 /* set flag whether this packet has to be looped back */ 98 /* set flag whether this packet has to be looped back */
100 loop = skb->pkt_type == PACKET_LOOPBACK; 99 loop = skb->pkt_type == PACKET_LOOPBACK;
@@ -108,7 +107,7 @@ static netdev_tx_t vcan_tx(struct sk_buff *skb, struct net_device *dev)
108 * CAN core already did the echo for us 107 * CAN core already did the echo for us
109 */ 108 */
110 stats->rx_packets++; 109 stats->rx_packets++;
111 stats->rx_bytes += cf->can_dlc; 110 stats->rx_bytes += cfd->len;
112 } 111 }
113 kfree_skb(skb); 112 kfree_skb(skb);
114 return NETDEV_TX_OK; 113 return NETDEV_TX_OK;
@@ -133,14 +132,28 @@ static netdev_tx_t vcan_tx(struct sk_buff *skb, struct net_device *dev)
133 return NETDEV_TX_OK; 132 return NETDEV_TX_OK;
134} 133}
135 134
135static int vcan_change_mtu(struct net_device *dev, int new_mtu)
136{
137 /* Do not allow changing the MTU while running */
138 if (dev->flags & IFF_UP)
139 return -EBUSY;
140
141 if (new_mtu != CAN_MTU && new_mtu != CANFD_MTU)
142 return -EINVAL;
143
144 dev->mtu = new_mtu;
145 return 0;
146}
147
136static const struct net_device_ops vcan_netdev_ops = { 148static const struct net_device_ops vcan_netdev_ops = {
137 .ndo_start_xmit = vcan_tx, 149 .ndo_start_xmit = vcan_tx,
150 .ndo_change_mtu = vcan_change_mtu,
138}; 151};
139 152
140static void vcan_setup(struct net_device *dev) 153static void vcan_setup(struct net_device *dev)
141{ 154{
142 dev->type = ARPHRD_CAN; 155 dev->type = ARPHRD_CAN;
143 dev->mtu = sizeof(struct can_frame); 156 dev->mtu = CAN_MTU;
144 dev->hard_header_len = 0; 157 dev->hard_header_len = 0;
145 dev->addr_len = 0; 158 dev->addr_len = 0;
146 dev->tx_queue_len = 0; 159 dev->tx_queue_len = 0;
diff --git a/drivers/net/cris/eth_v10.c b/drivers/net/cris/eth_v10.c
index 9c755db6b16..f0c8bd54ce2 100644
--- a/drivers/net/cris/eth_v10.c
+++ b/drivers/net/cris/eth_v10.c
@@ -1008,7 +1008,7 @@ e100_send_mdio_bit(unsigned char bit)
1008} 1008}
1009 1009
1010static unsigned char 1010static unsigned char
1011e100_receive_mdio_bit() 1011e100_receive_mdio_bit(void)
1012{ 1012{
1013 unsigned char bit; 1013 unsigned char bit;
1014 *R_NETWORK_MGM_CTRL = 0; 1014 *R_NETWORK_MGM_CTRL = 0;
diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
index bab0158f1cc..9d6a0677466 100644
--- a/drivers/net/dummy.c
+++ b/drivers/net/dummy.c
@@ -40,18 +40,6 @@
40 40
41static int numdummies = 1; 41static int numdummies = 1;
42 42
43static int dummy_set_address(struct net_device *dev, void *p)
44{
45 struct sockaddr *sa = p;
46
47 if (!is_valid_ether_addr(sa->sa_data))
48 return -EADDRNOTAVAIL;
49
50 dev->addr_assign_type &= ~NET_ADDR_RANDOM;
51 memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN);
52 return 0;
53}
54
55/* fake multicast ability */ 43/* fake multicast ability */
56static void set_multicast_list(struct net_device *dev) 44static void set_multicast_list(struct net_device *dev)
57{ 45{
@@ -118,7 +106,7 @@ static const struct net_device_ops dummy_netdev_ops = {
118 .ndo_start_xmit = dummy_xmit, 106 .ndo_start_xmit = dummy_xmit,
119 .ndo_validate_addr = eth_validate_addr, 107 .ndo_validate_addr = eth_validate_addr,
120 .ndo_set_rx_mode = set_multicast_list, 108 .ndo_set_rx_mode = set_multicast_list,
121 .ndo_set_mac_address = dummy_set_address, 109 .ndo_set_mac_address = eth_mac_addr,
122 .ndo_get_stats64 = dummy_get_stats64, 110 .ndo_get_stats64 = dummy_get_stats64,
123}; 111};
124 112
@@ -134,6 +122,7 @@ static void dummy_setup(struct net_device *dev)
134 dev->tx_queue_len = 0; 122 dev->tx_queue_len = 0;
135 dev->flags |= IFF_NOARP; 123 dev->flags |= IFF_NOARP;
136 dev->flags &= ~IFF_MULTICAST; 124 dev->flags &= ~IFF_MULTICAST;
125 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
137 dev->features |= NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO; 126 dev->features |= NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO;
138 dev->features |= NETIF_F_HW_CSUM | NETIF_F_HIGHDMA | NETIF_F_LLTX; 127 dev->features |= NETIF_F_HW_CSUM | NETIF_F_HIGHDMA | NETIF_F_LLTX;
139 eth_hw_addr_random(dev); 128 eth_hw_addr_random(dev);
diff --git a/drivers/net/ethernet/3com/3c501.c b/drivers/net/ethernet/3com/3c501.c
index bf73e1a0229..2038eaabaea 100644
--- a/drivers/net/ethernet/3com/3c501.c
+++ b/drivers/net/ethernet/3com/3c501.c
@@ -143,7 +143,7 @@ static int irq = 5;
143static int mem_start; 143static int mem_start;
144 144
145/** 145/**
146 * el1_probe: - probe for a 3c501 146 * el1_probe - probe for a 3c501
147 * @dev: The device structure passed in to probe. 147 * @dev: The device structure passed in to probe.
148 * 148 *
149 * This can be called from two places. The network layer will probe using 149 * This can be called from two places. The network layer will probe using
diff --git a/drivers/net/ethernet/8390/apne.c b/drivers/net/ethernet/8390/apne.c
index 923959275a8..912ed7a5f33 100644
--- a/drivers/net/ethernet/8390/apne.c
+++ b/drivers/net/ethernet/8390/apne.c
@@ -454,7 +454,7 @@ apne_block_input(struct net_device *dev, int count, struct sk_buff *skb, int rin
454 buf[count-1] = inb(NE_BASE + NE_DATAPORT); 454 buf[count-1] = inb(NE_BASE + NE_DATAPORT);
455 } 455 }
456 } else { 456 } else {
457 ptrc = (char*)buf; 457 ptrc = buf;
458 for (cnt = 0; cnt < count; cnt++) 458 for (cnt = 0; cnt < count; cnt++)
459 *ptrc++ = inb(NE_BASE + NE_DATAPORT); 459 *ptrc++ = inb(NE_BASE + NE_DATAPORT);
460 } 460 }
diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c
index 34850117808..9c77c736f17 100644
--- a/drivers/net/ethernet/aeroflex/greth.c
+++ b/drivers/net/ethernet/aeroflex/greth.c
@@ -1014,7 +1014,7 @@ static int greth_set_mac_add(struct net_device *dev, void *p)
1014 struct greth_regs *regs; 1014 struct greth_regs *regs;
1015 1015
1016 greth = netdev_priv(dev); 1016 greth = netdev_priv(dev);
1017 regs = (struct greth_regs *) greth->regs; 1017 regs = greth->regs;
1018 1018
1019 if (!is_valid_ether_addr(addr->sa_data)) 1019 if (!is_valid_ether_addr(addr->sa_data))
1020 return -EADDRNOTAVAIL; 1020 return -EADDRNOTAVAIL;
@@ -1036,7 +1036,7 @@ static void greth_set_hash_filter(struct net_device *dev)
1036{ 1036{
1037 struct netdev_hw_addr *ha; 1037 struct netdev_hw_addr *ha;
1038 struct greth_private *greth = netdev_priv(dev); 1038 struct greth_private *greth = netdev_priv(dev);
1039 struct greth_regs *regs = (struct greth_regs *) greth->regs; 1039 struct greth_regs *regs = greth->regs;
1040 u32 mc_filter[2]; 1040 u32 mc_filter[2];
1041 unsigned int bitnr; 1041 unsigned int bitnr;
1042 1042
@@ -1055,7 +1055,7 @@ static void greth_set_multicast_list(struct net_device *dev)
1055{ 1055{
1056 int cfg; 1056 int cfg;
1057 struct greth_private *greth = netdev_priv(dev); 1057 struct greth_private *greth = netdev_priv(dev);
1058 struct greth_regs *regs = (struct greth_regs *) greth->regs; 1058 struct greth_regs *regs = greth->regs;
1059 1059
1060 cfg = GRETH_REGLOAD(regs->control); 1060 cfg = GRETH_REGLOAD(regs->control);
1061 if (dev->flags & IFF_PROMISC) 1061 if (dev->flags & IFF_PROMISC)
@@ -1414,7 +1414,7 @@ static int __devinit greth_of_probe(struct platform_device *ofdev)
1414 goto error1; 1414 goto error1;
1415 } 1415 }
1416 1416
1417 regs = (struct greth_regs *) greth->regs; 1417 regs = greth->regs;
1418 greth->irq = ofdev->archdata.irqs[0]; 1418 greth->irq = ofdev->archdata.irqs[0];
1419 1419
1420 dev_set_drvdata(greth->dev, dev); 1420 dev_set_drvdata(greth->dev, dev);
diff --git a/drivers/net/ethernet/amd/declance.c b/drivers/net/ethernet/amd/declance.c
index 75299f500ee..7203b522f23 100644
--- a/drivers/net/ethernet/amd/declance.c
+++ b/drivers/net/ethernet/amd/declance.c
@@ -623,7 +623,7 @@ static int lance_rx(struct net_device *dev)
623 skb_put(skb, len); /* make room */ 623 skb_put(skb, len); /* make room */
624 624
625 cp_from_buf(lp->type, skb->data, 625 cp_from_buf(lp->type, skb->data,
626 (char *)lp->rx_buf_ptr_cpu[entry], len); 626 lp->rx_buf_ptr_cpu[entry], len);
627 627
628 skb->protocol = eth_type_trans(skb, dev); 628 skb->protocol = eth_type_trans(skb, dev);
629 netif_rx(skb); 629 netif_rx(skb);
@@ -919,7 +919,7 @@ static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
919 *lib_ptr(ib, btx_ring[entry].length, lp->type) = (-len); 919 *lib_ptr(ib, btx_ring[entry].length, lp->type) = (-len);
920 *lib_ptr(ib, btx_ring[entry].misc, lp->type) = 0; 920 *lib_ptr(ib, btx_ring[entry].misc, lp->type) = 0;
921 921
922 cp_to_buf(lp->type, (char *)lp->tx_buf_ptr_cpu[entry], skb->data, len); 922 cp_to_buf(lp->type, lp->tx_buf_ptr_cpu[entry], skb->data, len);
923 923
924 /* Now, give the packet to the lance */ 924 /* Now, give the packet to the lance */
925 *lib_ptr(ib, btx_ring[entry].tmd1, lp->type) = 925 *lib_ptr(ib, btx_ring[entry].tmd1, lp->type) =
diff --git a/drivers/net/ethernet/amd/lance.c b/drivers/net/ethernet/amd/lance.c
index a6e2e840884..5c728436b85 100644
--- a/drivers/net/ethernet/amd/lance.c
+++ b/drivers/net/ethernet/amd/lance.c
@@ -873,10 +873,9 @@ lance_init_ring(struct net_device *dev, gfp_t gfp)
873 873
874 skb = alloc_skb(PKT_BUF_SZ, GFP_DMA | gfp); 874 skb = alloc_skb(PKT_BUF_SZ, GFP_DMA | gfp);
875 lp->rx_skbuff[i] = skb; 875 lp->rx_skbuff[i] = skb;
876 if (skb) { 876 if (skb)
877 skb->dev = dev;
878 rx_buff = skb->data; 877 rx_buff = skb->data;
879 } else 878 else
880 rx_buff = kmalloc(PKT_BUF_SZ, GFP_DMA | gfp); 879 rx_buff = kmalloc(PKT_BUF_SZ, GFP_DMA | gfp);
881 if (rx_buff == NULL) 880 if (rx_buff == NULL)
882 lp->rx_ring[i].base = 0; 881 lp->rx_ring[i].base = 0;
diff --git a/drivers/net/ethernet/apple/macmace.c b/drivers/net/ethernet/apple/macmace.c
index ab7ff8645ab..a92ddee7f66 100644
--- a/drivers/net/ethernet/apple/macmace.c
+++ b/drivers/net/ethernet/apple/macmace.c
@@ -228,7 +228,7 @@ static int __devinit mace_probe(struct platform_device *pdev)
228 * bits are reversed. 228 * bits are reversed.
229 */ 229 */
230 230
231 addr = (void *)MACE_PROM; 231 addr = MACE_PROM;
232 232
233 for (j = 0; j < 6; ++j) { 233 for (j = 0; j < 6; ++j) {
234 u8 v = bitrev8(addr[j<<4]); 234 u8 v = bitrev8(addr[j<<4]);
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c b/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c
index ff9c73859d4..801f0126512 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c
@@ -602,7 +602,7 @@ int atl1c_phy_reset(struct atl1c_hw *hw)
602 602
603int atl1c_phy_init(struct atl1c_hw *hw) 603int atl1c_phy_init(struct atl1c_hw *hw)
604{ 604{
605 struct atl1c_adapter *adapter = (struct atl1c_adapter *)hw->adapter; 605 struct atl1c_adapter *adapter = hw->adapter;
606 struct pci_dev *pdev = adapter->pdev; 606 struct pci_dev *pdev = adapter->pdev;
607 int ret_val; 607 int ret_val;
608 u16 mii_bmcr_data = BMCR_RESET; 608 u16 mii_bmcr_data = BMCR_RESET;
@@ -696,7 +696,7 @@ int atl1c_get_speed_and_duplex(struct atl1c_hw *hw, u16 *speed, u16 *duplex)
696/* select one link mode to get lower power consumption */ 696/* select one link mode to get lower power consumption */
697int atl1c_phy_to_ps_link(struct atl1c_hw *hw) 697int atl1c_phy_to_ps_link(struct atl1c_hw *hw)
698{ 698{
699 struct atl1c_adapter *adapter = (struct atl1c_adapter *)hw->adapter; 699 struct atl1c_adapter *adapter = hw->adapter;
700 struct pci_dev *pdev = adapter->pdev; 700 struct pci_dev *pdev = adapter->pdev;
701 int ret = 0; 701 int ret = 0;
702 u16 autoneg_advertised = ADVERTISED_10baseT_Half; 702 u16 autoneg_advertised = ADVERTISED_10baseT_Half;
@@ -768,7 +768,7 @@ int atl1c_restart_autoneg(struct atl1c_hw *hw)
768 768
769int atl1c_power_saving(struct atl1c_hw *hw, u32 wufc) 769int atl1c_power_saving(struct atl1c_hw *hw, u32 wufc)
770{ 770{
771 struct atl1c_adapter *adapter = (struct atl1c_adapter *)hw->adapter; 771 struct atl1c_adapter *adapter = hw->adapter;
772 struct pci_dev *pdev = adapter->pdev; 772 struct pci_dev *pdev = adapter->pdev;
773 u32 master_ctrl, mac_ctrl, phy_ctrl; 773 u32 master_ctrl, mac_ctrl, phy_ctrl;
774 u32 wol_ctrl, speed; 774 u32 wol_ctrl, speed;
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index 1f78b63d5ef..36d3783ebfa 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -166,7 +166,7 @@ static void atl1c_reset_pcie(struct atl1c_hw *hw, u32 flag)
166 msleep(5); 166 msleep(5);
167} 167}
168 168
169/* 169/**
170 * atl1c_irq_enable - Enable default interrupt generation settings 170 * atl1c_irq_enable - Enable default interrupt generation settings
171 * @adapter: board private structure 171 * @adapter: board private structure
172 */ 172 */
@@ -179,7 +179,7 @@ static inline void atl1c_irq_enable(struct atl1c_adapter *adapter)
179 } 179 }
180} 180}
181 181
182/* 182/**
183 * atl1c_irq_disable - Mask off interrupt generation on the NIC 183 * atl1c_irq_disable - Mask off interrupt generation on the NIC
184 * @adapter: board private structure 184 * @adapter: board private structure
185 */ 185 */
@@ -192,7 +192,7 @@ static inline void atl1c_irq_disable(struct atl1c_adapter *adapter)
192 synchronize_irq(adapter->pdev->irq); 192 synchronize_irq(adapter->pdev->irq);
193} 193}
194 194
195/* 195/**
196 * atl1c_irq_reset - reset interrupt confiure on the NIC 196 * atl1c_irq_reset - reset interrupt confiure on the NIC
197 * @adapter: board private structure 197 * @adapter: board private structure
198 */ 198 */
@@ -220,7 +220,7 @@ static u32 atl1c_wait_until_idle(struct atl1c_hw *hw, u32 modu_ctrl)
220 return data; 220 return data;
221} 221}
222 222
223/* 223/**
224 * atl1c_phy_config - Timer Call-back 224 * atl1c_phy_config - Timer Call-back
225 * @data: pointer to netdev cast into an unsigned long 225 * @data: pointer to netdev cast into an unsigned long
226 */ 226 */
@@ -360,7 +360,7 @@ static void atl1c_del_timer(struct atl1c_adapter *adapter)
360} 360}
361 361
362 362
363/* 363/**
364 * atl1c_tx_timeout - Respond to a Tx Hang 364 * atl1c_tx_timeout - Respond to a Tx Hang
365 * @netdev: network interface device structure 365 * @netdev: network interface device structure
366 */ 366 */
@@ -373,7 +373,7 @@ static void atl1c_tx_timeout(struct net_device *netdev)
373 schedule_work(&adapter->common_task); 373 schedule_work(&adapter->common_task);
374} 374}
375 375
376/* 376/**
377 * atl1c_set_multi - Multicast and Promiscuous mode set 377 * atl1c_set_multi - Multicast and Promiscuous mode set
378 * @netdev: network interface device structure 378 * @netdev: network interface device structure
379 * 379 *
@@ -452,7 +452,7 @@ static void atl1c_restore_vlan(struct atl1c_adapter *adapter)
452 atl1c_vlan_mode(adapter->netdev, adapter->netdev->features); 452 atl1c_vlan_mode(adapter->netdev, adapter->netdev->features);
453} 453}
454 454
455/* 455/**
456 * atl1c_set_mac - Change the Ethernet Address of the NIC 456 * atl1c_set_mac - Change the Ethernet Address of the NIC
457 * @netdev: network interface device structure 457 * @netdev: network interface device structure
458 * @p: pointer to an address structure 458 * @p: pointer to an address structure
@@ -517,7 +517,7 @@ static int atl1c_set_features(struct net_device *netdev,
517 return 0; 517 return 0;
518} 518}
519 519
520/* 520/**
521 * atl1c_change_mtu - Change the Maximum Transfer Unit 521 * atl1c_change_mtu - Change the Maximum Transfer Unit
522 * @netdev: network interface device structure 522 * @netdev: network interface device structure
523 * @new_mtu: new value for maximum frame size 523 * @new_mtu: new value for maximum frame size
@@ -576,12 +576,6 @@ static void atl1c_mdio_write(struct net_device *netdev, int phy_id,
576 atl1c_write_phy_reg(&adapter->hw, reg_num, val); 576 atl1c_write_phy_reg(&adapter->hw, reg_num, val);
577} 577}
578 578
579/*
580 * atl1c_mii_ioctl -
581 * @netdev:
582 * @ifreq:
583 * @cmd:
584 */
585static int atl1c_mii_ioctl(struct net_device *netdev, 579static int atl1c_mii_ioctl(struct net_device *netdev,
586 struct ifreq *ifr, int cmd) 580 struct ifreq *ifr, int cmd)
587{ 581{
@@ -632,12 +626,6 @@ out:
632 return retval; 626 return retval;
633} 627}
634 628
635/*
636 * atl1c_ioctl -
637 * @netdev:
638 * @ifreq:
639 * @cmd:
640 */
641static int atl1c_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) 629static int atl1c_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
642{ 630{
643 switch (cmd) { 631 switch (cmd) {
@@ -650,7 +638,7 @@ static int atl1c_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
650 } 638 }
651} 639}
652 640
653/* 641/**
654 * atl1c_alloc_queues - Allocate memory for all rings 642 * atl1c_alloc_queues - Allocate memory for all rings
655 * @adapter: board private structure to initialize 643 * @adapter: board private structure to initialize
656 * 644 *
@@ -754,7 +742,7 @@ static void __devinit atl1c_patch_assign(struct atl1c_hw *hw)
754 i++; 742 i++;
755 } 743 }
756} 744}
757/* 745/**
758 * atl1c_sw_init - Initialize general software structures (struct atl1c_adapter) 746 * atl1c_sw_init - Initialize general software structures (struct atl1c_adapter)
759 * @adapter: board private structure to initialize 747 * @adapter: board private structure to initialize
760 * 748 *
@@ -852,7 +840,7 @@ static inline void atl1c_clean_buffer(struct pci_dev *pdev,
852 buffer_info->skb = NULL; 840 buffer_info->skb = NULL;
853 ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_FREE); 841 ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_FREE);
854} 842}
855/* 843/**
856 * atl1c_clean_tx_ring - Free Tx-skb 844 * atl1c_clean_tx_ring - Free Tx-skb
857 * @adapter: board private structure 845 * @adapter: board private structure
858 */ 846 */
@@ -877,7 +865,7 @@ static void atl1c_clean_tx_ring(struct atl1c_adapter *adapter,
877 tpd_ring->next_to_use = 0; 865 tpd_ring->next_to_use = 0;
878} 866}
879 867
880/* 868/**
881 * atl1c_clean_rx_ring - Free rx-reservation skbs 869 * atl1c_clean_rx_ring - Free rx-reservation skbs
882 * @adapter: board private structure 870 * @adapter: board private structure
883 */ 871 */
@@ -930,7 +918,7 @@ static void atl1c_init_ring_ptrs(struct atl1c_adapter *adapter)
930 } 918 }
931} 919}
932 920
933/* 921/**
934 * atl1c_free_ring_resources - Free Tx / RX descriptor Resources 922 * atl1c_free_ring_resources - Free Tx / RX descriptor Resources
935 * @adapter: board private structure 923 * @adapter: board private structure
936 * 924 *
@@ -953,7 +941,7 @@ static void atl1c_free_ring_resources(struct atl1c_adapter *adapter)
953 } 941 }
954} 942}
955 943
956/* 944/**
957 * atl1c_setup_mem_resources - allocate Tx / RX descriptor resources 945 * atl1c_setup_mem_resources - allocate Tx / RX descriptor resources
958 * @adapter: board private structure 946 * @adapter: board private structure
959 * 947 *
@@ -988,12 +976,12 @@ static int atl1c_setup_ring_resources(struct atl1c_adapter *adapter)
988 } 976 }
989 for (i = 0; i < AT_MAX_TRANSMIT_QUEUE; i++) { 977 for (i = 0; i < AT_MAX_TRANSMIT_QUEUE; i++) {
990 tpd_ring[i].buffer_info = 978 tpd_ring[i].buffer_info =
991 (struct atl1c_buffer *) (tpd_ring->buffer_info + count); 979 (tpd_ring->buffer_info + count);
992 count += tpd_ring[i].count; 980 count += tpd_ring[i].count;
993 } 981 }
994 982
995 rfd_ring->buffer_info = 983 rfd_ring->buffer_info =
996 (struct atl1c_buffer *) (tpd_ring->buffer_info + count); 984 (tpd_ring->buffer_info + count);
997 count += rfd_ring->count; 985 count += rfd_ring->count;
998 rx_desc_count += rfd_ring->count; 986 rx_desc_count += rfd_ring->count;
999 987
@@ -1226,7 +1214,7 @@ static void atl1c_start_mac(struct atl1c_adapter *adapter)
1226 */ 1214 */
1227static int atl1c_reset_mac(struct atl1c_hw *hw) 1215static int atl1c_reset_mac(struct atl1c_hw *hw)
1228{ 1216{
1229 struct atl1c_adapter *adapter = (struct atl1c_adapter *)hw->adapter; 1217 struct atl1c_adapter *adapter = hw->adapter;
1230 struct pci_dev *pdev = adapter->pdev; 1218 struct pci_dev *pdev = adapter->pdev;
1231 u32 ctrl_data = 0; 1219 u32 ctrl_data = 0;
1232 1220
@@ -1362,7 +1350,7 @@ static void atl1c_set_aspm(struct atl1c_hw *hw, u16 link_speed)
1362 return; 1350 return;
1363} 1351}
1364 1352
1365/* 1353/**
1366 * atl1c_configure - Configure Transmit&Receive Unit after Reset 1354 * atl1c_configure - Configure Transmit&Receive Unit after Reset
1367 * @adapter: board private structure 1355 * @adapter: board private structure
1368 * 1356 *
@@ -1476,7 +1464,7 @@ static void atl1c_update_hw_stats(struct atl1c_adapter *adapter)
1476 } 1464 }
1477} 1465}
1478 1466
1479/* 1467/**
1480 * atl1c_get_stats - Get System Network Statistics 1468 * atl1c_get_stats - Get System Network Statistics
1481 * @netdev: network interface device structure 1469 * @netdev: network interface device structure
1482 * 1470 *
@@ -1530,8 +1518,7 @@ static inline void atl1c_clear_phy_int(struct atl1c_adapter *adapter)
1530static bool atl1c_clean_tx_irq(struct atl1c_adapter *adapter, 1518static bool atl1c_clean_tx_irq(struct atl1c_adapter *adapter,
1531 enum atl1c_trans_queue type) 1519 enum atl1c_trans_queue type)
1532{ 1520{
1533 struct atl1c_tpd_ring *tpd_ring = (struct atl1c_tpd_ring *) 1521 struct atl1c_tpd_ring *tpd_ring = &adapter->tpd_ring[type];
1534 &adapter->tpd_ring[type];
1535 struct atl1c_buffer *buffer_info; 1522 struct atl1c_buffer *buffer_info;
1536 struct pci_dev *pdev = adapter->pdev; 1523 struct pci_dev *pdev = adapter->pdev;
1537 u16 next_to_clean = atomic_read(&tpd_ring->next_to_clean); 1524 u16 next_to_clean = atomic_read(&tpd_ring->next_to_clean);
@@ -1558,11 +1545,10 @@ static bool atl1c_clean_tx_irq(struct atl1c_adapter *adapter,
1558 return true; 1545 return true;
1559} 1546}
1560 1547
1561/* 1548/**
1562 * atl1c_intr - Interrupt Handler 1549 * atl1c_intr - Interrupt Handler
1563 * @irq: interrupt number 1550 * @irq: interrupt number
1564 * @data: pointer to a network interface device structure 1551 * @data: pointer to a network interface device structure
1565 * @pt_regs: CPU registers structure
1566 */ 1552 */
1567static irqreturn_t atl1c_intr(int irq, void *data) 1553static irqreturn_t atl1c_intr(int irq, void *data)
1568{ 1554{
@@ -1813,9 +1799,8 @@ rrs_checked:
1813 atl1c_alloc_rx_buffer(adapter); 1799 atl1c_alloc_rx_buffer(adapter);
1814} 1800}
1815 1801
1816/* 1802/**
1817 * atl1c_clean - NAPI Rx polling callback 1803 * atl1c_clean - NAPI Rx polling callback
1818 * @adapter: board private structure
1819 */ 1804 */
1820static int atl1c_clean(struct napi_struct *napi, int budget) 1805static int atl1c_clean(struct napi_struct *napi, int budget)
1821{ 1806{
@@ -2270,7 +2255,7 @@ static void atl1c_down(struct atl1c_adapter *adapter)
2270 atl1c_reset_dma_ring(adapter); 2255 atl1c_reset_dma_ring(adapter);
2271} 2256}
2272 2257
2273/* 2258/**
2274 * atl1c_open - Called when a network interface is made active 2259 * atl1c_open - Called when a network interface is made active
2275 * @netdev: network interface device structure 2260 * @netdev: network interface device structure
2276 * 2261 *
@@ -2309,7 +2294,7 @@ err_up:
2309 return err; 2294 return err;
2310} 2295}
2311 2296
2312/* 2297/**
2313 * atl1c_close - Disables a network interface 2298 * atl1c_close - Disables a network interface
2314 * @netdev: network interface device structure 2299 * @netdev: network interface device structure
2315 * 2300 *
@@ -2432,7 +2417,7 @@ static int atl1c_init_netdev(struct net_device *netdev, struct pci_dev *pdev)
2432 return 0; 2417 return 0;
2433} 2418}
2434 2419
2435/* 2420/**
2436 * atl1c_probe - Device Initialization Routine 2421 * atl1c_probe - Device Initialization Routine
2437 * @pdev: PCI device information struct 2422 * @pdev: PCI device information struct
2438 * @ent: entry in atl1c_pci_tbl 2423 * @ent: entry in atl1c_pci_tbl
@@ -2579,7 +2564,7 @@ err_dma:
2579 return err; 2564 return err;
2580} 2565}
2581 2566
2582/* 2567/**
2583 * atl1c_remove - Device Removal Routine 2568 * atl1c_remove - Device Removal Routine
2584 * @pdev: PCI device information struct 2569 * @pdev: PCI device information struct
2585 * 2570 *
@@ -2605,7 +2590,7 @@ static void __devexit atl1c_remove(struct pci_dev *pdev)
2605 free_netdev(netdev); 2590 free_netdev(netdev);
2606} 2591}
2607 2592
2608/* 2593/**
2609 * atl1c_io_error_detected - called when PCI error is detected 2594 * atl1c_io_error_detected - called when PCI error is detected
2610 * @pdev: Pointer to PCI device 2595 * @pdev: Pointer to PCI device
2611 * @state: The current pci connection state 2596 * @state: The current pci connection state
@@ -2633,7 +2618,7 @@ static pci_ers_result_t atl1c_io_error_detected(struct pci_dev *pdev,
2633 return PCI_ERS_RESULT_NEED_RESET; 2618 return PCI_ERS_RESULT_NEED_RESET;
2634} 2619}
2635 2620
2636/* 2621/**
2637 * atl1c_io_slot_reset - called after the pci bus has been reset. 2622 * atl1c_io_slot_reset - called after the pci bus has been reset.
2638 * @pdev: Pointer to PCI device 2623 * @pdev: Pointer to PCI device
2639 * 2624 *
@@ -2661,7 +2646,7 @@ static pci_ers_result_t atl1c_io_slot_reset(struct pci_dev *pdev)
2661 return PCI_ERS_RESULT_RECOVERED; 2646 return PCI_ERS_RESULT_RECOVERED;
2662} 2647}
2663 2648
2664/* 2649/**
2665 * atl1c_io_resume - called when traffic can start flowing again. 2650 * atl1c_io_resume - called when traffic can start flowing again.
2666 * @pdev: Pointer to PCI device 2651 * @pdev: Pointer to PCI device
2667 * 2652 *
@@ -2704,7 +2689,7 @@ static struct pci_driver atl1c_driver = {
2704 .driver.pm = &atl1c_pm_ops, 2689 .driver.pm = &atl1c_pm_ops,
2705}; 2690};
2706 2691
2707/* 2692/**
2708 * atl1c_init_module - Driver Registration Routine 2693 * atl1c_init_module - Driver Registration Routine
2709 * 2694 *
2710 * atl1c_init_module is the first routine called when the driver is 2695 * atl1c_init_module is the first routine called when the driver is
@@ -2715,7 +2700,7 @@ static int __init atl1c_init_module(void)
2715 return pci_register_driver(&atl1c_driver); 2700 return pci_register_driver(&atl1c_driver);
2716} 2701}
2717 2702
2718/* 2703/**
2719 * atl1c_exit_module - Driver Exit Cleanup Routine 2704 * atl1c_exit_module - Driver Exit Cleanup Routine
2720 * 2705 *
2721 * atl1c_exit_module is called just before the driver is removed 2706 * atl1c_exit_module is called just before the driver is removed
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c b/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c
index 6e61f9f9ebb..82b23861bf5 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c
@@ -268,7 +268,7 @@ static int atl1e_set_eeprom(struct net_device *netdev,
268 if (eeprom_buff == NULL) 268 if (eeprom_buff == NULL)
269 return -ENOMEM; 269 return -ENOMEM;
270 270
271 ptr = (u32 *)eeprom_buff; 271 ptr = eeprom_buff;
272 272
273 if (eeprom->offset & 3) { 273 if (eeprom->offset & 3) {
274 /* need read/modify/write of first changed EEPROM word */ 274 /* need read/modify/write of first changed EEPROM word */
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
index 1220e511ced..a98acc8a956 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
@@ -89,7 +89,7 @@ static const u16 atl1e_pay_load_size[] = {
89 128, 256, 512, 1024, 2048, 4096, 89 128, 256, 512, 1024, 2048, 4096,
90}; 90};
91 91
92/* 92/**
93 * atl1e_irq_enable - Enable default interrupt generation settings 93 * atl1e_irq_enable - Enable default interrupt generation settings
94 * @adapter: board private structure 94 * @adapter: board private structure
95 */ 95 */
@@ -102,7 +102,7 @@ static inline void atl1e_irq_enable(struct atl1e_adapter *adapter)
102 } 102 }
103} 103}
104 104
105/* 105/**
106 * atl1e_irq_disable - Mask off interrupt generation on the NIC 106 * atl1e_irq_disable - Mask off interrupt generation on the NIC
107 * @adapter: board private structure 107 * @adapter: board private structure
108 */ 108 */
@@ -114,7 +114,7 @@ static inline void atl1e_irq_disable(struct atl1e_adapter *adapter)
114 synchronize_irq(adapter->pdev->irq); 114 synchronize_irq(adapter->pdev->irq);
115} 115}
116 116
117/* 117/**
118 * atl1e_irq_reset - reset interrupt confiure on the NIC 118 * atl1e_irq_reset - reset interrupt confiure on the NIC
119 * @adapter: board private structure 119 * @adapter: board private structure
120 */ 120 */
@@ -126,7 +126,7 @@ static inline void atl1e_irq_reset(struct atl1e_adapter *adapter)
126 AT_WRITE_FLUSH(&adapter->hw); 126 AT_WRITE_FLUSH(&adapter->hw);
127} 127}
128 128
129/* 129/**
130 * atl1e_phy_config - Timer Call-back 130 * atl1e_phy_config - Timer Call-back
131 * @data: pointer to netdev cast into an unsigned long 131 * @data: pointer to netdev cast into an unsigned long
132 */ 132 */
@@ -210,7 +210,7 @@ static int atl1e_check_link(struct atl1e_adapter *adapter)
210 return 0; 210 return 0;
211} 211}
212 212
213/* 213/**
214 * atl1e_link_chg_task - deal with link change event Out of interrupt context 214 * atl1e_link_chg_task - deal with link change event Out of interrupt context
215 * @netdev: network interface device structure 215 * @netdev: network interface device structure
216 */ 216 */
@@ -259,7 +259,7 @@ static void atl1e_cancel_work(struct atl1e_adapter *adapter)
259 cancel_work_sync(&adapter->link_chg_task); 259 cancel_work_sync(&adapter->link_chg_task);
260} 260}
261 261
262/* 262/**
263 * atl1e_tx_timeout - Respond to a Tx Hang 263 * atl1e_tx_timeout - Respond to a Tx Hang
264 * @netdev: network interface device structure 264 * @netdev: network interface device structure
265 */ 265 */
@@ -271,7 +271,7 @@ static void atl1e_tx_timeout(struct net_device *netdev)
271 schedule_work(&adapter->reset_task); 271 schedule_work(&adapter->reset_task);
272} 272}
273 273
274/* 274/**
275 * atl1e_set_multi - Multicast and Promiscuous mode set 275 * atl1e_set_multi - Multicast and Promiscuous mode set
276 * @netdev: network interface device structure 276 * @netdev: network interface device structure
277 * 277 *
@@ -345,7 +345,7 @@ static void atl1e_restore_vlan(struct atl1e_adapter *adapter)
345 atl1e_vlan_mode(adapter->netdev, adapter->netdev->features); 345 atl1e_vlan_mode(adapter->netdev, adapter->netdev->features);
346} 346}
347 347
348/* 348/**
349 * atl1e_set_mac - Change the Ethernet Address of the NIC 349 * atl1e_set_mac - Change the Ethernet Address of the NIC
350 * @netdev: network interface device structure 350 * @netdev: network interface device structure
351 * @p: pointer to an address structure 351 * @p: pointer to an address structure
@@ -397,7 +397,7 @@ static int atl1e_set_features(struct net_device *netdev,
397 return 0; 397 return 0;
398} 398}
399 399
400/* 400/**
401 * atl1e_change_mtu - Change the Maximum Transfer Unit 401 * atl1e_change_mtu - Change the Maximum Transfer Unit
402 * @netdev: network interface device structure 402 * @netdev: network interface device structure
403 * @new_mtu: new value for maximum frame size 403 * @new_mtu: new value for maximum frame size
@@ -449,12 +449,6 @@ static void atl1e_mdio_write(struct net_device *netdev, int phy_id,
449 atl1e_write_phy_reg(&adapter->hw, reg_num & MDIO_REG_ADDR_MASK, val); 449 atl1e_write_phy_reg(&adapter->hw, reg_num & MDIO_REG_ADDR_MASK, val);
450} 450}
451 451
452/*
453 * atl1e_mii_ioctl -
454 * @netdev:
455 * @ifreq:
456 * @cmd:
457 */
458static int atl1e_mii_ioctl(struct net_device *netdev, 452static int atl1e_mii_ioctl(struct net_device *netdev,
459 struct ifreq *ifr, int cmd) 453 struct ifreq *ifr, int cmd)
460{ 454{
@@ -505,12 +499,6 @@ out:
505 499
506} 500}
507 501
508/*
509 * atl1e_ioctl -
510 * @netdev:
511 * @ifreq:
512 * @cmd:
513 */
514static int atl1e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) 502static int atl1e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
515{ 503{
516 switch (cmd) { 504 switch (cmd) {
@@ -541,7 +529,7 @@ static void atl1e_setup_pcicmd(struct pci_dev *pdev)
541 msleep(1); 529 msleep(1);
542} 530}
543 531
544/* 532/**
545 * atl1e_alloc_queues - Allocate memory for all rings 533 * atl1e_alloc_queues - Allocate memory for all rings
546 * @adapter: board private structure to initialize 534 * @adapter: board private structure to initialize
547 * 535 *
@@ -551,7 +539,7 @@ static int __devinit atl1e_alloc_queues(struct atl1e_adapter *adapter)
551 return 0; 539 return 0;
552} 540}
553 541
554/* 542/**
555 * atl1e_sw_init - Initialize general software structures (struct atl1e_adapter) 543 * atl1e_sw_init - Initialize general software structures (struct atl1e_adapter)
556 * @adapter: board private structure to initialize 544 * @adapter: board private structure to initialize
557 * 545 *
@@ -635,14 +623,13 @@ static int __devinit atl1e_sw_init(struct atl1e_adapter *adapter)
635 return 0; 623 return 0;
636} 624}
637 625
638/* 626/**
639 * atl1e_clean_tx_ring - Free Tx-skb 627 * atl1e_clean_tx_ring - Free Tx-skb
640 * @adapter: board private structure 628 * @adapter: board private structure
641 */ 629 */
642static void atl1e_clean_tx_ring(struct atl1e_adapter *adapter) 630static void atl1e_clean_tx_ring(struct atl1e_adapter *adapter)
643{ 631{
644 struct atl1e_tx_ring *tx_ring = (struct atl1e_tx_ring *) 632 struct atl1e_tx_ring *tx_ring = &adapter->tx_ring;
645 &adapter->tx_ring;
646 struct atl1e_tx_buffer *tx_buffer = NULL; 633 struct atl1e_tx_buffer *tx_buffer = NULL;
647 struct pci_dev *pdev = adapter->pdev; 634 struct pci_dev *pdev = adapter->pdev;
648 u16 index, ring_count; 635 u16 index, ring_count;
@@ -679,14 +666,14 @@ static void atl1e_clean_tx_ring(struct atl1e_adapter *adapter)
679 ring_count); 666 ring_count);
680} 667}
681 668
682/* 669/**
683 * atl1e_clean_rx_ring - Free rx-reservation skbs 670 * atl1e_clean_rx_ring - Free rx-reservation skbs
684 * @adapter: board private structure 671 * @adapter: board private structure
685 */ 672 */
686static void atl1e_clean_rx_ring(struct atl1e_adapter *adapter) 673static void atl1e_clean_rx_ring(struct atl1e_adapter *adapter)
687{ 674{
688 struct atl1e_rx_ring *rx_ring = 675 struct atl1e_rx_ring *rx_ring =
689 (struct atl1e_rx_ring *)&adapter->rx_ring; 676 &adapter->rx_ring;
690 struct atl1e_rx_page_desc *rx_page_desc = rx_ring->rx_page_desc; 677 struct atl1e_rx_page_desc *rx_page_desc = rx_ring->rx_page_desc;
691 u16 i, j; 678 u16 i, j;
692 679
@@ -762,7 +749,7 @@ static void atl1e_init_ring_ptrs(struct atl1e_adapter *adapter)
762 } 749 }
763} 750}
764 751
765/* 752/**
766 * atl1e_free_ring_resources - Free Tx / RX descriptor Resources 753 * atl1e_free_ring_resources - Free Tx / RX descriptor Resources
767 * @adapter: board private structure 754 * @adapter: board private structure
768 * 755 *
@@ -787,7 +774,7 @@ static void atl1e_free_ring_resources(struct atl1e_adapter *adapter)
787 } 774 }
788} 775}
789 776
790/* 777/**
791 * atl1e_setup_mem_resources - allocate Tx / RX descriptor resources 778 * atl1e_setup_mem_resources - allocate Tx / RX descriptor resources
792 * @adapter: board private structure 779 * @adapter: board private structure
793 * 780 *
@@ -884,14 +871,12 @@ failed:
884 return err; 871 return err;
885} 872}
886 873
887static inline void atl1e_configure_des_ring(const struct atl1e_adapter *adapter) 874static inline void atl1e_configure_des_ring(struct atl1e_adapter *adapter)
888{ 875{
889 876
890 struct atl1e_hw *hw = (struct atl1e_hw *)&adapter->hw; 877 struct atl1e_hw *hw = &adapter->hw;
891 struct atl1e_rx_ring *rx_ring = 878 struct atl1e_rx_ring *rx_ring = &adapter->rx_ring;
892 (struct atl1e_rx_ring *)&adapter->rx_ring; 879 struct atl1e_tx_ring *tx_ring = &adapter->tx_ring;
893 struct atl1e_tx_ring *tx_ring =
894 (struct atl1e_tx_ring *)&adapter->tx_ring;
895 struct atl1e_rx_page_desc *rx_page_desc = NULL; 880 struct atl1e_rx_page_desc *rx_page_desc = NULL;
896 int i, j; 881 int i, j;
897 882
@@ -932,7 +917,7 @@ static inline void atl1e_configure_des_ring(const struct atl1e_adapter *adapter)
932 917
933static inline void atl1e_configure_tx(struct atl1e_adapter *adapter) 918static inline void atl1e_configure_tx(struct atl1e_adapter *adapter)
934{ 919{
935 struct atl1e_hw *hw = (struct atl1e_hw *)&adapter->hw; 920 struct atl1e_hw *hw = &adapter->hw;
936 u32 dev_ctrl_data = 0; 921 u32 dev_ctrl_data = 0;
937 u32 max_pay_load = 0; 922 u32 max_pay_load = 0;
938 u32 jumbo_thresh = 0; 923 u32 jumbo_thresh = 0;
@@ -975,7 +960,7 @@ static inline void atl1e_configure_tx(struct atl1e_adapter *adapter)
975 960
976static inline void atl1e_configure_rx(struct atl1e_adapter *adapter) 961static inline void atl1e_configure_rx(struct atl1e_adapter *adapter)
977{ 962{
978 struct atl1e_hw *hw = (struct atl1e_hw *)&adapter->hw; 963 struct atl1e_hw *hw = &adapter->hw;
979 u32 rxf_len = 0; 964 u32 rxf_len = 0;
980 u32 rxf_low = 0; 965 u32 rxf_low = 0;
981 u32 rxf_high = 0; 966 u32 rxf_high = 0;
@@ -1078,7 +1063,7 @@ static void atl1e_setup_mac_ctrl(struct atl1e_adapter *adapter)
1078 AT_WRITE_REG(hw, REG_MAC_CTRL, value); 1063 AT_WRITE_REG(hw, REG_MAC_CTRL, value);
1079} 1064}
1080 1065
1081/* 1066/**
1082 * atl1e_configure - Configure Transmit&Receive Unit after Reset 1067 * atl1e_configure - Configure Transmit&Receive Unit after Reset
1083 * @adapter: board private structure 1068 * @adapter: board private structure
1084 * 1069 *
@@ -1148,7 +1133,7 @@ static int atl1e_configure(struct atl1e_adapter *adapter)
1148 return 0; 1133 return 0;
1149} 1134}
1150 1135
1151/* 1136/**
1152 * atl1e_get_stats - Get System Network Statistics 1137 * atl1e_get_stats - Get System Network Statistics
1153 * @netdev: network interface device structure 1138 * @netdev: network interface device structure
1154 * 1139 *
@@ -1224,8 +1209,7 @@ static inline void atl1e_clear_phy_int(struct atl1e_adapter *adapter)
1224 1209
1225static bool atl1e_clean_tx_irq(struct atl1e_adapter *adapter) 1210static bool atl1e_clean_tx_irq(struct atl1e_adapter *adapter)
1226{ 1211{
1227 struct atl1e_tx_ring *tx_ring = (struct atl1e_tx_ring *) 1212 struct atl1e_tx_ring *tx_ring = &adapter->tx_ring;
1228 &adapter->tx_ring;
1229 struct atl1e_tx_buffer *tx_buffer = NULL; 1213 struct atl1e_tx_buffer *tx_buffer = NULL;
1230 u16 hw_next_to_clean = AT_READ_REGW(&adapter->hw, REG_TPD_CONS_IDX); 1214 u16 hw_next_to_clean = AT_READ_REGW(&adapter->hw, REG_TPD_CONS_IDX);
1231 u16 next_to_clean = atomic_read(&tx_ring->next_to_clean); 1215 u16 next_to_clean = atomic_read(&tx_ring->next_to_clean);
@@ -1261,11 +1245,10 @@ static bool atl1e_clean_tx_irq(struct atl1e_adapter *adapter)
1261 return true; 1245 return true;
1262} 1246}
1263 1247
1264/* 1248/**
1265 * atl1e_intr - Interrupt Handler 1249 * atl1e_intr - Interrupt Handler
1266 * @irq: interrupt number 1250 * @irq: interrupt number
1267 * @data: pointer to a network interface device structure 1251 * @data: pointer to a network interface device structure
1268 * @pt_regs: CPU registers structure
1269 */ 1252 */
1270static irqreturn_t atl1e_intr(int irq, void *data) 1253static irqreturn_t atl1e_intr(int irq, void *data)
1271{ 1254{
@@ -1384,15 +1367,14 @@ static struct atl1e_rx_page *atl1e_get_rx_page(struct atl1e_adapter *adapter,
1384 (struct atl1e_rx_page_desc *) adapter->rx_ring.rx_page_desc; 1367 (struct atl1e_rx_page_desc *) adapter->rx_ring.rx_page_desc;
1385 u8 rx_using = rx_page_desc[que].rx_using; 1368 u8 rx_using = rx_page_desc[que].rx_using;
1386 1369
1387 return (struct atl1e_rx_page *)&(rx_page_desc[que].rx_page[rx_using]); 1370 return &(rx_page_desc[que].rx_page[rx_using]);
1388} 1371}
1389 1372
1390static void atl1e_clean_rx_irq(struct atl1e_adapter *adapter, u8 que, 1373static void atl1e_clean_rx_irq(struct atl1e_adapter *adapter, u8 que,
1391 int *work_done, int work_to_do) 1374 int *work_done, int work_to_do)
1392{ 1375{
1393 struct net_device *netdev = adapter->netdev; 1376 struct net_device *netdev = adapter->netdev;
1394 struct atl1e_rx_ring *rx_ring = (struct atl1e_rx_ring *) 1377 struct atl1e_rx_ring *rx_ring = &adapter->rx_ring;
1395 &adapter->rx_ring;
1396 struct atl1e_rx_page_desc *rx_page_desc = 1378 struct atl1e_rx_page_desc *rx_page_desc =
1397 (struct atl1e_rx_page_desc *) rx_ring->rx_page_desc; 1379 (struct atl1e_rx_page_desc *) rx_ring->rx_page_desc;
1398 struct sk_buff *skb = NULL; 1380 struct sk_buff *skb = NULL;
@@ -1494,9 +1476,8 @@ fatal_err:
1494 schedule_work(&adapter->reset_task); 1476 schedule_work(&adapter->reset_task);
1495} 1477}
1496 1478
1497/* 1479/**
1498 * atl1e_clean - NAPI Rx polling callback 1480 * atl1e_clean - NAPI Rx polling callback
1499 * @adapter: board private structure
1500 */ 1481 */
1501static int atl1e_clean(struct napi_struct *napi, int budget) 1482static int atl1e_clean(struct napi_struct *napi, int budget)
1502{ 1483{
@@ -1576,7 +1557,7 @@ static struct atl1e_tpd_desc *atl1e_get_tpd(struct atl1e_adapter *adapter)
1576 tx_ring->next_to_use = 0; 1557 tx_ring->next_to_use = 0;
1577 1558
1578 memset(&tx_ring->desc[next_to_use], 0, sizeof(struct atl1e_tpd_desc)); 1559 memset(&tx_ring->desc[next_to_use], 0, sizeof(struct atl1e_tpd_desc));
1579 return (struct atl1e_tpd_desc *)&tx_ring->desc[next_to_use]; 1560 return &tx_ring->desc[next_to_use];
1580} 1561}
1581 1562
1582static struct atl1e_tx_buffer * 1563static struct atl1e_tx_buffer *
@@ -1961,7 +1942,7 @@ void atl1e_down(struct atl1e_adapter *adapter)
1961 atl1e_clean_rx_ring(adapter); 1942 atl1e_clean_rx_ring(adapter);
1962} 1943}
1963 1944
1964/* 1945/**
1965 * atl1e_open - Called when a network interface is made active 1946 * atl1e_open - Called when a network interface is made active
1966 * @netdev: network interface device structure 1947 * @netdev: network interface device structure
1967 * 1948 *
@@ -2007,7 +1988,7 @@ err_req_irq:
2007 return err; 1988 return err;
2008} 1989}
2009 1990
2010/* 1991/**
2011 * atl1e_close - Disables a network interface 1992 * atl1e_close - Disables a network interface
2012 * @netdev: network interface device structure 1993 * @netdev: network interface device structure
2013 * 1994 *
@@ -2061,8 +2042,8 @@ static int atl1e_suspend(struct pci_dev *pdev, pm_message_t state)
2061 2042
2062 if (wufc) { 2043 if (wufc) {
2063 /* get link status */ 2044 /* get link status */
2064 atl1e_read_phy_reg(hw, MII_BMSR, (u16 *)&mii_bmsr_data); 2045 atl1e_read_phy_reg(hw, MII_BMSR, &mii_bmsr_data);
2065 atl1e_read_phy_reg(hw, MII_BMSR, (u16 *)&mii_bmsr_data); 2046 atl1e_read_phy_reg(hw, MII_BMSR, &mii_bmsr_data);
2066 2047
2067 mii_advertise_data = ADVERTISE_10HALF; 2048 mii_advertise_data = ADVERTISE_10HALF;
2068 2049
@@ -2086,7 +2067,7 @@ static int atl1e_suspend(struct pci_dev *pdev, pm_message_t state)
2086 for (i = 0; i < AT_SUSPEND_LINK_TIMEOUT; i++) { 2067 for (i = 0; i < AT_SUSPEND_LINK_TIMEOUT; i++) {
2087 msleep(100); 2068 msleep(100);
2088 atl1e_read_phy_reg(hw, MII_BMSR, 2069 atl1e_read_phy_reg(hw, MII_BMSR,
2089 (u16 *)&mii_bmsr_data); 2070 &mii_bmsr_data);
2090 if (mii_bmsr_data & BMSR_LSTATUS) 2071 if (mii_bmsr_data & BMSR_LSTATUS)
2091 break; 2072 break;
2092 } 2073 }
@@ -2243,7 +2224,7 @@ static int atl1e_init_netdev(struct net_device *netdev, struct pci_dev *pdev)
2243 return 0; 2224 return 0;
2244} 2225}
2245 2226
2246/* 2227/**
2247 * atl1e_probe - Device Initialization Routine 2228 * atl1e_probe - Device Initialization Routine
2248 * @pdev: PCI device information struct 2229 * @pdev: PCI device information struct
2249 * @ent: entry in atl1e_pci_tbl 2230 * @ent: entry in atl1e_pci_tbl
@@ -2397,7 +2378,7 @@ err_dma:
2397 return err; 2378 return err;
2398} 2379}
2399 2380
2400/* 2381/**
2401 * atl1e_remove - Device Removal Routine 2382 * atl1e_remove - Device Removal Routine
2402 * @pdev: PCI device information struct 2383 * @pdev: PCI device information struct
2403 * 2384 *
@@ -2429,7 +2410,7 @@ static void __devexit atl1e_remove(struct pci_dev *pdev)
2429 pci_disable_device(pdev); 2410 pci_disable_device(pdev);
2430} 2411}
2431 2412
2432/* 2413/**
2433 * atl1e_io_error_detected - called when PCI error is detected 2414 * atl1e_io_error_detected - called when PCI error is detected
2434 * @pdev: Pointer to PCI device 2415 * @pdev: Pointer to PCI device
2435 * @state: The current pci connection state 2416 * @state: The current pci connection state
@@ -2457,7 +2438,7 @@ atl1e_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
2457 return PCI_ERS_RESULT_NEED_RESET; 2438 return PCI_ERS_RESULT_NEED_RESET;
2458} 2439}
2459 2440
2460/* 2441/**
2461 * atl1e_io_slot_reset - called after the pci bus has been reset. 2442 * atl1e_io_slot_reset - called after the pci bus has been reset.
2462 * @pdev: Pointer to PCI device 2443 * @pdev: Pointer to PCI device
2463 * 2444 *
@@ -2484,7 +2465,7 @@ static pci_ers_result_t atl1e_io_slot_reset(struct pci_dev *pdev)
2484 return PCI_ERS_RESULT_RECOVERED; 2465 return PCI_ERS_RESULT_RECOVERED;
2485} 2466}
2486 2467
2487/* 2468/**
2488 * atl1e_io_resume - called when traffic can start flowing again. 2469 * atl1e_io_resume - called when traffic can start flowing again.
2489 * @pdev: Pointer to PCI device 2470 * @pdev: Pointer to PCI device
2490 * 2471 *
@@ -2528,7 +2509,7 @@ static struct pci_driver atl1e_driver = {
2528 .err_handler = &atl1e_err_handler 2509 .err_handler = &atl1e_err_handler
2529}; 2510};
2530 2511
2531/* 2512/**
2532 * atl1e_init_module - Driver Registration Routine 2513 * atl1e_init_module - Driver Registration Routine
2533 * 2514 *
2534 * atl1e_init_module is the first routine called when the driver is 2515 * atl1e_init_module is the first routine called when the driver is
@@ -2539,7 +2520,7 @@ static int __init atl1e_init_module(void)
2539 return pci_register_driver(&atl1e_driver); 2520 return pci_register_driver(&atl1e_driver);
2540} 2521}
2541 2522
2542/* 2523/**
2543 * atl1e_exit_module - Driver Exit Cleanup Routine 2524 * atl1e_exit_module - Driver Exit Cleanup Routine
2544 * 2525 *
2545 * atl1e_exit_module is called just before the driver is removed 2526 * atl1e_exit_module is called just before the driver is removed
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_param.c b/drivers/net/ethernet/atheros/atl1e/atl1e_param.c
index 0ce60b6e7ef..b5086f1e637 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_param.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_param.c
@@ -168,7 +168,7 @@ static int __devinit atl1e_validate_option(int *value, struct atl1e_option *opt,
168 return -1; 168 return -1;
169} 169}
170 170
171/* 171/**
172 * atl1e_check_options - Range Checking for Command Line Parameters 172 * atl1e_check_options - Range Checking for Command Line Parameters
173 * @adapter: board private structure 173 * @adapter: board private structure
174 * 174 *
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c
index 5d10884e508..f2402f355ce 100644
--- a/drivers/net/ethernet/atheros/atlx/atl1.c
+++ b/drivers/net/ethernet/atheros/atlx/atl1.c
@@ -195,7 +195,7 @@ static int __devinit atl1_validate_option(int *value, struct atl1_option *opt,
195 return -1; 195 return -1;
196} 196}
197 197
198/* 198/**
199 * atl1_check_options - Range Checking for Command Line Parameters 199 * atl1_check_options - Range Checking for Command Line Parameters
200 * @adapter: board private structure 200 * @adapter: board private structure
201 * 201 *
@@ -937,7 +937,7 @@ static void atl1_set_mac_addr(struct atl1_hw *hw)
937 iowrite32(value, (hw->hw_addr + REG_MAC_STA_ADDR) + (1 << 2)); 937 iowrite32(value, (hw->hw_addr + REG_MAC_STA_ADDR) + (1 << 2));
938} 938}
939 939
940/* 940/**
941 * atl1_sw_init - Initialize general software structures (struct atl1_adapter) 941 * atl1_sw_init - Initialize general software structures (struct atl1_adapter)
942 * @adapter: board private structure to initialize 942 * @adapter: board private structure to initialize
943 * 943 *
@@ -1014,12 +1014,6 @@ static void mdio_write(struct net_device *netdev, int phy_id, int reg_num,
1014 atl1_write_phy_reg(&adapter->hw, reg_num, val); 1014 atl1_write_phy_reg(&adapter->hw, reg_num, val);
1015} 1015}
1016 1016
1017/*
1018 * atl1_mii_ioctl -
1019 * @netdev:
1020 * @ifreq:
1021 * @cmd:
1022 */
1023static int atl1_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) 1017static int atl1_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
1024{ 1018{
1025 struct atl1_adapter *adapter = netdev_priv(netdev); 1019 struct atl1_adapter *adapter = netdev_priv(netdev);
@@ -1036,7 +1030,7 @@ static int atl1_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
1036 return retval; 1030 return retval;
1037} 1031}
1038 1032
1039/* 1033/**
1040 * atl1_setup_mem_resources - allocate Tx / RX descriptor resources 1034 * atl1_setup_mem_resources - allocate Tx / RX descriptor resources
1041 * @adapter: board private structure 1035 * @adapter: board private structure
1042 * 1036 *
@@ -1061,7 +1055,7 @@ static s32 atl1_setup_ring_resources(struct atl1_adapter *adapter)
1061 goto err_nomem; 1055 goto err_nomem;
1062 } 1056 }
1063 rfd_ring->buffer_info = 1057 rfd_ring->buffer_info =
1064 (struct atl1_buffer *)(tpd_ring->buffer_info + tpd_ring->count); 1058 (tpd_ring->buffer_info + tpd_ring->count);
1065 1059
1066 /* 1060 /*
1067 * real ring DMA buffer 1061 * real ring DMA buffer
@@ -1147,7 +1141,7 @@ static void atl1_init_ring_ptrs(struct atl1_adapter *adapter)
1147 atomic_set(&rrd_ring->next_to_clean, 0); 1141 atomic_set(&rrd_ring->next_to_clean, 0);
1148} 1142}
1149 1143
1150/* 1144/**
1151 * atl1_clean_rx_ring - Free RFD Buffers 1145 * atl1_clean_rx_ring - Free RFD Buffers
1152 * @adapter: board private structure 1146 * @adapter: board private structure
1153 */ 1147 */
@@ -1187,7 +1181,7 @@ static void atl1_clean_rx_ring(struct atl1_adapter *adapter)
1187 atomic_set(&rrd_ring->next_to_clean, 0); 1181 atomic_set(&rrd_ring->next_to_clean, 0);
1188} 1182}
1189 1183
1190/* 1184/**
1191 * atl1_clean_tx_ring - Free Tx Buffers 1185 * atl1_clean_tx_ring - Free Tx Buffers
1192 * @adapter: board private structure 1186 * @adapter: board private structure
1193 */ 1187 */
@@ -1227,7 +1221,7 @@ static void atl1_clean_tx_ring(struct atl1_adapter *adapter)
1227 atomic_set(&tpd_ring->next_to_clean, 0); 1221 atomic_set(&tpd_ring->next_to_clean, 0);
1228} 1222}
1229 1223
1230/* 1224/**
1231 * atl1_free_ring_resources - Free Tx / RX descriptor Resources 1225 * atl1_free_ring_resources - Free Tx / RX descriptor Resources
1232 * @adapter: board private structure 1226 * @adapter: board private structure
1233 * 1227 *
@@ -1470,7 +1464,7 @@ static void set_flow_ctrl_new(struct atl1_hw *hw)
1470 iowrite32(value, hw->hw_addr + REG_RXQ_RRD_PAUSE_THRESH); 1464 iowrite32(value, hw->hw_addr + REG_RXQ_RRD_PAUSE_THRESH);
1471} 1465}
1472 1466
1473/* 1467/**
1474 * atl1_configure - Configure Transmit&Receive Unit after Reset 1468 * atl1_configure - Configure Transmit&Receive Unit after Reset
1475 * @adapter: board private structure 1469 * @adapter: board private structure
1476 * 1470 *
@@ -1844,7 +1838,7 @@ static void atl1_rx_checksum(struct atl1_adapter *adapter,
1844 } 1838 }
1845} 1839}
1846 1840
1847/* 1841/**
1848 * atl1_alloc_rx_buffers - Replace used receive buffers 1842 * atl1_alloc_rx_buffers - Replace used receive buffers
1849 * @adapter: address of board private structure 1843 * @adapter: address of board private structure
1850 */ 1844 */
@@ -2489,11 +2483,10 @@ static inline int atl1_sched_rings_clean(struct atl1_adapter* adapter)
2489 return 1; 2483 return 1;
2490} 2484}
2491 2485
2492/* 2486/**
2493 * atl1_intr - Interrupt Handler 2487 * atl1_intr - Interrupt Handler
2494 * @irq: interrupt number 2488 * @irq: interrupt number
2495 * @data: pointer to a network interface device structure 2489 * @data: pointer to a network interface device structure
2496 * @pt_regs: CPU registers structure
2497 */ 2490 */
2498static irqreturn_t atl1_intr(int irq, void *data) 2491static irqreturn_t atl1_intr(int irq, void *data)
2499{ 2492{
@@ -2574,7 +2567,7 @@ static irqreturn_t atl1_intr(int irq, void *data)
2574} 2567}
2575 2568
2576 2569
2577/* 2570/**
2578 * atl1_phy_config - Timer Call-back 2571 * atl1_phy_config - Timer Call-back
2579 * @data: pointer to netdev cast into an unsigned long 2572 * @data: pointer to netdev cast into an unsigned long
2580 */ 2573 */
@@ -2693,7 +2686,7 @@ static void atl1_reset_dev_task(struct work_struct *work)
2693 netif_device_attach(netdev); 2686 netif_device_attach(netdev);
2694} 2687}
2695 2688
2696/* 2689/**
2697 * atl1_change_mtu - Change the Maximum Transfer Unit 2690 * atl1_change_mtu - Change the Maximum Transfer Unit
2698 * @netdev: network interface device structure 2691 * @netdev: network interface device structure
2699 * @new_mtu: new value for maximum frame size 2692 * @new_mtu: new value for maximum frame size
@@ -2727,7 +2720,7 @@ static int atl1_change_mtu(struct net_device *netdev, int new_mtu)
2727 return 0; 2720 return 0;
2728} 2721}
2729 2722
2730/* 2723/**
2731 * atl1_open - Called when a network interface is made active 2724 * atl1_open - Called when a network interface is made active
2732 * @netdev: network interface device structure 2725 * @netdev: network interface device structure
2733 * 2726 *
@@ -2762,7 +2755,7 @@ err_up:
2762 return err; 2755 return err;
2763} 2756}
2764 2757
2765/* 2758/**
2766 * atl1_close - Disables a network interface 2759 * atl1_close - Disables a network interface
2767 * @netdev: network interface device structure 2760 * @netdev: network interface device structure
2768 * 2761 *
@@ -2930,7 +2923,7 @@ static const struct net_device_ops atl1_netdev_ops = {
2930#endif 2923#endif
2931}; 2924};
2932 2925
2933/* 2926/**
2934 * atl1_probe - Device Initialization Routine 2927 * atl1_probe - Device Initialization Routine
2935 * @pdev: PCI device information struct 2928 * @pdev: PCI device information struct
2936 * @ent: entry in atl1_pci_tbl 2929 * @ent: entry in atl1_pci_tbl
@@ -3111,7 +3104,7 @@ err_request_regions:
3111 return err; 3104 return err;
3112} 3105}
3113 3106
3114/* 3107/**
3115 * atl1_remove - Device Removal Routine 3108 * atl1_remove - Device Removal Routine
3116 * @pdev: PCI device information struct 3109 * @pdev: PCI device information struct
3117 * 3110 *
@@ -3158,7 +3151,7 @@ static struct pci_driver atl1_driver = {
3158 .driver.pm = ATL1_PM_OPS, 3151 .driver.pm = ATL1_PM_OPS,
3159}; 3152};
3160 3153
3161/* 3154/**
3162 * atl1_exit_module - Driver Exit Cleanup Routine 3155 * atl1_exit_module - Driver Exit Cleanup Routine
3163 * 3156 *
3164 * atl1_exit_module is called just before the driver is removed 3157 * atl1_exit_module is called just before the driver is removed
@@ -3169,7 +3162,7 @@ static void __exit atl1_exit_module(void)
3169 pci_unregister_driver(&atl1_driver); 3162 pci_unregister_driver(&atl1_driver);
3170} 3163}
3171 3164
3172/* 3165/**
3173 * atl1_init_module - Driver Registration Routine 3166 * atl1_init_module - Driver Registration Routine
3174 * 3167 *
3175 * atl1_init_module is the first routine called when the driver is 3168 * atl1_init_module is the first routine called when the driver is
diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
index 6762dc406b2..7c0b7e2bcb6 100644
--- a/drivers/net/ethernet/atheros/atlx/atl2.c
+++ b/drivers/net/ethernet/atheros/atlx/atl2.c
@@ -75,7 +75,7 @@ static void atl2_set_ethtool_ops(struct net_device *netdev);
75 75
76static void atl2_check_options(struct atl2_adapter *adapter); 76static void atl2_check_options(struct atl2_adapter *adapter);
77 77
78/* 78/**
79 * atl2_sw_init - Initialize general software structures (struct atl2_adapter) 79 * atl2_sw_init - Initialize general software structures (struct atl2_adapter)
80 * @adapter: board private structure to initialize 80 * @adapter: board private structure to initialize
81 * 81 *
@@ -123,7 +123,7 @@ static int __devinit atl2_sw_init(struct atl2_adapter *adapter)
123 return 0; 123 return 0;
124} 124}
125 125
126/* 126/**
127 * atl2_set_multi - Multicast and Promiscuous mode set 127 * atl2_set_multi - Multicast and Promiscuous mode set
128 * @netdev: network interface device structure 128 * @netdev: network interface device structure
129 * 129 *
@@ -177,7 +177,7 @@ static void init_ring_ptrs(struct atl2_adapter *adapter)
177 adapter->txs_next_clear = 0; 177 adapter->txs_next_clear = 0;
178} 178}
179 179
180/* 180/**
181 * atl2_configure - Configure Transmit&Receive Unit after Reset 181 * atl2_configure - Configure Transmit&Receive Unit after Reset
182 * @adapter: board private structure 182 * @adapter: board private structure
183 * 183 *
@@ -283,7 +283,7 @@ static int atl2_configure(struct atl2_adapter *adapter)
283 return value; 283 return value;
284} 284}
285 285
286/* 286/**
287 * atl2_setup_ring_resources - allocate Tx / RX descriptor resources 287 * atl2_setup_ring_resources - allocate Tx / RX descriptor resources
288 * @adapter: board private structure 288 * @adapter: board private structure
289 * 289 *
@@ -340,7 +340,7 @@ static s32 atl2_setup_ring_resources(struct atl2_adapter *adapter)
340 return 0; 340 return 0;
341} 341}
342 342
343/* 343/**
344 * atl2_irq_enable - Enable default interrupt generation settings 344 * atl2_irq_enable - Enable default interrupt generation settings
345 * @adapter: board private structure 345 * @adapter: board private structure
346 */ 346 */
@@ -350,7 +350,7 @@ static inline void atl2_irq_enable(struct atl2_adapter *adapter)
350 ATL2_WRITE_FLUSH(&adapter->hw); 350 ATL2_WRITE_FLUSH(&adapter->hw);
351} 351}
352 352
353/* 353/**
354 * atl2_irq_disable - Mask off interrupt generation on the NIC 354 * atl2_irq_disable - Mask off interrupt generation on the NIC
355 * @adapter: board private structure 355 * @adapter: board private structure
356 */ 356 */
@@ -599,11 +599,10 @@ static inline void atl2_clear_phy_int(struct atl2_adapter *adapter)
599 spin_unlock(&adapter->stats_lock); 599 spin_unlock(&adapter->stats_lock);
600} 600}
601 601
602/* 602/**
603 * atl2_intr - Interrupt Handler 603 * atl2_intr - Interrupt Handler
604 * @irq: interrupt number 604 * @irq: interrupt number
605 * @data: pointer to a network interface device structure 605 * @data: pointer to a network interface device structure
606 * @pt_regs: CPU registers structure
607 */ 606 */
608static irqreturn_t atl2_intr(int irq, void *data) 607static irqreturn_t atl2_intr(int irq, void *data)
609{ 608{
@@ -679,7 +678,7 @@ static int atl2_request_irq(struct atl2_adapter *adapter)
679 netdev); 678 netdev);
680} 679}
681 680
682/* 681/**
683 * atl2_free_ring_resources - Free Tx / RX descriptor Resources 682 * atl2_free_ring_resources - Free Tx / RX descriptor Resources
684 * @adapter: board private structure 683 * @adapter: board private structure
685 * 684 *
@@ -692,7 +691,7 @@ static void atl2_free_ring_resources(struct atl2_adapter *adapter)
692 adapter->ring_dma); 691 adapter->ring_dma);
693} 692}
694 693
695/* 694/**
696 * atl2_open - Called when a network interface is made active 695 * atl2_open - Called when a network interface is made active
697 * @netdev: network interface device structure 696 * @netdev: network interface device structure
698 * 697 *
@@ -798,7 +797,7 @@ static void atl2_free_irq(struct atl2_adapter *adapter)
798#endif 797#endif
799} 798}
800 799
801/* 800/**
802 * atl2_close - Disables a network interface 801 * atl2_close - Disables a network interface
803 * @netdev: network interface device structure 802 * @netdev: network interface device structure
804 * 803 *
@@ -918,7 +917,7 @@ static netdev_tx_t atl2_xmit_frame(struct sk_buff *skb,
918 return NETDEV_TX_OK; 917 return NETDEV_TX_OK;
919} 918}
920 919
921/* 920/**
922 * atl2_change_mtu - Change the Maximum Transfer Unit 921 * atl2_change_mtu - Change the Maximum Transfer Unit
923 * @netdev: network interface device structure 922 * @netdev: network interface device structure
924 * @new_mtu: new value for maximum frame size 923 * @new_mtu: new value for maximum frame size
@@ -943,7 +942,7 @@ static int atl2_change_mtu(struct net_device *netdev, int new_mtu)
943 return 0; 942 return 0;
944} 943}
945 944
946/* 945/**
947 * atl2_set_mac - Change the Ethernet Address of the NIC 946 * atl2_set_mac - Change the Ethernet Address of the NIC
948 * @netdev: network interface device structure 947 * @netdev: network interface device structure
949 * @p: pointer to an address structure 948 * @p: pointer to an address structure
@@ -969,12 +968,6 @@ static int atl2_set_mac(struct net_device *netdev, void *p)
969 return 0; 968 return 0;
970} 969}
971 970
972/*
973 * atl2_mii_ioctl -
974 * @netdev:
975 * @ifreq:
976 * @cmd:
977 */
978static int atl2_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) 971static int atl2_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
979{ 972{
980 struct atl2_adapter *adapter = netdev_priv(netdev); 973 struct atl2_adapter *adapter = netdev_priv(netdev);
@@ -1011,12 +1004,6 @@ static int atl2_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
1011 return 0; 1004 return 0;
1012} 1005}
1013 1006
1014/*
1015 * atl2_ioctl -
1016 * @netdev:
1017 * @ifreq:
1018 * @cmd:
1019 */
1020static int atl2_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) 1007static int atl2_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
1021{ 1008{
1022 switch (cmd) { 1009 switch (cmd) {
@@ -1033,7 +1020,7 @@ static int atl2_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
1033 } 1020 }
1034} 1021}
1035 1022
1036/* 1023/**
1037 * atl2_tx_timeout - Respond to a Tx Hang 1024 * atl2_tx_timeout - Respond to a Tx Hang
1038 * @netdev: network interface device structure 1025 * @netdev: network interface device structure
1039 */ 1026 */
@@ -1045,7 +1032,7 @@ static void atl2_tx_timeout(struct net_device *netdev)
1045 schedule_work(&adapter->reset_task); 1032 schedule_work(&adapter->reset_task);
1046} 1033}
1047 1034
1048/* 1035/**
1049 * atl2_watchdog - Timer Call-back 1036 * atl2_watchdog - Timer Call-back
1050 * @data: pointer to netdev cast into an unsigned long 1037 * @data: pointer to netdev cast into an unsigned long
1051 */ 1038 */
@@ -1070,7 +1057,7 @@ static void atl2_watchdog(unsigned long data)
1070 } 1057 }
1071} 1058}
1072 1059
1073/* 1060/**
1074 * atl2_phy_config - Timer Call-back 1061 * atl2_phy_config - Timer Call-back
1075 * @data: pointer to netdev cast into an unsigned long 1062 * @data: pointer to netdev cast into an unsigned long
1076 */ 1063 */
@@ -1274,9 +1261,8 @@ static int atl2_check_link(struct atl2_adapter *adapter)
1274 return 0; 1261 return 0;
1275} 1262}
1276 1263
1277/* 1264/**
1278 * atl2_link_chg_task - deal with link change event Out of interrupt context 1265 * atl2_link_chg_task - deal with link change event Out of interrupt context
1279 * @netdev: network interface device structure
1280 */ 1266 */
1281static void atl2_link_chg_task(struct work_struct *work) 1267static void atl2_link_chg_task(struct work_struct *work)
1282{ 1268{
@@ -1341,7 +1327,7 @@ static const struct net_device_ops atl2_netdev_ops = {
1341#endif 1327#endif
1342}; 1328};
1343 1329
1344/* 1330/**
1345 * atl2_probe - Device Initialization Routine 1331 * atl2_probe - Device Initialization Routine
1346 * @pdev: PCI device information struct 1332 * @pdev: PCI device information struct
1347 * @ent: entry in atl2_pci_tbl 1333 * @ent: entry in atl2_pci_tbl
@@ -1501,7 +1487,7 @@ err_dma:
1501 return err; 1487 return err;
1502} 1488}
1503 1489
1504/* 1490/**
1505 * atl2_remove - Device Removal Routine 1491 * atl2_remove - Device Removal Routine
1506 * @pdev: PCI device information struct 1492 * @pdev: PCI device information struct
1507 * 1493 *
@@ -1728,7 +1714,7 @@ static struct pci_driver atl2_driver = {
1728 .shutdown = atl2_shutdown, 1714 .shutdown = atl2_shutdown,
1729}; 1715};
1730 1716
1731/* 1717/**
1732 * atl2_init_module - Driver Registration Routine 1718 * atl2_init_module - Driver Registration Routine
1733 * 1719 *
1734 * atl2_init_module is the first routine called when the driver is 1720 * atl2_init_module is the first routine called when the driver is
@@ -1743,7 +1729,7 @@ static int __init atl2_init_module(void)
1743} 1729}
1744module_init(atl2_init_module); 1730module_init(atl2_init_module);
1745 1731
1746/* 1732/**
1747 * atl2_exit_module - Driver Exit Cleanup Routine 1733 * atl2_exit_module - Driver Exit Cleanup Routine
1748 * 1734 *
1749 * atl2_exit_module is called just before the driver is removed 1735 * atl2_exit_module is called just before the driver is removed
@@ -2997,7 +2983,7 @@ static int __devinit atl2_validate_option(int *value, struct atl2_option *opt)
2997 return -1; 2983 return -1;
2998} 2984}
2999 2985
3000/* 2986/**
3001 * atl2_check_options - Range Checking for Command Line Parameters 2987 * atl2_check_options - Range Checking for Command Line Parameters
3002 * @adapter: board private structure 2988 * @adapter: board private structure
3003 * 2989 *
diff --git a/drivers/net/ethernet/atheros/atlx/atlx.c b/drivers/net/ethernet/atheros/atlx/atlx.c
index b4f3aa49a7f..77ffbc4a507 100644
--- a/drivers/net/ethernet/atheros/atlx/atlx.c
+++ b/drivers/net/ethernet/atheros/atlx/atlx.c
@@ -64,7 +64,7 @@ static int atlx_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
64 } 64 }
65} 65}
66 66
67/* 67/**
68 * atlx_set_mac - Change the Ethernet Address of the NIC 68 * atlx_set_mac - Change the Ethernet Address of the NIC
69 * @netdev: network interface device structure 69 * @netdev: network interface device structure
70 * @p: pointer to an address structure 70 * @p: pointer to an address structure
@@ -115,7 +115,7 @@ static void atlx_check_for_link(struct atlx_adapter *adapter)
115 schedule_work(&adapter->link_chg_task); 115 schedule_work(&adapter->link_chg_task);
116} 116}
117 117
118/* 118/**
119 * atlx_set_multi - Multicast and Promiscuous mode set 119 * atlx_set_multi - Multicast and Promiscuous mode set
120 * @netdev: network interface device structure 120 * @netdev: network interface device structure
121 * 121 *
@@ -162,7 +162,7 @@ static inline void atlx_imr_set(struct atlx_adapter *adapter,
162 ioread32(adapter->hw.hw_addr + REG_IMR); 162 ioread32(adapter->hw.hw_addr + REG_IMR);
163} 163}
164 164
165/* 165/**
166 * atlx_irq_enable - Enable default interrupt generation settings 166 * atlx_irq_enable - Enable default interrupt generation settings
167 * @adapter: board private structure 167 * @adapter: board private structure
168 */ 168 */
@@ -172,7 +172,7 @@ static void atlx_irq_enable(struct atlx_adapter *adapter)
172 adapter->int_enabled = true; 172 adapter->int_enabled = true;
173} 173}
174 174
175/* 175/**
176 * atlx_irq_disable - Mask off interrupt generation on the NIC 176 * atlx_irq_disable - Mask off interrupt generation on the NIC
177 * @adapter: board private structure 177 * @adapter: board private structure
178 */ 178 */
@@ -193,7 +193,7 @@ static void atlx_clear_phy_int(struct atlx_adapter *adapter)
193 spin_unlock_irqrestore(&adapter->lock, flags); 193 spin_unlock_irqrestore(&adapter->lock, flags);
194} 194}
195 195
196/* 196/**
197 * atlx_tx_timeout - Respond to a Tx Hang 197 * atlx_tx_timeout - Respond to a Tx Hang
198 * @netdev: network interface device structure 198 * @netdev: network interface device structure
199 */ 199 */
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index 1fa4927a45b..0ced154129a 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -14,6 +14,7 @@
14#include <linux/module.h> 14#include <linux/module.h>
15#include <linux/moduleparam.h> 15#include <linux/moduleparam.h>
16 16
17#include <linux/stringify.h>
17#include <linux/kernel.h> 18#include <linux/kernel.h>
18#include <linux/timer.h> 19#include <linux/timer.h>
19#include <linux/errno.h> 20#include <linux/errno.h>
@@ -57,8 +58,8 @@
57#include "bnx2_fw.h" 58#include "bnx2_fw.h"
58 59
59#define DRV_MODULE_NAME "bnx2" 60#define DRV_MODULE_NAME "bnx2"
60#define DRV_MODULE_VERSION "2.2.1" 61#define DRV_MODULE_VERSION "2.2.3"
61#define DRV_MODULE_RELDATE "Dec 18, 2011" 62#define DRV_MODULE_RELDATE "June 27, 2012"
62#define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-6.2.3.fw" 63#define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-6.2.3.fw"
63#define FW_RV2P_FILE_06 "bnx2/bnx2-rv2p-06-6.0.15.fw" 64#define FW_RV2P_FILE_06 "bnx2/bnx2-rv2p-06-6.0.15.fw"
64#define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-6.2.1b.fw" 65#define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-6.2.1b.fw"
@@ -872,8 +873,7 @@ bnx2_alloc_mem(struct bnx2 *bp)
872 873
873 bnapi = &bp->bnx2_napi[i]; 874 bnapi = &bp->bnx2_napi[i];
874 875
875 sblk = (void *) (status_blk + 876 sblk = (status_blk + BNX2_SBLK_MSIX_ALIGN_SIZE * i);
876 BNX2_SBLK_MSIX_ALIGN_SIZE * i);
877 bnapi->status_blk.msix = sblk; 877 bnapi->status_blk.msix = sblk;
878 bnapi->hw_tx_cons_ptr = 878 bnapi->hw_tx_cons_ptr =
879 &sblk->status_tx_quick_consumer_index; 879 &sblk->status_tx_quick_consumer_index;
@@ -1972,22 +1972,26 @@ bnx2_remote_phy_event(struct bnx2 *bp)
1972 switch (speed) { 1972 switch (speed) {
1973 case BNX2_LINK_STATUS_10HALF: 1973 case BNX2_LINK_STATUS_10HALF:
1974 bp->duplex = DUPLEX_HALF; 1974 bp->duplex = DUPLEX_HALF;
1975 /* fall through */
1975 case BNX2_LINK_STATUS_10FULL: 1976 case BNX2_LINK_STATUS_10FULL:
1976 bp->line_speed = SPEED_10; 1977 bp->line_speed = SPEED_10;
1977 break; 1978 break;
1978 case BNX2_LINK_STATUS_100HALF: 1979 case BNX2_LINK_STATUS_100HALF:
1979 bp->duplex = DUPLEX_HALF; 1980 bp->duplex = DUPLEX_HALF;
1981 /* fall through */
1980 case BNX2_LINK_STATUS_100BASE_T4: 1982 case BNX2_LINK_STATUS_100BASE_T4:
1981 case BNX2_LINK_STATUS_100FULL: 1983 case BNX2_LINK_STATUS_100FULL:
1982 bp->line_speed = SPEED_100; 1984 bp->line_speed = SPEED_100;
1983 break; 1985 break;
1984 case BNX2_LINK_STATUS_1000HALF: 1986 case BNX2_LINK_STATUS_1000HALF:
1985 bp->duplex = DUPLEX_HALF; 1987 bp->duplex = DUPLEX_HALF;
1988 /* fall through */
1986 case BNX2_LINK_STATUS_1000FULL: 1989 case BNX2_LINK_STATUS_1000FULL:
1987 bp->line_speed = SPEED_1000; 1990 bp->line_speed = SPEED_1000;
1988 break; 1991 break;
1989 case BNX2_LINK_STATUS_2500HALF: 1992 case BNX2_LINK_STATUS_2500HALF:
1990 bp->duplex = DUPLEX_HALF; 1993 bp->duplex = DUPLEX_HALF;
1994 /* fall through */
1991 case BNX2_LINK_STATUS_2500FULL: 1995 case BNX2_LINK_STATUS_2500FULL:
1992 bp->line_speed = SPEED_2500; 1996 bp->line_speed = SPEED_2500;
1993 break; 1997 break;
@@ -2473,6 +2477,7 @@ bnx2_dump_mcp_state(struct bnx2 *bp)
2473 bnx2_shmem_rd(bp, BNX2_BC_STATE_RESET_TYPE)); 2477 bnx2_shmem_rd(bp, BNX2_BC_STATE_RESET_TYPE));
2474 pr_cont(" condition[%08x]\n", 2478 pr_cont(" condition[%08x]\n",
2475 bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION)); 2479 bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION));
2480 DP_SHMEM_LINE(bp, BNX2_BC_RESET_TYPE);
2476 DP_SHMEM_LINE(bp, 0x3cc); 2481 DP_SHMEM_LINE(bp, 0x3cc);
2477 DP_SHMEM_LINE(bp, 0x3dc); 2482 DP_SHMEM_LINE(bp, 0x3dc);
2478 DP_SHMEM_LINE(bp, 0x3ec); 2483 DP_SHMEM_LINE(bp, 0x3ec);
@@ -6245,7 +6250,7 @@ bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
6245static int 6250static int
6246bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi) 6251bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
6247{ 6252{
6248 int cpus = num_online_cpus(); 6253 int cpus = netif_get_num_default_rss_queues();
6249 int msix_vecs; 6254 int msix_vecs;
6250 6255
6251 if (!bp->num_req_rx_rings) 6256 if (!bp->num_req_rx_rings)
@@ -6406,6 +6411,75 @@ bnx2_reset_task(struct work_struct *work)
6406 rtnl_unlock(); 6411 rtnl_unlock();
6407} 6412}
6408 6413
6414#define BNX2_FTQ_ENTRY(ftq) { __stringify(ftq##FTQ_CTL), BNX2_##ftq##FTQ_CTL }
6415
6416static void
6417bnx2_dump_ftq(struct bnx2 *bp)
6418{
6419 int i;
6420 u32 reg, bdidx, cid, valid;
6421 struct net_device *dev = bp->dev;
6422 static const struct ftq_reg {
6423 char *name;
6424 u32 off;
6425 } ftq_arr[] = {
6426 BNX2_FTQ_ENTRY(RV2P_P),
6427 BNX2_FTQ_ENTRY(RV2P_T),
6428 BNX2_FTQ_ENTRY(RV2P_M),
6429 BNX2_FTQ_ENTRY(TBDR_),
6430 BNX2_FTQ_ENTRY(TDMA_),
6431 BNX2_FTQ_ENTRY(TXP_),
6432 BNX2_FTQ_ENTRY(TXP_),
6433 BNX2_FTQ_ENTRY(TPAT_),
6434 BNX2_FTQ_ENTRY(RXP_C),
6435 BNX2_FTQ_ENTRY(RXP_),
6436 BNX2_FTQ_ENTRY(COM_COMXQ_),
6437 BNX2_FTQ_ENTRY(COM_COMTQ_),
6438 BNX2_FTQ_ENTRY(COM_COMQ_),
6439 BNX2_FTQ_ENTRY(CP_CPQ_),
6440 };
6441
6442 netdev_err(dev, "<--- start FTQ dump --->\n");
6443 for (i = 0; i < ARRAY_SIZE(ftq_arr); i++)
6444 netdev_err(dev, "%s %08x\n", ftq_arr[i].name,
6445 bnx2_reg_rd_ind(bp, ftq_arr[i].off));
6446
6447 netdev_err(dev, "CPU states:\n");
6448 for (reg = BNX2_TXP_CPU_MODE; reg <= BNX2_CP_CPU_MODE; reg += 0x40000)
6449 netdev_err(dev, "%06x mode %x state %x evt_mask %x pc %x pc %x instr %x\n",
6450 reg, bnx2_reg_rd_ind(bp, reg),
6451 bnx2_reg_rd_ind(bp, reg + 4),
6452 bnx2_reg_rd_ind(bp, reg + 8),
6453 bnx2_reg_rd_ind(bp, reg + 0x1c),
6454 bnx2_reg_rd_ind(bp, reg + 0x1c),
6455 bnx2_reg_rd_ind(bp, reg + 0x20));
6456
6457 netdev_err(dev, "<--- end FTQ dump --->\n");
6458 netdev_err(dev, "<--- start TBDC dump --->\n");
6459 netdev_err(dev, "TBDC free cnt: %ld\n",
6460 REG_RD(bp, BNX2_TBDC_STATUS) & BNX2_TBDC_STATUS_FREE_CNT);
6461 netdev_err(dev, "LINE CID BIDX CMD VALIDS\n");
6462 for (i = 0; i < 0x20; i++) {
6463 int j = 0;
6464
6465 REG_WR(bp, BNX2_TBDC_BD_ADDR, i);
6466 REG_WR(bp, BNX2_TBDC_CAM_OPCODE,
6467 BNX2_TBDC_CAM_OPCODE_OPCODE_CAM_READ);
6468 REG_WR(bp, BNX2_TBDC_COMMAND, BNX2_TBDC_COMMAND_CMD_REG_ARB);
6469 while ((REG_RD(bp, BNX2_TBDC_COMMAND) &
6470 BNX2_TBDC_COMMAND_CMD_REG_ARB) && j < 100)
6471 j++;
6472
6473 cid = REG_RD(bp, BNX2_TBDC_CID);
6474 bdidx = REG_RD(bp, BNX2_TBDC_BIDX);
6475 valid = REG_RD(bp, BNX2_TBDC_CAM_OPCODE);
6476 netdev_err(dev, "%02x %06x %04lx %02x [%x]\n",
6477 i, cid, bdidx & BNX2_TBDC_BDIDX_BDIDX,
6478 bdidx >> 24, (valid >> 8) & 0x0ff);
6479 }
6480 netdev_err(dev, "<--- end TBDC dump --->\n");
6481}
6482
6409static void 6483static void
6410bnx2_dump_state(struct bnx2 *bp) 6484bnx2_dump_state(struct bnx2 *bp)
6411{ 6485{
@@ -6435,6 +6509,7 @@ bnx2_tx_timeout(struct net_device *dev)
6435{ 6509{
6436 struct bnx2 *bp = netdev_priv(dev); 6510 struct bnx2 *bp = netdev_priv(dev);
6437 6511
6512 bnx2_dump_ftq(bp);
6438 bnx2_dump_state(bp); 6513 bnx2_dump_state(bp);
6439 bnx2_dump_mcp_state(bp); 6514 bnx2_dump_mcp_state(bp);
6440 6515
@@ -6628,6 +6703,7 @@ bnx2_close(struct net_device *dev)
6628 6703
6629 bnx2_disable_int_sync(bp); 6704 bnx2_disable_int_sync(bp);
6630 bnx2_napi_disable(bp); 6705 bnx2_napi_disable(bp);
6706 netif_tx_disable(dev);
6631 del_timer_sync(&bp->timer); 6707 del_timer_sync(&bp->timer);
6632 bnx2_shutdown_chip(bp); 6708 bnx2_shutdown_chip(bp);
6633 bnx2_free_irq(bp); 6709 bnx2_free_irq(bp);
@@ -7832,7 +7908,7 @@ bnx2_get_5709_media(struct bnx2 *bp)
7832 else 7908 else
7833 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8; 7909 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
7834 7910
7835 if (PCI_FUNC(bp->pdev->devfn) == 0) { 7911 if (bp->func == 0) {
7836 switch (strap) { 7912 switch (strap) {
7837 case 0x4: 7913 case 0x4:
7838 case 0x5: 7914 case 0x5:
@@ -8131,9 +8207,12 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
8131 8207
8132 reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE); 8208 reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);
8133 8209
8210 if (bnx2_reg_rd_ind(bp, BNX2_MCP_TOE_ID) & BNX2_MCP_TOE_ID_FUNCTION_ID)
8211 bp->func = 1;
8212
8134 if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) == 8213 if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
8135 BNX2_SHM_HDR_SIGNATURE_SIG) { 8214 BNX2_SHM_HDR_SIGNATURE_SIG) {
8136 u32 off = PCI_FUNC(pdev->devfn) << 2; 8215 u32 off = bp->func << 2;
8137 8216
8138 bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off); 8217 bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
8139 } else 8218 } else
diff --git a/drivers/net/ethernet/broadcom/bnx2.h b/drivers/net/ethernet/broadcom/bnx2.h
index dc06bda73be..af6451dec29 100644
--- a/drivers/net/ethernet/broadcom/bnx2.h
+++ b/drivers/net/ethernet/broadcom/bnx2.h
@@ -4642,6 +4642,47 @@ struct l2_fhdr {
4642#define BNX2_TBDR_FTQ_CTL_CUR_DEPTH (0x3ffL<<22) 4642#define BNX2_TBDR_FTQ_CTL_CUR_DEPTH (0x3ffL<<22)
4643 4643
4644 4644
4645/*
4646 * tbdc definition
4647 * offset: 0x5400
4648 */
4649#define BNX2_TBDC_COMMAND 0x5400
4650#define BNX2_TBDC_COMMAND_CMD_ENABLED (1UL<<0)
4651#define BNX2_TBDC_COMMAND_CMD_FLUSH (1UL<<1)
4652#define BNX2_TBDC_COMMAND_CMD_SOFT_RST (1UL<<2)
4653#define BNX2_TBDC_COMMAND_CMD_REG_ARB (1UL<<3)
4654#define BNX2_TBDC_COMMAND_WRCHK_RANGE_ERROR (1UL<<4)
4655#define BNX2_TBDC_COMMAND_WRCHK_ALL_ONES_ERROR (1UL<<5)
4656#define BNX2_TBDC_COMMAND_WRCHK_ALL_ZEROS_ERROR (1UL<<6)
4657#define BNX2_TBDC_COMMAND_WRCHK_ANY_ONES_ERROR (1UL<<7)
4658#define BNX2_TBDC_COMMAND_WRCHK_ANY_ZEROS_ERROR (1UL<<8)
4659
4660#define BNX2_TBDC_STATUS 0x5404
4661#define BNX2_TBDC_STATUS_FREE_CNT (0x3fUL<<0)
4662
4663#define BNX2_TBDC_BD_ADDR 0x5424
4664
4665#define BNX2_TBDC_BIDX 0x542c
4666#define BNX2_TBDC_BDIDX_BDIDX (0xffffUL<<0)
4667#define BNX2_TBDC_BDIDX_CMD (0xffUL<<24)
4668
4669#define BNX2_TBDC_CID 0x5430
4670
4671#define BNX2_TBDC_CAM_OPCODE 0x5434
4672#define BNX2_TBDC_CAM_OPCODE_OPCODE (0x7UL<<0)
4673#define BNX2_TBDC_CAM_OPCODE_OPCODE_SEARCH (0UL<<0)
4674#define BNX2_TBDC_CAM_OPCODE_OPCODE_CACHE_WRITE (1UL<<0)
4675#define BNX2_TBDC_CAM_OPCODE_OPCODE_INVALIDATE (2UL<<0)
4676#define BNX2_TBDC_CAM_OPCODE_OPCODE_CAM_WRITE (4UL<<0)
4677#define BNX2_TBDC_CAM_OPCODE_OPCODE_CAM_READ (5UL<<0)
4678#define BNX2_TBDC_CAM_OPCODE_OPCODE_RAM_WRITE (6UL<<0)
4679#define BNX2_TBDC_CAM_OPCODE_OPCODE_RAM_READ (7UL<<0)
4680#define BNX2_TBDC_CAM_OPCODE_SMASK_BDIDX (1UL<<4)
4681#define BNX2_TBDC_CAM_OPCODE_SMASK_CID (1UL<<5)
4682#define BNX2_TBDC_CAM_OPCODE_SMASK_CMD (1UL<<6)
4683#define BNX2_TBDC_CAM_OPCODE_WMT_FAILED (1UL<<7)
4684#define BNX2_TBDC_CAM_OPCODE_CAM_VALIDS (0xffUL<<8)
4685
4645 4686
4646/* 4687/*
4647 * tdma_reg definition 4688 * tdma_reg definition
@@ -6930,6 +6971,8 @@ struct bnx2 {
6930 struct bnx2_irq irq_tbl[BNX2_MAX_MSIX_VEC]; 6971 struct bnx2_irq irq_tbl[BNX2_MAX_MSIX_VEC];
6931 int irq_nvecs; 6972 int irq_nvecs;
6932 6973
6974 u8 func;
6975
6933 u8 num_tx_rings; 6976 u8 num_tx_rings;
6934 u8 num_rx_rings; 6977 u8 num_rx_rings;
6935 6978
@@ -7314,6 +7357,8 @@ struct bnx2_rv2p_fw_file {
7314#define BNX2_BC_STATE_RESET_TYPE_VALUE(msg) (BNX2_BC_STATE_RESET_TYPE_SIG | \ 7357#define BNX2_BC_STATE_RESET_TYPE_VALUE(msg) (BNX2_BC_STATE_RESET_TYPE_SIG | \
7315 (msg)) 7358 (msg))
7316 7359
7360#define BNX2_BC_RESET_TYPE 0x000001c0
7361
7317#define BNX2_BC_STATE 0x000001c4 7362#define BNX2_BC_STATE 0x000001c4
7318#define BNX2_BC_STATE_ERR_MASK 0x0000ff00 7363#define BNX2_BC_STATE_ERR_MASK 0x0000ff00
7319#define BNX2_BC_STATE_SIGN 0x42530000 7364#define BNX2_BC_STATE_SIGN 0x42530000
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index 7de82418497..52f33b8c41e 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -23,8 +23,8 @@
23 * (you will need to reboot afterwards) */ 23 * (you will need to reboot afterwards) */
24/* #define BNX2X_STOP_ON_ERROR */ 24/* #define BNX2X_STOP_ON_ERROR */
25 25
26#define DRV_MODULE_VERSION "1.72.50-0" 26#define DRV_MODULE_VERSION "1.72.51-0"
27#define DRV_MODULE_RELDATE "2012/04/23" 27#define DRV_MODULE_RELDATE "2012/06/18"
28#define BNX2X_BC_VER 0x040200 28#define BNX2X_BC_VER 0x040200
29 29
30#if defined(CONFIG_DCB) 30#if defined(CONFIG_DCB)
@@ -51,6 +51,7 @@
51 51
52#include "bnx2x_reg.h" 52#include "bnx2x_reg.h"
53#include "bnx2x_fw_defs.h" 53#include "bnx2x_fw_defs.h"
54#include "bnx2x_mfw_req.h"
54#include "bnx2x_hsi.h" 55#include "bnx2x_hsi.h"
55#include "bnx2x_link.h" 56#include "bnx2x_link.h"
56#include "bnx2x_sp.h" 57#include "bnx2x_sp.h"
@@ -248,13 +249,12 @@ enum {
248 BNX2X_MAX_CNIC_ETH_CL_ID_IDX, 249 BNX2X_MAX_CNIC_ETH_CL_ID_IDX,
249}; 250};
250 251
251#define BNX2X_CNIC_START_ETH_CID 48 252#define BNX2X_CNIC_START_ETH_CID(bp) (BNX2X_NUM_NON_CNIC_QUEUES(bp) *\
252enum { 253 (bp)->max_cos)
253 /* iSCSI L2 */ 254 /* iSCSI L2 */
254 BNX2X_ISCSI_ETH_CID = BNX2X_CNIC_START_ETH_CID, 255#define BNX2X_ISCSI_ETH_CID(bp) (BNX2X_CNIC_START_ETH_CID(bp))
255 /* FCoE L2 */ 256 /* FCoE L2 */
256 BNX2X_FCOE_ETH_CID, 257#define BNX2X_FCOE_ETH_CID(bp) (BNX2X_CNIC_START_ETH_CID(bp) + 1)
257};
258 258
259/** Additional rings budgeting */ 259/** Additional rings budgeting */
260#ifdef BCM_CNIC 260#ifdef BCM_CNIC
@@ -276,29 +276,30 @@ enum {
276#define FIRST_TX_ONLY_COS_INDEX 1 276#define FIRST_TX_ONLY_COS_INDEX 1
277#define FIRST_TX_COS_INDEX 0 277#define FIRST_TX_COS_INDEX 0
278 278
279/* defines for decodeing the fastpath index and the cos index out of the
280 * transmission queue index
281 */
282#define MAX_TXQS_PER_COS FP_SB_MAX_E1x
283
284#define TXQ_TO_FP(txq_index) ((txq_index) % MAX_TXQS_PER_COS)
285#define TXQ_TO_COS(txq_index) ((txq_index) / MAX_TXQS_PER_COS)
286
287/* rules for calculating the cids of tx-only connections */ 279/* rules for calculating the cids of tx-only connections */
288#define CID_TO_FP(cid) ((cid) % MAX_TXQS_PER_COS) 280#define CID_TO_FP(cid, bp) ((cid) % BNX2X_NUM_NON_CNIC_QUEUES(bp))
289#define CID_COS_TO_TX_ONLY_CID(cid, cos) (cid + cos * MAX_TXQS_PER_COS) 281#define CID_COS_TO_TX_ONLY_CID(cid, cos, bp) \
282 (cid + cos * BNX2X_NUM_NON_CNIC_QUEUES(bp))
290 283
291/* fp index inside class of service range */ 284/* fp index inside class of service range */
292#define FP_COS_TO_TXQ(fp, cos) ((fp)->index + cos * MAX_TXQS_PER_COS) 285#define FP_COS_TO_TXQ(fp, cos, bp) \
293 286 ((fp)->index + cos * BNX2X_NUM_NON_CNIC_QUEUES(bp))
294/* 287
295 * 0..15 eth cos0 288/* Indexes for transmission queues array:
296 * 16..31 eth cos1 if applicable 289 * txdata for RSS i CoS j is at location i + (j * num of RSS)
297 * 32..47 eth cos2 If applicable 290 * txdata for FCoE (if exist) is at location max cos * num of RSS
298 * fcoe queue follows eth queues (16, 32, 48 depending on cos) 291 * txdata for FWD (if exist) is one location after FCoE
292 * txdata for OOO (if exist) is one location after FWD
299 */ 293 */
300#define MAX_ETH_TXQ_IDX(bp) (MAX_TXQS_PER_COS * (bp)->max_cos) 294enum {
301#define FCOE_TXQ_IDX(bp) (MAX_ETH_TXQ_IDX(bp)) 295 FCOE_TXQ_IDX_OFFSET,
296 FWD_TXQ_IDX_OFFSET,
297 OOO_TXQ_IDX_OFFSET,
298};
299#define MAX_ETH_TXQ_IDX(bp) (BNX2X_NUM_NON_CNIC_QUEUES(bp) * (bp)->max_cos)
300#ifdef BCM_CNIC
301#define FCOE_TXQ_IDX(bp) (MAX_ETH_TXQ_IDX(bp) + FCOE_TXQ_IDX_OFFSET)
302#endif
302 303
303/* fast path */ 304/* fast path */
304/* 305/*
@@ -453,6 +454,7 @@ struct bnx2x_agg_info {
453 u16 vlan_tag; 454 u16 vlan_tag;
454 u16 len_on_bd; 455 u16 len_on_bd;
455 u32 rxhash; 456 u32 rxhash;
457 bool l4_rxhash;
456 u16 gro_size; 458 u16 gro_size;
457 u16 full_page; 459 u16 full_page;
458}; 460};
@@ -481,6 +483,8 @@ struct bnx2x_fp_txdata {
481 __le16 *tx_cons_sb; 483 __le16 *tx_cons_sb;
482 484
483 int txq_index; 485 int txq_index;
486 struct bnx2x_fastpath *parent_fp;
487 int tx_ring_size;
484}; 488};
485 489
486enum bnx2x_tpa_mode_t { 490enum bnx2x_tpa_mode_t {
@@ -507,7 +511,7 @@ struct bnx2x_fastpath {
507 enum bnx2x_tpa_mode_t mode; 511 enum bnx2x_tpa_mode_t mode;
508 512
509 u8 max_cos; /* actual number of active tx coses */ 513 u8 max_cos; /* actual number of active tx coses */
510 struct bnx2x_fp_txdata txdata[BNX2X_MULTI_TX_COS]; 514 struct bnx2x_fp_txdata *txdata_ptr[BNX2X_MULTI_TX_COS];
511 515
512 struct sw_rx_bd *rx_buf_ring; /* BDs mappings ring */ 516 struct sw_rx_bd *rx_buf_ring; /* BDs mappings ring */
513 struct sw_rx_page *rx_page_ring; /* SGE pages mappings ring */ 517 struct sw_rx_page *rx_page_ring; /* SGE pages mappings ring */
@@ -547,51 +551,45 @@ struct bnx2x_fastpath {
547 rx_calls; 551 rx_calls;
548 552
549 /* TPA related */ 553 /* TPA related */
550 struct bnx2x_agg_info tpa_info[ETH_MAX_AGGREGATION_QUEUES_E1H_E2]; 554 struct bnx2x_agg_info *tpa_info;
551 u8 disable_tpa; 555 u8 disable_tpa;
552#ifdef BNX2X_STOP_ON_ERROR 556#ifdef BNX2X_STOP_ON_ERROR
553 u64 tpa_queue_used; 557 u64 tpa_queue_used;
554#endif 558#endif
555
556 struct tstorm_per_queue_stats old_tclient;
557 struct ustorm_per_queue_stats old_uclient;
558 struct xstorm_per_queue_stats old_xclient;
559 struct bnx2x_eth_q_stats eth_q_stats;
560 struct bnx2x_eth_q_stats_old eth_q_stats_old;
561
562 /* The size is calculated using the following: 559 /* The size is calculated using the following:
563 sizeof name field from netdev structure + 560 sizeof name field from netdev structure +
564 4 ('-Xx-' string) + 561 4 ('-Xx-' string) +
565 4 (for the digits and to make it DWORD aligned) */ 562 4 (for the digits and to make it DWORD aligned) */
566#define FP_NAME_SIZE (sizeof(((struct net_device *)0)->name) + 8) 563#define FP_NAME_SIZE (sizeof(((struct net_device *)0)->name) + 8)
567 char name[FP_NAME_SIZE]; 564 char name[FP_NAME_SIZE];
568
569 /* MACs object */
570 struct bnx2x_vlan_mac_obj mac_obj;
571
572 /* Queue State object */
573 struct bnx2x_queue_sp_obj q_obj;
574
575}; 565};
576 566
577#define bnx2x_fp(bp, nr, var) (bp->fp[nr].var) 567#define bnx2x_fp(bp, nr, var) ((bp)->fp[(nr)].var)
568#define bnx2x_sp_obj(bp, fp) ((bp)->sp_objs[(fp)->index])
569#define bnx2x_fp_stats(bp, fp) (&((bp)->fp_stats[(fp)->index]))
570#define bnx2x_fp_qstats(bp, fp) (&((bp)->fp_stats[(fp)->index].eth_q_stats))
578 571
579/* Use 2500 as a mini-jumbo MTU for FCoE */ 572/* Use 2500 as a mini-jumbo MTU for FCoE */
580#define BNX2X_FCOE_MINI_JUMBO_MTU 2500 573#define BNX2X_FCOE_MINI_JUMBO_MTU 2500
581 574
582/* FCoE L2 `fastpath' entry is right after the eth entries */ 575#define FCOE_IDX_OFFSET 0
583#define FCOE_IDX BNX2X_NUM_ETH_QUEUES(bp) 576
584#define bnx2x_fcoe_fp(bp) (&bp->fp[FCOE_IDX]) 577#define FCOE_IDX(bp) (BNX2X_NUM_NON_CNIC_QUEUES(bp) + \
585#define bnx2x_fcoe(bp, var) (bnx2x_fcoe_fp(bp)->var) 578 FCOE_IDX_OFFSET)
586#define bnx2x_fcoe_tx(bp, var) (bnx2x_fcoe_fp(bp)-> \ 579#define bnx2x_fcoe_fp(bp) (&bp->fp[FCOE_IDX(bp)])
587 txdata[FIRST_TX_COS_INDEX].var) 580#define bnx2x_fcoe(bp, var) (bnx2x_fcoe_fp(bp)->var)
581#define bnx2x_fcoe_inner_sp_obj(bp) (&bp->sp_objs[FCOE_IDX(bp)])
582#define bnx2x_fcoe_sp_obj(bp, var) (bnx2x_fcoe_inner_sp_obj(bp)->var)
583#define bnx2x_fcoe_tx(bp, var) (bnx2x_fcoe_fp(bp)-> \
584 txdata_ptr[FIRST_TX_COS_INDEX] \
585 ->var)
588 586
589 587
590#define IS_ETH_FP(fp) (fp->index < \ 588#define IS_ETH_FP(fp) (fp->index < \
591 BNX2X_NUM_ETH_QUEUES(fp->bp)) 589 BNX2X_NUM_ETH_QUEUES(fp->bp))
592#ifdef BCM_CNIC 590#ifdef BCM_CNIC
593#define IS_FCOE_FP(fp) (fp->index == FCOE_IDX) 591#define IS_FCOE_FP(fp) (fp->index == FCOE_IDX(fp->bp))
594#define IS_FCOE_IDX(idx) ((idx) == FCOE_IDX) 592#define IS_FCOE_IDX(idx) ((idx) == FCOE_IDX(bp))
595#else 593#else
596#define IS_FCOE_FP(fp) false 594#define IS_FCOE_FP(fp) false
597#define IS_FCOE_IDX(idx) false 595#define IS_FCOE_IDX(idx) false
@@ -978,8 +976,8 @@ union cdu_context {
978}; 976};
979 977
980/* CDU host DB constants */ 978/* CDU host DB constants */
981#define CDU_ILT_PAGE_SZ_HW 3 979#define CDU_ILT_PAGE_SZ_HW 2
982#define CDU_ILT_PAGE_SZ (8192 << CDU_ILT_PAGE_SZ_HW) /* 64K */ 980#define CDU_ILT_PAGE_SZ (8192 << CDU_ILT_PAGE_SZ_HW) /* 32K */
983#define ILT_PAGE_CIDS (CDU_ILT_PAGE_SZ / sizeof(union cdu_context)) 981#define ILT_PAGE_CIDS (CDU_ILT_PAGE_SZ / sizeof(union cdu_context))
984 982
985#ifdef BCM_CNIC 983#ifdef BCM_CNIC
@@ -1182,11 +1180,31 @@ struct bnx2x_prev_path_list {
1182 struct list_head list; 1180 struct list_head list;
1183}; 1181};
1184 1182
1183struct bnx2x_sp_objs {
1184 /* MACs object */
1185 struct bnx2x_vlan_mac_obj mac_obj;
1186
1187 /* Queue State object */
1188 struct bnx2x_queue_sp_obj q_obj;
1189};
1190
1191struct bnx2x_fp_stats {
1192 struct tstorm_per_queue_stats old_tclient;
1193 struct ustorm_per_queue_stats old_uclient;
1194 struct xstorm_per_queue_stats old_xclient;
1195 struct bnx2x_eth_q_stats eth_q_stats;
1196 struct bnx2x_eth_q_stats_old eth_q_stats_old;
1197};
1198
1185struct bnx2x { 1199struct bnx2x {
1186 /* Fields used in the tx and intr/napi performance paths 1200 /* Fields used in the tx and intr/napi performance paths
1187 * are grouped together in the beginning of the structure 1201 * are grouped together in the beginning of the structure
1188 */ 1202 */
1189 struct bnx2x_fastpath *fp; 1203 struct bnx2x_fastpath *fp;
1204 struct bnx2x_sp_objs *sp_objs;
1205 struct bnx2x_fp_stats *fp_stats;
1206 struct bnx2x_fp_txdata *bnx2x_txq;
1207 int bnx2x_txq_size;
1190 void __iomem *regview; 1208 void __iomem *regview;
1191 void __iomem *doorbells; 1209 void __iomem *doorbells;
1192 u16 db_size; 1210 u16 db_size;
@@ -1301,7 +1319,9 @@ struct bnx2x {
1301#define NO_ISCSI_FLAG (1 << 14) 1319#define NO_ISCSI_FLAG (1 << 14)
1302#define NO_FCOE_FLAG (1 << 15) 1320#define NO_FCOE_FLAG (1 << 15)
1303#define BC_SUPPORTS_PFC_STATS (1 << 17) 1321#define BC_SUPPORTS_PFC_STATS (1 << 17)
1322#define BC_SUPPORTS_FCOE_FEATURES (1 << 19)
1304#define USING_SINGLE_MSIX_FLAG (1 << 20) 1323#define USING_SINGLE_MSIX_FLAG (1 << 20)
1324#define BC_SUPPORTS_DCBX_MSG_NON_PMF (1 << 21)
1305 1325
1306#define NO_ISCSI(bp) ((bp)->flags & NO_ISCSI_FLAG) 1326#define NO_ISCSI(bp) ((bp)->flags & NO_ISCSI_FLAG)
1307#define NO_ISCSI_OOO(bp) ((bp)->flags & NO_ISCSI_OOO_FLAG) 1327#define NO_ISCSI_OOO(bp) ((bp)->flags & NO_ISCSI_OOO_FLAG)
@@ -1377,6 +1397,7 @@ struct bnx2x {
1377#define BNX2X_MAX_COS 3 1397#define BNX2X_MAX_COS 3
1378#define BNX2X_MAX_TX_COS 2 1398#define BNX2X_MAX_TX_COS 2
1379 int num_queues; 1399 int num_queues;
1400 int num_napi_queues;
1380 int disable_tpa; 1401 int disable_tpa;
1381 1402
1382 u32 rx_mode; 1403 u32 rx_mode;
@@ -1389,6 +1410,7 @@ struct bnx2x {
1389 u8 igu_dsb_id; 1410 u8 igu_dsb_id;
1390 u8 igu_base_sb; 1411 u8 igu_base_sb;
1391 u8 igu_sb_cnt; 1412 u8 igu_sb_cnt;
1413
1392 dma_addr_t def_status_blk_mapping; 1414 dma_addr_t def_status_blk_mapping;
1393 1415
1394 struct bnx2x_slowpath *slowpath; 1416 struct bnx2x_slowpath *slowpath;
@@ -1420,7 +1442,11 @@ struct bnx2x {
1420 dma_addr_t fw_stats_data_mapping; 1442 dma_addr_t fw_stats_data_mapping;
1421 int fw_stats_data_sz; 1443 int fw_stats_data_sz;
1422 1444
1423 struct hw_context context; 1445 /* For max 196 cids (64*3 + non-eth), 32KB ILT page size and 1KB
1446 * context size we need 8 ILT entries.
1447 */
1448#define ILT_MAX_L2_LINES 8
1449 struct hw_context context[ILT_MAX_L2_LINES];
1424 1450
1425 struct bnx2x_ilt *ilt; 1451 struct bnx2x_ilt *ilt;
1426#define BP_ILT(bp) ((bp)->ilt) 1452#define BP_ILT(bp) ((bp)->ilt)
@@ -1433,13 +1459,14 @@ struct bnx2x {
1433 1459
1434/* 1460/*
1435 * Maximum CID count that might be required by the bnx2x: 1461 * Maximum CID count that might be required by the bnx2x:
1436 * Max Tss * Max_Tx_Multi_Cos + CNIC L2 Clients (FCoE and iSCSI related) 1462 * Max RSS * Max_Tx_Multi_Cos + FCoE + iSCSI
1437 */ 1463 */
1438#define BNX2X_L2_CID_COUNT(bp) (MAX_TXQS_PER_COS * BNX2X_MULTI_TX_COS +\ 1464#define BNX2X_L2_CID_COUNT(bp) (BNX2X_NUM_ETH_QUEUES(bp) * BNX2X_MULTI_TX_COS \
1439 NON_ETH_CONTEXT_USE + CNIC_PRESENT) 1465 + NON_ETH_CONTEXT_USE + CNIC_PRESENT)
1466#define BNX2X_L2_MAX_CID(bp) (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS \
1467 + NON_ETH_CONTEXT_USE + CNIC_PRESENT)
1440#define L2_ILT_LINES(bp) (DIV_ROUND_UP(BNX2X_L2_CID_COUNT(bp),\ 1468#define L2_ILT_LINES(bp) (DIV_ROUND_UP(BNX2X_L2_CID_COUNT(bp),\
1441 ILT_PAGE_CIDS)) 1469 ILT_PAGE_CIDS))
1442#define BNX2X_DB_SIZE(bp) (BNX2X_L2_CID_COUNT(bp) * (1 << BNX2X_DB_SHIFT))
1443 1470
1444 int qm_cid_count; 1471 int qm_cid_count;
1445 1472
@@ -1598,6 +1625,8 @@ struct bnx2x {
1598extern int num_queues; 1625extern int num_queues;
1599#define BNX2X_NUM_QUEUES(bp) (bp->num_queues) 1626#define BNX2X_NUM_QUEUES(bp) (bp->num_queues)
1600#define BNX2X_NUM_ETH_QUEUES(bp) (BNX2X_NUM_QUEUES(bp) - NON_ETH_CONTEXT_USE) 1627#define BNX2X_NUM_ETH_QUEUES(bp) (BNX2X_NUM_QUEUES(bp) - NON_ETH_CONTEXT_USE)
1628#define BNX2X_NUM_NON_CNIC_QUEUES(bp) (BNX2X_NUM_QUEUES(bp) - \
1629 NON_ETH_CONTEXT_USE)
1601#define BNX2X_NUM_RX_QUEUES(bp) BNX2X_NUM_QUEUES(bp) 1630#define BNX2X_NUM_RX_QUEUES(bp) BNX2X_NUM_QUEUES(bp)
1602 1631
1603#define is_multi(bp) (BNX2X_NUM_QUEUES(bp) > 1) 1632#define is_multi(bp) (BNX2X_NUM_QUEUES(bp) > 1)
@@ -1656,6 +1685,9 @@ struct bnx2x_func_init_params {
1656 continue; \ 1685 continue; \
1657 else 1686 else
1658 1687
1688#define for_each_napi_rx_queue(bp, var) \
1689 for ((var) = 0; (var) < bp->num_napi_queues; (var)++)
1690
1659/* Skip OOO FP */ 1691/* Skip OOO FP */
1660#define for_each_tx_queue(bp, var) \ 1692#define for_each_tx_queue(bp, var) \
1661 for ((var) = 0; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \ 1693 for ((var) = 0; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \
@@ -1709,15 +1741,6 @@ int bnx2x_set_mac_one(struct bnx2x *bp, u8 *mac,
1709 struct bnx2x_vlan_mac_obj *obj, bool set, 1741 struct bnx2x_vlan_mac_obj *obj, bool set,
1710 int mac_type, unsigned long *ramrod_flags); 1742 int mac_type, unsigned long *ramrod_flags);
1711/** 1743/**
1712 * Deletes all MACs configured for the specific MAC object.
1713 *
1714 * @param bp Function driver instance
1715 * @param mac_obj MAC object to cleanup
1716 *
1717 * @return zero if all MACs were cleaned
1718 */
1719
1720/**
1721 * bnx2x_del_all_macs - delete all MACs configured for the specific MAC object 1744 * bnx2x_del_all_macs - delete all MACs configured for the specific MAC object
1722 * 1745 *
1723 * @bp: driver handle 1746 * @bp: driver handle
@@ -1817,6 +1840,7 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
1817#define LOAD_NORMAL 0 1840#define LOAD_NORMAL 0
1818#define LOAD_OPEN 1 1841#define LOAD_OPEN 1
1819#define LOAD_DIAG 2 1842#define LOAD_DIAG 2
1843#define LOAD_LOOPBACK_EXT 3
1820#define UNLOAD_NORMAL 0 1844#define UNLOAD_NORMAL 0
1821#define UNLOAD_CLOSE 1 1845#define UNLOAD_CLOSE 1
1822#define UNLOAD_RECOVERY 2 1846#define UNLOAD_RECOVERY 2
@@ -1899,13 +1923,17 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
1899#define PCICFG_LINK_SPEED 0xf0000 1923#define PCICFG_LINK_SPEED 0xf0000
1900#define PCICFG_LINK_SPEED_SHIFT 16 1924#define PCICFG_LINK_SPEED_SHIFT 16
1901 1925
1902 1926#define BNX2X_NUM_TESTS_SF 7
1903#define BNX2X_NUM_TESTS 7 1927#define BNX2X_NUM_TESTS_MF 3
1928#define BNX2X_NUM_TESTS(bp) (IS_MF(bp) ? BNX2X_NUM_TESTS_MF : \
1929 BNX2X_NUM_TESTS_SF)
1904 1930
1905#define BNX2X_PHY_LOOPBACK 0 1931#define BNX2X_PHY_LOOPBACK 0
1906#define BNX2X_MAC_LOOPBACK 1 1932#define BNX2X_MAC_LOOPBACK 1
1933#define BNX2X_EXT_LOOPBACK 2
1907#define BNX2X_PHY_LOOPBACK_FAILED 1 1934#define BNX2X_PHY_LOOPBACK_FAILED 1
1908#define BNX2X_MAC_LOOPBACK_FAILED 2 1935#define BNX2X_MAC_LOOPBACK_FAILED 2
1936#define BNX2X_EXT_LOOPBACK_FAILED 3
1909#define BNX2X_LOOPBACK_FAILED (BNX2X_MAC_LOOPBACK_FAILED | \ 1937#define BNX2X_LOOPBACK_FAILED (BNX2X_MAC_LOOPBACK_FAILED | \
1910 BNX2X_PHY_LOOPBACK_FAILED) 1938 BNX2X_PHY_LOOPBACK_FAILED)
1911 1939
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 8098eea9704..5aeb034fa05 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -40,12 +40,19 @@
40 * Makes sure the contents of the bp->fp[to].napi is kept 40 * Makes sure the contents of the bp->fp[to].napi is kept
41 * intact. This is done by first copying the napi struct from 41 * intact. This is done by first copying the napi struct from
42 * the target to the source, and then mem copying the entire 42 * the target to the source, and then mem copying the entire
43 * source onto the target 43 * source onto the target. Update txdata pointers and related
44 * content.
44 */ 45 */
45static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to) 46static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
46{ 47{
47 struct bnx2x_fastpath *from_fp = &bp->fp[from]; 48 struct bnx2x_fastpath *from_fp = &bp->fp[from];
48 struct bnx2x_fastpath *to_fp = &bp->fp[to]; 49 struct bnx2x_fastpath *to_fp = &bp->fp[to];
50 struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from];
51 struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to];
52 struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from];
53 struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
54 int old_max_eth_txqs, new_max_eth_txqs;
55 int old_txdata_index = 0, new_txdata_index = 0;
49 56
50 /* Copy the NAPI object as it has been already initialized */ 57 /* Copy the NAPI object as it has been already initialized */
51 from_fp->napi = to_fp->napi; 58 from_fp->napi = to_fp->napi;
@@ -53,6 +60,30 @@ static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
53 /* Move bnx2x_fastpath contents */ 60 /* Move bnx2x_fastpath contents */
54 memcpy(to_fp, from_fp, sizeof(*to_fp)); 61 memcpy(to_fp, from_fp, sizeof(*to_fp));
55 to_fp->index = to; 62 to_fp->index = to;
63
64 /* move sp_objs contents as well, as their indices match fp ones */
65 memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));
66
67 /* move fp_stats contents as well, as their indices match fp ones */
68 memcpy(to_fp_stats, from_fp_stats, sizeof(*to_fp_stats));
69
70 /* Update txdata pointers in fp and move txdata content accordingly:
71 * Each fp consumes 'max_cos' txdata structures, so the index should be
72 * decremented by max_cos x delta.
73 */
74
75 old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos;
76 new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) *
77 (bp)->max_cos;
78 if (from == FCOE_IDX(bp)) {
79 old_txdata_index = old_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
80 new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
81 }
82
83 memcpy(&bp->bnx2x_txq[old_txdata_index],
84 &bp->bnx2x_txq[new_txdata_index],
85 sizeof(struct bnx2x_fp_txdata));
86 to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
56} 87}
57 88
58int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */ 89int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
@@ -264,12 +295,20 @@ static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
264 * CQE (calculated by HW). 295 * CQE (calculated by HW).
265 */ 296 */
266static u32 bnx2x_get_rxhash(const struct bnx2x *bp, 297static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
267 const struct eth_fast_path_rx_cqe *cqe) 298 const struct eth_fast_path_rx_cqe *cqe,
299 bool *l4_rxhash)
268{ 300{
269 /* Set Toeplitz hash from CQE */ 301 /* Set Toeplitz hash from CQE */
270 if ((bp->dev->features & NETIF_F_RXHASH) && 302 if ((bp->dev->features & NETIF_F_RXHASH) &&
271 (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) 303 (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) {
304 enum eth_rss_hash_type htype;
305
306 htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE;
307 *l4_rxhash = (htype == TCP_IPV4_HASH_TYPE) ||
308 (htype == TCP_IPV6_HASH_TYPE);
272 return le32_to_cpu(cqe->rss_hash_result); 309 return le32_to_cpu(cqe->rss_hash_result);
310 }
311 *l4_rxhash = false;
273 return 0; 312 return 0;
274} 313}
275 314
@@ -323,7 +362,7 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
323 tpa_info->tpa_state = BNX2X_TPA_START; 362 tpa_info->tpa_state = BNX2X_TPA_START;
324 tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd); 363 tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
325 tpa_info->placement_offset = cqe->placement_offset; 364 tpa_info->placement_offset = cqe->placement_offset;
326 tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe); 365 tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->l4_rxhash);
327 if (fp->mode == TPA_MODE_GRO) { 366 if (fp->mode == TPA_MODE_GRO) {
328 u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len); 367 u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
329 tpa_info->full_page = 368 tpa_info->full_page =
@@ -479,7 +518,7 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
479 where we are and drop the whole packet */ 518 where we are and drop the whole packet */
480 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx); 519 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
481 if (unlikely(err)) { 520 if (unlikely(err)) {
482 fp->eth_q_stats.rx_skb_alloc_failed++; 521 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
483 return err; 522 return err;
484 } 523 }
485 524
@@ -558,6 +597,7 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
558 skb_reserve(skb, pad + NET_SKB_PAD); 597 skb_reserve(skb, pad + NET_SKB_PAD);
559 skb_put(skb, len); 598 skb_put(skb, len);
560 skb->rxhash = tpa_info->rxhash; 599 skb->rxhash = tpa_info->rxhash;
600 skb->l4_rxhash = tpa_info->l4_rxhash;
561 601
562 skb->protocol = eth_type_trans(skb, bp->dev); 602 skb->protocol = eth_type_trans(skb, bp->dev);
563 skb->ip_summed = CHECKSUM_UNNECESSARY; 603 skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -584,7 +624,7 @@ drop:
584 /* drop the packet and keep the buffer in the bin */ 624 /* drop the packet and keep the buffer in the bin */
585 DP(NETIF_MSG_RX_STATUS, 625 DP(NETIF_MSG_RX_STATUS,
586 "Failed to allocate or map a new skb - dropping packet!\n"); 626 "Failed to allocate or map a new skb - dropping packet!\n");
587 fp->eth_q_stats.rx_skb_alloc_failed++; 627 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++;
588} 628}
589 629
590static int bnx2x_alloc_rx_data(struct bnx2x *bp, 630static int bnx2x_alloc_rx_data(struct bnx2x *bp,
@@ -617,8 +657,10 @@ static int bnx2x_alloc_rx_data(struct bnx2x *bp,
617 return 0; 657 return 0;
618} 658}
619 659
620static void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe, 660static
621 struct bnx2x_fastpath *fp) 661void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
662 struct bnx2x_fastpath *fp,
663 struct bnx2x_eth_q_stats *qstats)
622{ 664{
623 /* Do nothing if no IP/L4 csum validation was done */ 665 /* Do nothing if no IP/L4 csum validation was done */
624 666
@@ -632,7 +674,7 @@ static void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
632 if (cqe->fast_path_cqe.type_error_flags & 674 if (cqe->fast_path_cqe.type_error_flags &
633 (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG | 675 (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
634 ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG)) 676 ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
635 fp->eth_q_stats.hw_csum_err++; 677 qstats->hw_csum_err++;
636 else 678 else
637 skb->ip_summed = CHECKSUM_UNNECESSARY; 679 skb->ip_summed = CHECKSUM_UNNECESSARY;
638} 680}
@@ -679,6 +721,7 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
679 enum eth_rx_cqe_type cqe_fp_type; 721 enum eth_rx_cqe_type cqe_fp_type;
680 u16 len, pad, queue; 722 u16 len, pad, queue;
681 u8 *data; 723 u8 *data;
724 bool l4_rxhash;
682 725
683#ifdef BNX2X_STOP_ON_ERROR 726#ifdef BNX2X_STOP_ON_ERROR
684 if (unlikely(bp->panic)) 727 if (unlikely(bp->panic))
@@ -776,7 +819,7 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
776 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS, 819 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
777 "ERROR flags %x rx packet %u\n", 820 "ERROR flags %x rx packet %u\n",
778 cqe_fp_flags, sw_comp_cons); 821 cqe_fp_flags, sw_comp_cons);
779 fp->eth_q_stats.rx_err_discard_pkt++; 822 bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++;
780 goto reuse_rx; 823 goto reuse_rx;
781 } 824 }
782 825
@@ -789,7 +832,7 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
789 if (skb == NULL) { 832 if (skb == NULL) {
790 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS, 833 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
791 "ERROR packet dropped because of alloc failure\n"); 834 "ERROR packet dropped because of alloc failure\n");
792 fp->eth_q_stats.rx_skb_alloc_failed++; 835 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
793 goto reuse_rx; 836 goto reuse_rx;
794 } 837 }
795 memcpy(skb->data, data + pad, len); 838 memcpy(skb->data, data + pad, len);
@@ -803,14 +846,15 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
803 skb = build_skb(data, 0); 846 skb = build_skb(data, 0);
804 if (unlikely(!skb)) { 847 if (unlikely(!skb)) {
805 kfree(data); 848 kfree(data);
806 fp->eth_q_stats.rx_skb_alloc_failed++; 849 bnx2x_fp_qstats(bp, fp)->
850 rx_skb_alloc_failed++;
807 goto next_rx; 851 goto next_rx;
808 } 852 }
809 skb_reserve(skb, pad); 853 skb_reserve(skb, pad);
810 } else { 854 } else {
811 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS, 855 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
812 "ERROR packet dropped because of alloc failure\n"); 856 "ERROR packet dropped because of alloc failure\n");
813 fp->eth_q_stats.rx_skb_alloc_failed++; 857 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
814reuse_rx: 858reuse_rx:
815 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod); 859 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
816 goto next_rx; 860 goto next_rx;
@@ -821,13 +865,14 @@ reuse_rx:
821 skb->protocol = eth_type_trans(skb, bp->dev); 865 skb->protocol = eth_type_trans(skb, bp->dev);
822 866
823 /* Set Toeplitz hash for a none-LRO skb */ 867 /* Set Toeplitz hash for a none-LRO skb */
824 skb->rxhash = bnx2x_get_rxhash(bp, cqe_fp); 868 skb->rxhash = bnx2x_get_rxhash(bp, cqe_fp, &l4_rxhash);
869 skb->l4_rxhash = l4_rxhash;
825 870
826 skb_checksum_none_assert(skb); 871 skb_checksum_none_assert(skb);
827 872
828 if (bp->dev->features & NETIF_F_RXCSUM) 873 if (bp->dev->features & NETIF_F_RXCSUM)
829 bnx2x_csum_validate(skb, cqe, fp); 874 bnx2x_csum_validate(skb, cqe, fp,
830 875 bnx2x_fp_qstats(bp, fp));
831 876
832 skb_record_rx_queue(skb, fp->rx_queue); 877 skb_record_rx_queue(skb, fp->rx_queue);
833 878
@@ -888,7 +933,7 @@ static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
888 prefetch(fp->rx_cons_sb); 933 prefetch(fp->rx_cons_sb);
889 934
890 for_each_cos_in_tx_queue(fp, cos) 935 for_each_cos_in_tx_queue(fp, cos)
891 prefetch(fp->txdata[cos].tx_cons_sb); 936 prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
892 937
893 prefetch(&fp->sb_running_index[SM_RX_ID]); 938 prefetch(&fp->sb_running_index[SM_RX_ID]);
894 napi_schedule(&bnx2x_fp(bp, fp->index, napi)); 939 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
@@ -1205,7 +1250,7 @@ static void bnx2x_free_tx_skbs(struct bnx2x *bp)
1205 for_each_tx_queue(bp, i) { 1250 for_each_tx_queue(bp, i) {
1206 struct bnx2x_fastpath *fp = &bp->fp[i]; 1251 struct bnx2x_fastpath *fp = &bp->fp[i];
1207 for_each_cos_in_tx_queue(fp, cos) { 1252 for_each_cos_in_tx_queue(fp, cos) {
1208 struct bnx2x_fp_txdata *txdata = &fp->txdata[cos]; 1253 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
1209 unsigned pkts_compl = 0, bytes_compl = 0; 1254 unsigned pkts_compl = 0, bytes_compl = 0;
1210 1255
1211 u16 sw_prod = txdata->tx_pkt_prod; 1256 u16 sw_prod = txdata->tx_pkt_prod;
@@ -1217,7 +1262,8 @@ static void bnx2x_free_tx_skbs(struct bnx2x *bp)
1217 sw_cons++; 1262 sw_cons++;
1218 } 1263 }
1219 netdev_tx_reset_queue( 1264 netdev_tx_reset_queue(
1220 netdev_get_tx_queue(bp->dev, txdata->txq_index)); 1265 netdev_get_tx_queue(bp->dev,
1266 txdata->txq_index));
1221 } 1267 }
1222 } 1268 }
1223} 1269}
@@ -1325,7 +1371,7 @@ void bnx2x_free_irq(struct bnx2x *bp)
1325 free_irq(bp->dev->irq, bp->dev); 1371 free_irq(bp->dev->irq, bp->dev);
1326} 1372}
1327 1373
1328int __devinit bnx2x_enable_msix(struct bnx2x *bp) 1374int bnx2x_enable_msix(struct bnx2x *bp)
1329{ 1375{
1330 int msix_vec = 0, i, rc, req_cnt; 1376 int msix_vec = 0, i, rc, req_cnt;
1331 1377
@@ -1579,6 +1625,8 @@ void bnx2x_set_num_queues(struct bnx2x *bp)
1579#endif 1625#endif
1580 /* Add special queues */ 1626 /* Add special queues */
1581 bp->num_queues += NON_ETH_CONTEXT_USE; 1627 bp->num_queues += NON_ETH_CONTEXT_USE;
1628
1629 BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
1582} 1630}
1583 1631
1584/** 1632/**
@@ -1607,8 +1655,8 @@ static int bnx2x_set_real_num_queues(struct bnx2x *bp)
1607{ 1655{
1608 int rc, tx, rx; 1656 int rc, tx, rx;
1609 1657
1610 tx = MAX_TXQS_PER_COS * bp->max_cos; 1658 tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos;
1611 rx = BNX2X_NUM_ETH_QUEUES(bp); 1659 rx = BNX2X_NUM_QUEUES(bp) - NON_ETH_CONTEXT_USE;
1612 1660
1613/* account for fcoe queue */ 1661/* account for fcoe queue */
1614#ifdef BCM_CNIC 1662#ifdef BCM_CNIC
@@ -1666,14 +1714,13 @@ static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
1666static int bnx2x_init_rss_pf(struct bnx2x *bp) 1714static int bnx2x_init_rss_pf(struct bnx2x *bp)
1667{ 1715{
1668 int i; 1716 int i;
1669 u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0};
1670 u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp); 1717 u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
1671 1718
1672 /* Prepare the initial contents fo the indirection table if RSS is 1719 /* Prepare the initial contents fo the indirection table if RSS is
1673 * enabled 1720 * enabled
1674 */ 1721 */
1675 for (i = 0; i < sizeof(ind_table); i++) 1722 for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++)
1676 ind_table[i] = 1723 bp->rss_conf_obj.ind_table[i] =
1677 bp->fp->cl_id + 1724 bp->fp->cl_id +
1678 ethtool_rxfh_indir_default(i, num_eth_queues); 1725 ethtool_rxfh_indir_default(i, num_eth_queues);
1679 1726
@@ -1685,12 +1732,11 @@ static int bnx2x_init_rss_pf(struct bnx2x *bp)
1685 * For 57712 and newer on the other hand it's a per-function 1732 * For 57712 and newer on the other hand it's a per-function
1686 * configuration. 1733 * configuration.
1687 */ 1734 */
1688 return bnx2x_config_rss_eth(bp, ind_table, 1735 return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp));
1689 bp->port.pmf || !CHIP_IS_E1x(bp));
1690} 1736}
1691 1737
1692int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj, 1738int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
1693 u8 *ind_table, bool config_hash) 1739 bool config_hash)
1694{ 1740{
1695 struct bnx2x_config_rss_params params = {NULL}; 1741 struct bnx2x_config_rss_params params = {NULL};
1696 int i; 1742 int i;
@@ -1713,11 +1759,15 @@ int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
1713 __set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags); 1759 __set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
1714 __set_bit(BNX2X_RSS_IPV6, &params.rss_flags); 1760 __set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
1715 __set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags); 1761 __set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
1762 if (rss_obj->udp_rss_v4)
1763 __set_bit(BNX2X_RSS_IPV4_UDP, &params.rss_flags);
1764 if (rss_obj->udp_rss_v6)
1765 __set_bit(BNX2X_RSS_IPV6_UDP, &params.rss_flags);
1716 1766
1717 /* Hash bits */ 1767 /* Hash bits */
1718 params.rss_result_mask = MULTI_MASK; 1768 params.rss_result_mask = MULTI_MASK;
1719 1769
1720 memcpy(params.ind_table, ind_table, sizeof(params.ind_table)); 1770 memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));
1721 1771
1722 if (config_hash) { 1772 if (config_hash) {
1723 /* RSS keys */ 1773 /* RSS keys */
@@ -1754,7 +1804,7 @@ static void bnx2x_squeeze_objects(struct bnx2x *bp)
1754 int rc; 1804 int rc;
1755 unsigned long ramrod_flags = 0, vlan_mac_flags = 0; 1805 unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
1756 struct bnx2x_mcast_ramrod_params rparam = {NULL}; 1806 struct bnx2x_mcast_ramrod_params rparam = {NULL};
1757 struct bnx2x_vlan_mac_obj *mac_obj = &bp->fp->mac_obj; 1807 struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
1758 1808
1759 /***************** Cleanup MACs' object first *************************/ 1809 /***************** Cleanup MACs' object first *************************/
1760 1810
@@ -1765,7 +1815,7 @@ static void bnx2x_squeeze_objects(struct bnx2x *bp)
1765 1815
1766 /* Clean ETH primary MAC */ 1816 /* Clean ETH primary MAC */
1767 __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags); 1817 __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
1768 rc = mac_obj->delete_all(bp, &bp->fp->mac_obj, &vlan_mac_flags, 1818 rc = mac_obj->delete_all(bp, &bp->sp_objs->mac_obj, &vlan_mac_flags,
1769 &ramrod_flags); 1819 &ramrod_flags);
1770 if (rc != 0) 1820 if (rc != 0)
1771 BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc); 1821 BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
@@ -1851,11 +1901,16 @@ bool bnx2x_test_firmware_version(struct bnx2x *bp, bool is_err)
1851static void bnx2x_bz_fp(struct bnx2x *bp, int index) 1901static void bnx2x_bz_fp(struct bnx2x *bp, int index)
1852{ 1902{
1853 struct bnx2x_fastpath *fp = &bp->fp[index]; 1903 struct bnx2x_fastpath *fp = &bp->fp[index];
1904 struct bnx2x_fp_stats *fp_stats = &bp->fp_stats[index];
1905
1906 int cos;
1854 struct napi_struct orig_napi = fp->napi; 1907 struct napi_struct orig_napi = fp->napi;
1908 struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info;
1855 /* bzero bnx2x_fastpath contents */ 1909 /* bzero bnx2x_fastpath contents */
1856 if (bp->stats_init) 1910 if (bp->stats_init) {
1911 memset(fp->tpa_info, 0, sizeof(*fp->tpa_info));
1857 memset(fp, 0, sizeof(*fp)); 1912 memset(fp, 0, sizeof(*fp));
1858 else { 1913 } else {
1859 /* Keep Queue statistics */ 1914 /* Keep Queue statistics */
1860 struct bnx2x_eth_q_stats *tmp_eth_q_stats; 1915 struct bnx2x_eth_q_stats *tmp_eth_q_stats;
1861 struct bnx2x_eth_q_stats_old *tmp_eth_q_stats_old; 1916 struct bnx2x_eth_q_stats_old *tmp_eth_q_stats_old;
@@ -1863,26 +1918,27 @@ static void bnx2x_bz_fp(struct bnx2x *bp, int index)
1863 tmp_eth_q_stats = kzalloc(sizeof(struct bnx2x_eth_q_stats), 1918 tmp_eth_q_stats = kzalloc(sizeof(struct bnx2x_eth_q_stats),
1864 GFP_KERNEL); 1919 GFP_KERNEL);
1865 if (tmp_eth_q_stats) 1920 if (tmp_eth_q_stats)
1866 memcpy(tmp_eth_q_stats, &fp->eth_q_stats, 1921 memcpy(tmp_eth_q_stats, &fp_stats->eth_q_stats,
1867 sizeof(struct bnx2x_eth_q_stats)); 1922 sizeof(struct bnx2x_eth_q_stats));
1868 1923
1869 tmp_eth_q_stats_old = 1924 tmp_eth_q_stats_old =
1870 kzalloc(sizeof(struct bnx2x_eth_q_stats_old), 1925 kzalloc(sizeof(struct bnx2x_eth_q_stats_old),
1871 GFP_KERNEL); 1926 GFP_KERNEL);
1872 if (tmp_eth_q_stats_old) 1927 if (tmp_eth_q_stats_old)
1873 memcpy(tmp_eth_q_stats_old, &fp->eth_q_stats_old, 1928 memcpy(tmp_eth_q_stats_old, &fp_stats->eth_q_stats_old,
1874 sizeof(struct bnx2x_eth_q_stats_old)); 1929 sizeof(struct bnx2x_eth_q_stats_old));
1875 1930
1931 memset(fp->tpa_info, 0, sizeof(*fp->tpa_info));
1876 memset(fp, 0, sizeof(*fp)); 1932 memset(fp, 0, sizeof(*fp));
1877 1933
1878 if (tmp_eth_q_stats) { 1934 if (tmp_eth_q_stats) {
1879 memcpy(&fp->eth_q_stats, tmp_eth_q_stats, 1935 memcpy(&fp_stats->eth_q_stats, tmp_eth_q_stats,
1880 sizeof(struct bnx2x_eth_q_stats)); 1936 sizeof(struct bnx2x_eth_q_stats));
1881 kfree(tmp_eth_q_stats); 1937 kfree(tmp_eth_q_stats);
1882 } 1938 }
1883 1939
1884 if (tmp_eth_q_stats_old) { 1940 if (tmp_eth_q_stats_old) {
1885 memcpy(&fp->eth_q_stats_old, tmp_eth_q_stats_old, 1941 memcpy(&fp_stats->eth_q_stats_old, tmp_eth_q_stats_old,
1886 sizeof(struct bnx2x_eth_q_stats_old)); 1942 sizeof(struct bnx2x_eth_q_stats_old));
1887 kfree(tmp_eth_q_stats_old); 1943 kfree(tmp_eth_q_stats_old);
1888 } 1944 }
@@ -1891,7 +1947,7 @@ static void bnx2x_bz_fp(struct bnx2x *bp, int index)
1891 1947
1892 /* Restore the NAPI object as it has been already initialized */ 1948 /* Restore the NAPI object as it has been already initialized */
1893 fp->napi = orig_napi; 1949 fp->napi = orig_napi;
1894 1950 fp->tpa_info = orig_tpa_info;
1895 fp->bp = bp; 1951 fp->bp = bp;
1896 fp->index = index; 1952 fp->index = index;
1897 if (IS_ETH_FP(fp)) 1953 if (IS_ETH_FP(fp))
@@ -1900,6 +1956,16 @@ static void bnx2x_bz_fp(struct bnx2x *bp, int index)
1900 /* Special queues support only one CoS */ 1956 /* Special queues support only one CoS */
1901 fp->max_cos = 1; 1957 fp->max_cos = 1;
1902 1958
1959 /* Init txdata pointers */
1960#ifdef BCM_CNIC
1961 if (IS_FCOE_FP(fp))
1962 fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)];
1963#endif
1964 if (IS_ETH_FP(fp))
1965 for_each_cos_in_tx_queue(fp, cos)
1966 fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos *
1967 BNX2X_NUM_ETH_QUEUES(bp) + index];
1968
1903 /* 1969 /*
1904 * set the tpa flag for each queue. The tpa flag determines the queue 1970 * set the tpa flag for each queue. The tpa flag determines the queue
1905 * minimal size so it must be set prior to queue memory allocation 1971 * minimal size so it must be set prior to queue memory allocation
@@ -1949,11 +2015,13 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
1949 /* 2015 /*
1950 * Zero fastpath structures preserving invariants like napi, which are 2016 * Zero fastpath structures preserving invariants like napi, which are
1951 * allocated only once, fp index, max_cos, bp pointer. 2017 * allocated only once, fp index, max_cos, bp pointer.
1952 * Also set fp->disable_tpa. 2018 * Also set fp->disable_tpa and txdata_ptr.
1953 */ 2019 */
1954 DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues); 2020 DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues);
1955 for_each_queue(bp, i) 2021 for_each_queue(bp, i)
1956 bnx2x_bz_fp(bp, i); 2022 bnx2x_bz_fp(bp, i);
2023 memset(bp->bnx2x_txq, 0, bp->bnx2x_txq_size *
2024 sizeof(struct bnx2x_fp_txdata));
1957 2025
1958 2026
1959 /* Set the receive queues buffer size */ 2027 /* Set the receive queues buffer size */
@@ -2176,6 +2244,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2176 break; 2244 break;
2177 2245
2178 case LOAD_DIAG: 2246 case LOAD_DIAG:
2247 case LOAD_LOOPBACK_EXT:
2179 bp->state = BNX2X_STATE_DIAG; 2248 bp->state = BNX2X_STATE_DIAG;
2180 break; 2249 break;
2181 2250
@@ -2195,6 +2264,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2195 /* re-read iscsi info */ 2264 /* re-read iscsi info */
2196 bnx2x_get_iscsi_info(bp); 2265 bnx2x_get_iscsi_info(bp);
2197 bnx2x_setup_cnic_irq_info(bp); 2266 bnx2x_setup_cnic_irq_info(bp);
2267 bnx2x_setup_cnic_info(bp);
2198 if (bp->state == BNX2X_STATE_OPEN) 2268 if (bp->state == BNX2X_STATE_OPEN)
2199 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD); 2269 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
2200#endif 2270#endif
@@ -2215,7 +2285,10 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2215 return -EBUSY; 2285 return -EBUSY;
2216 } 2286 }
2217 2287
2218 bnx2x_dcbx_init(bp); 2288 /* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */
2289 if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG))
2290 bnx2x_dcbx_init(bp, false);
2291
2219 return 0; 2292 return 0;
2220 2293
2221#ifndef BNX2X_STOP_ON_ERROR 2294#ifndef BNX2X_STOP_ON_ERROR
@@ -2298,6 +2371,7 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
2298 2371
2299 /* Stop Tx */ 2372 /* Stop Tx */
2300 bnx2x_tx_disable(bp); 2373 bnx2x_tx_disable(bp);
2374 netdev_reset_tc(bp->dev);
2301 2375
2302#ifdef BCM_CNIC 2376#ifdef BCM_CNIC
2303 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD); 2377 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
@@ -2456,8 +2530,8 @@ int bnx2x_poll(struct napi_struct *napi, int budget)
2456#endif 2530#endif
2457 2531
2458 for_each_cos_in_tx_queue(fp, cos) 2532 for_each_cos_in_tx_queue(fp, cos)
2459 if (bnx2x_tx_queue_has_work(&fp->txdata[cos])) 2533 if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
2460 bnx2x_tx_int(bp, &fp->txdata[cos]); 2534 bnx2x_tx_int(bp, fp->txdata_ptr[cos]);
2461 2535
2462 2536
2463 if (bnx2x_has_rx_work(fp)) { 2537 if (bnx2x_has_rx_work(fp)) {
@@ -2834,7 +2908,6 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
2834{ 2908{
2835 struct bnx2x *bp = netdev_priv(dev); 2909 struct bnx2x *bp = netdev_priv(dev);
2836 2910
2837 struct bnx2x_fastpath *fp;
2838 struct netdev_queue *txq; 2911 struct netdev_queue *txq;
2839 struct bnx2x_fp_txdata *txdata; 2912 struct bnx2x_fp_txdata *txdata;
2840 struct sw_tx_bd *tx_buf; 2913 struct sw_tx_bd *tx_buf;
@@ -2844,7 +2917,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
2844 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL; 2917 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
2845 u32 pbd_e2_parsing_data = 0; 2918 u32 pbd_e2_parsing_data = 0;
2846 u16 pkt_prod, bd_prod; 2919 u16 pkt_prod, bd_prod;
2847 int nbd, txq_index, fp_index, txdata_index; 2920 int nbd, txq_index;
2848 dma_addr_t mapping; 2921 dma_addr_t mapping;
2849 u32 xmit_type = bnx2x_xmit_type(bp, skb); 2922 u32 xmit_type = bnx2x_xmit_type(bp, skb);
2850 int i; 2923 int i;
@@ -2863,31 +2936,12 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
2863 2936
2864 BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + FCOE_PRESENT); 2937 BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + FCOE_PRESENT);
2865 2938
2866 /* decode the fastpath index and the cos index from the txq */ 2939 txdata = &bp->bnx2x_txq[txq_index];
2867 fp_index = TXQ_TO_FP(txq_index);
2868 txdata_index = TXQ_TO_COS(txq_index);
2869
2870#ifdef BCM_CNIC
2871 /*
2872 * Override the above for the FCoE queue:
2873 * - FCoE fp entry is right after the ETH entries.
2874 * - FCoE L2 queue uses bp->txdata[0] only.
2875 */
2876 if (unlikely(!NO_FCOE(bp) && (txq_index ==
2877 bnx2x_fcoe_tx(bp, txq_index)))) {
2878 fp_index = FCOE_IDX;
2879 txdata_index = 0;
2880 }
2881#endif
2882 2940
2883 /* enable this debug print to view the transmission queue being used 2941 /* enable this debug print to view the transmission queue being used
2884 DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n", 2942 DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n",
2885 txq_index, fp_index, txdata_index); */ 2943 txq_index, fp_index, txdata_index); */
2886 2944
2887 /* locate the fastpath and the txdata */
2888 fp = &bp->fp[fp_index];
2889 txdata = &fp->txdata[txdata_index];
2890
2891 /* enable this debug print to view the tranmission details 2945 /* enable this debug print to view the tranmission details
2892 DP(NETIF_MSG_TX_QUEUED, 2946 DP(NETIF_MSG_TX_QUEUED,
2893 "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n", 2947 "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n",
@@ -2895,7 +2949,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
2895 2949
2896 if (unlikely(bnx2x_tx_avail(bp, txdata) < 2950 if (unlikely(bnx2x_tx_avail(bp, txdata) <
2897 (skb_shinfo(skb)->nr_frags + 3))) { 2951 (skb_shinfo(skb)->nr_frags + 3))) {
2898 fp->eth_q_stats.driver_xoff++; 2952 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
2899 netif_tx_stop_queue(txq); 2953 netif_tx_stop_queue(txq);
2900 BNX2X_ERR("BUG! Tx ring full when queue awake!\n"); 2954 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
2901 return NETDEV_TX_BUSY; 2955 return NETDEV_TX_BUSY;
@@ -3177,7 +3231,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
3177 * fp->bd_tx_cons */ 3231 * fp->bd_tx_cons */
3178 smp_mb(); 3232 smp_mb();
3179 3233
3180 fp->eth_q_stats.driver_xoff++; 3234 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
3181 if (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 4) 3235 if (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 4)
3182 netif_tx_wake_queue(txq); 3236 netif_tx_wake_queue(txq);
3183 } 3237 }
@@ -3243,7 +3297,7 @@ int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
3243 /* configure traffic class to transmission queue mapping */ 3297 /* configure traffic class to transmission queue mapping */
3244 for (cos = 0; cos < bp->max_cos; cos++) { 3298 for (cos = 0; cos < bp->max_cos; cos++) {
3245 count = BNX2X_NUM_ETH_QUEUES(bp); 3299 count = BNX2X_NUM_ETH_QUEUES(bp);
3246 offset = cos * MAX_TXQS_PER_COS; 3300 offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp);
3247 netdev_set_tc_queue(dev, cos, count, offset); 3301 netdev_set_tc_queue(dev, cos, count, offset);
3248 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP, 3302 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
3249 "mapping tc %d to offset %d count %d\n", 3303 "mapping tc %d to offset %d count %d\n",
@@ -3342,7 +3396,7 @@ static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
3342 if (!skip_tx_queue(bp, fp_index)) { 3396 if (!skip_tx_queue(bp, fp_index)) {
3343 /* fastpath tx rings: tx_buf tx_desc */ 3397 /* fastpath tx rings: tx_buf tx_desc */
3344 for_each_cos_in_tx_queue(fp, cos) { 3398 for_each_cos_in_tx_queue(fp, cos) {
3345 struct bnx2x_fp_txdata *txdata = &fp->txdata[cos]; 3399 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
3346 3400
3347 DP(NETIF_MSG_IFDOWN, 3401 DP(NETIF_MSG_IFDOWN,
3348 "freeing tx memory of fp %d cos %d cid %d\n", 3402 "freeing tx memory of fp %d cos %d cid %d\n",
@@ -3414,7 +3468,7 @@ static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
3414 cqe_ring_prod); 3468 cqe_ring_prod);
3415 fp->rx_pkt = fp->rx_calls = 0; 3469 fp->rx_pkt = fp->rx_calls = 0;
3416 3470
3417 fp->eth_q_stats.rx_skb_alloc_failed += failure_cnt; 3471 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
3418 3472
3419 return i - failure_cnt; 3473 return i - failure_cnt;
3420} 3474}
@@ -3499,7 +3553,7 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
3499 if (!skip_tx_queue(bp, index)) { 3553 if (!skip_tx_queue(bp, index)) {
3500 /* fastpath tx rings: tx_buf tx_desc */ 3554 /* fastpath tx rings: tx_buf tx_desc */
3501 for_each_cos_in_tx_queue(fp, cos) { 3555 for_each_cos_in_tx_queue(fp, cos) {
3502 struct bnx2x_fp_txdata *txdata = &fp->txdata[cos]; 3556 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
3503 3557
3504 DP(NETIF_MSG_IFUP, 3558 DP(NETIF_MSG_IFUP,
3505 "allocating tx memory of fp %d cos %d\n", 3559 "allocating tx memory of fp %d cos %d\n",
@@ -3582,7 +3636,7 @@ int bnx2x_alloc_fp_mem(struct bnx2x *bp)
3582#ifdef BCM_CNIC 3636#ifdef BCM_CNIC
3583 if (!NO_FCOE(bp)) 3637 if (!NO_FCOE(bp))
3584 /* FCoE */ 3638 /* FCoE */
3585 if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX)) 3639 if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
3586 /* we will fail load process instead of mark 3640 /* we will fail load process instead of mark
3587 * NO_FCOE_FLAG 3641 * NO_FCOE_FLAG
3588 */ 3642 */
@@ -3607,7 +3661,7 @@ int bnx2x_alloc_fp_mem(struct bnx2x *bp)
3607 */ 3661 */
3608 3662
3609 /* move FCoE fp even NO_FCOE_FLAG is on */ 3663 /* move FCoE fp even NO_FCOE_FLAG is on */
3610 bnx2x_move_fp(bp, FCOE_IDX, FCOE_IDX - delta); 3664 bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta);
3611#endif 3665#endif
3612 bp->num_queues -= delta; 3666 bp->num_queues -= delta;
3613 BNX2X_ERR("Adjusted num of queues from %d to %d\n", 3667 BNX2X_ERR("Adjusted num of queues from %d to %d\n",
@@ -3619,7 +3673,11 @@ int bnx2x_alloc_fp_mem(struct bnx2x *bp)
3619 3673
3620void bnx2x_free_mem_bp(struct bnx2x *bp) 3674void bnx2x_free_mem_bp(struct bnx2x *bp)
3621{ 3675{
3676 kfree(bp->fp->tpa_info);
3622 kfree(bp->fp); 3677 kfree(bp->fp);
3678 kfree(bp->sp_objs);
3679 kfree(bp->fp_stats);
3680 kfree(bp->bnx2x_txq);
3623 kfree(bp->msix_table); 3681 kfree(bp->msix_table);
3624 kfree(bp->ilt); 3682 kfree(bp->ilt);
3625} 3683}
@@ -3630,6 +3688,8 @@ int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
3630 struct msix_entry *tbl; 3688 struct msix_entry *tbl;
3631 struct bnx2x_ilt *ilt; 3689 struct bnx2x_ilt *ilt;
3632 int msix_table_size = 0; 3690 int msix_table_size = 0;
3691 int fp_array_size;
3692 int i;
3633 3693
3634 /* 3694 /*
3635 * The biggest MSI-X table we might need is as a maximum number of fast 3695 * The biggest MSI-X table we might need is as a maximum number of fast
@@ -3638,12 +3698,44 @@ int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
3638 msix_table_size = bp->igu_sb_cnt + 1; 3698 msix_table_size = bp->igu_sb_cnt + 1;
3639 3699
3640 /* fp array: RSS plus CNIC related L2 queues */ 3700 /* fp array: RSS plus CNIC related L2 queues */
3641 fp = kcalloc(BNX2X_MAX_RSS_COUNT(bp) + NON_ETH_CONTEXT_USE, 3701 fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + NON_ETH_CONTEXT_USE;
3642 sizeof(*fp), GFP_KERNEL); 3702 BNX2X_DEV_INFO("fp_array_size %d", fp_array_size);
3703
3704 fp = kcalloc(fp_array_size, sizeof(*fp), GFP_KERNEL);
3643 if (!fp) 3705 if (!fp)
3644 goto alloc_err; 3706 goto alloc_err;
3707 for (i = 0; i < fp_array_size; i++) {
3708 fp[i].tpa_info =
3709 kcalloc(ETH_MAX_AGGREGATION_QUEUES_E1H_E2,
3710 sizeof(struct bnx2x_agg_info), GFP_KERNEL);
3711 if (!(fp[i].tpa_info))
3712 goto alloc_err;
3713 }
3714
3645 bp->fp = fp; 3715 bp->fp = fp;
3646 3716
3717 /* allocate sp objs */
3718 bp->sp_objs = kcalloc(fp_array_size, sizeof(struct bnx2x_sp_objs),
3719 GFP_KERNEL);
3720 if (!bp->sp_objs)
3721 goto alloc_err;
3722
3723 /* allocate fp_stats */
3724 bp->fp_stats = kcalloc(fp_array_size, sizeof(struct bnx2x_fp_stats),
3725 GFP_KERNEL);
3726 if (!bp->fp_stats)
3727 goto alloc_err;
3728
3729 /* Allocate memory for the transmission queues array */
3730 bp->bnx2x_txq_size = BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS;
3731#ifdef BCM_CNIC
3732 bp->bnx2x_txq_size++;
3733#endif
3734 bp->bnx2x_txq = kcalloc(bp->bnx2x_txq_size,
3735 sizeof(struct bnx2x_fp_txdata), GFP_KERNEL);
3736 if (!bp->bnx2x_txq)
3737 goto alloc_err;
3738
3647 /* msix table */ 3739 /* msix table */
3648 tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL); 3740 tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
3649 if (!tbl) 3741 if (!tbl)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index 7cd99b75347..dfa757e7429 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -29,6 +29,7 @@
29extern int load_count[2][3]; /* per-path: 0-common, 1-port0, 2-port1 */ 29extern int load_count[2][3]; /* per-path: 0-common, 1-port0, 2-port1 */
30 30
31extern int num_queues; 31extern int num_queues;
32extern int int_mode;
32 33
33/************************ Macros ********************************/ 34/************************ Macros ********************************/
34#define BNX2X_PCI_FREE(x, y, size) \ 35#define BNX2X_PCI_FREE(x, y, size) \
@@ -89,12 +90,12 @@ void bnx2x_send_unload_done(struct bnx2x *bp);
89 * bnx2x_config_rss_pf - configure RSS parameters in a PF. 90 * bnx2x_config_rss_pf - configure RSS parameters in a PF.
90 * 91 *
91 * @bp: driver handle 92 * @bp: driver handle
92 * @rss_obj RSS object to use 93 * @rss_obj: RSS object to use
93 * @ind_table: indirection table to configure 94 * @ind_table: indirection table to configure
94 * @config_hash: re-configure RSS hash keys configuration 95 * @config_hash: re-configure RSS hash keys configuration
95 */ 96 */
96int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj, 97int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
97 u8 *ind_table, bool config_hash); 98 bool config_hash);
98 99
99/** 100/**
100 * bnx2x__init_func_obj - init function object 101 * bnx2x__init_func_obj - init function object
@@ -244,6 +245,14 @@ int bnx2x_cnic_notify(struct bnx2x *bp, int cmd);
244 * @bp: driver handle 245 * @bp: driver handle
245 */ 246 */
246void bnx2x_setup_cnic_irq_info(struct bnx2x *bp); 247void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);
248
249/**
250 * bnx2x_setup_cnic_info - provides cnic with updated info
251 *
252 * @bp: driver handle
253 */
254void bnx2x_setup_cnic_info(struct bnx2x *bp);
255
247#endif 256#endif
248 257
249/** 258/**
@@ -409,7 +418,7 @@ void bnx2x_ilt_set_info(struct bnx2x *bp);
409 * 418 *
410 * @bp: driver handle 419 * @bp: driver handle
411 */ 420 */
412void bnx2x_dcbx_init(struct bnx2x *bp); 421void bnx2x_dcbx_init(struct bnx2x *bp, bool update_shmem);
413 422
414/** 423/**
415 * bnx2x_set_power_state - set power state to the requested value. 424 * bnx2x_set_power_state - set power state to the requested value.
@@ -487,7 +496,7 @@ void bnx2x_netif_start(struct bnx2x *bp);
487 * fills msix_table, requests vectors, updates num_queues 496 * fills msix_table, requests vectors, updates num_queues
488 * according to number of available vectors. 497 * according to number of available vectors.
489 */ 498 */
490int __devinit bnx2x_enable_msix(struct bnx2x *bp); 499int bnx2x_enable_msix(struct bnx2x *bp);
491 500
492/** 501/**
493 * bnx2x_enable_msi - request msi mode from OS, updated internals accordingly 502 * bnx2x_enable_msi - request msi mode from OS, updated internals accordingly
@@ -728,7 +737,7 @@ static inline bool bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
728{ 737{
729 u8 cos; 738 u8 cos;
730 for_each_cos_in_tx_queue(fp, cos) 739 for_each_cos_in_tx_queue(fp, cos)
731 if (bnx2x_tx_queue_has_work(&fp->txdata[cos])) 740 if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
732 return true; 741 return true;
733 return false; 742 return false;
734} 743}
@@ -780,8 +789,10 @@ static inline void bnx2x_add_all_napi(struct bnx2x *bp)
780{ 789{
781 int i; 790 int i;
782 791
792 bp->num_napi_queues = bp->num_queues;
793
783 /* Add NAPI objects */ 794 /* Add NAPI objects */
784 for_each_rx_queue(bp, i) 795 for_each_napi_rx_queue(bp, i)
785 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi), 796 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
786 bnx2x_poll, BNX2X_NAPI_WEIGHT); 797 bnx2x_poll, BNX2X_NAPI_WEIGHT);
787} 798}
@@ -790,10 +801,12 @@ static inline void bnx2x_del_all_napi(struct bnx2x *bp)
790{ 801{
791 int i; 802 int i;
792 803
793 for_each_rx_queue(bp, i) 804 for_each_napi_rx_queue(bp, i)
794 netif_napi_del(&bnx2x_fp(bp, i, napi)); 805 netif_napi_del(&bnx2x_fp(bp, i, napi));
795} 806}
796 807
808void bnx2x_set_int_mode(struct bnx2x *bp);
809
797static inline void bnx2x_disable_msi(struct bnx2x *bp) 810static inline void bnx2x_disable_msi(struct bnx2x *bp)
798{ 811{
799 if (bp->flags & USING_MSIX_FLAG) { 812 if (bp->flags & USING_MSIX_FLAG) {
@@ -809,7 +822,8 @@ static inline int bnx2x_calc_num_queues(struct bnx2x *bp)
809{ 822{
810 return num_queues ? 823 return num_queues ?
811 min_t(int, num_queues, BNX2X_MAX_QUEUES(bp)) : 824 min_t(int, num_queues, BNX2X_MAX_QUEUES(bp)) :
812 min_t(int, num_online_cpus(), BNX2X_MAX_QUEUES(bp)); 825 min_t(int, netif_get_num_default_rss_queues(),
826 BNX2X_MAX_QUEUES(bp));
813} 827}
814 828
815static inline void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp) 829static inline void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
@@ -865,11 +879,9 @@ static inline int func_by_vn(struct bnx2x *bp, int vn)
865 return 2 * vn + BP_PORT(bp); 879 return 2 * vn + BP_PORT(bp);
866} 880}
867 881
868static inline int bnx2x_config_rss_eth(struct bnx2x *bp, u8 *ind_table, 882static inline int bnx2x_config_rss_eth(struct bnx2x *bp, bool config_hash)
869 bool config_hash)
870{ 883{
871 return bnx2x_config_rss_pf(bp, &bp->rss_conf_obj, ind_table, 884 return bnx2x_config_rss_pf(bp, &bp->rss_conf_obj, config_hash);
872 config_hash);
873} 885}
874 886
875/** 887/**
@@ -975,8 +987,8 @@ static inline void bnx2x_init_vlan_mac_fp_objs(struct bnx2x_fastpath *fp,
975 struct bnx2x *bp = fp->bp; 987 struct bnx2x *bp = fp->bp;
976 988
977 /* Configure classification DBs */ 989 /* Configure classification DBs */
978 bnx2x_init_mac_obj(bp, &fp->mac_obj, fp->cl_id, fp->cid, 990 bnx2x_init_mac_obj(bp, &bnx2x_sp_obj(bp, fp).mac_obj, fp->cl_id,
979 BP_FUNC(bp), bnx2x_sp(bp, mac_rdata), 991 fp->cid, BP_FUNC(bp), bnx2x_sp(bp, mac_rdata),
980 bnx2x_sp_mapping(bp, mac_rdata), 992 bnx2x_sp_mapping(bp, mac_rdata),
981 BNX2X_FILTER_MAC_PENDING, 993 BNX2X_FILTER_MAC_PENDING,
982 &bp->sp_state, obj_type, 994 &bp->sp_state, obj_type,
@@ -1068,12 +1080,14 @@ static inline u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp)
1068} 1080}
1069 1081
1070static inline void bnx2x_init_txdata(struct bnx2x *bp, 1082static inline void bnx2x_init_txdata(struct bnx2x *bp,
1071 struct bnx2x_fp_txdata *txdata, u32 cid, int txq_index, 1083 struct bnx2x_fp_txdata *txdata, u32 cid,
1072 __le16 *tx_cons_sb) 1084 int txq_index, __le16 *tx_cons_sb,
1085 struct bnx2x_fastpath *fp)
1073{ 1086{
1074 txdata->cid = cid; 1087 txdata->cid = cid;
1075 txdata->txq_index = txq_index; 1088 txdata->txq_index = txq_index;
1076 txdata->tx_cons_sb = tx_cons_sb; 1089 txdata->tx_cons_sb = tx_cons_sb;
1090 txdata->parent_fp = fp;
1077 1091
1078 DP(NETIF_MSG_IFUP, "created tx data cid %d, txq %d\n", 1092 DP(NETIF_MSG_IFUP, "created tx data cid %d, txq %d\n",
1079 txdata->cid, txdata->txq_index); 1093 txdata->cid, txdata->txq_index);
@@ -1107,18 +1121,13 @@ static inline void bnx2x_init_fcoe_fp(struct bnx2x *bp)
1107 bnx2x_fcoe(bp, rx_queue) = BNX2X_NUM_ETH_QUEUES(bp); 1121 bnx2x_fcoe(bp, rx_queue) = BNX2X_NUM_ETH_QUEUES(bp);
1108 bnx2x_fcoe(bp, cl_id) = bnx2x_cnic_eth_cl_id(bp, 1122 bnx2x_fcoe(bp, cl_id) = bnx2x_cnic_eth_cl_id(bp,
1109 BNX2X_FCOE_ETH_CL_ID_IDX); 1123 BNX2X_FCOE_ETH_CL_ID_IDX);
1110 /** Current BNX2X_FCOE_ETH_CID deffinition implies not more than 1124 bnx2x_fcoe(bp, cid) = BNX2X_FCOE_ETH_CID(bp);
1111 * 16 ETH clients per function when CNIC is enabled!
1112 *
1113 * Fix it ASAP!!!
1114 */
1115 bnx2x_fcoe(bp, cid) = BNX2X_FCOE_ETH_CID;
1116 bnx2x_fcoe(bp, fw_sb_id) = DEF_SB_ID; 1125 bnx2x_fcoe(bp, fw_sb_id) = DEF_SB_ID;
1117 bnx2x_fcoe(bp, igu_sb_id) = bp->igu_dsb_id; 1126 bnx2x_fcoe(bp, igu_sb_id) = bp->igu_dsb_id;
1118 bnx2x_fcoe(bp, rx_cons_sb) = BNX2X_FCOE_L2_RX_INDEX; 1127 bnx2x_fcoe(bp, rx_cons_sb) = BNX2X_FCOE_L2_RX_INDEX;
1119 1128 bnx2x_init_txdata(bp, bnx2x_fcoe(bp, txdata_ptr[0]),
1120 bnx2x_init_txdata(bp, &bnx2x_fcoe(bp, txdata[0]), 1129 fp->cid, FCOE_TXQ_IDX(bp), BNX2X_FCOE_L2_TX_INDEX,
1121 fp->cid, FCOE_TXQ_IDX(bp), BNX2X_FCOE_L2_TX_INDEX); 1130 fp);
1122 1131
1123 DP(NETIF_MSG_IFUP, "created fcoe tx data (fp index %d)\n", fp->index); 1132 DP(NETIF_MSG_IFUP, "created fcoe tx data (fp index %d)\n", fp->index);
1124 1133
@@ -1135,8 +1144,8 @@ static inline void bnx2x_init_fcoe_fp(struct bnx2x *bp)
1135 /* No multi-CoS for FCoE L2 client */ 1144 /* No multi-CoS for FCoE L2 client */
1136 BUG_ON(fp->max_cos != 1); 1145 BUG_ON(fp->max_cos != 1);
1137 1146
1138 bnx2x_init_queue_obj(bp, &fp->q_obj, fp->cl_id, &fp->cid, 1, 1147 bnx2x_init_queue_obj(bp, &bnx2x_sp_obj(bp, fp).q_obj, fp->cl_id,
1139 BP_FUNC(bp), bnx2x_sp(bp, q_rdata), 1148 &fp->cid, 1, BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
1140 bnx2x_sp_mapping(bp, q_rdata), q_type); 1149 bnx2x_sp_mapping(bp, q_rdata), q_type);
1141 1150
1142 DP(NETIF_MSG_IFUP, 1151 DP(NETIF_MSG_IFUP,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
index 4f9244bd753..8a73374e52a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
@@ -972,23 +972,26 @@ void bnx2x_dcbx_init_params(struct bnx2x *bp)
972 bp->dcbx_config_params.admin_default_priority = 0; 972 bp->dcbx_config_params.admin_default_priority = 0;
973} 973}
974 974
975void bnx2x_dcbx_init(struct bnx2x *bp) 975void bnx2x_dcbx_init(struct bnx2x *bp, bool update_shmem)
976{ 976{
977 u32 dcbx_lldp_params_offset = SHMEM_LLDP_DCBX_PARAMS_NONE; 977 u32 dcbx_lldp_params_offset = SHMEM_LLDP_DCBX_PARAMS_NONE;
978 978
979 /* only PMF can send ADMIN msg to MFW in old MFW versions */
980 if ((!bp->port.pmf) && (!(bp->flags & BC_SUPPORTS_DCBX_MSG_NON_PMF)))
981 return;
982
979 if (bp->dcbx_enabled <= 0) 983 if (bp->dcbx_enabled <= 0)
980 return; 984 return;
981 985
982 /* validate: 986 /* validate:
983 * chip of good for dcbx version, 987 * chip of good for dcbx version,
984 * dcb is wanted 988 * dcb is wanted
985 * the function is pmf
986 * shmem2 contains DCBX support fields 989 * shmem2 contains DCBX support fields
987 */ 990 */
988 DP(BNX2X_MSG_DCB, "dcb_state %d bp->port.pmf %d\n", 991 DP(BNX2X_MSG_DCB, "dcb_state %d bp->port.pmf %d\n",
989 bp->dcb_state, bp->port.pmf); 992 bp->dcb_state, bp->port.pmf);
990 993
991 if (bp->dcb_state == BNX2X_DCB_STATE_ON && bp->port.pmf && 994 if (bp->dcb_state == BNX2X_DCB_STATE_ON &&
992 SHMEM2_HAS(bp, dcbx_lldp_params_offset)) { 995 SHMEM2_HAS(bp, dcbx_lldp_params_offset)) {
993 dcbx_lldp_params_offset = 996 dcbx_lldp_params_offset =
994 SHMEM2_RD(bp, dcbx_lldp_params_offset); 997 SHMEM2_RD(bp, dcbx_lldp_params_offset);
@@ -999,12 +1002,23 @@ void bnx2x_dcbx_init(struct bnx2x *bp)
999 bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_DCB_CONFIGURED, 0); 1002 bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_DCB_CONFIGURED, 0);
1000 1003
1001 if (SHMEM_LLDP_DCBX_PARAMS_NONE != dcbx_lldp_params_offset) { 1004 if (SHMEM_LLDP_DCBX_PARAMS_NONE != dcbx_lldp_params_offset) {
1002 bnx2x_dcbx_admin_mib_updated_params(bp, 1005 /* need HW lock to avoid scenario of two drivers
1003 dcbx_lldp_params_offset); 1006 * writing in parallel to shmem
1007 */
1008 bnx2x_acquire_hw_lock(bp,
1009 HW_LOCK_RESOURCE_DCBX_ADMIN_MIB);
1010 if (update_shmem)
1011 bnx2x_dcbx_admin_mib_updated_params(bp,
1012 dcbx_lldp_params_offset);
1004 1013
1005 /* Let HW start negotiation */ 1014 /* Let HW start negotiation */
1006 bnx2x_fw_command(bp, 1015 bnx2x_fw_command(bp,
1007 DRV_MSG_CODE_DCBX_ADMIN_PMF_MSG, 0); 1016 DRV_MSG_CODE_DCBX_ADMIN_PMF_MSG, 0);
1017 /* release HW lock only after MFW acks that it finished
1018 * reading values from shmem
1019 */
1020 bnx2x_release_hw_lock(bp,
1021 HW_LOCK_RESOURCE_DCBX_ADMIN_MIB);
1008 } 1022 }
1009 } 1023 }
1010} 1024}
@@ -2063,10 +2077,8 @@ static u8 bnx2x_dcbnl_set_all(struct net_device *netdev)
2063 "Handling parity error recovery. Try again later\n"); 2077 "Handling parity error recovery. Try again later\n");
2064 return 1; 2078 return 1;
2065 } 2079 }
2066 if (netif_running(bp->dev)) { 2080 if (netif_running(bp->dev))
2067 bnx2x_nic_unload(bp, UNLOAD_NORMAL); 2081 bnx2x_dcbx_init(bp, true);
2068 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
2069 }
2070 DP(BNX2X_MSG_DCB, "set_dcbx_params done (%d)\n", rc); 2082 DP(BNX2X_MSG_DCB, "set_dcbx_params done (%d)\n", rc);
2071 if (rc) 2083 if (rc)
2072 return 1; 2084 return 1;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index ddc18ee5c5a..bff31290198 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -177,6 +177,8 @@ static const struct {
177 4, STATS_FLAGS_FUNC, "recoverable_errors" }, 177 4, STATS_FLAGS_FUNC, "recoverable_errors" },
178 { STATS_OFFSET32(unrecoverable_error), 178 { STATS_OFFSET32(unrecoverable_error),
179 4, STATS_FLAGS_FUNC, "unrecoverable_errors" }, 179 4, STATS_FLAGS_FUNC, "unrecoverable_errors" },
180 { STATS_OFFSET32(eee_tx_lpi),
181 4, STATS_FLAGS_PORT, "Tx LPI entry count"}
180}; 182};
181 183
182#define BNX2X_NUM_STATS ARRAY_SIZE(bnx2x_stats_arr) 184#define BNX2X_NUM_STATS ARRAY_SIZE(bnx2x_stats_arr)
@@ -185,7 +187,8 @@ static int bnx2x_get_port_type(struct bnx2x *bp)
185 int port_type; 187 int port_type;
186 u32 phy_idx = bnx2x_get_cur_phy_idx(bp); 188 u32 phy_idx = bnx2x_get_cur_phy_idx(bp);
187 switch (bp->link_params.phy[phy_idx].media_type) { 189 switch (bp->link_params.phy[phy_idx].media_type) {
188 case ETH_PHY_SFP_FIBER: 190 case ETH_PHY_SFPP_10G_FIBER:
191 case ETH_PHY_SFP_1G_FIBER:
189 case ETH_PHY_XFP_FIBER: 192 case ETH_PHY_XFP_FIBER:
190 case ETH_PHY_KR: 193 case ETH_PHY_KR:
191 case ETH_PHY_CX4: 194 case ETH_PHY_CX4:
@@ -218,6 +221,11 @@ static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
218 (bp->port.supported[cfg_idx ^ 1] & 221 (bp->port.supported[cfg_idx ^ 1] &
219 (SUPPORTED_TP | SUPPORTED_FIBRE)); 222 (SUPPORTED_TP | SUPPORTED_FIBRE));
220 cmd->advertising = bp->port.advertising[cfg_idx]; 223 cmd->advertising = bp->port.advertising[cfg_idx];
224 if (bp->link_params.phy[bnx2x_get_cur_phy_idx(bp)].media_type ==
225 ETH_PHY_SFP_1G_FIBER) {
226 cmd->supported &= ~(SUPPORTED_10000baseT_Full);
227 cmd->advertising &= ~(ADVERTISED_10000baseT_Full);
228 }
221 229
222 if ((bp->state == BNX2X_STATE_OPEN) && (bp->link_vars.link_up)) { 230 if ((bp->state == BNX2X_STATE_OPEN) && (bp->link_vars.link_up)) {
223 if (!(bp->flags & MF_FUNC_DIS)) { 231 if (!(bp->flags & MF_FUNC_DIS)) {
@@ -293,7 +301,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
293{ 301{
294 struct bnx2x *bp = netdev_priv(dev); 302 struct bnx2x *bp = netdev_priv(dev);
295 u32 advertising, cfg_idx, old_multi_phy_config, new_multi_phy_config; 303 u32 advertising, cfg_idx, old_multi_phy_config, new_multi_phy_config;
296 u32 speed; 304 u32 speed, phy_idx;
297 305
298 if (IS_MF_SD(bp)) 306 if (IS_MF_SD(bp))
299 return 0; 307 return 0;
@@ -548,9 +556,11 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
548 "10G half not supported\n"); 556 "10G half not supported\n");
549 return -EINVAL; 557 return -EINVAL;
550 } 558 }
551 559 phy_idx = bnx2x_get_cur_phy_idx(bp);
552 if (!(bp->port.supported[cfg_idx] 560 if (!(bp->port.supported[cfg_idx]
553 & SUPPORTED_10000baseT_Full)) { 561 & SUPPORTED_10000baseT_Full) ||
562 (bp->link_params.phy[phy_idx].media_type ==
563 ETH_PHY_SFP_1G_FIBER)) {
554 DP(BNX2X_MSG_ETHTOOL, 564 DP(BNX2X_MSG_ETHTOOL,
555 "10G full not supported\n"); 565 "10G full not supported\n");
556 return -EINVAL; 566 return -EINVAL;
@@ -824,7 +834,7 @@ static void bnx2x_get_drvinfo(struct net_device *dev,
824 ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver); 834 ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
825 strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info)); 835 strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
826 info->n_stats = BNX2X_NUM_STATS; 836 info->n_stats = BNX2X_NUM_STATS;
827 info->testinfo_len = BNX2X_NUM_TESTS; 837 info->testinfo_len = BNX2X_NUM_TESTS(bp);
828 info->eedump_len = bp->common.flash_size; 838 info->eedump_len = bp->common.flash_size;
829 info->regdump_len = bnx2x_get_regs_len(dev); 839 info->regdump_len = bnx2x_get_regs_len(dev);
830} 840}
@@ -1150,6 +1160,65 @@ static int bnx2x_get_eeprom(struct net_device *dev,
1150 return rc; 1160 return rc;
1151} 1161}
1152 1162
1163static int bnx2x_get_module_eeprom(struct net_device *dev,
1164 struct ethtool_eeprom *ee,
1165 u8 *data)
1166{
1167 struct bnx2x *bp = netdev_priv(dev);
1168 int rc = 0, phy_idx;
1169 u8 *user_data = data;
1170 int remaining_len = ee->len, xfer_size;
1171 unsigned int page_off = ee->offset;
1172
1173 if (!netif_running(dev)) {
1174 DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
1175 "cannot access eeprom when the interface is down\n");
1176 return -EAGAIN;
1177 }
1178
1179 phy_idx = bnx2x_get_cur_phy_idx(bp);
1180 bnx2x_acquire_phy_lock(bp);
1181 while (!rc && remaining_len > 0) {
1182 xfer_size = (remaining_len > SFP_EEPROM_PAGE_SIZE) ?
1183 SFP_EEPROM_PAGE_SIZE : remaining_len;
1184 rc = bnx2x_read_sfp_module_eeprom(&bp->link_params.phy[phy_idx],
1185 &bp->link_params,
1186 page_off,
1187 xfer_size,
1188 user_data);
1189 remaining_len -= xfer_size;
1190 user_data += xfer_size;
1191 page_off += xfer_size;
1192 }
1193
1194 bnx2x_release_phy_lock(bp);
1195 return rc;
1196}
1197
1198static int bnx2x_get_module_info(struct net_device *dev,
1199 struct ethtool_modinfo *modinfo)
1200{
1201 struct bnx2x *bp = netdev_priv(dev);
1202 int phy_idx;
1203 if (!netif_running(dev)) {
1204 DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
1205 "cannot access eeprom when the interface is down\n");
1206 return -EAGAIN;
1207 }
1208
1209 phy_idx = bnx2x_get_cur_phy_idx(bp);
1210 switch (bp->link_params.phy[phy_idx].media_type) {
1211 case ETH_PHY_SFPP_10G_FIBER:
1212 case ETH_PHY_SFP_1G_FIBER:
1213 case ETH_PHY_DA_TWINAX:
1214 modinfo->type = ETH_MODULE_SFF_8079;
1215 modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
1216 return 0;
1217 default:
1218 return -EOPNOTSUPP;
1219 }
1220}
1221
1153static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val, 1222static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
1154 u32 cmd_flags) 1223 u32 cmd_flags)
1155{ 1224{
@@ -1531,18 +1600,146 @@ static int bnx2x_set_pauseparam(struct net_device *dev,
1531 return 0; 1600 return 0;
1532} 1601}
1533 1602
1534static const struct { 1603static char *bnx2x_tests_str_arr[BNX2X_NUM_TESTS_SF] = {
1535 char string[ETH_GSTRING_LEN]; 1604 "register_test (offline) ",
1536} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = { 1605 "memory_test (offline) ",
1537 { "register_test (offline)" }, 1606 "int_loopback_test (offline)",
1538 { "memory_test (offline)" }, 1607 "ext_loopback_test (offline)",
1539 { "loopback_test (offline)" }, 1608 "nvram_test (online) ",
1540 { "nvram_test (online)" }, 1609 "interrupt_test (online) ",
1541 { "interrupt_test (online)" }, 1610 "link_test (online) "
1542 { "link_test (online)" },
1543 { "idle check (online)" }
1544}; 1611};
1545 1612
1613static u32 bnx2x_eee_to_adv(u32 eee_adv)
1614{
1615 u32 modes = 0;
1616
1617 if (eee_adv & SHMEM_EEE_100M_ADV)
1618 modes |= ADVERTISED_100baseT_Full;
1619 if (eee_adv & SHMEM_EEE_1G_ADV)
1620 modes |= ADVERTISED_1000baseT_Full;
1621 if (eee_adv & SHMEM_EEE_10G_ADV)
1622 modes |= ADVERTISED_10000baseT_Full;
1623
1624 return modes;
1625}
1626
1627static u32 bnx2x_adv_to_eee(u32 modes, u32 shift)
1628{
1629 u32 eee_adv = 0;
1630 if (modes & ADVERTISED_100baseT_Full)
1631 eee_adv |= SHMEM_EEE_100M_ADV;
1632 if (modes & ADVERTISED_1000baseT_Full)
1633 eee_adv |= SHMEM_EEE_1G_ADV;
1634 if (modes & ADVERTISED_10000baseT_Full)
1635 eee_adv |= SHMEM_EEE_10G_ADV;
1636
1637 return eee_adv << shift;
1638}
1639
1640static int bnx2x_get_eee(struct net_device *dev, struct ethtool_eee *edata)
1641{
1642 struct bnx2x *bp = netdev_priv(dev);
1643 u32 eee_cfg;
1644
1645 if (!SHMEM2_HAS(bp, eee_status[BP_PORT(bp)])) {
1646 DP(BNX2X_MSG_ETHTOOL, "BC Version does not support EEE\n");
1647 return -EOPNOTSUPP;
1648 }
1649
1650 eee_cfg = SHMEM2_RD(bp, eee_status[BP_PORT(bp)]);
1651
1652 edata->supported =
1653 bnx2x_eee_to_adv((eee_cfg & SHMEM_EEE_SUPPORTED_MASK) >>
1654 SHMEM_EEE_SUPPORTED_SHIFT);
1655
1656 edata->advertised =
1657 bnx2x_eee_to_adv((eee_cfg & SHMEM_EEE_ADV_STATUS_MASK) >>
1658 SHMEM_EEE_ADV_STATUS_SHIFT);
1659 edata->lp_advertised =
1660 bnx2x_eee_to_adv((eee_cfg & SHMEM_EEE_LP_ADV_STATUS_MASK) >>
1661 SHMEM_EEE_LP_ADV_STATUS_SHIFT);
1662
1663 /* SHMEM value is in 16u units --> Convert to 1u units. */
1664 edata->tx_lpi_timer = (eee_cfg & SHMEM_EEE_TIMER_MASK) << 4;
1665
1666 edata->eee_enabled = (eee_cfg & SHMEM_EEE_REQUESTED_BIT) ? 1 : 0;
1667 edata->eee_active = (eee_cfg & SHMEM_EEE_ACTIVE_BIT) ? 1 : 0;
1668 edata->tx_lpi_enabled = (eee_cfg & SHMEM_EEE_LPI_REQUESTED_BIT) ? 1 : 0;
1669
1670 return 0;
1671}
1672
1673static int bnx2x_set_eee(struct net_device *dev, struct ethtool_eee *edata)
1674{
1675 struct bnx2x *bp = netdev_priv(dev);
1676 u32 eee_cfg;
1677 u32 advertised;
1678
1679 if (IS_MF(bp))
1680 return 0;
1681
1682 if (!SHMEM2_HAS(bp, eee_status[BP_PORT(bp)])) {
1683 DP(BNX2X_MSG_ETHTOOL, "BC Version does not support EEE\n");
1684 return -EOPNOTSUPP;
1685 }
1686
1687 eee_cfg = SHMEM2_RD(bp, eee_status[BP_PORT(bp)]);
1688
1689 if (!(eee_cfg & SHMEM_EEE_SUPPORTED_MASK)) {
1690 DP(BNX2X_MSG_ETHTOOL, "Board does not support EEE!\n");
1691 return -EOPNOTSUPP;
1692 }
1693
1694 advertised = bnx2x_adv_to_eee(edata->advertised,
1695 SHMEM_EEE_ADV_STATUS_SHIFT);
1696 if ((advertised != (eee_cfg & SHMEM_EEE_ADV_STATUS_MASK))) {
1697 DP(BNX2X_MSG_ETHTOOL,
1698 "Direct manipulation of EEE advertisment is not supported\n");
1699 return -EINVAL;
1700 }
1701
1702 if (edata->tx_lpi_timer > EEE_MODE_TIMER_MASK) {
1703 DP(BNX2X_MSG_ETHTOOL,
1704 "Maximal Tx Lpi timer supported is %x(u)\n",
1705 EEE_MODE_TIMER_MASK);
1706 return -EINVAL;
1707 }
1708 if (edata->tx_lpi_enabled &&
1709 (edata->tx_lpi_timer < EEE_MODE_NVRAM_AGGRESSIVE_TIME)) {
1710 DP(BNX2X_MSG_ETHTOOL,
1711 "Minimal Tx Lpi timer supported is %d(u)\n",
1712 EEE_MODE_NVRAM_AGGRESSIVE_TIME);
1713 return -EINVAL;
1714 }
1715
1716 /* All is well; Apply changes*/
1717 if (edata->eee_enabled)
1718 bp->link_params.eee_mode |= EEE_MODE_ADV_LPI;
1719 else
1720 bp->link_params.eee_mode &= ~EEE_MODE_ADV_LPI;
1721
1722 if (edata->tx_lpi_enabled)
1723 bp->link_params.eee_mode |= EEE_MODE_ENABLE_LPI;
1724 else
1725 bp->link_params.eee_mode &= ~EEE_MODE_ENABLE_LPI;
1726
1727 bp->link_params.eee_mode &= ~EEE_MODE_TIMER_MASK;
1728 bp->link_params.eee_mode |= (edata->tx_lpi_timer &
1729 EEE_MODE_TIMER_MASK) |
1730 EEE_MODE_OVERRIDE_NVRAM |
1731 EEE_MODE_OUTPUT_TIME;
1732
1733 /* Restart link to propogate changes */
1734 if (netif_running(dev)) {
1735 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
1736 bnx2x_link_set(bp);
1737 }
1738
1739 return 0;
1740}
1741
1742
1546enum { 1743enum {
1547 BNX2X_CHIP_E1_OFST = 0, 1744 BNX2X_CHIP_E1_OFST = 0,
1548 BNX2X_CHIP_E1H_OFST, 1745 BNX2X_CHIP_E1H_OFST,
@@ -1811,6 +2008,14 @@ static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up, u8 is_serdes)
1811 2008
1812 if (cnt <= 0 && bnx2x_link_test(bp, is_serdes)) 2009 if (cnt <= 0 && bnx2x_link_test(bp, is_serdes))
1813 DP(BNX2X_MSG_ETHTOOL, "Timeout waiting for link up\n"); 2010 DP(BNX2X_MSG_ETHTOOL, "Timeout waiting for link up\n");
2011
2012 cnt = 1400;
2013 while (!bp->link_vars.link_up && cnt--)
2014 msleep(20);
2015
2016 if (cnt <= 0 && !bp->link_vars.link_up)
2017 DP(BNX2X_MSG_ETHTOOL,
2018 "Timeout waiting for link init\n");
1814 } 2019 }
1815} 2020}
1816 2021
@@ -1821,7 +2026,7 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode)
1821 unsigned char *packet; 2026 unsigned char *packet;
1822 struct bnx2x_fastpath *fp_rx = &bp->fp[0]; 2027 struct bnx2x_fastpath *fp_rx = &bp->fp[0];
1823 struct bnx2x_fastpath *fp_tx = &bp->fp[0]; 2028 struct bnx2x_fastpath *fp_tx = &bp->fp[0];
1824 struct bnx2x_fp_txdata *txdata = &fp_tx->txdata[0]; 2029 struct bnx2x_fp_txdata *txdata = fp_tx->txdata_ptr[0];
1825 u16 tx_start_idx, tx_idx; 2030 u16 tx_start_idx, tx_idx;
1826 u16 rx_start_idx, rx_idx; 2031 u16 rx_start_idx, rx_idx;
1827 u16 pkt_prod, bd_prod; 2032 u16 pkt_prod, bd_prod;
@@ -1836,13 +2041,16 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode)
1836 u16 len; 2041 u16 len;
1837 int rc = -ENODEV; 2042 int rc = -ENODEV;
1838 u8 *data; 2043 u8 *data;
1839 struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txdata->txq_index); 2044 struct netdev_queue *txq = netdev_get_tx_queue(bp->dev,
2045 txdata->txq_index);
1840 2046
1841 /* check the loopback mode */ 2047 /* check the loopback mode */
1842 switch (loopback_mode) { 2048 switch (loopback_mode) {
1843 case BNX2X_PHY_LOOPBACK: 2049 case BNX2X_PHY_LOOPBACK:
1844 if (bp->link_params.loopback_mode != LOOPBACK_XGXS) 2050 if (bp->link_params.loopback_mode != LOOPBACK_XGXS) {
2051 DP(BNX2X_MSG_ETHTOOL, "PHY loopback not supported\n");
1845 return -EINVAL; 2052 return -EINVAL;
2053 }
1846 break; 2054 break;
1847 case BNX2X_MAC_LOOPBACK: 2055 case BNX2X_MAC_LOOPBACK:
1848 if (CHIP_IS_E3(bp)) { 2056 if (CHIP_IS_E3(bp)) {
@@ -1859,6 +2067,13 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode)
1859 2067
1860 bnx2x_phy_init(&bp->link_params, &bp->link_vars); 2068 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
1861 break; 2069 break;
2070 case BNX2X_EXT_LOOPBACK:
2071 if (bp->link_params.loopback_mode != LOOPBACK_EXT) {
2072 DP(BNX2X_MSG_ETHTOOL,
2073 "Can't configure external loopback\n");
2074 return -EINVAL;
2075 }
2076 break;
1862 default: 2077 default:
1863 DP(BNX2X_MSG_ETHTOOL, "Command parameters not supported\n"); 2078 DP(BNX2X_MSG_ETHTOOL, "Command parameters not supported\n");
1864 return -EINVAL; 2079 return -EINVAL;
@@ -2030,6 +2245,38 @@ static int bnx2x_test_loopback(struct bnx2x *bp)
2030 return rc; 2245 return rc;
2031} 2246}
2032 2247
2248static int bnx2x_test_ext_loopback(struct bnx2x *bp)
2249{
2250 int rc;
2251 u8 is_serdes =
2252 (bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) > 0;
2253
2254 if (BP_NOMCP(bp))
2255 return -ENODEV;
2256
2257 if (!netif_running(bp->dev))
2258 return BNX2X_EXT_LOOPBACK_FAILED;
2259
2260 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
2261 rc = bnx2x_nic_load(bp, LOAD_LOOPBACK_EXT);
2262 if (rc) {
2263 DP(BNX2X_MSG_ETHTOOL,
2264 "Can't perform self-test, nic_load (for external lb) failed\n");
2265 return -ENODEV;
2266 }
2267 bnx2x_wait_for_link(bp, 1, is_serdes);
2268
2269 bnx2x_netif_stop(bp, 1);
2270
2271 rc = bnx2x_run_loopback(bp, BNX2X_EXT_LOOPBACK);
2272 if (rc)
2273 DP(BNX2X_MSG_ETHTOOL, "EXT loopback failed (res %d)\n", rc);
2274
2275 bnx2x_netif_start(bp);
2276
2277 return rc;
2278}
2279
2033#define CRC32_RESIDUAL 0xdebb20e3 2280#define CRC32_RESIDUAL 0xdebb20e3
2034 2281
2035static int bnx2x_test_nvram(struct bnx2x *bp) 2282static int bnx2x_test_nvram(struct bnx2x *bp)
@@ -2112,7 +2359,7 @@ static int bnx2x_test_intr(struct bnx2x *bp)
2112 return -ENODEV; 2359 return -ENODEV;
2113 } 2360 }
2114 2361
2115 params.q_obj = &bp->fp->q_obj; 2362 params.q_obj = &bp->sp_objs->q_obj;
2116 params.cmd = BNX2X_Q_CMD_EMPTY; 2363 params.cmd = BNX2X_Q_CMD_EMPTY;
2117 2364
2118 __set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags); 2365 __set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
@@ -2125,24 +2372,31 @@ static void bnx2x_self_test(struct net_device *dev,
2125{ 2372{
2126 struct bnx2x *bp = netdev_priv(dev); 2373 struct bnx2x *bp = netdev_priv(dev);
2127 u8 is_serdes; 2374 u8 is_serdes;
2375 int rc;
2376
2128 if (bp->recovery_state != BNX2X_RECOVERY_DONE) { 2377 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
2129 netdev_err(bp->dev, 2378 netdev_err(bp->dev,
2130 "Handling parity error recovery. Try again later\n"); 2379 "Handling parity error recovery. Try again later\n");
2131 etest->flags |= ETH_TEST_FL_FAILED; 2380 etest->flags |= ETH_TEST_FL_FAILED;
2132 return; 2381 return;
2133 } 2382 }
2383 DP(BNX2X_MSG_ETHTOOL,
2384 "Self-test command parameters: offline = %d, external_lb = %d\n",
2385 (etest->flags & ETH_TEST_FL_OFFLINE),
2386 (etest->flags & ETH_TEST_FL_EXTERNAL_LB)>>2);
2134 2387
2135 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS); 2388 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS(bp));
2136 2389
2137 if (!netif_running(dev)) 2390 if (!netif_running(dev)) {
2391 DP(BNX2X_MSG_ETHTOOL,
2392 "Can't perform self-test when interface is down\n");
2138 return; 2393 return;
2394 }
2139 2395
2140 /* offline tests are not supported in MF mode */
2141 if (IS_MF(bp))
2142 etest->flags &= ~ETH_TEST_FL_OFFLINE;
2143 is_serdes = (bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) > 0; 2396 is_serdes = (bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) > 0;
2144 2397
2145 if (etest->flags & ETH_TEST_FL_OFFLINE) { 2398 /* offline tests are not supported in MF mode */
2399 if ((etest->flags & ETH_TEST_FL_OFFLINE) && !IS_MF(bp)) {
2146 int port = BP_PORT(bp); 2400 int port = BP_PORT(bp);
2147 u32 val; 2401 u32 val;
2148 u8 link_up; 2402 u8 link_up;
@@ -2155,7 +2409,14 @@ static void bnx2x_self_test(struct net_device *dev,
2155 link_up = bp->link_vars.link_up; 2409 link_up = bp->link_vars.link_up;
2156 2410
2157 bnx2x_nic_unload(bp, UNLOAD_NORMAL); 2411 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
2158 bnx2x_nic_load(bp, LOAD_DIAG); 2412 rc = bnx2x_nic_load(bp, LOAD_DIAG);
2413 if (rc) {
2414 etest->flags |= ETH_TEST_FL_FAILED;
2415 DP(BNX2X_MSG_ETHTOOL,
2416 "Can't perform self-test, nic_load (for offline) failed\n");
2417 return;
2418 }
2419
2159 /* wait until link state is restored */ 2420 /* wait until link state is restored */
2160 bnx2x_wait_for_link(bp, 1, is_serdes); 2421 bnx2x_wait_for_link(bp, 1, is_serdes);
2161 2422
@@ -2168,30 +2429,51 @@ static void bnx2x_self_test(struct net_device *dev,
2168 etest->flags |= ETH_TEST_FL_FAILED; 2429 etest->flags |= ETH_TEST_FL_FAILED;
2169 } 2430 }
2170 2431
2171 buf[2] = bnx2x_test_loopback(bp); 2432 buf[2] = bnx2x_test_loopback(bp); /* internal LB */
2172 if (buf[2] != 0) 2433 if (buf[2] != 0)
2173 etest->flags |= ETH_TEST_FL_FAILED; 2434 etest->flags |= ETH_TEST_FL_FAILED;
2174 2435
2436 if (etest->flags & ETH_TEST_FL_EXTERNAL_LB) {
2437 buf[3] = bnx2x_test_ext_loopback(bp); /* external LB */
2438 if (buf[3] != 0)
2439 etest->flags |= ETH_TEST_FL_FAILED;
2440 etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
2441 }
2442
2175 bnx2x_nic_unload(bp, UNLOAD_NORMAL); 2443 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
2176 2444
2177 /* restore input for TX port IF */ 2445 /* restore input for TX port IF */
2178 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val); 2446 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);
2179 2447 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
2180 bnx2x_nic_load(bp, LOAD_NORMAL); 2448 if (rc) {
2449 etest->flags |= ETH_TEST_FL_FAILED;
2450 DP(BNX2X_MSG_ETHTOOL,
2451 "Can't perform self-test, nic_load (for online) failed\n");
2452 return;
2453 }
2181 /* wait until link state is restored */ 2454 /* wait until link state is restored */
2182 bnx2x_wait_for_link(bp, link_up, is_serdes); 2455 bnx2x_wait_for_link(bp, link_up, is_serdes);
2183 } 2456 }
2184 if (bnx2x_test_nvram(bp) != 0) { 2457 if (bnx2x_test_nvram(bp) != 0) {
2185 buf[3] = 1; 2458 if (!IS_MF(bp))
2459 buf[4] = 1;
2460 else
2461 buf[0] = 1;
2186 etest->flags |= ETH_TEST_FL_FAILED; 2462 etest->flags |= ETH_TEST_FL_FAILED;
2187 } 2463 }
2188 if (bnx2x_test_intr(bp) != 0) { 2464 if (bnx2x_test_intr(bp) != 0) {
2189 buf[4] = 1; 2465 if (!IS_MF(bp))
2466 buf[5] = 1;
2467 else
2468 buf[1] = 1;
2190 etest->flags |= ETH_TEST_FL_FAILED; 2469 etest->flags |= ETH_TEST_FL_FAILED;
2191 } 2470 }
2192 2471
2193 if (bnx2x_link_test(bp, is_serdes) != 0) { 2472 if (bnx2x_link_test(bp, is_serdes) != 0) {
2194 buf[5] = 1; 2473 if (!IS_MF(bp))
2474 buf[6] = 1;
2475 else
2476 buf[2] = 1;
2195 etest->flags |= ETH_TEST_FL_FAILED; 2477 etest->flags |= ETH_TEST_FL_FAILED;
2196 } 2478 }
2197 2479
@@ -2236,7 +2518,7 @@ static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
2236 return num_stats; 2518 return num_stats;
2237 2519
2238 case ETH_SS_TEST: 2520 case ETH_SS_TEST:
2239 return BNX2X_NUM_TESTS; 2521 return BNX2X_NUM_TESTS(bp);
2240 2522
2241 default: 2523 default:
2242 return -EINVAL; 2524 return -EINVAL;
@@ -2246,7 +2528,7 @@ static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
2246static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf) 2528static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
2247{ 2529{
2248 struct bnx2x *bp = netdev_priv(dev); 2530 struct bnx2x *bp = netdev_priv(dev);
2249 int i, j, k; 2531 int i, j, k, offset, start;
2250 char queue_name[MAX_QUEUE_NAME_LEN+1]; 2532 char queue_name[MAX_QUEUE_NAME_LEN+1];
2251 2533
2252 switch (stringset) { 2534 switch (stringset) {
@@ -2277,7 +2559,17 @@ static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
2277 break; 2559 break;
2278 2560
2279 case ETH_SS_TEST: 2561 case ETH_SS_TEST:
2280 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr)); 2562 /* First 4 tests cannot be done in MF mode */
2563 if (!IS_MF(bp))
2564 start = 0;
2565 else
2566 start = 4;
2567 for (i = 0, j = start; j < (start + BNX2X_NUM_TESTS(bp));
2568 i++, j++) {
2569 offset = sprintf(buf+32*i, "%s",
2570 bnx2x_tests_str_arr[j]);
2571 *(buf+offset) = '\0';
2572 }
2281 break; 2573 break;
2282 } 2574 }
2283} 2575}
@@ -2291,7 +2583,7 @@ static void bnx2x_get_ethtool_stats(struct net_device *dev,
2291 2583
2292 if (is_multi(bp)) { 2584 if (is_multi(bp)) {
2293 for_each_eth_queue(bp, i) { 2585 for_each_eth_queue(bp, i) {
2294 hw_stats = (u32 *)&bp->fp[i].eth_q_stats; 2586 hw_stats = (u32 *)&bp->fp_stats[i].eth_q_stats;
2295 for (j = 0; j < BNX2X_NUM_Q_STATS; j++) { 2587 for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
2296 if (bnx2x_q_stats_arr[j].size == 0) { 2588 if (bnx2x_q_stats_arr[j].size == 0) {
2297 /* skip this counter */ 2589 /* skip this counter */
@@ -2375,6 +2667,41 @@ static int bnx2x_set_phys_id(struct net_device *dev,
2375 return 0; 2667 return 0;
2376} 2668}
2377 2669
2670static int bnx2x_get_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info)
2671{
2672
2673 switch (info->flow_type) {
2674 case TCP_V4_FLOW:
2675 case TCP_V6_FLOW:
2676 info->data = RXH_IP_SRC | RXH_IP_DST |
2677 RXH_L4_B_0_1 | RXH_L4_B_2_3;
2678 break;
2679 case UDP_V4_FLOW:
2680 if (bp->rss_conf_obj.udp_rss_v4)
2681 info->data = RXH_IP_SRC | RXH_IP_DST |
2682 RXH_L4_B_0_1 | RXH_L4_B_2_3;
2683 else
2684 info->data = RXH_IP_SRC | RXH_IP_DST;
2685 break;
2686 case UDP_V6_FLOW:
2687 if (bp->rss_conf_obj.udp_rss_v6)
2688 info->data = RXH_IP_SRC | RXH_IP_DST |
2689 RXH_L4_B_0_1 | RXH_L4_B_2_3;
2690 else
2691 info->data = RXH_IP_SRC | RXH_IP_DST;
2692 break;
2693 case IPV4_FLOW:
2694 case IPV6_FLOW:
2695 info->data = RXH_IP_SRC | RXH_IP_DST;
2696 break;
2697 default:
2698 info->data = 0;
2699 break;
2700 }
2701
2702 return 0;
2703}
2704
2378static int bnx2x_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, 2705static int bnx2x_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
2379 u32 *rules __always_unused) 2706 u32 *rules __always_unused)
2380{ 2707{
@@ -2384,7 +2711,102 @@ static int bnx2x_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
2384 case ETHTOOL_GRXRINGS: 2711 case ETHTOOL_GRXRINGS:
2385 info->data = BNX2X_NUM_ETH_QUEUES(bp); 2712 info->data = BNX2X_NUM_ETH_QUEUES(bp);
2386 return 0; 2713 return 0;
2714 case ETHTOOL_GRXFH:
2715 return bnx2x_get_rss_flags(bp, info);
2716 default:
2717 DP(BNX2X_MSG_ETHTOOL, "Command parameters not supported\n");
2718 return -EOPNOTSUPP;
2719 }
2720}
2721
2722static int bnx2x_set_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info)
2723{
2724 int udp_rss_requested;
2725
2726 DP(BNX2X_MSG_ETHTOOL,
2727 "Set rss flags command parameters: flow type = %d, data = %llu\n",
2728 info->flow_type, info->data);
2729
2730 switch (info->flow_type) {
2731 case TCP_V4_FLOW:
2732 case TCP_V6_FLOW:
2733 /* For TCP only 4-tupple hash is supported */
2734 if (info->data ^ (RXH_IP_SRC | RXH_IP_DST |
2735 RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
2736 DP(BNX2X_MSG_ETHTOOL,
2737 "Command parameters not supported\n");
2738 return -EINVAL;
2739 } else {
2740 return 0;
2741 }
2387 2742
2743 case UDP_V4_FLOW:
2744 case UDP_V6_FLOW:
2745 /* For UDP either 2-tupple hash or 4-tupple hash is supported */
2746 if (info->data == (RXH_IP_SRC | RXH_IP_DST |
2747 RXH_L4_B_0_1 | RXH_L4_B_2_3))
2748 udp_rss_requested = 1;
2749 else if (info->data == (RXH_IP_SRC | RXH_IP_DST))
2750 udp_rss_requested = 0;
2751 else
2752 return -EINVAL;
2753 if ((info->flow_type == UDP_V4_FLOW) &&
2754 (bp->rss_conf_obj.udp_rss_v4 != udp_rss_requested)) {
2755 bp->rss_conf_obj.udp_rss_v4 = udp_rss_requested;
2756 DP(BNX2X_MSG_ETHTOOL,
2757 "rss re-configured, UDP 4-tupple %s\n",
2758 udp_rss_requested ? "enabled" : "disabled");
2759 return bnx2x_config_rss_pf(bp, &bp->rss_conf_obj, 0);
2760 } else if ((info->flow_type == UDP_V6_FLOW) &&
2761 (bp->rss_conf_obj.udp_rss_v6 != udp_rss_requested)) {
2762 bp->rss_conf_obj.udp_rss_v6 = udp_rss_requested;
2763 return bnx2x_config_rss_pf(bp, &bp->rss_conf_obj, 0);
2764 DP(BNX2X_MSG_ETHTOOL,
2765 "rss re-configured, UDP 4-tupple %s\n",
2766 udp_rss_requested ? "enabled" : "disabled");
2767 } else {
2768 return 0;
2769 }
2770 case IPV4_FLOW:
2771 case IPV6_FLOW:
2772 /* For IP only 2-tupple hash is supported */
2773 if (info->data ^ (RXH_IP_SRC | RXH_IP_DST)) {
2774 DP(BNX2X_MSG_ETHTOOL,
2775 "Command parameters not supported\n");
2776 return -EINVAL;
2777 } else {
2778 return 0;
2779 }
2780 case SCTP_V4_FLOW:
2781 case AH_ESP_V4_FLOW:
2782 case AH_V4_FLOW:
2783 case ESP_V4_FLOW:
2784 case SCTP_V6_FLOW:
2785 case AH_ESP_V6_FLOW:
2786 case AH_V6_FLOW:
2787 case ESP_V6_FLOW:
2788 case IP_USER_FLOW:
2789 case ETHER_FLOW:
2790 /* RSS is not supported for these protocols */
2791 if (info->data) {
2792 DP(BNX2X_MSG_ETHTOOL,
2793 "Command parameters not supported\n");
2794 return -EINVAL;
2795 } else {
2796 return 0;
2797 }
2798 default:
2799 return -EINVAL;
2800 }
2801}
2802
2803static int bnx2x_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info)
2804{
2805 struct bnx2x *bp = netdev_priv(dev);
2806
2807 switch (info->cmd) {
2808 case ETHTOOL_SRXFH:
2809 return bnx2x_set_rss_flags(bp, info);
2388 default: 2810 default:
2389 DP(BNX2X_MSG_ETHTOOL, "Command parameters not supported\n"); 2811 DP(BNX2X_MSG_ETHTOOL, "Command parameters not supported\n");
2390 return -EOPNOTSUPP; 2812 return -EOPNOTSUPP;
@@ -2424,7 +2846,6 @@ static int bnx2x_set_rxfh_indir(struct net_device *dev, const u32 *indir)
2424{ 2846{
2425 struct bnx2x *bp = netdev_priv(dev); 2847 struct bnx2x *bp = netdev_priv(dev);
2426 size_t i; 2848 size_t i;
2427 u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0};
2428 2849
2429 for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) { 2850 for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) {
2430 /* 2851 /*
@@ -2436,10 +2857,88 @@ static int bnx2x_set_rxfh_indir(struct net_device *dev, const u32 *indir)
2436 * align the received table to the Client ID of the leading RSS 2857 * align the received table to the Client ID of the leading RSS
2437 * queue 2858 * queue
2438 */ 2859 */
2439 ind_table[i] = indir[i] + bp->fp->cl_id; 2860 bp->rss_conf_obj.ind_table[i] = indir[i] + bp->fp->cl_id;
2440 } 2861 }
2441 2862
2442 return bnx2x_config_rss_eth(bp, ind_table, false); 2863 return bnx2x_config_rss_eth(bp, false);
2864}
2865
2866/**
2867 * bnx2x_get_channels - gets the number of RSS queues.
2868 *
2869 * @dev: net device
2870 * @channels: returns the number of max / current queues
2871 */
2872static void bnx2x_get_channels(struct net_device *dev,
2873 struct ethtool_channels *channels)
2874{
2875 struct bnx2x *bp = netdev_priv(dev);
2876
2877 channels->max_combined = BNX2X_MAX_RSS_COUNT(bp);
2878 channels->combined_count = BNX2X_NUM_ETH_QUEUES(bp);
2879}
2880
2881/**
2882 * bnx2x_change_num_queues - change the number of RSS queues.
2883 *
2884 * @bp: bnx2x private structure
2885 *
2886 * Re-configure interrupt mode to get the new number of MSI-X
2887 * vectors and re-add NAPI objects.
2888 */
2889static void bnx2x_change_num_queues(struct bnx2x *bp, int num_rss)
2890{
2891 bnx2x_del_all_napi(bp);
2892 bnx2x_disable_msi(bp);
2893 BNX2X_NUM_QUEUES(bp) = num_rss + NON_ETH_CONTEXT_USE;
2894 bnx2x_set_int_mode(bp);
2895 bnx2x_add_all_napi(bp);
2896}
2897
2898/**
2899 * bnx2x_set_channels - sets the number of RSS queues.
2900 *
2901 * @dev: net device
2902 * @channels: includes the number of queues requested
2903 */
2904static int bnx2x_set_channels(struct net_device *dev,
2905 struct ethtool_channels *channels)
2906{
2907 struct bnx2x *bp = netdev_priv(dev);
2908
2909
2910 DP(BNX2X_MSG_ETHTOOL,
2911 "set-channels command parameters: rx = %d, tx = %d, other = %d, combined = %d\n",
2912 channels->rx_count, channels->tx_count, channels->other_count,
2913 channels->combined_count);
2914
2915 /* We don't support separate rx / tx channels.
2916 * We don't allow setting 'other' channels.
2917 */
2918 if (channels->rx_count || channels->tx_count || channels->other_count
2919 || (channels->combined_count == 0) ||
2920 (channels->combined_count > BNX2X_MAX_RSS_COUNT(bp))) {
2921 DP(BNX2X_MSG_ETHTOOL, "command parameters not supported\n");
2922 return -EINVAL;
2923 }
2924
2925 /* Check if there was a change in the active parameters */
2926 if (channels->combined_count == BNX2X_NUM_ETH_QUEUES(bp)) {
2927 DP(BNX2X_MSG_ETHTOOL, "No change in active parameters\n");
2928 return 0;
2929 }
2930
2931 /* Set the requested number of queues in bp context.
2932 * Note that the actual number of queues created during load may be
2933 * less than requested if memory is low.
2934 */
2935 if (unlikely(!netif_running(dev))) {
2936 bnx2x_change_num_queues(bp, channels->combined_count);
2937 return 0;
2938 }
2939 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
2940 bnx2x_change_num_queues(bp, channels->combined_count);
2941 return bnx2x_nic_load(bp, LOAD_NORMAL);
2443} 2942}
2444 2943
2445static const struct ethtool_ops bnx2x_ethtool_ops = { 2944static const struct ethtool_ops bnx2x_ethtool_ops = {
@@ -2469,9 +2968,16 @@ static const struct ethtool_ops bnx2x_ethtool_ops = {
2469 .set_phys_id = bnx2x_set_phys_id, 2968 .set_phys_id = bnx2x_set_phys_id,
2470 .get_ethtool_stats = bnx2x_get_ethtool_stats, 2969 .get_ethtool_stats = bnx2x_get_ethtool_stats,
2471 .get_rxnfc = bnx2x_get_rxnfc, 2970 .get_rxnfc = bnx2x_get_rxnfc,
2971 .set_rxnfc = bnx2x_set_rxnfc,
2472 .get_rxfh_indir_size = bnx2x_get_rxfh_indir_size, 2972 .get_rxfh_indir_size = bnx2x_get_rxfh_indir_size,
2473 .get_rxfh_indir = bnx2x_get_rxfh_indir, 2973 .get_rxfh_indir = bnx2x_get_rxfh_indir,
2474 .set_rxfh_indir = bnx2x_set_rxfh_indir, 2974 .set_rxfh_indir = bnx2x_set_rxfh_indir,
2975 .get_channels = bnx2x_get_channels,
2976 .set_channels = bnx2x_set_channels,
2977 .get_module_info = bnx2x_get_module_info,
2978 .get_module_eeprom = bnx2x_get_module_eeprom,
2979 .get_eee = bnx2x_get_eee,
2980 .set_eee = bnx2x_set_eee,
2475}; 2981};
2476 2982
2477void bnx2x_set_ethtool_ops(struct net_device *netdev) 2983void bnx2x_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
index 426f77aa721..bbc66ced9c2 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
@@ -321,9 +321,7 @@
321#define DISABLE_STATISTIC_COUNTER_ID_VALUE 0 321#define DISABLE_STATISTIC_COUNTER_ID_VALUE 0
322 322
323 323
324/** 324/* This file defines HSI constants common to all microcode flows */
325 * This file defines HSI constants common to all microcode flows
326 */
327 325
328#define PROTOCOL_STATE_BIT_OFFSET 6 326#define PROTOCOL_STATE_BIT_OFFSET 6
329 327
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
index a440a8ba85f..76b6e65790f 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
@@ -10,6 +10,7 @@
10#define BNX2X_HSI_H 10#define BNX2X_HSI_H
11 11
12#include "bnx2x_fw_defs.h" 12#include "bnx2x_fw_defs.h"
13#include "bnx2x_mfw_req.h"
13 14
14#define FW_ENCODE_32BIT_PATTERN 0x1e1e1e1e 15#define FW_ENCODE_32BIT_PATTERN 0x1e1e1e1e
15 16
@@ -33,12 +34,6 @@ struct license_key {
33 u32 reserved_b[4]; 34 u32 reserved_b[4];
34}; 35};
35 36
36
37#define PORT_0 0
38#define PORT_1 1
39#define PORT_MAX 2
40#define NVM_PATH_MAX 2
41
42/**************************************************************************** 37/****************************************************************************
43 * Shared HW configuration * 38 * Shared HW configuration *
44 ****************************************************************************/ 39 ****************************************************************************/
@@ -1067,8 +1062,18 @@ struct port_feat_cfg { /* port 0: 0x454 port 1: 0x4c8 */
1067 uses the same defines as link_config */ 1062 uses the same defines as link_config */
1068 u32 mfw_wol_link_cfg2; /* 0x480 */ 1063 u32 mfw_wol_link_cfg2; /* 0x480 */
1069 1064
1070 u32 Reserved2[17]; /* 0x484 */
1071 1065
1066 /* EEE power saving mode */
1067 u32 eee_power_mode; /* 0x484 */
1068 #define PORT_FEAT_CFG_EEE_POWER_MODE_MASK 0x000000FF
1069 #define PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT 0
1070 #define PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED 0x00000000
1071 #define PORT_FEAT_CFG_EEE_POWER_MODE_BALANCED 0x00000001
1072 #define PORT_FEAT_CFG_EEE_POWER_MODE_AGGRESSIVE 0x00000002
1073 #define PORT_FEAT_CFG_EEE_POWER_MODE_LOW_LATENCY 0x00000003
1074
1075
1076 u32 Reserved2[16]; /* 0x488 */
1072}; 1077};
1073 1078
1074 1079
@@ -1140,6 +1145,7 @@ struct drv_port_mb {
1140 u32 link_status; 1145 u32 link_status;
1141 /* Driver should update this field on any link change event */ 1146 /* Driver should update this field on any link change event */
1142 1147
1148 #define LINK_STATUS_NONE (0<<0)
1143 #define LINK_STATUS_LINK_FLAG_MASK 0x00000001 1149 #define LINK_STATUS_LINK_FLAG_MASK 0x00000001
1144 #define LINK_STATUS_LINK_UP 0x00000001 1150 #define LINK_STATUS_LINK_UP 0x00000001
1145 #define LINK_STATUS_SPEED_AND_DUPLEX_MASK 0x0000001E 1151 #define LINK_STATUS_SPEED_AND_DUPLEX_MASK 0x0000001E
@@ -1197,6 +1203,7 @@ struct drv_port_mb {
1197 #define LINK_STATUS_PFC_ENABLED 0x20000000 1203 #define LINK_STATUS_PFC_ENABLED 0x20000000
1198 1204
1199 #define LINK_STATUS_PHYSICAL_LINK_FLAG 0x40000000 1205 #define LINK_STATUS_PHYSICAL_LINK_FLAG 0x40000000
1206 #define LINK_STATUS_SFP_TX_FAULT 0x80000000
1200 1207
1201 u32 port_stx; 1208 u32 port_stx;
1202 1209
@@ -1240,9 +1247,11 @@ struct drv_func_mb {
1240 #define REQ_BC_VER_4_VRFY_AFEX_SUPPORTED 0x00070002 1247 #define REQ_BC_VER_4_VRFY_AFEX_SUPPORTED 0x00070002
1241 #define REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED 0x00070014 1248 #define REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED 0x00070014
1242 #define REQ_BC_VER_4_PFC_STATS_SUPPORTED 0x00070201 1249 #define REQ_BC_VER_4_PFC_STATS_SUPPORTED 0x00070201
1250 #define REQ_BC_VER_4_FCOE_FEATURES 0x00070209
1243 1251
1244 #define DRV_MSG_CODE_DCBX_ADMIN_PMF_MSG 0xb0000000 1252 #define DRV_MSG_CODE_DCBX_ADMIN_PMF_MSG 0xb0000000
1245 #define DRV_MSG_CODE_DCBX_PMF_DRV_OK 0xb2000000 1253 #define DRV_MSG_CODE_DCBX_PMF_DRV_OK 0xb2000000
1254 #define REQ_BC_VER_4_DCBX_ADMIN_MSG_NON_PMF 0x00070401
1246 1255
1247 #define DRV_MSG_CODE_VF_DISABLED_DONE 0xc0000000 1256 #define DRV_MSG_CODE_VF_DISABLED_DONE 0xc0000000
1248 1257
@@ -1255,6 +1264,8 @@ struct drv_func_mb {
1255 #define DRV_MSG_CODE_DRV_INFO_ACK 0xd8000000 1264 #define DRV_MSG_CODE_DRV_INFO_ACK 0xd8000000
1256 #define DRV_MSG_CODE_DRV_INFO_NACK 0xd9000000 1265 #define DRV_MSG_CODE_DRV_INFO_NACK 0xd9000000
1257 1266
1267 #define DRV_MSG_CODE_EEE_RESULTS_ACK 0xda000000
1268
1258 #define DRV_MSG_CODE_SET_MF_BW 0xe0000000 1269 #define DRV_MSG_CODE_SET_MF_BW 0xe0000000
1259 #define REQ_BC_VER_4_SET_MF_BW 0x00060202 1270 #define REQ_BC_VER_4_SET_MF_BW 0x00060202
1260 #define DRV_MSG_CODE_SET_MF_BW_ACK 0xe1000000 1271 #define DRV_MSG_CODE_SET_MF_BW_ACK 0xe1000000
@@ -1320,6 +1331,8 @@ struct drv_func_mb {
1320 #define FW_MSG_CODE_DRV_INFO_ACK 0xd8100000 1331 #define FW_MSG_CODE_DRV_INFO_ACK 0xd8100000
1321 #define FW_MSG_CODE_DRV_INFO_NACK 0xd9100000 1332 #define FW_MSG_CODE_DRV_INFO_NACK 0xd9100000
1322 1333
1334 #define FW_MSG_CODE_EEE_RESULS_ACK 0xda100000
1335
1323 #define FW_MSG_CODE_SET_MF_BW_SENT 0xe0000000 1336 #define FW_MSG_CODE_SET_MF_BW_SENT 0xe0000000
1324 #define FW_MSG_CODE_SET_MF_BW_DONE 0xe1000000 1337 #define FW_MSG_CODE_SET_MF_BW_DONE 0xe1000000
1325 1338
@@ -1383,6 +1396,8 @@ struct drv_func_mb {
1383 1396
1384 #define DRV_STATUS_DRV_INFO_REQ 0x04000000 1397 #define DRV_STATUS_DRV_INFO_REQ 0x04000000
1385 1398
1399 #define DRV_STATUS_EEE_NEGOTIATION_RESULTS 0x08000000
1400
1386 u32 virt_mac_upper; 1401 u32 virt_mac_upper;
1387 #define VIRT_MAC_SIGN_MASK 0xffff0000 1402 #define VIRT_MAC_SIGN_MASK 0xffff0000
1388 #define VIRT_MAC_SIGNATURE 0x564d0000 1403 #define VIRT_MAC_SIGNATURE 0x564d0000
@@ -1613,6 +1628,11 @@ struct fw_flr_mb {
1613 struct fw_flr_ack ack; 1628 struct fw_flr_ack ack;
1614}; 1629};
1615 1630
1631struct eee_remote_vals {
1632 u32 tx_tw;
1633 u32 rx_tw;
1634};
1635
1616/**** SUPPORT FOR SHMEM ARRRAYS *** 1636/**** SUPPORT FOR SHMEM ARRRAYS ***
1617 * The SHMEM HSI is aligned on 32 bit boundaries which makes it difficult to 1637 * The SHMEM HSI is aligned on 32 bit boundaries which makes it difficult to
1618 * define arrays with storage types smaller then unsigned dwords. 1638 * define arrays with storage types smaller then unsigned dwords.
@@ -2053,6 +2073,41 @@ struct shmem2_region {
2053#define DRV_INFO_CONTROL_OP_CODE_MASK 0x0000ff00 2073#define DRV_INFO_CONTROL_OP_CODE_MASK 0x0000ff00
2054#define DRV_INFO_CONTROL_OP_CODE_SHIFT 8 2074#define DRV_INFO_CONTROL_OP_CODE_SHIFT 8
2055 u32 ibft_host_addr; /* initialized by option ROM */ 2075 u32 ibft_host_addr; /* initialized by option ROM */
2076 struct eee_remote_vals eee_remote_vals[PORT_MAX];
2077 u32 reserved[E2_FUNC_MAX];
2078
2079
2080 /* the status of EEE auto-negotiation
2081 * bits 15:0 the configured tx-lpi entry timer value. Depends on bit 31.
2082 * bits 19:16 the supported modes for EEE.
2083 * bits 23:20 the speeds advertised for EEE.
2084 * bits 27:24 the speeds the Link partner advertised for EEE.
2085 * The supported/adv. modes in bits 27:19 originate from the
2086 * SHMEM_EEE_XXX_ADV definitions (where XXX is replaced by speed).
2087 * bit 28 when 1'b1 EEE was requested.
2088 * bit 29 when 1'b1 tx lpi was requested.
2089 * bit 30 when 1'b1 EEE was negotiated. Tx lpi will be asserted iff
2090 * 30:29 are 2'b11.
2091 * bit 31 when 1'b0 bits 15:0 contain a PORT_FEAT_CFG_EEE_ define as
2092 * value. When 1'b1 those bits contains a value times 16 microseconds.
2093 */
2094 u32 eee_status[PORT_MAX];
2095 #define SHMEM_EEE_TIMER_MASK 0x0000ffff
2096 #define SHMEM_EEE_SUPPORTED_MASK 0x000f0000
2097 #define SHMEM_EEE_SUPPORTED_SHIFT 16
2098 #define SHMEM_EEE_ADV_STATUS_MASK 0x00f00000
2099 #define SHMEM_EEE_100M_ADV (1<<0)
2100 #define SHMEM_EEE_1G_ADV (1<<1)
2101 #define SHMEM_EEE_10G_ADV (1<<2)
2102 #define SHMEM_EEE_ADV_STATUS_SHIFT 20
2103 #define SHMEM_EEE_LP_ADV_STATUS_MASK 0x0f000000
2104 #define SHMEM_EEE_LP_ADV_STATUS_SHIFT 24
2105 #define SHMEM_EEE_REQUESTED_BIT 0x10000000
2106 #define SHMEM_EEE_LPI_REQUESTED_BIT 0x20000000
2107 #define SHMEM_EEE_ACTIVE_BIT 0x40000000
2108 #define SHMEM_EEE_TIME_OUTPUT_BIT 0x80000000
2109
2110 u32 sizeof_port_stats;
2056}; 2111};
2057 2112
2058 2113
@@ -2599,6 +2654,9 @@ struct host_port_stats {
2599 u32 pfc_frames_tx_lo; 2654 u32 pfc_frames_tx_lo;
2600 u32 pfc_frames_rx_hi; 2655 u32 pfc_frames_rx_hi;
2601 u32 pfc_frames_rx_lo; 2656 u32 pfc_frames_rx_lo;
2657
2658 u32 eee_lpi_count_hi;
2659 u32 eee_lpi_count_lo;
2602}; 2660};
2603 2661
2604 2662
@@ -2638,118 +2696,6 @@ struct host_func_stats {
2638/* VIC definitions */ 2696/* VIC definitions */
2639#define VICSTATST_UIF_INDEX 2 2697#define VICSTATST_UIF_INDEX 2
2640 2698
2641/* current drv_info version */
2642#define DRV_INFO_CUR_VER 1
2643
2644/* drv_info op codes supported */
2645enum drv_info_opcode {
2646 ETH_STATS_OPCODE,
2647 FCOE_STATS_OPCODE,
2648 ISCSI_STATS_OPCODE
2649};
2650
2651#define ETH_STAT_INFO_VERSION_LEN 12
2652/* Per PCI Function Ethernet Statistics required from the driver */
2653struct eth_stats_info {
2654 /* Function's Driver Version. padded to 12 */
2655 u8 version[ETH_STAT_INFO_VERSION_LEN];
2656 /* Locally Admin Addr. BigEndian EIU48. Actual size is 6 bytes */
2657 u8 mac_local[8];
2658 u8 mac_add1[8]; /* Additional Programmed MAC Addr 1. */
2659 u8 mac_add2[8]; /* Additional Programmed MAC Addr 2. */
2660 u32 mtu_size; /* MTU Size. Note : Negotiated MTU */
2661 u32 feature_flags; /* Feature_Flags. */
2662#define FEATURE_ETH_CHKSUM_OFFLOAD_MASK 0x01
2663#define FEATURE_ETH_LSO_MASK 0x02
2664#define FEATURE_ETH_BOOTMODE_MASK 0x1C
2665#define FEATURE_ETH_BOOTMODE_SHIFT 2
2666#define FEATURE_ETH_BOOTMODE_NONE (0x0 << 2)
2667#define FEATURE_ETH_BOOTMODE_PXE (0x1 << 2)
2668#define FEATURE_ETH_BOOTMODE_ISCSI (0x2 << 2)
2669#define FEATURE_ETH_BOOTMODE_FCOE (0x3 << 2)
2670#define FEATURE_ETH_TOE_MASK 0x20
2671 u32 lso_max_size; /* LSO MaxOffloadSize. */
2672 u32 lso_min_seg_cnt; /* LSO MinSegmentCount. */
2673 /* Num Offloaded Connections TCP_IPv4. */
2674 u32 ipv4_ofld_cnt;
2675 /* Num Offloaded Connections TCP_IPv6. */
2676 u32 ipv6_ofld_cnt;
2677 u32 promiscuous_mode; /* Promiscuous Mode. non-zero true */
2678 u32 txq_size; /* TX Descriptors Queue Size */
2679 u32 rxq_size; /* RX Descriptors Queue Size */
2680 /* TX Descriptor Queue Avg Depth. % Avg Queue Depth since last poll */
2681 u32 txq_avg_depth;
2682 /* RX Descriptors Queue Avg Depth. % Avg Queue Depth since last poll */
2683 u32 rxq_avg_depth;
2684 /* IOV_Offload. 0=none; 1=MultiQueue, 2=VEB 3= VEPA*/
2685 u32 iov_offload;
2686 /* Number of NetQueue/VMQ Config'd. */
2687 u32 netq_cnt;
2688 u32 vf_cnt; /* Num VF assigned to this PF. */
2689};
2690
2691/* Per PCI Function FCOE Statistics required from the driver */
2692struct fcoe_stats_info {
2693 u8 version[12]; /* Function's Driver Version. */
2694 u8 mac_local[8]; /* Locally Admin Addr. */
2695 u8 mac_add1[8]; /* Additional Programmed MAC Addr 1. */
2696 u8 mac_add2[8]; /* Additional Programmed MAC Addr 2. */
2697 /* QoS Priority (per 802.1p). 0-7255 */
2698 u32 qos_priority;
2699 u32 txq_size; /* FCoE TX Descriptors Queue Size. */
2700 u32 rxq_size; /* FCoE RX Descriptors Queue Size. */
2701 /* FCoE TX Descriptor Queue Avg Depth. */
2702 u32 txq_avg_depth;
2703 /* FCoE RX Descriptors Queue Avg Depth. */
2704 u32 rxq_avg_depth;
2705 u32 rx_frames_lo; /* FCoE RX Frames received. */
2706 u32 rx_frames_hi; /* FCoE RX Frames received. */
2707 u32 rx_bytes_lo; /* FCoE RX Bytes received. */
2708 u32 rx_bytes_hi; /* FCoE RX Bytes received. */
2709 u32 tx_frames_lo; /* FCoE TX Frames sent. */
2710 u32 tx_frames_hi; /* FCoE TX Frames sent. */
2711 u32 tx_bytes_lo; /* FCoE TX Bytes sent. */
2712 u32 tx_bytes_hi; /* FCoE TX Bytes sent. */
2713};
2714
2715/* Per PCI Function iSCSI Statistics required from the driver*/
2716struct iscsi_stats_info {
2717 u8 version[12]; /* Function's Driver Version. */
2718 u8 mac_local[8]; /* Locally Admin iSCSI MAC Addr. */
2719 u8 mac_add1[8]; /* Additional Programmed MAC Addr 1. */
2720 /* QoS Priority (per 802.1p). 0-7255 */
2721 u32 qos_priority;
2722 u8 initiator_name[64]; /* iSCSI Boot Initiator Node name. */
2723 u8 ww_port_name[64]; /* iSCSI World wide port name */
2724 u8 boot_target_name[64];/* iSCSI Boot Target Name. */
2725 u8 boot_target_ip[16]; /* iSCSI Boot Target IP. */
2726 u32 boot_target_portal; /* iSCSI Boot Target Portal. */
2727 u8 boot_init_ip[16]; /* iSCSI Boot Initiator IP Address. */
2728 u32 max_frame_size; /* Max Frame Size. bytes */
2729 u32 txq_size; /* PDU TX Descriptors Queue Size. */
2730 u32 rxq_size; /* PDU RX Descriptors Queue Size. */
2731 u32 txq_avg_depth; /* PDU TX Descriptor Queue Avg Depth. */
2732 u32 rxq_avg_depth; /* PDU RX Descriptors Queue Avg Depth. */
2733 u32 rx_pdus_lo; /* iSCSI PDUs received. */
2734 u32 rx_pdus_hi; /* iSCSI PDUs received. */
2735 u32 rx_bytes_lo; /* iSCSI RX Bytes received. */
2736 u32 rx_bytes_hi; /* iSCSI RX Bytes received. */
2737 u32 tx_pdus_lo; /* iSCSI PDUs sent. */
2738 u32 tx_pdus_hi; /* iSCSI PDUs sent. */
2739 u32 tx_bytes_lo; /* iSCSI PDU TX Bytes sent. */
2740 u32 tx_bytes_hi; /* iSCSI PDU TX Bytes sent. */
2741 u32 pcp_prior_map_tbl; /* C-PCP to S-PCP Priority MapTable.
2742 * 9 nibbles, the position of each nibble
2743 * represents the C-PCP value, the value
2744 * of the nibble = S-PCP value.
2745 */
2746};
2747
2748union drv_info_to_mcp {
2749 struct eth_stats_info ether_stat;
2750 struct fcoe_stats_info fcoe_stat;
2751 struct iscsi_stats_info iscsi_stat;
2752};
2753 2699
2754/* stats collected for afex. 2700/* stats collected for afex.
2755 * NOTE: structure is exactly as expected to be received by the switch. 2701 * NOTE: structure is exactly as expected to be received by the switch.
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index 6e7d5c0843b..e04b282c039 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -285,7 +285,6 @@
285#define ETS_E3B0_PBF_MIN_W_VAL (10000) 285#define ETS_E3B0_PBF_MIN_W_VAL (10000)
286 286
287#define MAX_PACKET_SIZE (9700) 287#define MAX_PACKET_SIZE (9700)
288#define WC_UC_TIMEOUT 100
289#define MAX_KR_LINK_RETRY 4 288#define MAX_KR_LINK_RETRY 4
290 289
291/**********************************************************/ 290/**********************************************************/
@@ -1306,6 +1305,94 @@ int bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos)
1306 1305
1307 return 0; 1306 return 0;
1308} 1307}
1308
1309/******************************************************************/
1310/* EEE section */
1311/******************************************************************/
1312static u8 bnx2x_eee_has_cap(struct link_params *params)
1313{
1314 struct bnx2x *bp = params->bp;
1315
1316 if (REG_RD(bp, params->shmem2_base) <=
1317 offsetof(struct shmem2_region, eee_status[params->port]))
1318 return 0;
1319
1320 return 1;
1321}
1322
1323static int bnx2x_eee_nvram_to_time(u32 nvram_mode, u32 *idle_timer)
1324{
1325 switch (nvram_mode) {
1326 case PORT_FEAT_CFG_EEE_POWER_MODE_BALANCED:
1327 *idle_timer = EEE_MODE_NVRAM_BALANCED_TIME;
1328 break;
1329 case PORT_FEAT_CFG_EEE_POWER_MODE_AGGRESSIVE:
1330 *idle_timer = EEE_MODE_NVRAM_AGGRESSIVE_TIME;
1331 break;
1332 case PORT_FEAT_CFG_EEE_POWER_MODE_LOW_LATENCY:
1333 *idle_timer = EEE_MODE_NVRAM_LATENCY_TIME;
1334 break;
1335 default:
1336 *idle_timer = 0;
1337 break;
1338 }
1339
1340 return 0;
1341}
1342
1343static int bnx2x_eee_time_to_nvram(u32 idle_timer, u32 *nvram_mode)
1344{
1345 switch (idle_timer) {
1346 case EEE_MODE_NVRAM_BALANCED_TIME:
1347 *nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_BALANCED;
1348 break;
1349 case EEE_MODE_NVRAM_AGGRESSIVE_TIME:
1350 *nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_AGGRESSIVE;
1351 break;
1352 case EEE_MODE_NVRAM_LATENCY_TIME:
1353 *nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_LOW_LATENCY;
1354 break;
1355 default:
1356 *nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED;
1357 break;
1358 }
1359
1360 return 0;
1361}
1362
1363static u32 bnx2x_eee_calc_timer(struct link_params *params)
1364{
1365 u32 eee_mode, eee_idle;
1366 struct bnx2x *bp = params->bp;
1367
1368 if (params->eee_mode & EEE_MODE_OVERRIDE_NVRAM) {
1369 if (params->eee_mode & EEE_MODE_OUTPUT_TIME) {
1370 /* time value in eee_mode --> used directly*/
1371 eee_idle = params->eee_mode & EEE_MODE_TIMER_MASK;
1372 } else {
1373 /* hsi value in eee_mode --> time */
1374 if (bnx2x_eee_nvram_to_time(params->eee_mode &
1375 EEE_MODE_NVRAM_MASK,
1376 &eee_idle))
1377 return 0;
1378 }
1379 } else {
1380 /* hsi values in nvram --> time*/
1381 eee_mode = ((REG_RD(bp, params->shmem_base +
1382 offsetof(struct shmem_region, dev_info.
1383 port_feature_config[params->port].
1384 eee_power_mode)) &
1385 PORT_FEAT_CFG_EEE_POWER_MODE_MASK) >>
1386 PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT);
1387
1388 if (bnx2x_eee_nvram_to_time(eee_mode, &eee_idle))
1389 return 0;
1390 }
1391
1392 return eee_idle;
1393}
1394
1395
1309/******************************************************************/ 1396/******************************************************************/
1310/* PFC section */ 1397/* PFC section */
1311/******************************************************************/ 1398/******************************************************************/
@@ -1540,7 +1627,7 @@ static void bnx2x_umac_enable(struct link_params *params,
1540 /* Reset UMAC */ 1627 /* Reset UMAC */
1541 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 1628 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
1542 (MISC_REGISTERS_RESET_REG_2_UMAC0 << params->port)); 1629 (MISC_REGISTERS_RESET_REG_2_UMAC0 << params->port));
1543 usleep_range(1000, 1000); 1630 usleep_range(1000, 2000);
1544 1631
1545 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 1632 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
1546 (MISC_REGISTERS_RESET_REG_2_UMAC0 << params->port)); 1633 (MISC_REGISTERS_RESET_REG_2_UMAC0 << params->port));
@@ -1642,7 +1729,7 @@ static void bnx2x_xmac_init(struct link_params *params, u32 max_speed)
1642 /* Hard reset */ 1729 /* Hard reset */
1643 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 1730 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
1644 MISC_REGISTERS_RESET_REG_2_XMAC); 1731 MISC_REGISTERS_RESET_REG_2_XMAC);
1645 usleep_range(1000, 1000); 1732 usleep_range(1000, 2000);
1646 1733
1647 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 1734 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
1648 MISC_REGISTERS_RESET_REG_2_XMAC); 1735 MISC_REGISTERS_RESET_REG_2_XMAC);
@@ -1672,7 +1759,7 @@ static void bnx2x_xmac_init(struct link_params *params, u32 max_speed)
1672 /* Soft reset */ 1759 /* Soft reset */
1673 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 1760 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
1674 MISC_REGISTERS_RESET_REG_2_XMAC_SOFT); 1761 MISC_REGISTERS_RESET_REG_2_XMAC_SOFT);
1675 usleep_range(1000, 1000); 1762 usleep_range(1000, 2000);
1676 1763
1677 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 1764 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
1678 MISC_REGISTERS_RESET_REG_2_XMAC_SOFT); 1765 MISC_REGISTERS_RESET_REG_2_XMAC_SOFT);
@@ -1730,6 +1817,14 @@ static int bnx2x_xmac_enable(struct link_params *params,
1730 /* update PFC */ 1817 /* update PFC */
1731 bnx2x_update_pfc_xmac(params, vars, 0); 1818 bnx2x_update_pfc_xmac(params, vars, 0);
1732 1819
1820 if (vars->eee_status & SHMEM_EEE_ADV_STATUS_MASK) {
1821 DP(NETIF_MSG_LINK, "Setting XMAC for EEE\n");
1822 REG_WR(bp, xmac_base + XMAC_REG_EEE_TIMERS_HI, 0x1380008);
1823 REG_WR(bp, xmac_base + XMAC_REG_EEE_CTRL, 0x1);
1824 } else {
1825 REG_WR(bp, xmac_base + XMAC_REG_EEE_CTRL, 0x0);
1826 }
1827
1733 /* Enable TX and RX */ 1828 /* Enable TX and RX */
1734 val = XMAC_CTRL_REG_TX_EN | XMAC_CTRL_REG_RX_EN; 1829 val = XMAC_CTRL_REG_TX_EN | XMAC_CTRL_REG_RX_EN;
1735 1830
@@ -1785,11 +1880,6 @@ static int bnx2x_emac_enable(struct link_params *params,
1785 bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_TX_MODE, 1880 bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
1786 EMAC_TX_MODE_RESET); 1881 EMAC_TX_MODE_RESET);
1787 1882
1788 if (CHIP_REV_IS_SLOW(bp)) {
1789 /* config GMII mode */
1790 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
1791 EMAC_WR(bp, EMAC_REG_EMAC_MODE, (val | EMAC_MODE_PORT_GMII));
1792 } else { /* ASIC */
1793 /* pause enable/disable */ 1883 /* pause enable/disable */
1794 bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_RX_MODE, 1884 bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_RX_MODE,
1795 EMAC_RX_MODE_FLOW_EN); 1885 EMAC_RX_MODE_FLOW_EN);
@@ -1812,7 +1902,6 @@ static int bnx2x_emac_enable(struct link_params *params,
1812 } else 1902 } else
1813 bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_TX_MODE, 1903 bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
1814 EMAC_TX_MODE_FLOW_EN); 1904 EMAC_TX_MODE_FLOW_EN);
1815 }
1816 1905
1817 /* KEEP_VLAN_TAG, promiscuous */ 1906 /* KEEP_VLAN_TAG, promiscuous */
1818 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_RX_MODE); 1907 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_RX_MODE);
@@ -1851,23 +1940,23 @@ static int bnx2x_emac_enable(struct link_params *params,
1851 val &= ~0x810; 1940 val &= ~0x810;
1852 EMAC_WR(bp, EMAC_REG_EMAC_MODE, val); 1941 EMAC_WR(bp, EMAC_REG_EMAC_MODE, val);
1853 1942
1854 /* enable emac */ 1943 /* Enable emac */
1855 REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 1); 1944 REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 1);
1856 1945
1857 /* enable emac for jumbo packets */ 1946 /* Enable emac for jumbo packets */
1858 EMAC_WR(bp, EMAC_REG_EMAC_RX_MTU_SIZE, 1947 EMAC_WR(bp, EMAC_REG_EMAC_RX_MTU_SIZE,
1859 (EMAC_RX_MTU_SIZE_JUMBO_ENA | 1948 (EMAC_RX_MTU_SIZE_JUMBO_ENA |
1860 (ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD))); 1949 (ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD)));
1861 1950
1862 /* strip CRC */ 1951 /* Strip CRC */
1863 REG_WR(bp, NIG_REG_NIG_INGRESS_EMAC0_NO_CRC + port*4, 0x1); 1952 REG_WR(bp, NIG_REG_NIG_INGRESS_EMAC0_NO_CRC + port*4, 0x1);
1864 1953
1865 /* disable the NIG in/out to the bmac */ 1954 /* Disable the NIG in/out to the bmac */
1866 REG_WR(bp, NIG_REG_BMAC0_IN_EN + port*4, 0x0); 1955 REG_WR(bp, NIG_REG_BMAC0_IN_EN + port*4, 0x0);
1867 REG_WR(bp, NIG_REG_BMAC0_PAUSE_OUT_EN + port*4, 0x0); 1956 REG_WR(bp, NIG_REG_BMAC0_PAUSE_OUT_EN + port*4, 0x0);
1868 REG_WR(bp, NIG_REG_BMAC0_OUT_EN + port*4, 0x0); 1957 REG_WR(bp, NIG_REG_BMAC0_OUT_EN + port*4, 0x0);
1869 1958
1870 /* enable the NIG in/out to the emac */ 1959 /* Enable the NIG in/out to the emac */
1871 REG_WR(bp, NIG_REG_EMAC0_IN_EN + port*4, 0x1); 1960 REG_WR(bp, NIG_REG_EMAC0_IN_EN + port*4, 0x1);
1872 val = 0; 1961 val = 0;
1873 if ((params->feature_config_flags & 1962 if ((params->feature_config_flags &
@@ -1902,7 +1991,7 @@ static void bnx2x_update_pfc_bmac1(struct link_params *params,
1902 wb_data[1] = 0; 1991 wb_data[1] = 0;
1903 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_CONTROL, wb_data, 2); 1992 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_CONTROL, wb_data, 2);
1904 1993
1905 /* tx control */ 1994 /* TX control */
1906 val = 0xc0; 1995 val = 0xc0;
1907 if (!(params->feature_config_flags & 1996 if (!(params->feature_config_flags &
1908 FEATURE_CONFIG_PFC_ENABLED) && 1997 FEATURE_CONFIG_PFC_ENABLED) &&
@@ -1962,7 +2051,7 @@ static void bnx2x_update_pfc_bmac2(struct link_params *params,
1962 wb_data[0] &= ~(1<<2); 2051 wb_data[0] &= ~(1<<2);
1963 } else { 2052 } else {
1964 DP(NETIF_MSG_LINK, "PFC is disabled\n"); 2053 DP(NETIF_MSG_LINK, "PFC is disabled\n");
1965 /* disable PFC RX & TX & STATS and set 8 COS */ 2054 /* Disable PFC RX & TX & STATS and set 8 COS */
1966 wb_data[0] = 0x8; 2055 wb_data[0] = 0x8;
1967 wb_data[1] = 0; 2056 wb_data[1] = 0;
1968 } 2057 }
@@ -2056,7 +2145,7 @@ static int bnx2x_pfc_brb_get_config_params(
2056 PFC_E2_BRB_MAC_FULL_XOFF_THR_PAUSE; 2145 PFC_E2_BRB_MAC_FULL_XOFF_THR_PAUSE;
2057 config_val->pauseable_th.full_xon = 2146 config_val->pauseable_th.full_xon =
2058 PFC_E2_BRB_MAC_FULL_XON_THR_PAUSE; 2147 PFC_E2_BRB_MAC_FULL_XON_THR_PAUSE;
2059 /* non pause able*/ 2148 /* Non pause able*/
2060 config_val->non_pauseable_th.pause_xoff = 2149 config_val->non_pauseable_th.pause_xoff =
2061 PFC_E2_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE; 2150 PFC_E2_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
2062 config_val->non_pauseable_th.pause_xon = 2151 config_val->non_pauseable_th.pause_xon =
@@ -2084,7 +2173,7 @@ static int bnx2x_pfc_brb_get_config_params(
2084 PFC_E3A0_BRB_MAC_FULL_XOFF_THR_PAUSE; 2173 PFC_E3A0_BRB_MAC_FULL_XOFF_THR_PAUSE;
2085 config_val->pauseable_th.full_xon = 2174 config_val->pauseable_th.full_xon =
2086 PFC_E3A0_BRB_MAC_FULL_XON_THR_PAUSE; 2175 PFC_E3A0_BRB_MAC_FULL_XON_THR_PAUSE;
2087 /* non pause able*/ 2176 /* Non pause able*/
2088 config_val->non_pauseable_th.pause_xoff = 2177 config_val->non_pauseable_th.pause_xoff =
2089 PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE; 2178 PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
2090 config_val->non_pauseable_th.pause_xon = 2179 config_val->non_pauseable_th.pause_xon =
@@ -2114,7 +2203,7 @@ static int bnx2x_pfc_brb_get_config_params(
2114 PFC_E3B0_4P_BRB_MAC_FULL_XOFF_THR_PAUSE; 2203 PFC_E3B0_4P_BRB_MAC_FULL_XOFF_THR_PAUSE;
2115 config_val->pauseable_th.full_xon = 2204 config_val->pauseable_th.full_xon =
2116 PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_PAUSE; 2205 PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_PAUSE;
2117 /* non pause able*/ 2206 /* Non pause able*/
2118 config_val->non_pauseable_th.pause_xoff = 2207 config_val->non_pauseable_th.pause_xoff =
2119 PFC_E3B0_4P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE; 2208 PFC_E3B0_4P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
2120 config_val->non_pauseable_th.pause_xon = 2209 config_val->non_pauseable_th.pause_xon =
@@ -2132,7 +2221,7 @@ static int bnx2x_pfc_brb_get_config_params(
2132 PFC_E3B0_2P_BRB_MAC_FULL_XOFF_THR_PAUSE; 2221 PFC_E3B0_2P_BRB_MAC_FULL_XOFF_THR_PAUSE;
2133 config_val->pauseable_th.full_xon = 2222 config_val->pauseable_th.full_xon =
2134 PFC_E3B0_2P_BRB_MAC_FULL_XON_THR_PAUSE; 2223 PFC_E3B0_2P_BRB_MAC_FULL_XON_THR_PAUSE;
2135 /* non pause able*/ 2224 /* Non pause able*/
2136 config_val->non_pauseable_th.pause_xoff = 2225 config_val->non_pauseable_th.pause_xoff =
2137 PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE; 2226 PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
2138 config_val->non_pauseable_th.pause_xon = 2227 config_val->non_pauseable_th.pause_xon =
@@ -2189,7 +2278,7 @@ static void bnx2x_pfc_brb_get_e3b0_config_params(
2189 2278
2190 if (pfc_params->cos0_pauseable != 2279 if (pfc_params->cos0_pauseable !=
2191 pfc_params->cos1_pauseable) { 2280 pfc_params->cos1_pauseable) {
2192 /* nonpauseable= Lossy + pauseable = Lossless*/ 2281 /* Nonpauseable= Lossy + pauseable = Lossless*/
2193 e3b0_val->lb_guarantied = 2282 e3b0_val->lb_guarantied =
2194 PFC_E3B0_2P_MIX_PAUSE_LB_GUART; 2283 PFC_E3B0_2P_MIX_PAUSE_LB_GUART;
2195 e3b0_val->mac_0_class_t_guarantied = 2284 e3b0_val->mac_0_class_t_guarantied =
@@ -2388,9 +2477,9 @@ static int bnx2x_update_pfc_brb(struct link_params *params,
2388* This function is needed because NIG ARB_CREDIT_WEIGHT_X are 2477* This function is needed because NIG ARB_CREDIT_WEIGHT_X are
2389* not continues and ARB_CREDIT_WEIGHT_0 + offset is suitable. 2478* not continues and ARB_CREDIT_WEIGHT_0 + offset is suitable.
2390******************************************************************************/ 2479******************************************************************************/
2391int bnx2x_pfc_nig_rx_priority_mask(struct bnx2x *bp, 2480static int bnx2x_pfc_nig_rx_priority_mask(struct bnx2x *bp,
2392 u8 cos_entry, 2481 u8 cos_entry,
2393 u32 priority_mask, u8 port) 2482 u32 priority_mask, u8 port)
2394{ 2483{
2395 u32 nig_reg_rx_priority_mask_add = 0; 2484 u32 nig_reg_rx_priority_mask_add = 0;
2396 2485
@@ -2440,6 +2529,16 @@ static void bnx2x_update_mng(struct link_params *params, u32 link_status)
2440 port_mb[params->port].link_status), link_status); 2529 port_mb[params->port].link_status), link_status);
2441} 2530}
2442 2531
2532static void bnx2x_update_mng_eee(struct link_params *params, u32 eee_status)
2533{
2534 struct bnx2x *bp = params->bp;
2535
2536 if (bnx2x_eee_has_cap(params))
2537 REG_WR(bp, params->shmem2_base +
2538 offsetof(struct shmem2_region,
2539 eee_status[params->port]), eee_status);
2540}
2541
2443static void bnx2x_update_pfc_nig(struct link_params *params, 2542static void bnx2x_update_pfc_nig(struct link_params *params,
2444 struct link_vars *vars, 2543 struct link_vars *vars,
2445 struct bnx2x_nig_brb_pfc_port_params *nig_params) 2544 struct bnx2x_nig_brb_pfc_port_params *nig_params)
@@ -2507,7 +2606,7 @@ static void bnx2x_update_pfc_nig(struct link_params *params,
2507 REG_WR(bp, port ? NIG_REG_LLFC_EGRESS_SRC_ENABLE_1 : 2606 REG_WR(bp, port ? NIG_REG_LLFC_EGRESS_SRC_ENABLE_1 :
2508 NIG_REG_LLFC_EGRESS_SRC_ENABLE_0, 0x7); 2607 NIG_REG_LLFC_EGRESS_SRC_ENABLE_0, 0x7);
2509 2608
2510 /* output enable for RX_XCM # IF */ 2609 /* Output enable for RX_XCM # IF */
2511 REG_WR(bp, port ? NIG_REG_XCM1_OUT_EN : 2610 REG_WR(bp, port ? NIG_REG_XCM1_OUT_EN :
2512 NIG_REG_XCM0_OUT_EN, xcm_out_en); 2611 NIG_REG_XCM0_OUT_EN, xcm_out_en);
2513 2612
@@ -2556,10 +2655,10 @@ int bnx2x_update_pfc(struct link_params *params,
2556 2655
2557 bnx2x_update_mng(params, vars->link_status); 2656 bnx2x_update_mng(params, vars->link_status);
2558 2657
2559 /* update NIG params */ 2658 /* Update NIG params */
2560 bnx2x_update_pfc_nig(params, vars, pfc_params); 2659 bnx2x_update_pfc_nig(params, vars, pfc_params);
2561 2660
2562 /* update BRB params */ 2661 /* Update BRB params */
2563 bnx2x_status = bnx2x_update_pfc_brb(params, vars, pfc_params); 2662 bnx2x_status = bnx2x_update_pfc_brb(params, vars, pfc_params);
2564 if (bnx2x_status) 2663 if (bnx2x_status)
2565 return bnx2x_status; 2664 return bnx2x_status;
@@ -2614,7 +2713,7 @@ static int bnx2x_bmac1_enable(struct link_params *params,
2614 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_XGXS_CONTROL, 2713 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_XGXS_CONTROL,
2615 wb_data, 2); 2714 wb_data, 2);
2616 2715
2617 /* tx MAC SA */ 2716 /* TX MAC SA */
2618 wb_data[0] = ((params->mac_addr[2] << 24) | 2717 wb_data[0] = ((params->mac_addr[2] << 24) |
2619 (params->mac_addr[3] << 16) | 2718 (params->mac_addr[3] << 16) |
2620 (params->mac_addr[4] << 8) | 2719 (params->mac_addr[4] << 8) |
@@ -2623,7 +2722,7 @@ static int bnx2x_bmac1_enable(struct link_params *params,
2623 params->mac_addr[1]); 2722 params->mac_addr[1]);
2624 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_SOURCE_ADDR, wb_data, 2); 2723 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_SOURCE_ADDR, wb_data, 2);
2625 2724
2626 /* mac control */ 2725 /* MAC control */
2627 val = 0x3; 2726 val = 0x3;
2628 if (is_lb) { 2727 if (is_lb) {
2629 val |= 0x4; 2728 val |= 0x4;
@@ -2633,24 +2732,24 @@ static int bnx2x_bmac1_enable(struct link_params *params,
2633 wb_data[1] = 0; 2732 wb_data[1] = 0;
2634 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL, wb_data, 2); 2733 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL, wb_data, 2);
2635 2734
2636 /* set rx mtu */ 2735 /* Set rx mtu */
2637 wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD; 2736 wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
2638 wb_data[1] = 0; 2737 wb_data[1] = 0;
2639 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_MAX_SIZE, wb_data, 2); 2738 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_MAX_SIZE, wb_data, 2);
2640 2739
2641 bnx2x_update_pfc_bmac1(params, vars); 2740 bnx2x_update_pfc_bmac1(params, vars);
2642 2741
2643 /* set tx mtu */ 2742 /* Set tx mtu */
2644 wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD; 2743 wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
2645 wb_data[1] = 0; 2744 wb_data[1] = 0;
2646 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_MAX_SIZE, wb_data, 2); 2745 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_MAX_SIZE, wb_data, 2);
2647 2746
2648 /* set cnt max size */ 2747 /* Set cnt max size */
2649 wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD; 2748 wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
2650 wb_data[1] = 0; 2749 wb_data[1] = 0;
2651 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_CNT_MAX_SIZE, wb_data, 2); 2750 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_CNT_MAX_SIZE, wb_data, 2);
2652 2751
2653 /* configure safc */ 2752 /* Configure SAFC */
2654 wb_data[0] = 0x1000200; 2753 wb_data[0] = 0x1000200;
2655 wb_data[1] = 0; 2754 wb_data[1] = 0;
2656 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_LLFC_MSG_FLDS, 2755 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_LLFC_MSG_FLDS,
@@ -2684,7 +2783,7 @@ static int bnx2x_bmac2_enable(struct link_params *params,
2684 2783
2685 udelay(30); 2784 udelay(30);
2686 2785
2687 /* tx MAC SA */ 2786 /* TX MAC SA */
2688 wb_data[0] = ((params->mac_addr[2] << 24) | 2787 wb_data[0] = ((params->mac_addr[2] << 24) |
2689 (params->mac_addr[3] << 16) | 2788 (params->mac_addr[3] << 16) |
2690 (params->mac_addr[4] << 8) | 2789 (params->mac_addr[4] << 8) |
@@ -2703,18 +2802,18 @@ static int bnx2x_bmac2_enable(struct link_params *params,
2703 wb_data, 2); 2802 wb_data, 2);
2704 udelay(30); 2803 udelay(30);
2705 2804
2706 /* set rx mtu */ 2805 /* Set RX MTU */
2707 wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD; 2806 wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
2708 wb_data[1] = 0; 2807 wb_data[1] = 0;
2709 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_MAX_SIZE, wb_data, 2); 2808 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_MAX_SIZE, wb_data, 2);
2710 udelay(30); 2809 udelay(30);
2711 2810
2712 /* set tx mtu */ 2811 /* Set TX MTU */
2713 wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD; 2812 wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
2714 wb_data[1] = 0; 2813 wb_data[1] = 0;
2715 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_MAX_SIZE, wb_data, 2); 2814 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_MAX_SIZE, wb_data, 2);
2716 udelay(30); 2815 udelay(30);
2717 /* set cnt max size */ 2816 /* Set cnt max size */
2718 wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD - 2; 2817 wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD - 2;
2719 wb_data[1] = 0; 2818 wb_data[1] = 0;
2720 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_CNT_MAX_SIZE, wb_data, 2); 2819 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_CNT_MAX_SIZE, wb_data, 2);
@@ -2732,15 +2831,15 @@ static int bnx2x_bmac_enable(struct link_params *params,
2732 u8 port = params->port; 2831 u8 port = params->port;
2733 struct bnx2x *bp = params->bp; 2832 struct bnx2x *bp = params->bp;
2734 u32 val; 2833 u32 val;
2735 /* reset and unreset the BigMac */ 2834 /* Reset and unreset the BigMac */
2736 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 2835 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
2737 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port)); 2836 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
2738 msleep(1); 2837 usleep_range(1000, 2000);
2739 2838
2740 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 2839 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
2741 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port)); 2840 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
2742 2841
2743 /* enable access for bmac registers */ 2842 /* Enable access for bmac registers */
2744 REG_WR(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x1); 2843 REG_WR(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x1);
2745 2844
2746 /* Enable BMAC according to BMAC type*/ 2845 /* Enable BMAC according to BMAC type*/
@@ -2798,7 +2897,7 @@ static void bnx2x_bmac_rx_disable(struct bnx2x *bp, u8 port)
2798 BIGMAC_REGISTER_BMAC_CONTROL, 2897 BIGMAC_REGISTER_BMAC_CONTROL,
2799 wb_data, 2); 2898 wb_data, 2);
2800 } 2899 }
2801 msleep(1); 2900 usleep_range(1000, 2000);
2802 } 2901 }
2803} 2902}
2804 2903
@@ -2810,17 +2909,16 @@ static int bnx2x_pbf_update(struct link_params *params, u32 flow_ctrl,
2810 u32 init_crd, crd; 2909 u32 init_crd, crd;
2811 u32 count = 1000; 2910 u32 count = 1000;
2812 2911
2813 /* disable port */ 2912 /* Disable port */
2814 REG_WR(bp, PBF_REG_DISABLE_NEW_TASK_PROC_P0 + port*4, 0x1); 2913 REG_WR(bp, PBF_REG_DISABLE_NEW_TASK_PROC_P0 + port*4, 0x1);
2815 2914
2816 /* wait for init credit */ 2915 /* Wait for init credit */
2817 init_crd = REG_RD(bp, PBF_REG_P0_INIT_CRD + port*4); 2916 init_crd = REG_RD(bp, PBF_REG_P0_INIT_CRD + port*4);
2818 crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8); 2917 crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8);
2819 DP(NETIF_MSG_LINK, "init_crd 0x%x crd 0x%x\n", init_crd, crd); 2918 DP(NETIF_MSG_LINK, "init_crd 0x%x crd 0x%x\n", init_crd, crd);
2820 2919
2821 while ((init_crd != crd) && count) { 2920 while ((init_crd != crd) && count) {
2822 msleep(5); 2921 usleep_range(5000, 10000);
2823
2824 crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8); 2922 crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8);
2825 count--; 2923 count--;
2826 } 2924 }
@@ -2837,18 +2935,18 @@ static int bnx2x_pbf_update(struct link_params *params, u32 flow_ctrl,
2837 line_speed == SPEED_1000 || 2935 line_speed == SPEED_1000 ||
2838 line_speed == SPEED_2500) { 2936 line_speed == SPEED_2500) {
2839 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 1); 2937 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 1);
2840 /* update threshold */ 2938 /* Update threshold */
2841 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, 0); 2939 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, 0);
2842 /* update init credit */ 2940 /* Update init credit */
2843 init_crd = 778; /* (800-18-4) */ 2941 init_crd = 778; /* (800-18-4) */
2844 2942
2845 } else { 2943 } else {
2846 u32 thresh = (ETH_MAX_JUMBO_PACKET_SIZE + 2944 u32 thresh = (ETH_MAX_JUMBO_PACKET_SIZE +
2847 ETH_OVREHEAD)/16; 2945 ETH_OVREHEAD)/16;
2848 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0); 2946 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
2849 /* update threshold */ 2947 /* Update threshold */
2850 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, thresh); 2948 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, thresh);
2851 /* update init credit */ 2949 /* Update init credit */
2852 switch (line_speed) { 2950 switch (line_speed) {
2853 case SPEED_10000: 2951 case SPEED_10000:
2854 init_crd = thresh + 553 - 22; 2952 init_crd = thresh + 553 - 22;
@@ -2863,12 +2961,12 @@ static int bnx2x_pbf_update(struct link_params *params, u32 flow_ctrl,
2863 DP(NETIF_MSG_LINK, "PBF updated to speed %d credit %d\n", 2961 DP(NETIF_MSG_LINK, "PBF updated to speed %d credit %d\n",
2864 line_speed, init_crd); 2962 line_speed, init_crd);
2865 2963
2866 /* probe the credit changes */ 2964 /* Probe the credit changes */
2867 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0x1); 2965 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0x1);
2868 msleep(5); 2966 usleep_range(5000, 10000);
2869 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0x0); 2967 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0x0);
2870 2968
2871 /* enable port */ 2969 /* Enable port */
2872 REG_WR(bp, PBF_REG_DISABLE_NEW_TASK_PROC_P0 + port*4, 0x0); 2970 REG_WR(bp, PBF_REG_DISABLE_NEW_TASK_PROC_P0 + port*4, 0x0);
2873 return 0; 2971 return 0;
2874} 2972}
@@ -2935,7 +3033,7 @@ static int bnx2x_cl22_write(struct bnx2x *bp,
2935 REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, 3033 REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE,
2936 mode & ~EMAC_MDIO_MODE_CLAUSE_45); 3034 mode & ~EMAC_MDIO_MODE_CLAUSE_45);
2937 3035
2938 /* address */ 3036 /* Address */
2939 tmp = ((phy->addr << 21) | (reg << 16) | val | 3037 tmp = ((phy->addr << 21) | (reg << 16) | val |
2940 EMAC_MDIO_COMM_COMMAND_WRITE_22 | 3038 EMAC_MDIO_COMM_COMMAND_WRITE_22 |
2941 EMAC_MDIO_COMM_START_BUSY); 3039 EMAC_MDIO_COMM_START_BUSY);
@@ -2971,7 +3069,7 @@ static int bnx2x_cl22_read(struct bnx2x *bp,
2971 REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, 3069 REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE,
2972 mode & ~EMAC_MDIO_MODE_CLAUSE_45); 3070 mode & ~EMAC_MDIO_MODE_CLAUSE_45);
2973 3071
2974 /* address */ 3072 /* Address */
2975 val = ((phy->addr << 21) | (reg << 16) | 3073 val = ((phy->addr << 21) | (reg << 16) |
2976 EMAC_MDIO_COMM_COMMAND_READ_22 | 3074 EMAC_MDIO_COMM_COMMAND_READ_22 |
2977 EMAC_MDIO_COMM_START_BUSY); 3075 EMAC_MDIO_COMM_START_BUSY);
@@ -3009,7 +3107,7 @@ static int bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy,
3009 if (phy->flags & FLAGS_MDC_MDIO_WA_B0) 3107 if (phy->flags & FLAGS_MDC_MDIO_WA_B0)
3010 bnx2x_bits_en(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS, 3108 bnx2x_bits_en(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS,
3011 EMAC_MDIO_STATUS_10MB); 3109 EMAC_MDIO_STATUS_10MB);
3012 /* address */ 3110 /* Address */
3013 val = ((phy->addr << 21) | (devad << 16) | reg | 3111 val = ((phy->addr << 21) | (devad << 16) | reg |
3014 EMAC_MDIO_COMM_COMMAND_ADDRESS | 3112 EMAC_MDIO_COMM_COMMAND_ADDRESS |
3015 EMAC_MDIO_COMM_START_BUSY); 3113 EMAC_MDIO_COMM_START_BUSY);
@@ -3030,7 +3128,7 @@ static int bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy,
3030 *ret_val = 0; 3128 *ret_val = 0;
3031 rc = -EFAULT; 3129 rc = -EFAULT;
3032 } else { 3130 } else {
3033 /* data */ 3131 /* Data */
3034 val = ((phy->addr << 21) | (devad << 16) | 3132 val = ((phy->addr << 21) | (devad << 16) |
3035 EMAC_MDIO_COMM_COMMAND_READ_45 | 3133 EMAC_MDIO_COMM_COMMAND_READ_45 |
3036 EMAC_MDIO_COMM_START_BUSY); 3134 EMAC_MDIO_COMM_START_BUSY);
@@ -3078,7 +3176,7 @@ static int bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
3078 bnx2x_bits_en(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS, 3176 bnx2x_bits_en(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS,
3079 EMAC_MDIO_STATUS_10MB); 3177 EMAC_MDIO_STATUS_10MB);
3080 3178
3081 /* address */ 3179 /* Address */
3082 tmp = ((phy->addr << 21) | (devad << 16) | reg | 3180 tmp = ((phy->addr << 21) | (devad << 16) | reg |
3083 EMAC_MDIO_COMM_COMMAND_ADDRESS | 3181 EMAC_MDIO_COMM_COMMAND_ADDRESS |
3084 EMAC_MDIO_COMM_START_BUSY); 3182 EMAC_MDIO_COMM_START_BUSY);
@@ -3098,7 +3196,7 @@ static int bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
3098 netdev_err(bp->dev, "MDC/MDIO access timeout\n"); 3196 netdev_err(bp->dev, "MDC/MDIO access timeout\n");
3099 rc = -EFAULT; 3197 rc = -EFAULT;
3100 } else { 3198 } else {
3101 /* data */ 3199 /* Data */
3102 tmp = ((phy->addr << 21) | (devad << 16) | val | 3200 tmp = ((phy->addr << 21) | (devad << 16) | val |
3103 EMAC_MDIO_COMM_COMMAND_WRITE_45 | 3201 EMAC_MDIO_COMM_COMMAND_WRITE_45 |
3104 EMAC_MDIO_COMM_START_BUSY); 3202 EMAC_MDIO_COMM_START_BUSY);
@@ -3188,23 +3286,23 @@ static int bnx2x_bsc_read(struct link_params *params,
3188 3286
3189 xfer_cnt = 16 - lc_addr; 3287 xfer_cnt = 16 - lc_addr;
3190 3288
3191 /* enable the engine */ 3289 /* Enable the engine */
3192 val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND); 3290 val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND);
3193 val |= MCPR_IMC_COMMAND_ENABLE; 3291 val |= MCPR_IMC_COMMAND_ENABLE;
3194 REG_WR(bp, MCP_REG_MCPR_IMC_COMMAND, val); 3292 REG_WR(bp, MCP_REG_MCPR_IMC_COMMAND, val);
3195 3293
3196 /* program slave device ID */ 3294 /* Program slave device ID */
3197 val = (sl_devid << 16) | sl_addr; 3295 val = (sl_devid << 16) | sl_addr;
3198 REG_WR(bp, MCP_REG_MCPR_IMC_SLAVE_CONTROL, val); 3296 REG_WR(bp, MCP_REG_MCPR_IMC_SLAVE_CONTROL, val);
3199 3297
3200 /* start xfer with 0 byte to update the address pointer ???*/ 3298 /* Start xfer with 0 byte to update the address pointer ???*/
3201 val = (MCPR_IMC_COMMAND_ENABLE) | 3299 val = (MCPR_IMC_COMMAND_ENABLE) |
3202 (MCPR_IMC_COMMAND_WRITE_OP << 3300 (MCPR_IMC_COMMAND_WRITE_OP <<
3203 MCPR_IMC_COMMAND_OPERATION_BITSHIFT) | 3301 MCPR_IMC_COMMAND_OPERATION_BITSHIFT) |
3204 (lc_addr << MCPR_IMC_COMMAND_TRANSFER_ADDRESS_BITSHIFT) | (0); 3302 (lc_addr << MCPR_IMC_COMMAND_TRANSFER_ADDRESS_BITSHIFT) | (0);
3205 REG_WR(bp, MCP_REG_MCPR_IMC_COMMAND, val); 3303 REG_WR(bp, MCP_REG_MCPR_IMC_COMMAND, val);
3206 3304
3207 /* poll for completion */ 3305 /* Poll for completion */
3208 i = 0; 3306 i = 0;
3209 val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND); 3307 val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND);
3210 while (((val >> MCPR_IMC_COMMAND_IMC_STATUS_BITSHIFT) & 0x3) != 1) { 3308 while (((val >> MCPR_IMC_COMMAND_IMC_STATUS_BITSHIFT) & 0x3) != 1) {
@@ -3220,7 +3318,7 @@ static int bnx2x_bsc_read(struct link_params *params,
3220 if (rc == -EFAULT) 3318 if (rc == -EFAULT)
3221 return rc; 3319 return rc;
3222 3320
3223 /* start xfer with read op */ 3321 /* Start xfer with read op */
3224 val = (MCPR_IMC_COMMAND_ENABLE) | 3322 val = (MCPR_IMC_COMMAND_ENABLE) |
3225 (MCPR_IMC_COMMAND_READ_OP << 3323 (MCPR_IMC_COMMAND_READ_OP <<
3226 MCPR_IMC_COMMAND_OPERATION_BITSHIFT) | 3324 MCPR_IMC_COMMAND_OPERATION_BITSHIFT) |
@@ -3228,7 +3326,7 @@ static int bnx2x_bsc_read(struct link_params *params,
3228 (xfer_cnt); 3326 (xfer_cnt);
3229 REG_WR(bp, MCP_REG_MCPR_IMC_COMMAND, val); 3327 REG_WR(bp, MCP_REG_MCPR_IMC_COMMAND, val);
3230 3328
3231 /* poll for completion */ 3329 /* Poll for completion */
3232 i = 0; 3330 i = 0;
3233 val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND); 3331 val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND);
3234 while (((val >> MCPR_IMC_COMMAND_IMC_STATUS_BITSHIFT) & 0x3) != 1) { 3332 while (((val >> MCPR_IMC_COMMAND_IMC_STATUS_BITSHIFT) & 0x3) != 1) {
@@ -3331,7 +3429,7 @@ static u8 bnx2x_get_warpcore_lane(struct bnx2x_phy *phy,
3331 port = port ^ 1; 3429 port = port ^ 1;
3332 3430
3333 lane = (port<<1) + path; 3431 lane = (port<<1) + path;
3334 } else { /* two port mode - no port swap */ 3432 } else { /* Two port mode - no port swap */
3335 3433
3336 /* Figure out path swap value */ 3434 /* Figure out path swap value */
3337 path_swap_ovr = 3435 path_swap_ovr =
@@ -3409,7 +3507,7 @@ static void bnx2x_serdes_deassert(struct bnx2x *bp, u8 port)
3409 3507
3410 val = SERDES_RESET_BITS << (port*16); 3508 val = SERDES_RESET_BITS << (port*16);
3411 3509
3412 /* reset and unreset the SerDes/XGXS */ 3510 /* Reset and unreset the SerDes/XGXS */
3413 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR, val); 3511 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR, val);
3414 udelay(500); 3512 udelay(500);
3415 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_SET, val); 3513 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_SET, val);
@@ -3430,7 +3528,7 @@ static void bnx2x_xgxs_deassert(struct link_params *params)
3430 3528
3431 val = XGXS_RESET_BITS << (port*16); 3529 val = XGXS_RESET_BITS << (port*16);
3432 3530
3433 /* reset and unreset the SerDes/XGXS */ 3531 /* Reset and unreset the SerDes/XGXS */
3434 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR, val); 3532 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR, val);
3435 udelay(500); 3533 udelay(500);
3436 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_SET, val); 3534 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_SET, val);
@@ -3522,7 +3620,7 @@ static void bnx2x_ext_phy_set_pause(struct link_params *params,
3522{ 3620{
3523 u16 val; 3621 u16 val;
3524 struct bnx2x *bp = params->bp; 3622 struct bnx2x *bp = params->bp;
3525 /* read modify write pause advertizing */ 3623 /* Read modify write pause advertizing */
3526 bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV_PAUSE, &val); 3624 bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV_PAUSE, &val);
3527 3625
3528 val &= ~MDIO_AN_REG_ADV_PAUSE_BOTH; 3626 val &= ~MDIO_AN_REG_ADV_PAUSE_BOTH;
@@ -3657,44 +3755,35 @@ static u8 bnx2x_ext_phy_resolve_fc(struct bnx2x_phy *phy,
3657static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy, 3755static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
3658 struct link_params *params, 3756 struct link_params *params,
3659 struct link_vars *vars) { 3757 struct link_vars *vars) {
3660 u16 val16 = 0, lane, bam37 = 0; 3758 u16 val16 = 0, lane, i;
3661 struct bnx2x *bp = params->bp; 3759 struct bnx2x *bp = params->bp;
3760 static struct bnx2x_reg_set reg_set[] = {
3761 {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x7},
3762 {MDIO_AN_DEVAD, MDIO_WC_REG_PAR_DET_10G_CTRL, 0},
3763 {MDIO_WC_DEVAD, MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, 0},
3764 {MDIO_WC_DEVAD, MDIO_WC_REG_XGXSBLK1_LANECTRL0, 0xff},
3765 {MDIO_WC_DEVAD, MDIO_WC_REG_XGXSBLK1_LANECTRL1, 0x5555},
3766 {MDIO_PMA_DEVAD, MDIO_WC_REG_IEEE0BLK_AUTONEGNP, 0x0},
3767 {MDIO_WC_DEVAD, MDIO_WC_REG_RX66_CONTROL, 0x7415},
3768 {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_MISC2, 0x6190},
3769 /* Disable Autoneg: re-enable it after adv is done. */
3770 {MDIO_AN_DEVAD, MDIO_WC_REG_IEEE0BLK_MIICNTL, 0}
3771 };
3662 DP(NETIF_MSG_LINK, "Enable Auto Negotiation for KR\n"); 3772 DP(NETIF_MSG_LINK, "Enable Auto Negotiation for KR\n");
3663 /* Set to default registers that may be overriden by 10G force */ 3773 /* Set to default registers that may be overriden by 10G force */
3664 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, 3774 for (i = 0; i < sizeof(reg_set)/sizeof(struct bnx2x_reg_set); i++)
3665 MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x7); 3775 bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg,
3666 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, 3776 reg_set[i].val);
3667 MDIO_WC_REG_PAR_DET_10G_CTRL, 0);
3668 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3669 MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, 0);
3670 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3671 MDIO_WC_REG_XGXSBLK1_LANECTRL0, 0xff);
3672 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3673 MDIO_WC_REG_XGXSBLK1_LANECTRL1, 0x5555);
3674 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD,
3675 MDIO_WC_REG_IEEE0BLK_AUTONEGNP, 0x0);
3676 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3677 MDIO_WC_REG_RX66_CONTROL, 0x7415);
3678 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3679 MDIO_WC_REG_SERDESDIGITAL_MISC2, 0x6190);
3680 /* Disable Autoneg: re-enable it after adv is done. */
3681 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
3682 MDIO_WC_REG_IEEE0BLK_MIICNTL, 0);
3683 3777
3684 /* Check adding advertisement for 1G KX */ 3778 /* Check adding advertisement for 1G KX */
3685 if (((vars->line_speed == SPEED_AUTO_NEG) && 3779 if (((vars->line_speed == SPEED_AUTO_NEG) &&
3686 (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) || 3780 (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) ||
3687 (vars->line_speed == SPEED_1000)) { 3781 (vars->line_speed == SPEED_1000)) {
3688 u16 sd_digital; 3782 u32 addr = MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2;
3689 val16 |= (1<<5); 3783 val16 |= (1<<5);
3690 3784
3691 /* Enable CL37 1G Parallel Detect */ 3785 /* Enable CL37 1G Parallel Detect */
3692 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, 3786 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, addr, 0x1);
3693 MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, &sd_digital);
3694 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3695 MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2,
3696 (sd_digital | 0x1));
3697
3698 DP(NETIF_MSG_LINK, "Advertize 1G\n"); 3787 DP(NETIF_MSG_LINK, "Advertize 1G\n");
3699 } 3788 }
3700 if (((vars->line_speed == SPEED_AUTO_NEG) && 3789 if (((vars->line_speed == SPEED_AUTO_NEG) &&
@@ -3704,7 +3793,7 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
3704 val16 |= (1<<7); 3793 val16 |= (1<<7);
3705 /* Enable 10G Parallel Detect */ 3794 /* Enable 10G Parallel Detect */
3706 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, 3795 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
3707 MDIO_WC_REG_PAR_DET_10G_CTRL, 1); 3796 MDIO_WC_REG_PAR_DET_10G_CTRL, 1);
3708 3797
3709 DP(NETIF_MSG_LINK, "Advertize 10G\n"); 3798 DP(NETIF_MSG_LINK, "Advertize 10G\n");
3710 } 3799 }
@@ -3738,10 +3827,9 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
3738 offsetof(struct shmem_region, dev_info. 3827 offsetof(struct shmem_region, dev_info.
3739 port_hw_config[params->port].default_cfg)) & 3828 port_hw_config[params->port].default_cfg)) &
3740 PORT_HW_CFG_ENABLE_BAM_ON_KR_ENABLED) { 3829 PORT_HW_CFG_ENABLE_BAM_ON_KR_ENABLED) {
3741 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, 3830 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
3742 MDIO_WC_REG_DIGITAL6_MP5_NEXTPAGECTRL, &bam37); 3831 MDIO_WC_REG_DIGITAL6_MP5_NEXTPAGECTRL,
3743 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, 3832 1);
3744 MDIO_WC_REG_DIGITAL6_MP5_NEXTPAGECTRL, bam37 | 1);
3745 DP(NETIF_MSG_LINK, "Enable CL37 BAM on KR\n"); 3833 DP(NETIF_MSG_LINK, "Enable CL37 BAM on KR\n");
3746 } 3834 }
3747 3835
@@ -3755,11 +3843,8 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
3755 DP(NETIF_MSG_LINK, "Enable AN KR work-around\n"); 3843 DP(NETIF_MSG_LINK, "Enable AN KR work-around\n");
3756 vars->rx_tx_asic_rst = MAX_KR_LINK_RETRY; 3844 vars->rx_tx_asic_rst = MAX_KR_LINK_RETRY;
3757 } 3845 }
3758 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, 3846 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
3759 MDIO_WC_REG_DIGITAL5_MISC7, &val16); 3847 MDIO_WC_REG_DIGITAL5_MISC7, 0x100);
3760
3761 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3762 MDIO_WC_REG_DIGITAL5_MISC7, val16 | 0x100);
3763 3848
3764 /* Over 1G - AN local device user page 1 */ 3849 /* Over 1G - AN local device user page 1 */
3765 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, 3850 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
@@ -3776,50 +3861,35 @@ static void bnx2x_warpcore_set_10G_KR(struct bnx2x_phy *phy,
3776 struct link_vars *vars) 3861 struct link_vars *vars)
3777{ 3862{
3778 struct bnx2x *bp = params->bp; 3863 struct bnx2x *bp = params->bp;
3779 u16 val; 3864 u16 i;
3780 3865 static struct bnx2x_reg_set reg_set[] = {
3781 /* Disable Autoneg */ 3866 /* Disable Autoneg */
3782 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, 3867 {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x7},
3783 MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x7); 3868 {MDIO_AN_DEVAD, MDIO_WC_REG_PAR_DET_10G_CTRL, 0},
3784 3869 {MDIO_WC_DEVAD, MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL,
3785 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, 3870 0x3f00},
3786 MDIO_WC_REG_PAR_DET_10G_CTRL, 0); 3871 {MDIO_AN_DEVAD, MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1, 0},
3787 3872 {MDIO_AN_DEVAD, MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x0},
3788 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, 3873 {MDIO_WC_DEVAD, MDIO_WC_REG_DIGITAL3_UP1, 0x1},
3789 MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, 0x3f00); 3874 {MDIO_WC_DEVAD, MDIO_WC_REG_DIGITAL5_MISC7, 0xa},
3790 3875 /* Disable CL36 PCS Tx */
3791 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, 3876 {MDIO_WC_DEVAD, MDIO_WC_REG_XGXSBLK1_LANECTRL0, 0x0},
3792 MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1, 0); 3877 /* Double Wide Single Data Rate @ pll rate */
3793 3878 {MDIO_WC_DEVAD, MDIO_WC_REG_XGXSBLK1_LANECTRL1, 0xFFFF},
3794 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, 3879 /* Leave cl72 training enable, needed for KR */
3795 MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x0); 3880 {MDIO_PMA_DEVAD,
3796
3797 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3798 MDIO_WC_REG_DIGITAL3_UP1, 0x1);
3799
3800 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3801 MDIO_WC_REG_DIGITAL5_MISC7, 0xa);
3802
3803 /* Disable CL36 PCS Tx */
3804 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3805 MDIO_WC_REG_XGXSBLK1_LANECTRL0, 0x0);
3806
3807 /* Double Wide Single Data Rate @ pll rate */
3808 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3809 MDIO_WC_REG_XGXSBLK1_LANECTRL1, 0xFFFF);
3810
3811 /* Leave cl72 training enable, needed for KR */
3812 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD,
3813 MDIO_WC_REG_PMD_IEEE9BLK_TENGBASE_KR_PMD_CONTROL_REGISTER_150, 3881 MDIO_WC_REG_PMD_IEEE9BLK_TENGBASE_KR_PMD_CONTROL_REGISTER_150,
3814 0x2); 3882 0x2}
3883 };
3884
3885 for (i = 0; i < sizeof(reg_set)/sizeof(struct bnx2x_reg_set); i++)
3886 bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg,
3887 reg_set[i].val);
3815 3888
3816 /* Leave CL72 enabled */ 3889 /* Leave CL72 enabled */
3817 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, 3890 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
3818 MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, 3891 MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL,
3819 &val); 3892 0x3800);
3820 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3821 MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL,
3822 val | 0x3800);
3823 3893
3824 /* Set speed via PMA/PMD register */ 3894 /* Set speed via PMA/PMD register */
3825 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 3895 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD,
@@ -3840,7 +3910,7 @@ static void bnx2x_warpcore_set_10G_KR(struct bnx2x_phy *phy,
3840 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, 3910 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
3841 MDIO_WC_REG_RX66_CONTROL, 0xF9); 3911 MDIO_WC_REG_RX66_CONTROL, 0xF9);
3842 3912
3843 /* set and clear loopback to cause a reset to 64/66 decoder */ 3913 /* Set and clear loopback to cause a reset to 64/66 decoder */
3844 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, 3914 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3845 MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x4000); 3915 MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x4000);
3846 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, 3916 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
@@ -3855,16 +3925,12 @@ static void bnx2x_warpcore_set_10G_XFI(struct bnx2x_phy *phy,
3855 struct bnx2x *bp = params->bp; 3925 struct bnx2x *bp = params->bp;
3856 u16 misc1_val, tap_val, tx_driver_val, lane, val; 3926 u16 misc1_val, tap_val, tx_driver_val, lane, val;
3857 /* Hold rxSeqStart */ 3927 /* Hold rxSeqStart */
3858 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, 3928 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
3859 MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0, &val); 3929 MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0, 0x8000);
3860 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3861 MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0, (val | 0x8000));
3862 3930
3863 /* Hold tx_fifo_reset */ 3931 /* Hold tx_fifo_reset */
3864 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, 3932 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
3865 MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3, &val); 3933 MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3, 0x1);
3866 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3867 MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3, (val | 0x1));
3868 3934
3869 /* Disable CL73 AN */ 3935 /* Disable CL73 AN */
3870 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0); 3936 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0);
@@ -3876,10 +3942,8 @@ static void bnx2x_warpcore_set_10G_XFI(struct bnx2x_phy *phy,
3876 MDIO_WC_REG_FX100_CTRL1, (val & 0xFFFA)); 3942 MDIO_WC_REG_FX100_CTRL1, (val & 0xFFFA));
3877 3943
3878 /* Disable 100FX Idle detect */ 3944 /* Disable 100FX Idle detect */
3879 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, 3945 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
3880 MDIO_WC_REG_FX100_CTRL3, &val); 3946 MDIO_WC_REG_FX100_CTRL3, 0x0080);
3881 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3882 MDIO_WC_REG_FX100_CTRL3, (val | 0x0080));
3883 3947
3884 /* Set Block address to Remote PHY & Clear forced_speed[5] */ 3948 /* Set Block address to Remote PHY & Clear forced_speed[5] */
3885 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, 3949 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
@@ -3940,16 +4004,20 @@ static void bnx2x_warpcore_set_10G_XFI(struct bnx2x_phy *phy,
3940 tx_driver_val); 4004 tx_driver_val);
3941 4005
3942 /* Enable fiber mode, enable and invert sig_det */ 4006 /* Enable fiber mode, enable and invert sig_det */
3943 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, 4007 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
3944 MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, &val); 4008 MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, 0xd);
3945 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3946 MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, val | 0xd);
3947 4009
3948 /* Set Block address to Remote PHY & Set forced_speed[5], 40bit mode */ 4010 /* Set Block address to Remote PHY & Set forced_speed[5], 40bit mode */
3949 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, 4011 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
3950 MDIO_WC_REG_DIGITAL4_MISC3, &val); 4012 MDIO_WC_REG_DIGITAL4_MISC3, 0x8080);
4013
4014 /* Enable LPI pass through */
4015 DP(NETIF_MSG_LINK, "Configure WC for LPI pass through\n");
3951 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, 4016 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3952 MDIO_WC_REG_DIGITAL4_MISC3, val | 0x8080); 4017 MDIO_WC_REG_EEE_COMBO_CONTROL0,
4018 0x7c);
4019 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
4020 MDIO_WC_REG_DIGITAL4_MISC5, 0xc000);
3953 4021
3954 /* 10G XFI Full Duplex */ 4022 /* 10G XFI Full Duplex */
3955 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, 4023 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
@@ -4139,40 +4207,35 @@ static void bnx2x_warpcore_clear_regs(struct bnx2x_phy *phy,
4139 u16 lane) 4207 u16 lane)
4140{ 4208{
4141 struct bnx2x *bp = params->bp; 4209 struct bnx2x *bp = params->bp;
4142 u16 val16; 4210 u16 i;
4143 4211 static struct bnx2x_reg_set wc_regs[] = {
4212 {MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0},
4213 {MDIO_WC_DEVAD, MDIO_WC_REG_FX100_CTRL1, 0x014a},
4214 {MDIO_WC_DEVAD, MDIO_WC_REG_FX100_CTRL3, 0x0800},
4215 {MDIO_WC_DEVAD, MDIO_WC_REG_DIGITAL4_MISC3, 0x8008},
4216 {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1,
4217 0x0195},
4218 {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2,
4219 0x0007},
4220 {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3,
4221 0x0002},
4222 {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_MISC1, 0x6000},
4223 {MDIO_WC_DEVAD, MDIO_WC_REG_TX_FIR_TAP, 0x0000},
4224 {MDIO_WC_DEVAD, MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x2040},
4225 {MDIO_WC_DEVAD, MDIO_WC_REG_COMBO_IEEE0_MIICTRL, 0x0140}
4226 };
4144 /* Set XFI clock comp as default. */ 4227 /* Set XFI clock comp as default. */
4145 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, 4228 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
4146 MDIO_WC_REG_RX66_CONTROL, &val16); 4229 MDIO_WC_REG_RX66_CONTROL, (3<<13));
4147 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, 4230
4148 MDIO_WC_REG_RX66_CONTROL, val16 | (3<<13)); 4231 for (i = 0; i < sizeof(wc_regs)/sizeof(struct bnx2x_reg_set); i++)
4232 bnx2x_cl45_write(bp, phy, wc_regs[i].devad, wc_regs[i].reg,
4233 wc_regs[i].val);
4149 4234
4150 bnx2x_warpcore_reset_lane(bp, phy, 1);
4151 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0);
4152 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
4153 MDIO_WC_REG_FX100_CTRL1, 0x014a);
4154 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
4155 MDIO_WC_REG_FX100_CTRL3, 0x0800);
4156 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
4157 MDIO_WC_REG_DIGITAL4_MISC3, 0x8008);
4158 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
4159 MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, 0x0195);
4160 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
4161 MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x0007);
4162 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
4163 MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3, 0x0002);
4164 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
4165 MDIO_WC_REG_SERDESDIGITAL_MISC1, 0x6000);
4166 lane = bnx2x_get_warpcore_lane(phy, params); 4235 lane = bnx2x_get_warpcore_lane(phy, params);
4167 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, 4236 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
4168 MDIO_WC_REG_TX_FIR_TAP, 0x0000);
4169 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
4170 MDIO_WC_REG_TX0_TX_DRIVER + 0x10*lane, 0x0990); 4237 MDIO_WC_REG_TX0_TX_DRIVER + 0x10*lane, 0x0990);
4171 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, 4238
4172 MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x2040);
4173 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
4174 MDIO_WC_REG_COMBO_IEEE0_MIICTRL, 0x0140);
4175 bnx2x_warpcore_reset_lane(bp, phy, 0);
4176} 4239}
4177 4240
4178static int bnx2x_get_mod_abs_int_cfg(struct bnx2x *bp, 4241static int bnx2x_get_mod_abs_int_cfg(struct bnx2x *bp,
@@ -4260,7 +4323,7 @@ static void bnx2x_warpcore_config_runtime(struct bnx2x_phy *phy,
4260 if (!vars->turn_to_run_wc_rt) 4323 if (!vars->turn_to_run_wc_rt)
4261 return; 4324 return;
4262 4325
4263 /* return if there is no link partner */ 4326 /* Return if there is no link partner */
4264 if (!(bnx2x_warpcore_get_sigdet(phy, params))) { 4327 if (!(bnx2x_warpcore_get_sigdet(phy, params))) {
4265 DP(NETIF_MSG_LINK, "bnx2x_warpcore_get_sigdet false\n"); 4328 DP(NETIF_MSG_LINK, "bnx2x_warpcore_get_sigdet false\n");
4266 return; 4329 return;
@@ -4294,7 +4357,7 @@ static void bnx2x_warpcore_config_runtime(struct bnx2x_phy *phy,
4294 bnx2x_warpcore_reset_lane(bp, phy, 1); 4357 bnx2x_warpcore_reset_lane(bp, phy, 1);
4295 bnx2x_warpcore_reset_lane(bp, phy, 0); 4358 bnx2x_warpcore_reset_lane(bp, phy, 0);
4296 4359
4297 /* restart Autoneg */ 4360 /* Restart Autoneg */
4298 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, 4361 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
4299 MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x1200); 4362 MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x1200);
4300 4363
@@ -4311,6 +4374,23 @@ static void bnx2x_warpcore_config_runtime(struct bnx2x_phy *phy,
4311 } /*params->rx_tx_asic_rst*/ 4374 } /*params->rx_tx_asic_rst*/
4312 4375
4313} 4376}
4377static void bnx2x_warpcore_config_sfi(struct bnx2x_phy *phy,
4378 struct link_params *params)
4379{
4380 u16 lane = bnx2x_get_warpcore_lane(phy, params);
4381 struct bnx2x *bp = params->bp;
4382 bnx2x_warpcore_clear_regs(phy, params, lane);
4383 if ((params->req_line_speed[LINK_CONFIG_IDX(INT_PHY)] ==
4384 SPEED_10000) &&
4385 (phy->media_type != ETH_PHY_SFP_1G_FIBER)) {
4386 DP(NETIF_MSG_LINK, "Setting 10G SFI\n");
4387 bnx2x_warpcore_set_10G_XFI(phy, params, 0);
4388 } else {
4389 DP(NETIF_MSG_LINK, "Setting 1G Fiber\n");
4390 bnx2x_warpcore_set_sgmii_speed(phy, params, 1, 0);
4391 }
4392}
4393
4314static void bnx2x_warpcore_config_init(struct bnx2x_phy *phy, 4394static void bnx2x_warpcore_config_init(struct bnx2x_phy *phy,
4315 struct link_params *params, 4395 struct link_params *params,
4316 struct link_vars *vars) 4396 struct link_vars *vars)
@@ -4371,19 +4451,11 @@ static void bnx2x_warpcore_config_init(struct bnx2x_phy *phy,
4371 break; 4451 break;
4372 4452
4373 case PORT_HW_CFG_NET_SERDES_IF_SFI: 4453 case PORT_HW_CFG_NET_SERDES_IF_SFI:
4374
4375 bnx2x_warpcore_clear_regs(phy, params, lane);
4376 if (vars->line_speed == SPEED_10000) {
4377 DP(NETIF_MSG_LINK, "Setting 10G SFI\n");
4378 bnx2x_warpcore_set_10G_XFI(phy, params, 0);
4379 } else if (vars->line_speed == SPEED_1000) {
4380 DP(NETIF_MSG_LINK, "Setting 1G Fiber\n");
4381 bnx2x_warpcore_set_sgmii_speed(
4382 phy, params, 1, 0);
4383 }
4384 /* Issue Module detection */ 4454 /* Issue Module detection */
4385 if (bnx2x_is_sfp_module_plugged(phy, params)) 4455 if (bnx2x_is_sfp_module_plugged(phy, params))
4386 bnx2x_sfp_module_detection(phy, params); 4456 bnx2x_sfp_module_detection(phy, params);
4457
4458 bnx2x_warpcore_config_sfi(phy, params);
4387 break; 4459 break;
4388 4460
4389 case PORT_HW_CFG_NET_SERDES_IF_DXGXS: 4461 case PORT_HW_CFG_NET_SERDES_IF_DXGXS:
@@ -4500,12 +4572,9 @@ static void bnx2x_set_warpcore_loopback(struct bnx2x_phy *phy,
4500 CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK, 4572 CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
4501 MDIO_AER_BLOCK_AER_REG, 0); 4573 MDIO_AER_BLOCK_AER_REG, 0);
4502 /* Enable 1G MDIO (1-copy) */ 4574 /* Enable 1G MDIO (1-copy) */
4503 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, 4575 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
4504 MDIO_WC_REG_XGXSBLK0_XGXSCONTROL, 4576 MDIO_WC_REG_XGXSBLK0_XGXSCONTROL,
4505 &val16); 4577 0x10);
4506 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
4507 MDIO_WC_REG_XGXSBLK0_XGXSCONTROL,
4508 val16 | 0x10);
4509 /* Set 1G loopback based on lane (1-copy) */ 4578 /* Set 1G loopback based on lane (1-copy) */
4510 lane = bnx2x_get_warpcore_lane(phy, params); 4579 lane = bnx2x_get_warpcore_lane(phy, params);
4511 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, 4580 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
@@ -4518,22 +4587,19 @@ static void bnx2x_set_warpcore_loopback(struct bnx2x_phy *phy,
4518 bnx2x_set_aer_mmd(params, phy); 4587 bnx2x_set_aer_mmd(params, phy);
4519 } else { 4588 } else {
4520 /* 10G & 20G */ 4589 /* 10G & 20G */
4521 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, 4590 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
4522 MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16); 4591 MDIO_WC_REG_COMBO_IEEE0_MIICTRL,
4523 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, 4592 0x4000);
4524 MDIO_WC_REG_COMBO_IEEE0_MIICTRL, val16 |
4525 0x4000);
4526 4593
4527 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, 4594 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
4528 MDIO_WC_REG_IEEE0BLK_MIICNTL, &val16); 4595 MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x1);
4529 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
4530 MDIO_WC_REG_IEEE0BLK_MIICNTL, val16 | 0x1);
4531 } 4596 }
4532} 4597}
4533 4598
4534 4599
4535void bnx2x_sync_link(struct link_params *params, 4600
4536 struct link_vars *vars) 4601static void bnx2x_sync_link(struct link_params *params,
4602 struct link_vars *vars)
4537{ 4603{
4538 struct bnx2x *bp = params->bp; 4604 struct bnx2x *bp = params->bp;
4539 u8 link_10g_plus; 4605 u8 link_10g_plus;
@@ -4606,7 +4672,7 @@ void bnx2x_sync_link(struct link_params *params,
4606 USES_WARPCORE(bp) && 4672 USES_WARPCORE(bp) &&
4607 (vars->line_speed == SPEED_1000)) 4673 (vars->line_speed == SPEED_1000))
4608 vars->phy_flags |= PHY_SGMII_FLAG; 4674 vars->phy_flags |= PHY_SGMII_FLAG;
4609 /* anything 10 and over uses the bmac */ 4675 /* Anything 10 and over uses the bmac */
4610 link_10g_plus = (vars->line_speed >= SPEED_10000); 4676 link_10g_plus = (vars->line_speed >= SPEED_10000);
4611 4677
4612 if (link_10g_plus) { 4678 if (link_10g_plus) {
@@ -4620,7 +4686,7 @@ void bnx2x_sync_link(struct link_params *params,
4620 else 4686 else
4621 vars->mac_type = MAC_TYPE_EMAC; 4687 vars->mac_type = MAC_TYPE_EMAC;
4622 } 4688 }
4623 } else { /* link down */ 4689 } else { /* Link down */
4624 DP(NETIF_MSG_LINK, "phy link down\n"); 4690 DP(NETIF_MSG_LINK, "phy link down\n");
4625 4691
4626 vars->phy_link_up = 0; 4692 vars->phy_link_up = 0;
@@ -4629,10 +4695,12 @@ void bnx2x_sync_link(struct link_params *params,
4629 vars->duplex = DUPLEX_FULL; 4695 vars->duplex = DUPLEX_FULL;
4630 vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE; 4696 vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
4631 4697
4632 /* indicate no mac active */ 4698 /* Indicate no mac active */
4633 vars->mac_type = MAC_TYPE_NONE; 4699 vars->mac_type = MAC_TYPE_NONE;
4634 if (vars->link_status & LINK_STATUS_PHYSICAL_LINK_FLAG) 4700 if (vars->link_status & LINK_STATUS_PHYSICAL_LINK_FLAG)
4635 vars->phy_flags |= PHY_HALF_OPEN_CONN_FLAG; 4701 vars->phy_flags |= PHY_HALF_OPEN_CONN_FLAG;
4702 if (vars->link_status & LINK_STATUS_SFP_TX_FAULT)
4703 vars->phy_flags |= PHY_SFP_TX_FAULT_FLAG;
4636 } 4704 }
4637} 4705}
4638 4706
@@ -4698,7 +4766,7 @@ static void bnx2x_set_master_ln(struct link_params *params,
4698 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >> 4766 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
4699 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT); 4767 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
4700 4768
4701 /* set the master_ln for AN */ 4769 /* Set the master_ln for AN */
4702 CL22_RD_OVER_CL45(bp, phy, 4770 CL22_RD_OVER_CL45(bp, phy,
4703 MDIO_REG_BANK_XGXS_BLOCK2, 4771 MDIO_REG_BANK_XGXS_BLOCK2,
4704 MDIO_XGXS_BLOCK2_TEST_MODE_LANE, 4772 MDIO_XGXS_BLOCK2_TEST_MODE_LANE,
@@ -4721,7 +4789,7 @@ static int bnx2x_reset_unicore(struct link_params *params,
4721 MDIO_REG_BANK_COMBO_IEEE0, 4789 MDIO_REG_BANK_COMBO_IEEE0,
4722 MDIO_COMBO_IEEE0_MII_CONTROL, &mii_control); 4790 MDIO_COMBO_IEEE0_MII_CONTROL, &mii_control);
4723 4791
4724 /* reset the unicore */ 4792 /* Reset the unicore */
4725 CL22_WR_OVER_CL45(bp, phy, 4793 CL22_WR_OVER_CL45(bp, phy,
4726 MDIO_REG_BANK_COMBO_IEEE0, 4794 MDIO_REG_BANK_COMBO_IEEE0,
4727 MDIO_COMBO_IEEE0_MII_CONTROL, 4795 MDIO_COMBO_IEEE0_MII_CONTROL,
@@ -4730,11 +4798,11 @@ static int bnx2x_reset_unicore(struct link_params *params,
4730 if (set_serdes) 4798 if (set_serdes)
4731 bnx2x_set_serdes_access(bp, params->port); 4799 bnx2x_set_serdes_access(bp, params->port);
4732 4800
4733 /* wait for the reset to self clear */ 4801 /* Wait for the reset to self clear */
4734 for (i = 0; i < MDIO_ACCESS_TIMEOUT; i++) { 4802 for (i = 0; i < MDIO_ACCESS_TIMEOUT; i++) {
4735 udelay(5); 4803 udelay(5);
4736 4804
4737 /* the reset erased the previous bank value */ 4805 /* The reset erased the previous bank value */
4738 CL22_RD_OVER_CL45(bp, phy, 4806 CL22_RD_OVER_CL45(bp, phy,
4739 MDIO_REG_BANK_COMBO_IEEE0, 4807 MDIO_REG_BANK_COMBO_IEEE0,
4740 MDIO_COMBO_IEEE0_MII_CONTROL, 4808 MDIO_COMBO_IEEE0_MII_CONTROL,
@@ -4952,7 +5020,7 @@ static void bnx2x_set_autoneg(struct bnx2x_phy *phy,
4952 MDIO_CL73_IEEEB0_CL73_AN_CONTROL, reg_val); 5020 MDIO_CL73_IEEEB0_CL73_AN_CONTROL, reg_val);
4953} 5021}
4954 5022
4955/* program SerDes, forced speed */ 5023/* Program SerDes, forced speed */
4956static void bnx2x_program_serdes(struct bnx2x_phy *phy, 5024static void bnx2x_program_serdes(struct bnx2x_phy *phy,
4957 struct link_params *params, 5025 struct link_params *params,
4958 struct link_vars *vars) 5026 struct link_vars *vars)
@@ -4960,7 +5028,7 @@ static void bnx2x_program_serdes(struct bnx2x_phy *phy,
4960 struct bnx2x *bp = params->bp; 5028 struct bnx2x *bp = params->bp;
4961 u16 reg_val; 5029 u16 reg_val;
4962 5030
4963 /* program duplex, disable autoneg and sgmii*/ 5031 /* Program duplex, disable autoneg and sgmii*/
4964 CL22_RD_OVER_CL45(bp, phy, 5032 CL22_RD_OVER_CL45(bp, phy,
4965 MDIO_REG_BANK_COMBO_IEEE0, 5033 MDIO_REG_BANK_COMBO_IEEE0,
4966 MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val); 5034 MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val);
@@ -4979,7 +5047,7 @@ static void bnx2x_program_serdes(struct bnx2x_phy *phy,
4979 CL22_RD_OVER_CL45(bp, phy, 5047 CL22_RD_OVER_CL45(bp, phy,
4980 MDIO_REG_BANK_SERDES_DIGITAL, 5048 MDIO_REG_BANK_SERDES_DIGITAL,
4981 MDIO_SERDES_DIGITAL_MISC1, &reg_val); 5049 MDIO_SERDES_DIGITAL_MISC1, &reg_val);
4982 /* clearing the speed value before setting the right speed */ 5050 /* Clearing the speed value before setting the right speed */
4983 DP(NETIF_MSG_LINK, "MDIO_REG_BANK_SERDES_DIGITAL = 0x%x\n", reg_val); 5051 DP(NETIF_MSG_LINK, "MDIO_REG_BANK_SERDES_DIGITAL = 0x%x\n", reg_val);
4984 5052
4985 reg_val &= ~(MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_MASK | 5053 reg_val &= ~(MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_MASK |
@@ -5008,7 +5076,7 @@ static void bnx2x_set_brcm_cl37_advertisement(struct bnx2x_phy *phy,
5008 struct bnx2x *bp = params->bp; 5076 struct bnx2x *bp = params->bp;
5009 u16 val = 0; 5077 u16 val = 0;
5010 5078
5011 /* set extended capabilities */ 5079 /* Set extended capabilities */
5012 if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G) 5080 if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G)
5013 val |= MDIO_OVER_1G_UP1_2_5G; 5081 val |= MDIO_OVER_1G_UP1_2_5G;
5014 if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) 5082 if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
@@ -5028,7 +5096,7 @@ static void bnx2x_set_ieee_aneg_advertisement(struct bnx2x_phy *phy,
5028{ 5096{
5029 struct bnx2x *bp = params->bp; 5097 struct bnx2x *bp = params->bp;
5030 u16 val; 5098 u16 val;
5031 /* for AN, we are always publishing full duplex */ 5099 /* For AN, we are always publishing full duplex */
5032 5100
5033 CL22_WR_OVER_CL45(bp, phy, 5101 CL22_WR_OVER_CL45(bp, phy,
5034 MDIO_REG_BANK_COMBO_IEEE0, 5102 MDIO_REG_BANK_COMBO_IEEE0,
@@ -5090,14 +5158,14 @@ static void bnx2x_initialize_sgmii_process(struct bnx2x_phy *phy,
5090 struct bnx2x *bp = params->bp; 5158 struct bnx2x *bp = params->bp;
5091 u16 control1; 5159 u16 control1;
5092 5160
5093 /* in SGMII mode, the unicore is always slave */ 5161 /* In SGMII mode, the unicore is always slave */
5094 5162
5095 CL22_RD_OVER_CL45(bp, phy, 5163 CL22_RD_OVER_CL45(bp, phy,
5096 MDIO_REG_BANK_SERDES_DIGITAL, 5164 MDIO_REG_BANK_SERDES_DIGITAL,
5097 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, 5165 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1,
5098 &control1); 5166 &control1);
5099 control1 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_INVERT_SIGNAL_DETECT; 5167 control1 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_INVERT_SIGNAL_DETECT;
5100 /* set sgmii mode (and not fiber) */ 5168 /* Set sgmii mode (and not fiber) */
5101 control1 &= ~(MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_FIBER_MODE | 5169 control1 &= ~(MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_FIBER_MODE |
5102 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET | 5170 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET |
5103 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_MSTR_MODE); 5171 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_MSTR_MODE);
@@ -5106,9 +5174,9 @@ static void bnx2x_initialize_sgmii_process(struct bnx2x_phy *phy,
5106 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, 5174 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1,
5107 control1); 5175 control1);
5108 5176
5109 /* if forced speed */ 5177 /* If forced speed */
5110 if (!(vars->line_speed == SPEED_AUTO_NEG)) { 5178 if (!(vars->line_speed == SPEED_AUTO_NEG)) {
5111 /* set speed, disable autoneg */ 5179 /* Set speed, disable autoneg */
5112 u16 mii_control; 5180 u16 mii_control;
5113 5181
5114 CL22_RD_OVER_CL45(bp, phy, 5182 CL22_RD_OVER_CL45(bp, phy,
@@ -5129,16 +5197,16 @@ static void bnx2x_initialize_sgmii_process(struct bnx2x_phy *phy,
5129 MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_1000; 5197 MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_1000;
5130 break; 5198 break;
5131 case SPEED_10: 5199 case SPEED_10:
5132 /* there is nothing to set for 10M */ 5200 /* There is nothing to set for 10M */
5133 break; 5201 break;
5134 default: 5202 default:
5135 /* invalid speed for SGMII */ 5203 /* Invalid speed for SGMII */
5136 DP(NETIF_MSG_LINK, "Invalid line_speed 0x%x\n", 5204 DP(NETIF_MSG_LINK, "Invalid line_speed 0x%x\n",
5137 vars->line_speed); 5205 vars->line_speed);
5138 break; 5206 break;
5139 } 5207 }
5140 5208
5141 /* setting the full duplex */ 5209 /* Setting the full duplex */
5142 if (phy->req_duplex == DUPLEX_FULL) 5210 if (phy->req_duplex == DUPLEX_FULL)
5143 mii_control |= 5211 mii_control |=
5144 MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX; 5212 MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX;
@@ -5148,7 +5216,7 @@ static void bnx2x_initialize_sgmii_process(struct bnx2x_phy *phy,
5148 mii_control); 5216 mii_control);
5149 5217
5150 } else { /* AN mode */ 5218 } else { /* AN mode */
5151 /* enable and restart AN */ 5219 /* Enable and restart AN */
5152 bnx2x_restart_autoneg(phy, params, 0); 5220 bnx2x_restart_autoneg(phy, params, 0);
5153 } 5221 }
5154} 5222}
@@ -5244,7 +5312,7 @@ static void bnx2x_flow_ctrl_resolve(struct bnx2x_phy *phy,
5244 struct bnx2x *bp = params->bp; 5312 struct bnx2x *bp = params->bp;
5245 vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE; 5313 vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
5246 5314
5247 /* resolve from gp_status in case of AN complete and not sgmii */ 5315 /* Resolve from gp_status in case of AN complete and not sgmii */
5248 if (phy->req_flow_ctrl != BNX2X_FLOW_CTRL_AUTO) { 5316 if (phy->req_flow_ctrl != BNX2X_FLOW_CTRL_AUTO) {
5249 /* Update the advertised flow-controled of LD/LP in AN */ 5317 /* Update the advertised flow-controled of LD/LP in AN */
5250 if (phy->req_line_speed == SPEED_AUTO_NEG) 5318 if (phy->req_line_speed == SPEED_AUTO_NEG)
@@ -5468,7 +5536,7 @@ static int bnx2x_link_settings_status(struct bnx2x_phy *phy,
5468 bnx2x_xgxs_an_resolve(phy, params, vars, 5536 bnx2x_xgxs_an_resolve(phy, params, vars,
5469 gp_status); 5537 gp_status);
5470 } 5538 }
5471 } else { /* link_down */ 5539 } else { /* Link_down */
5472 if ((phy->req_line_speed == SPEED_AUTO_NEG) && 5540 if ((phy->req_line_speed == SPEED_AUTO_NEG) &&
5473 SINGLE_MEDIA_DIRECT(params)) { 5541 SINGLE_MEDIA_DIRECT(params)) {
5474 /* Check signal is detected */ 5542 /* Check signal is detected */
@@ -5617,12 +5685,12 @@ static void bnx2x_set_gmii_tx_driver(struct link_params *params)
5617 u16 tx_driver; 5685 u16 tx_driver;
5618 u16 bank; 5686 u16 bank;
5619 5687
5620 /* read precomp */ 5688 /* Read precomp */
5621 CL22_RD_OVER_CL45(bp, phy, 5689 CL22_RD_OVER_CL45(bp, phy,
5622 MDIO_REG_BANK_OVER_1G, 5690 MDIO_REG_BANK_OVER_1G,
5623 MDIO_OVER_1G_LP_UP2, &lp_up2); 5691 MDIO_OVER_1G_LP_UP2, &lp_up2);
5624 5692
5625 /* bits [10:7] at lp_up2, positioned at [15:12] */ 5693 /* Bits [10:7] at lp_up2, positioned at [15:12] */
5626 lp_up2 = (((lp_up2 & MDIO_OVER_1G_LP_UP2_PREEMPHASIS_MASK) >> 5694 lp_up2 = (((lp_up2 & MDIO_OVER_1G_LP_UP2_PREEMPHASIS_MASK) >>
5627 MDIO_OVER_1G_LP_UP2_PREEMPHASIS_SHIFT) << 5695 MDIO_OVER_1G_LP_UP2_PREEMPHASIS_SHIFT) <<
5628 MDIO_TX0_TX_DRIVER_PREEMPHASIS_SHIFT); 5696 MDIO_TX0_TX_DRIVER_PREEMPHASIS_SHIFT);
@@ -5636,7 +5704,7 @@ static void bnx2x_set_gmii_tx_driver(struct link_params *params)
5636 bank, 5704 bank,
5637 MDIO_TX0_TX_DRIVER, &tx_driver); 5705 MDIO_TX0_TX_DRIVER, &tx_driver);
5638 5706
5639 /* replace tx_driver bits [15:12] */ 5707 /* Replace tx_driver bits [15:12] */
5640 if (lp_up2 != 5708 if (lp_up2 !=
5641 (tx_driver & MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK)) { 5709 (tx_driver & MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK)) {
5642 tx_driver &= ~MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK; 5710 tx_driver &= ~MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK;
@@ -5732,16 +5800,16 @@ static void bnx2x_xgxs_config_init(struct bnx2x_phy *phy,
5732 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED)) 5800 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED))
5733 bnx2x_set_preemphasis(phy, params); 5801 bnx2x_set_preemphasis(phy, params);
5734 5802
5735 /* forced speed requested? */ 5803 /* Forced speed requested? */
5736 if (vars->line_speed != SPEED_AUTO_NEG || 5804 if (vars->line_speed != SPEED_AUTO_NEG ||
5737 (SINGLE_MEDIA_DIRECT(params) && 5805 (SINGLE_MEDIA_DIRECT(params) &&
5738 params->loopback_mode == LOOPBACK_EXT)) { 5806 params->loopback_mode == LOOPBACK_EXT)) {
5739 DP(NETIF_MSG_LINK, "not SGMII, no AN\n"); 5807 DP(NETIF_MSG_LINK, "not SGMII, no AN\n");
5740 5808
5741 /* disable autoneg */ 5809 /* Disable autoneg */
5742 bnx2x_set_autoneg(phy, params, vars, 0); 5810 bnx2x_set_autoneg(phy, params, vars, 0);
5743 5811
5744 /* program speed and duplex */ 5812 /* Program speed and duplex */
5745 bnx2x_program_serdes(phy, params, vars); 5813 bnx2x_program_serdes(phy, params, vars);
5746 5814
5747 } else { /* AN_mode */ 5815 } else { /* AN_mode */
@@ -5750,14 +5818,14 @@ static void bnx2x_xgxs_config_init(struct bnx2x_phy *phy,
5750 /* AN enabled */ 5818 /* AN enabled */
5751 bnx2x_set_brcm_cl37_advertisement(phy, params); 5819 bnx2x_set_brcm_cl37_advertisement(phy, params);
5752 5820
5753 /* program duplex & pause advertisement (for aneg) */ 5821 /* Program duplex & pause advertisement (for aneg) */
5754 bnx2x_set_ieee_aneg_advertisement(phy, params, 5822 bnx2x_set_ieee_aneg_advertisement(phy, params,
5755 vars->ieee_fc); 5823 vars->ieee_fc);
5756 5824
5757 /* enable autoneg */ 5825 /* Enable autoneg */
5758 bnx2x_set_autoneg(phy, params, vars, enable_cl73); 5826 bnx2x_set_autoneg(phy, params, vars, enable_cl73);
5759 5827
5760 /* enable and restart AN */ 5828 /* Enable and restart AN */
5761 bnx2x_restart_autoneg(phy, params, enable_cl73); 5829 bnx2x_restart_autoneg(phy, params, enable_cl73);
5762 } 5830 }
5763 5831
@@ -5793,12 +5861,12 @@ static int bnx2x_prepare_xgxs(struct bnx2x_phy *phy,
5793 bnx2x_set_master_ln(params, phy); 5861 bnx2x_set_master_ln(params, phy);
5794 5862
5795 rc = bnx2x_reset_unicore(params, phy, 0); 5863 rc = bnx2x_reset_unicore(params, phy, 0);
5796 /* reset the SerDes and wait for reset bit return low */ 5864 /* Reset the SerDes and wait for reset bit return low */
5797 if (rc != 0) 5865 if (rc)
5798 return rc; 5866 return rc;
5799 5867
5800 bnx2x_set_aer_mmd(params, phy); 5868 bnx2x_set_aer_mmd(params, phy);
5801 /* setting the masterLn_def again after the reset */ 5869 /* Setting the masterLn_def again after the reset */
5802 if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) { 5870 if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) {
5803 bnx2x_set_master_ln(params, phy); 5871 bnx2x_set_master_ln(params, phy);
5804 bnx2x_set_swap_lanes(params, phy); 5872 bnx2x_set_swap_lanes(params, phy);
@@ -5823,7 +5891,7 @@ static u16 bnx2x_wait_reset_complete(struct bnx2x *bp,
5823 MDIO_PMA_REG_CTRL, &ctrl); 5891 MDIO_PMA_REG_CTRL, &ctrl);
5824 if (!(ctrl & (1<<15))) 5892 if (!(ctrl & (1<<15)))
5825 break; 5893 break;
5826 msleep(1); 5894 usleep_range(1000, 2000);
5827 } 5895 }
5828 5896
5829 if (cnt == 1000) 5897 if (cnt == 1000)
@@ -6054,7 +6122,7 @@ static void bnx2x_set_xgxs_loopback(struct bnx2x_phy *phy,
6054 DP(NETIF_MSG_LINK, "XGXS 10G loopback enable\n"); 6122 DP(NETIF_MSG_LINK, "XGXS 10G loopback enable\n");
6055 6123
6056 if (!CHIP_IS_E3(bp)) { 6124 if (!CHIP_IS_E3(bp)) {
6057 /* change the uni_phy_addr in the nig */ 6125 /* Change the uni_phy_addr in the nig */
6058 md_devad = REG_RD(bp, (NIG_REG_XGXS0_CTRL_MD_DEVAD + 6126 md_devad = REG_RD(bp, (NIG_REG_XGXS0_CTRL_MD_DEVAD +
6059 port*0x18)); 6127 port*0x18));
6060 6128
@@ -6074,11 +6142,11 @@ static void bnx2x_set_xgxs_loopback(struct bnx2x_phy *phy,
6074 (MDIO_CL73_IEEEB0_CL73_AN_CONTROL & 0xf)), 6142 (MDIO_CL73_IEEEB0_CL73_AN_CONTROL & 0xf)),
6075 0x6041); 6143 0x6041);
6076 msleep(200); 6144 msleep(200);
6077 /* set aer mmd back */ 6145 /* Set aer mmd back */
6078 bnx2x_set_aer_mmd(params, phy); 6146 bnx2x_set_aer_mmd(params, phy);
6079 6147
6080 if (!CHIP_IS_E3(bp)) { 6148 if (!CHIP_IS_E3(bp)) {
6081 /* and md_devad */ 6149 /* And md_devad */
6082 REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18, 6150 REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18,
6083 md_devad); 6151 md_devad);
6084 } 6152 }
@@ -6275,7 +6343,7 @@ int bnx2x_test_link(struct link_params *params, struct link_vars *vars,
6275 MDIO_REG_BANK_GP_STATUS, 6343 MDIO_REG_BANK_GP_STATUS,
6276 MDIO_GP_STATUS_TOP_AN_STATUS1, 6344 MDIO_GP_STATUS_TOP_AN_STATUS1,
6277 &gp_status); 6345 &gp_status);
6278 /* link is up only if both local phy and external phy are up */ 6346 /* Link is up only if both local phy and external phy are up */
6279 if (!(gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS)) 6347 if (!(gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS))
6280 return -ESRCH; 6348 return -ESRCH;
6281 } 6349 }
@@ -6296,7 +6364,9 @@ int bnx2x_test_link(struct link_params *params, struct link_vars *vars,
6296 for (phy_index = EXT_PHY1; phy_index < params->num_phys; 6364 for (phy_index = EXT_PHY1; phy_index < params->num_phys;
6297 phy_index++) { 6365 phy_index++) {
6298 serdes_phy_type = ((params->phy[phy_index].media_type == 6366 serdes_phy_type = ((params->phy[phy_index].media_type ==
6299 ETH_PHY_SFP_FIBER) || 6367 ETH_PHY_SFPP_10G_FIBER) ||
6368 (params->phy[phy_index].media_type ==
6369 ETH_PHY_SFP_1G_FIBER) ||
6300 (params->phy[phy_index].media_type == 6370 (params->phy[phy_index].media_type ==
6301 ETH_PHY_XFP_FIBER) || 6371 ETH_PHY_XFP_FIBER) ||
6302 (params->phy[phy_index].media_type == 6372 (params->phy[phy_index].media_type ==
@@ -6397,7 +6467,7 @@ static int bnx2x_link_initialize(struct link_params *params,
6397static void bnx2x_int_link_reset(struct bnx2x_phy *phy, 6467static void bnx2x_int_link_reset(struct bnx2x_phy *phy,
6398 struct link_params *params) 6468 struct link_params *params)
6399{ 6469{
6400 /* reset the SerDes/XGXS */ 6470 /* Reset the SerDes/XGXS */
6401 REG_WR(params->bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR, 6471 REG_WR(params->bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR,
6402 (0x1ff << (params->port*16))); 6472 (0x1ff << (params->port*16)));
6403} 6473}
@@ -6430,10 +6500,10 @@ static int bnx2x_update_link_down(struct link_params *params,
6430 DP(NETIF_MSG_LINK, "Port %x: Link is down\n", port); 6500 DP(NETIF_MSG_LINK, "Port %x: Link is down\n", port);
6431 bnx2x_set_led(params, vars, LED_MODE_OFF, 0); 6501 bnx2x_set_led(params, vars, LED_MODE_OFF, 0);
6432 vars->phy_flags &= ~PHY_PHYSICAL_LINK_FLAG; 6502 vars->phy_flags &= ~PHY_PHYSICAL_LINK_FLAG;
6433 /* indicate no mac active */ 6503 /* Indicate no mac active */
6434 vars->mac_type = MAC_TYPE_NONE; 6504 vars->mac_type = MAC_TYPE_NONE;
6435 6505
6436 /* update shared memory */ 6506 /* Update shared memory */
6437 vars->link_status &= ~(LINK_STATUS_SPEED_AND_DUPLEX_MASK | 6507 vars->link_status &= ~(LINK_STATUS_SPEED_AND_DUPLEX_MASK |
6438 LINK_STATUS_LINK_UP | 6508 LINK_STATUS_LINK_UP |
6439 LINK_STATUS_PHYSICAL_LINK_FLAG | 6509 LINK_STATUS_PHYSICAL_LINK_FLAG |
@@ -6446,15 +6516,15 @@ static int bnx2x_update_link_down(struct link_params *params,
6446 vars->line_speed = 0; 6516 vars->line_speed = 0;
6447 bnx2x_update_mng(params, vars->link_status); 6517 bnx2x_update_mng(params, vars->link_status);
6448 6518
6449 /* activate nig drain */ 6519 /* Activate nig drain */
6450 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1); 6520 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1);
6451 6521
6452 /* disable emac */ 6522 /* Disable emac */
6453 if (!CHIP_IS_E3(bp)) 6523 if (!CHIP_IS_E3(bp))
6454 REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0); 6524 REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0);
6455 6525
6456 msleep(10); 6526 usleep_range(10000, 20000);
6457 /* reset BigMac/Xmac */ 6527 /* Reset BigMac/Xmac */
6458 if (CHIP_IS_E1x(bp) || 6528 if (CHIP_IS_E1x(bp) ||
6459 CHIP_IS_E2(bp)) { 6529 CHIP_IS_E2(bp)) {
6460 bnx2x_bmac_rx_disable(bp, params->port); 6530 bnx2x_bmac_rx_disable(bp, params->port);
@@ -6463,6 +6533,16 @@ static int bnx2x_update_link_down(struct link_params *params,
6463 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port)); 6533 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
6464 } 6534 }
6465 if (CHIP_IS_E3(bp)) { 6535 if (CHIP_IS_E3(bp)) {
6536 /* Prevent LPI Generation by chip */
6537 REG_WR(bp, MISC_REG_CPMU_LP_FW_ENABLE_P0 + (params->port << 2),
6538 0);
6539 REG_WR(bp, MISC_REG_CPMU_LP_DR_ENABLE, 0);
6540 REG_WR(bp, MISC_REG_CPMU_LP_MASK_ENT_P0 + (params->port << 2),
6541 0);
6542 vars->eee_status &= ~(SHMEM_EEE_LP_ADV_STATUS_MASK |
6543 SHMEM_EEE_ACTIVE_BIT);
6544
6545 bnx2x_update_mng_eee(params, vars->eee_status);
6466 bnx2x_xmac_disable(params); 6546 bnx2x_xmac_disable(params);
6467 bnx2x_umac_disable(params); 6547 bnx2x_umac_disable(params);
6468 } 6548 }
@@ -6502,6 +6582,16 @@ static int bnx2x_update_link_up(struct link_params *params,
6502 bnx2x_umac_enable(params, vars, 0); 6582 bnx2x_umac_enable(params, vars, 0);
6503 bnx2x_set_led(params, vars, 6583 bnx2x_set_led(params, vars,
6504 LED_MODE_OPER, vars->line_speed); 6584 LED_MODE_OPER, vars->line_speed);
6585
6586 if ((vars->eee_status & SHMEM_EEE_ACTIVE_BIT) &&
6587 (vars->eee_status & SHMEM_EEE_LPI_REQUESTED_BIT)) {
6588 DP(NETIF_MSG_LINK, "Enabling LPI assertion\n");
6589 REG_WR(bp, MISC_REG_CPMU_LP_FW_ENABLE_P0 +
6590 (params->port << 2), 1);
6591 REG_WR(bp, MISC_REG_CPMU_LP_DR_ENABLE, 1);
6592 REG_WR(bp, MISC_REG_CPMU_LP_MASK_ENT_P0 +
6593 (params->port << 2), 0xfc20);
6594 }
6505 } 6595 }
6506 if ((CHIP_IS_E1x(bp) || 6596 if ((CHIP_IS_E1x(bp) ||
6507 CHIP_IS_E2(bp))) { 6597 CHIP_IS_E2(bp))) {
@@ -6534,12 +6624,12 @@ static int bnx2x_update_link_up(struct link_params *params,
6534 rc |= bnx2x_pbf_update(params, vars->flow_ctrl, 6624 rc |= bnx2x_pbf_update(params, vars->flow_ctrl,
6535 vars->line_speed); 6625 vars->line_speed);
6536 6626
6537 /* disable drain */ 6627 /* Disable drain */
6538 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 0); 6628 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 0);
6539 6629
6540 /* update shared memory */ 6630 /* Update shared memory */
6541 bnx2x_update_mng(params, vars->link_status); 6631 bnx2x_update_mng(params, vars->link_status);
6542 6632 bnx2x_update_mng_eee(params, vars->eee_status);
6543 /* Check remote fault */ 6633 /* Check remote fault */
6544 for (phy_idx = INT_PHY; phy_idx < MAX_PHYS; phy_idx++) { 6634 for (phy_idx = INT_PHY; phy_idx < MAX_PHYS; phy_idx++) {
6545 if (params->phy[phy_idx].flags & FLAGS_TX_ERROR_CHECK) { 6635 if (params->phy[phy_idx].flags & FLAGS_TX_ERROR_CHECK) {
@@ -6583,6 +6673,8 @@ int bnx2x_link_update(struct link_params *params, struct link_vars *vars)
6583 phy_vars[phy_index].phy_link_up = 0; 6673 phy_vars[phy_index].phy_link_up = 0;
6584 phy_vars[phy_index].link_up = 0; 6674 phy_vars[phy_index].link_up = 0;
6585 phy_vars[phy_index].fault_detected = 0; 6675 phy_vars[phy_index].fault_detected = 0;
6676 /* different consideration, since vars holds inner state */
6677 phy_vars[phy_index].eee_status = vars->eee_status;
6586 } 6678 }
6587 6679
6588 if (USES_WARPCORE(bp)) 6680 if (USES_WARPCORE(bp))
@@ -6603,7 +6695,7 @@ int bnx2x_link_update(struct link_params *params, struct link_vars *vars)
6603 REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68), 6695 REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68),
6604 REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK_STATUS + port*0x68)); 6696 REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK_STATUS + port*0x68));
6605 6697
6606 /* disable emac */ 6698 /* Disable emac */
6607 if (!CHIP_IS_E3(bp)) 6699 if (!CHIP_IS_E3(bp))
6608 REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0); 6700 REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0);
6609 6701
@@ -6712,6 +6804,9 @@ int bnx2x_link_update(struct link_params *params, struct link_vars *vars)
6712 vars->link_status |= LINK_STATUS_SERDES_LINK; 6804 vars->link_status |= LINK_STATUS_SERDES_LINK;
6713 else 6805 else
6714 vars->link_status &= ~LINK_STATUS_SERDES_LINK; 6806 vars->link_status &= ~LINK_STATUS_SERDES_LINK;
6807
6808 vars->eee_status = phy_vars[active_external_phy].eee_status;
6809
6715 DP(NETIF_MSG_LINK, "Active external phy selected: %x\n", 6810 DP(NETIF_MSG_LINK, "Active external phy selected: %x\n",
6716 active_external_phy); 6811 active_external_phy);
6717 } 6812 }
@@ -6745,11 +6840,11 @@ int bnx2x_link_update(struct link_params *params, struct link_vars *vars)
6745 } else if (prev_line_speed != vars->line_speed) { 6840 } else if (prev_line_speed != vars->line_speed) {
6746 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 6841 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4,
6747 0); 6842 0);
6748 msleep(1); 6843 usleep_range(1000, 2000);
6749 } 6844 }
6750 } 6845 }
6751 6846
6752 /* anything 10 and over uses the bmac */ 6847 /* Anything 10 and over uses the bmac */
6753 link_10g_plus = (vars->line_speed >= SPEED_10000); 6848 link_10g_plus = (vars->line_speed >= SPEED_10000);
6754 6849
6755 bnx2x_link_int_ack(params, vars, link_10g_plus); 6850 bnx2x_link_int_ack(params, vars, link_10g_plus);
@@ -6815,7 +6910,7 @@ void bnx2x_ext_phy_hw_reset(struct bnx2x *bp, u8 port)
6815{ 6910{
6816 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, 6911 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
6817 MISC_REGISTERS_GPIO_OUTPUT_LOW, port); 6912 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
6818 msleep(1); 6913 usleep_range(1000, 2000);
6819 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, 6914 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
6820 MISC_REGISTERS_GPIO_OUTPUT_HIGH, port); 6915 MISC_REGISTERS_GPIO_OUTPUT_HIGH, port);
6821} 6916}
@@ -6912,7 +7007,7 @@ static int bnx2x_8073_8727_external_rom_boot(struct bnx2x *bp,
6912 MDIO_PMA_REG_GEN_CTRL, 7007 MDIO_PMA_REG_GEN_CTRL,
6913 0x0001); 7008 0x0001);
6914 7009
6915 /* ucode reboot and rst */ 7010 /* Ucode reboot and rst */
6916 bnx2x_cl45_write(bp, phy, 7011 bnx2x_cl45_write(bp, phy,
6917 MDIO_PMA_DEVAD, 7012 MDIO_PMA_DEVAD,
6918 MDIO_PMA_REG_GEN_CTRL, 7013 MDIO_PMA_REG_GEN_CTRL,
@@ -6956,7 +7051,7 @@ static int bnx2x_8073_8727_external_rom_boot(struct bnx2x *bp,
6956 MDIO_PMA_DEVAD, 7051 MDIO_PMA_DEVAD,
6957 MDIO_PMA_REG_M8051_MSGOUT_REG, &fw_msgout); 7052 MDIO_PMA_REG_M8051_MSGOUT_REG, &fw_msgout);
6958 7053
6959 msleep(1); 7054 usleep_range(1000, 2000);
6960 } while (fw_ver1 == 0 || fw_ver1 == 0x4321 || 7055 } while (fw_ver1 == 0 || fw_ver1 == 0x4321 ||
6961 ((fw_msgout & 0xff) != 0x03 && (phy->type == 7056 ((fw_msgout & 0xff) != 0x03 && (phy->type ==
6962 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))); 7057 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073)));
@@ -7050,11 +7145,11 @@ static int bnx2x_8073_xaui_wa(struct bnx2x *bp, struct bnx2x_phy *phy)
7050 "XAUI workaround has completed\n"); 7145 "XAUI workaround has completed\n");
7051 return 0; 7146 return 0;
7052 } 7147 }
7053 msleep(3); 7148 usleep_range(3000, 6000);
7054 } 7149 }
7055 break; 7150 break;
7056 } 7151 }
7057 msleep(3); 7152 usleep_range(3000, 6000);
7058 } 7153 }
7059 DP(NETIF_MSG_LINK, "Warning: XAUI work-around timeout !!!\n"); 7154 DP(NETIF_MSG_LINK, "Warning: XAUI work-around timeout !!!\n");
7060 return -EINVAL; 7155 return -EINVAL;
@@ -7128,7 +7223,7 @@ static int bnx2x_8073_config_init(struct bnx2x_phy *phy,
7128 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, 7223 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
7129 MISC_REGISTERS_GPIO_OUTPUT_HIGH, gpio_port); 7224 MISC_REGISTERS_GPIO_OUTPUT_HIGH, gpio_port);
7130 7225
7131 /* enable LASI */ 7226 /* Enable LASI */
7132 bnx2x_cl45_write(bp, phy, 7227 bnx2x_cl45_write(bp, phy,
7133 MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL, (1<<2)); 7228 MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL, (1<<2));
7134 bnx2x_cl45_write(bp, phy, 7229 bnx2x_cl45_write(bp, phy,
@@ -7276,7 +7371,7 @@ static u8 bnx2x_8073_read_status(struct bnx2x_phy *phy,
7276 7371
7277 DP(NETIF_MSG_LINK, "8703 LASI status 0x%x\n", val1); 7372 DP(NETIF_MSG_LINK, "8703 LASI status 0x%x\n", val1);
7278 7373
7279 /* clear the interrupt LASI status register */ 7374 /* Clear the interrupt LASI status register */
7280 bnx2x_cl45_read(bp, phy, 7375 bnx2x_cl45_read(bp, phy,
7281 MDIO_PCS_DEVAD, MDIO_PCS_REG_STATUS, &val2); 7376 MDIO_PCS_DEVAD, MDIO_PCS_REG_STATUS, &val2);
7282 bnx2x_cl45_read(bp, phy, 7377 bnx2x_cl45_read(bp, phy,
@@ -7601,7 +7696,7 @@ static int bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy,
7601 struct bnx2x *bp = params->bp; 7696 struct bnx2x *bp = params->bp;
7602 u16 val = 0; 7697 u16 val = 0;
7603 u16 i; 7698 u16 i;
7604 if (byte_cnt > 16) { 7699 if (byte_cnt > SFP_EEPROM_PAGE_SIZE) {
7605 DP(NETIF_MSG_LINK, 7700 DP(NETIF_MSG_LINK,
7606 "Reading from eeprom is limited to 0xf\n"); 7701 "Reading from eeprom is limited to 0xf\n");
7607 return -EINVAL; 7702 return -EINVAL;
@@ -7655,7 +7750,7 @@ static int bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy,
7655 if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) == 7750 if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
7656 MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE) 7751 MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE)
7657 return 0; 7752 return 0;
7658 msleep(1); 7753 usleep_range(1000, 2000);
7659 } 7754 }
7660 return -EINVAL; 7755 return -EINVAL;
7661} 7756}
@@ -7692,7 +7787,8 @@ static int bnx2x_warpcore_read_sfp_module_eeprom(struct bnx2x_phy *phy,
7692 u32 data_array[4]; 7787 u32 data_array[4];
7693 u16 addr32; 7788 u16 addr32;
7694 struct bnx2x *bp = params->bp; 7789 struct bnx2x *bp = params->bp;
7695 if (byte_cnt > 16) { 7790
7791 if (byte_cnt > SFP_EEPROM_PAGE_SIZE) {
7696 DP(NETIF_MSG_LINK, 7792 DP(NETIF_MSG_LINK,
7697 "Reading from eeprom is limited to 16 bytes\n"); 7793 "Reading from eeprom is limited to 16 bytes\n");
7698 return -EINVAL; 7794 return -EINVAL;
@@ -7728,7 +7824,7 @@ static int bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy,
7728 struct bnx2x *bp = params->bp; 7824 struct bnx2x *bp = params->bp;
7729 u16 val, i; 7825 u16 val, i;
7730 7826
7731 if (byte_cnt > 16) { 7827 if (byte_cnt > SFP_EEPROM_PAGE_SIZE) {
7732 DP(NETIF_MSG_LINK, 7828 DP(NETIF_MSG_LINK,
7733 "Reading from eeprom is limited to 0xf\n"); 7829 "Reading from eeprom is limited to 0xf\n");
7734 return -EINVAL; 7830 return -EINVAL;
@@ -7765,7 +7861,7 @@ static int bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy,
7765 /* Wait appropriate time for two-wire command to finish before 7861 /* Wait appropriate time for two-wire command to finish before
7766 * polling the status register 7862 * polling the status register
7767 */ 7863 */
7768 msleep(1); 7864 usleep_range(1000, 2000);
7769 7865
7770 /* Wait up to 500us for command complete status */ 7866 /* Wait up to 500us for command complete status */
7771 for (i = 0; i < 100; i++) { 7867 for (i = 0; i < 100; i++) {
@@ -7801,7 +7897,7 @@ static int bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy,
7801 if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) == 7897 if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
7802 MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE) 7898 MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE)
7803 return 0; 7899 return 0;
7804 msleep(1); 7900 usleep_range(1000, 2000);
7805 } 7901 }
7806 7902
7807 return -EINVAL; 7903 return -EINVAL;
@@ -7811,7 +7907,7 @@ int bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy,
7811 struct link_params *params, u16 addr, 7907 struct link_params *params, u16 addr,
7812 u8 byte_cnt, u8 *o_buf) 7908 u8 byte_cnt, u8 *o_buf)
7813{ 7909{
7814 int rc = -EINVAL; 7910 int rc = -EOPNOTSUPP;
7815 switch (phy->type) { 7911 switch (phy->type) {
7816 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726: 7912 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
7817 rc = bnx2x_8726_read_sfp_module_eeprom(phy, params, addr, 7913 rc = bnx2x_8726_read_sfp_module_eeprom(phy, params, addr,
@@ -7836,7 +7932,7 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
7836{ 7932{
7837 struct bnx2x *bp = params->bp; 7933 struct bnx2x *bp = params->bp;
7838 u32 sync_offset = 0, phy_idx, media_types; 7934 u32 sync_offset = 0, phy_idx, media_types;
7839 u8 val, check_limiting_mode = 0; 7935 u8 val[2], check_limiting_mode = 0;
7840 *edc_mode = EDC_MODE_LIMITING; 7936 *edc_mode = EDC_MODE_LIMITING;
7841 7937
7842 phy->media_type = ETH_PHY_UNSPECIFIED; 7938 phy->media_type = ETH_PHY_UNSPECIFIED;
@@ -7844,13 +7940,13 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
7844 if (bnx2x_read_sfp_module_eeprom(phy, 7940 if (bnx2x_read_sfp_module_eeprom(phy,
7845 params, 7941 params,
7846 SFP_EEPROM_CON_TYPE_ADDR, 7942 SFP_EEPROM_CON_TYPE_ADDR,
7847 1, 7943 2,
7848 &val) != 0) { 7944 (u8 *)val) != 0) {
7849 DP(NETIF_MSG_LINK, "Failed to read from SFP+ module EEPROM\n"); 7945 DP(NETIF_MSG_LINK, "Failed to read from SFP+ module EEPROM\n");
7850 return -EINVAL; 7946 return -EINVAL;
7851 } 7947 }
7852 7948
7853 switch (val) { 7949 switch (val[0]) {
7854 case SFP_EEPROM_CON_TYPE_VAL_COPPER: 7950 case SFP_EEPROM_CON_TYPE_VAL_COPPER:
7855 { 7951 {
7856 u8 copper_module_type; 7952 u8 copper_module_type;
@@ -7888,13 +7984,29 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
7888 break; 7984 break;
7889 } 7985 }
7890 case SFP_EEPROM_CON_TYPE_VAL_LC: 7986 case SFP_EEPROM_CON_TYPE_VAL_LC:
7891 phy->media_type = ETH_PHY_SFP_FIBER;
7892 DP(NETIF_MSG_LINK, "Optic module detected\n");
7893 check_limiting_mode = 1; 7987 check_limiting_mode = 1;
7988 if ((val[1] & (SFP_EEPROM_COMP_CODE_SR_MASK |
7989 SFP_EEPROM_COMP_CODE_LR_MASK |
7990 SFP_EEPROM_COMP_CODE_LRM_MASK)) == 0) {
7991 DP(NETIF_MSG_LINK, "1G Optic module detected\n");
7992 phy->media_type = ETH_PHY_SFP_1G_FIBER;
7993 phy->req_line_speed = SPEED_1000;
7994 } else {
7995 int idx, cfg_idx = 0;
7996 DP(NETIF_MSG_LINK, "10G Optic module detected\n");
7997 for (idx = INT_PHY; idx < MAX_PHYS; idx++) {
7998 if (params->phy[idx].type == phy->type) {
7999 cfg_idx = LINK_CONFIG_IDX(idx);
8000 break;
8001 }
8002 }
8003 phy->media_type = ETH_PHY_SFPP_10G_FIBER;
8004 phy->req_line_speed = params->req_line_speed[cfg_idx];
8005 }
7894 break; 8006 break;
7895 default: 8007 default:
7896 DP(NETIF_MSG_LINK, "Unable to determine module type 0x%x !!!\n", 8008 DP(NETIF_MSG_LINK, "Unable to determine module type 0x%x !!!\n",
7897 val); 8009 val[0]);
7898 return -EINVAL; 8010 return -EINVAL;
7899 } 8011 }
7900 sync_offset = params->shmem_base + 8012 sync_offset = params->shmem_base +
@@ -7980,7 +8092,7 @@ static int bnx2x_verify_sfp_module(struct bnx2x_phy *phy,
7980 return 0; 8092 return 0;
7981 } 8093 }
7982 8094
7983 /* format the warning message */ 8095 /* Format the warning message */
7984 if (bnx2x_read_sfp_module_eeprom(phy, 8096 if (bnx2x_read_sfp_module_eeprom(phy,
7985 params, 8097 params,
7986 SFP_EEPROM_VENDOR_NAME_ADDR, 8098 SFP_EEPROM_VENDOR_NAME_ADDR,
@@ -8026,7 +8138,7 @@ static int bnx2x_wait_for_sfp_module_initialized(struct bnx2x_phy *phy,
8026 timeout * 5); 8138 timeout * 5);
8027 return 0; 8139 return 0;
8028 } 8140 }
8029 msleep(5); 8141 usleep_range(5000, 10000);
8030 } 8142 }
8031 return -EINVAL; 8143 return -EINVAL;
8032} 8144}
@@ -8338,7 +8450,7 @@ int bnx2x_sfp_module_detection(struct bnx2x_phy *phy,
8338 DP(NETIF_MSG_LINK, "Failed to get valid module type\n"); 8450 DP(NETIF_MSG_LINK, "Failed to get valid module type\n");
8339 return -EINVAL; 8451 return -EINVAL;
8340 } else if (bnx2x_verify_sfp_module(phy, params) != 0) { 8452 } else if (bnx2x_verify_sfp_module(phy, params) != 0) {
8341 /* check SFP+ module compatibility */ 8453 /* Check SFP+ module compatibility */
8342 DP(NETIF_MSG_LINK, "Module verification failed!!\n"); 8454 DP(NETIF_MSG_LINK, "Module verification failed!!\n");
8343 rc = -EINVAL; 8455 rc = -EINVAL;
8344 /* Turn on fault module-detected led */ 8456 /* Turn on fault module-detected led */
@@ -8401,14 +8513,34 @@ void bnx2x_handle_module_detect_int(struct link_params *params)
8401 8513
8402 /* Call the handling function in case module is detected */ 8514 /* Call the handling function in case module is detected */
8403 if (gpio_val == 0) { 8515 if (gpio_val == 0) {
8516 bnx2x_set_mdio_clk(bp, params->chip_id, params->port);
8517 bnx2x_set_aer_mmd(params, phy);
8518
8404 bnx2x_power_sfp_module(params, phy, 1); 8519 bnx2x_power_sfp_module(params, phy, 1);
8405 bnx2x_set_gpio_int(bp, gpio_num, 8520 bnx2x_set_gpio_int(bp, gpio_num,
8406 MISC_REGISTERS_GPIO_INT_OUTPUT_CLR, 8521 MISC_REGISTERS_GPIO_INT_OUTPUT_CLR,
8407 gpio_port); 8522 gpio_port);
8408 if (bnx2x_wait_for_sfp_module_initialized(phy, params) == 0) 8523 if (bnx2x_wait_for_sfp_module_initialized(phy, params) == 0) {
8409 bnx2x_sfp_module_detection(phy, params); 8524 bnx2x_sfp_module_detection(phy, params);
8410 else 8525 if (CHIP_IS_E3(bp)) {
8526 u16 rx_tx_in_reset;
8527 /* In case WC is out of reset, reconfigure the
8528 * link speed while taking into account 1G
8529 * module limitation.
8530 */
8531 bnx2x_cl45_read(bp, phy,
8532 MDIO_WC_DEVAD,
8533 MDIO_WC_REG_DIGITAL5_MISC6,
8534 &rx_tx_in_reset);
8535 if (!rx_tx_in_reset) {
8536 bnx2x_warpcore_reset_lane(bp, phy, 1);
8537 bnx2x_warpcore_config_sfi(phy, params);
8538 bnx2x_warpcore_reset_lane(bp, phy, 0);
8539 }
8540 }
8541 } else {
8411 DP(NETIF_MSG_LINK, "SFP+ module is not initialized\n"); 8542 DP(NETIF_MSG_LINK, "SFP+ module is not initialized\n");
8543 }
8412 } else { 8544 } else {
8413 u32 val = REG_RD(bp, params->shmem_base + 8545 u32 val = REG_RD(bp, params->shmem_base +
8414 offsetof(struct shmem_region, dev_info. 8546 offsetof(struct shmem_region, dev_info.
@@ -8469,7 +8601,7 @@ static u8 bnx2x_8706_8726_read_status(struct bnx2x_phy *phy,
8469 bnx2x_sfp_mask_fault(bp, phy, MDIO_PMA_LASI_TXSTAT, 8601 bnx2x_sfp_mask_fault(bp, phy, MDIO_PMA_LASI_TXSTAT,
8470 MDIO_PMA_LASI_TXCTRL); 8602 MDIO_PMA_LASI_TXCTRL);
8471 8603
8472 /* clear LASI indication*/ 8604 /* Clear LASI indication*/
8473 bnx2x_cl45_read(bp, phy, 8605 bnx2x_cl45_read(bp, phy,
8474 MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val1); 8606 MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val1);
8475 bnx2x_cl45_read(bp, phy, 8607 bnx2x_cl45_read(bp, phy,
@@ -8537,7 +8669,7 @@ static u8 bnx2x_8706_config_init(struct bnx2x_phy *phy,
8537 MDIO_PMA_DEVAD, MDIO_PMA_REG_ROM_VER1, &val); 8669 MDIO_PMA_DEVAD, MDIO_PMA_REG_ROM_VER1, &val);
8538 if (val) 8670 if (val)
8539 break; 8671 break;
8540 msleep(10); 8672 usleep_range(10000, 20000);
8541 } 8673 }
8542 DP(NETIF_MSG_LINK, "XGXS 8706 is initialized after %d ms\n", cnt); 8674 DP(NETIF_MSG_LINK, "XGXS 8706 is initialized after %d ms\n", cnt);
8543 if ((params->feature_config_flags & 8675 if ((params->feature_config_flags &
@@ -8666,7 +8798,7 @@ static void bnx2x_8726_external_rom_boot(struct bnx2x_phy *phy,
8666 MDIO_PMA_REG_GEN_CTRL, 8798 MDIO_PMA_REG_GEN_CTRL,
8667 MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP); 8799 MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP);
8668 8800
8669 /* wait for 150ms for microcode load */ 8801 /* Wait for 150ms for microcode load */
8670 msleep(150); 8802 msleep(150);
8671 8803
8672 /* Disable serial boot control, tristates pins SS_N, SCK, MOSI, MISO */ 8804 /* Disable serial boot control, tristates pins SS_N, SCK, MOSI, MISO */
@@ -8860,6 +8992,63 @@ static void bnx2x_8727_hw_reset(struct bnx2x_phy *phy,
8860 MISC_REGISTERS_GPIO_OUTPUT_LOW, port); 8992 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
8861} 8993}
8862 8994
8995static void bnx2x_8727_config_speed(struct bnx2x_phy *phy,
8996 struct link_params *params)
8997{
8998 struct bnx2x *bp = params->bp;
8999 u16 tmp1, val;
9000 /* Set option 1G speed */
9001 if ((phy->req_line_speed == SPEED_1000) ||
9002 (phy->media_type == ETH_PHY_SFP_1G_FIBER)) {
9003 DP(NETIF_MSG_LINK, "Setting 1G force\n");
9004 bnx2x_cl45_write(bp, phy,
9005 MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x40);
9006 bnx2x_cl45_write(bp, phy,
9007 MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, 0xD);
9008 bnx2x_cl45_read(bp, phy,
9009 MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, &tmp1);
9010 DP(NETIF_MSG_LINK, "1.7 = 0x%x\n", tmp1);
9011 /* Power down the XAUI until link is up in case of dual-media
9012 * and 1G
9013 */
9014 if (DUAL_MEDIA(params)) {
9015 bnx2x_cl45_read(bp, phy,
9016 MDIO_PMA_DEVAD,
9017 MDIO_PMA_REG_8727_PCS_GP, &val);
9018 val |= (3<<10);
9019 bnx2x_cl45_write(bp, phy,
9020 MDIO_PMA_DEVAD,
9021 MDIO_PMA_REG_8727_PCS_GP, val);
9022 }
9023 } else if ((phy->req_line_speed == SPEED_AUTO_NEG) &&
9024 ((phy->speed_cap_mask &
9025 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) &&
9026 ((phy->speed_cap_mask &
9027 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) !=
9028 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) {
9029
9030 DP(NETIF_MSG_LINK, "Setting 1G clause37\n");
9031 bnx2x_cl45_write(bp, phy,
9032 MDIO_AN_DEVAD, MDIO_AN_REG_8727_MISC_CTRL, 0);
9033 bnx2x_cl45_write(bp, phy,
9034 MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1300);
9035 } else {
9036 /* Since the 8727 has only single reset pin, need to set the 10G
9037 * registers although it is default
9038 */
9039 bnx2x_cl45_write(bp, phy,
9040 MDIO_AN_DEVAD, MDIO_AN_REG_8727_MISC_CTRL,
9041 0x0020);
9042 bnx2x_cl45_write(bp, phy,
9043 MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x0100);
9044 bnx2x_cl45_write(bp, phy,
9045 MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x2040);
9046 bnx2x_cl45_write(bp, phy,
9047 MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2,
9048 0x0008);
9049 }
9050}
9051
8863static int bnx2x_8727_config_init(struct bnx2x_phy *phy, 9052static int bnx2x_8727_config_init(struct bnx2x_phy *phy,
8864 struct link_params *params, 9053 struct link_params *params,
8865 struct link_vars *vars) 9054 struct link_vars *vars)
@@ -8877,7 +9066,7 @@ static int bnx2x_8727_config_init(struct bnx2x_phy *phy,
8877 lasi_ctrl_val = 0x0006; 9066 lasi_ctrl_val = 0x0006;
8878 9067
8879 DP(NETIF_MSG_LINK, "Initializing BCM8727\n"); 9068 DP(NETIF_MSG_LINK, "Initializing BCM8727\n");
8880 /* enable LASI */ 9069 /* Enable LASI */
8881 bnx2x_cl45_write(bp, phy, 9070 bnx2x_cl45_write(bp, phy,
8882 MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL, 9071 MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL,
8883 rx_alarm_ctrl_val); 9072 rx_alarm_ctrl_val);
@@ -8929,56 +9118,7 @@ static int bnx2x_8727_config_init(struct bnx2x_phy *phy,
8929 bnx2x_cl45_read(bp, phy, 9118 bnx2x_cl45_read(bp, phy,
8930 MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT, &tmp1); 9119 MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT, &tmp1);
8931 9120
8932 /* Set option 1G speed */ 9121 bnx2x_8727_config_speed(phy, params);
8933 if (phy->req_line_speed == SPEED_1000) {
8934 DP(NETIF_MSG_LINK, "Setting 1G force\n");
8935 bnx2x_cl45_write(bp, phy,
8936 MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x40);
8937 bnx2x_cl45_write(bp, phy,
8938 MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, 0xD);
8939 bnx2x_cl45_read(bp, phy,
8940 MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, &tmp1);
8941 DP(NETIF_MSG_LINK, "1.7 = 0x%x\n", tmp1);
8942 /* Power down the XAUI until link is up in case of dual-media
8943 * and 1G
8944 */
8945 if (DUAL_MEDIA(params)) {
8946 bnx2x_cl45_read(bp, phy,
8947 MDIO_PMA_DEVAD,
8948 MDIO_PMA_REG_8727_PCS_GP, &val);
8949 val |= (3<<10);
8950 bnx2x_cl45_write(bp, phy,
8951 MDIO_PMA_DEVAD,
8952 MDIO_PMA_REG_8727_PCS_GP, val);
8953 }
8954 } else if ((phy->req_line_speed == SPEED_AUTO_NEG) &&
8955 ((phy->speed_cap_mask &
8956 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) &&
8957 ((phy->speed_cap_mask &
8958 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) !=
8959 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) {
8960
8961 DP(NETIF_MSG_LINK, "Setting 1G clause37\n");
8962 bnx2x_cl45_write(bp, phy,
8963 MDIO_AN_DEVAD, MDIO_AN_REG_8727_MISC_CTRL, 0);
8964 bnx2x_cl45_write(bp, phy,
8965 MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1300);
8966 } else {
8967 /* Since the 8727 has only single reset pin, need to set the 10G
8968 * registers although it is default
8969 */
8970 bnx2x_cl45_write(bp, phy,
8971 MDIO_AN_DEVAD, MDIO_AN_REG_8727_MISC_CTRL,
8972 0x0020);
8973 bnx2x_cl45_write(bp, phy,
8974 MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x0100);
8975 bnx2x_cl45_write(bp, phy,
8976 MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x2040);
8977 bnx2x_cl45_write(bp, phy,
8978 MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2,
8979 0x0008);
8980 }
8981
8982 /* Set 2-wire transfer rate of SFP+ module EEPROM 9122 /* Set 2-wire transfer rate of SFP+ module EEPROM
8983 * to 100Khz since some DACs(direct attached cables) do 9123 * to 100Khz since some DACs(direct attached cables) do
8984 * not work at 400Khz. 9124 * not work at 400Khz.
@@ -9105,6 +9245,9 @@ static void bnx2x_8727_handle_mod_abs(struct bnx2x_phy *phy,
9105 bnx2x_sfp_module_detection(phy, params); 9245 bnx2x_sfp_module_detection(phy, params);
9106 else 9246 else
9107 DP(NETIF_MSG_LINK, "SFP+ module is not initialized\n"); 9247 DP(NETIF_MSG_LINK, "SFP+ module is not initialized\n");
9248
9249 /* Reconfigure link speed based on module type limitations */
9250 bnx2x_8727_config_speed(phy, params);
9108 } 9251 }
9109 9252
9110 DP(NETIF_MSG_LINK, "8727 RX_ALARM_STATUS 0x%x\n", 9253 DP(NETIF_MSG_LINK, "8727 RX_ALARM_STATUS 0x%x\n",
@@ -9585,9 +9728,9 @@ static int bnx2x_8481_config_init(struct bnx2x_phy *phy,
9585static int bnx2x_84833_cmd_hdlr(struct bnx2x_phy *phy, 9728static int bnx2x_84833_cmd_hdlr(struct bnx2x_phy *phy,
9586 struct link_params *params, 9729 struct link_params *params,
9587 u16 fw_cmd, 9730 u16 fw_cmd,
9588 u16 cmd_args[]) 9731 u16 cmd_args[], int argc)
9589{ 9732{
9590 u32 idx; 9733 int idx;
9591 u16 val; 9734 u16 val;
9592 struct bnx2x *bp = params->bp; 9735 struct bnx2x *bp = params->bp;
9593 /* Write CMD_OPEN_OVERRIDE to STATUS reg */ 9736 /* Write CMD_OPEN_OVERRIDE to STATUS reg */
@@ -9599,7 +9742,7 @@ static int bnx2x_84833_cmd_hdlr(struct bnx2x_phy *phy,
9599 MDIO_84833_CMD_HDLR_STATUS, &val); 9742 MDIO_84833_CMD_HDLR_STATUS, &val);
9600 if (val == PHY84833_STATUS_CMD_OPEN_FOR_CMDS) 9743 if (val == PHY84833_STATUS_CMD_OPEN_FOR_CMDS)
9601 break; 9744 break;
9602 msleep(1); 9745 usleep_range(1000, 2000);
9603 } 9746 }
9604 if (idx >= PHY84833_CMDHDLR_WAIT) { 9747 if (idx >= PHY84833_CMDHDLR_WAIT) {
9605 DP(NETIF_MSG_LINK, "FW cmd: FW not ready.\n"); 9748 DP(NETIF_MSG_LINK, "FW cmd: FW not ready.\n");
@@ -9607,7 +9750,7 @@ static int bnx2x_84833_cmd_hdlr(struct bnx2x_phy *phy,
9607 } 9750 }
9608 9751
9609 /* Prepare argument(s) and issue command */ 9752 /* Prepare argument(s) and issue command */
9610 for (idx = 0; idx < PHY84833_CMDHDLR_MAX_ARGS; idx++) { 9753 for (idx = 0; idx < argc; idx++) {
9611 bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, 9754 bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
9612 MDIO_84833_CMD_HDLR_DATA1 + idx, 9755 MDIO_84833_CMD_HDLR_DATA1 + idx,
9613 cmd_args[idx]); 9756 cmd_args[idx]);
@@ -9620,7 +9763,7 @@ static int bnx2x_84833_cmd_hdlr(struct bnx2x_phy *phy,
9620 if ((val == PHY84833_STATUS_CMD_COMPLETE_PASS) || 9763 if ((val == PHY84833_STATUS_CMD_COMPLETE_PASS) ||
9621 (val == PHY84833_STATUS_CMD_COMPLETE_ERROR)) 9764 (val == PHY84833_STATUS_CMD_COMPLETE_ERROR))
9622 break; 9765 break;
9623 msleep(1); 9766 usleep_range(1000, 2000);
9624 } 9767 }
9625 if ((idx >= PHY84833_CMDHDLR_WAIT) || 9768 if ((idx >= PHY84833_CMDHDLR_WAIT) ||
9626 (val == PHY84833_STATUS_CMD_COMPLETE_ERROR)) { 9769 (val == PHY84833_STATUS_CMD_COMPLETE_ERROR)) {
@@ -9628,7 +9771,7 @@ static int bnx2x_84833_cmd_hdlr(struct bnx2x_phy *phy,
9628 return -EINVAL; 9771 return -EINVAL;
9629 } 9772 }
9630 /* Gather returning data */ 9773 /* Gather returning data */
9631 for (idx = 0; idx < PHY84833_CMDHDLR_MAX_ARGS; idx++) { 9774 for (idx = 0; idx < argc; idx++) {
9632 bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, 9775 bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
9633 MDIO_84833_CMD_HDLR_DATA1 + idx, 9776 MDIO_84833_CMD_HDLR_DATA1 + idx,
9634 &cmd_args[idx]); 9777 &cmd_args[idx]);
@@ -9662,7 +9805,7 @@ static int bnx2x_84833_pair_swap_cfg(struct bnx2x_phy *phy,
9662 data[1] = (u16)pair_swap; 9805 data[1] = (u16)pair_swap;
9663 9806
9664 status = bnx2x_84833_cmd_hdlr(phy, params, 9807 status = bnx2x_84833_cmd_hdlr(phy, params,
9665 PHY84833_CMD_SET_PAIR_SWAP, data); 9808 PHY84833_CMD_SET_PAIR_SWAP, data, PHY84833_CMDHDLR_MAX_ARGS);
9666 if (status == 0) 9809 if (status == 0)
9667 DP(NETIF_MSG_LINK, "Pairswap OK, val=0x%x\n", data[1]); 9810 DP(NETIF_MSG_LINK, "Pairswap OK, val=0x%x\n", data[1]);
9668 9811
@@ -9740,6 +9883,95 @@ static int bnx2x_84833_hw_reset_phy(struct bnx2x_phy *phy,
9740 return 0; 9883 return 0;
9741} 9884}
9742 9885
9886static int bnx2x_8483x_eee_timers(struct link_params *params,
9887 struct link_vars *vars)
9888{
9889 u32 eee_idle = 0, eee_mode;
9890 struct bnx2x *bp = params->bp;
9891
9892 eee_idle = bnx2x_eee_calc_timer(params);
9893
9894 if (eee_idle) {
9895 REG_WR(bp, MISC_REG_CPMU_LP_IDLE_THR_P0 + (params->port << 2),
9896 eee_idle);
9897 } else if ((params->eee_mode & EEE_MODE_ENABLE_LPI) &&
9898 (params->eee_mode & EEE_MODE_OVERRIDE_NVRAM) &&
9899 (params->eee_mode & EEE_MODE_OUTPUT_TIME)) {
9900 DP(NETIF_MSG_LINK, "Error: Tx LPI is enabled with timer 0\n");
9901 return -EINVAL;
9902 }
9903
9904 vars->eee_status &= ~(SHMEM_EEE_TIMER_MASK | SHMEM_EEE_TIME_OUTPUT_BIT);
9905 if (params->eee_mode & EEE_MODE_OUTPUT_TIME) {
9906 /* eee_idle in 1u --> eee_status in 16u */
9907 eee_idle >>= 4;
9908 vars->eee_status |= (eee_idle & SHMEM_EEE_TIMER_MASK) |
9909 SHMEM_EEE_TIME_OUTPUT_BIT;
9910 } else {
9911 if (bnx2x_eee_time_to_nvram(eee_idle, &eee_mode))
9912 return -EINVAL;
9913 vars->eee_status |= eee_mode;
9914 }
9915
9916 return 0;
9917}
9918
9919static int bnx2x_8483x_disable_eee(struct bnx2x_phy *phy,
9920 struct link_params *params,
9921 struct link_vars *vars)
9922{
9923 int rc;
9924 struct bnx2x *bp = params->bp;
9925 u16 cmd_args = 0;
9926
9927 DP(NETIF_MSG_LINK, "Don't Advertise 10GBase-T EEE\n");
9928
9929 /* Make Certain LPI is disabled */
9930 REG_WR(bp, MISC_REG_CPMU_LP_FW_ENABLE_P0 + (params->port << 2), 0);
9931 REG_WR(bp, MISC_REG_CPMU_LP_DR_ENABLE, 0);
9932
9933 /* Prevent Phy from working in EEE and advertising it */
9934 rc = bnx2x_84833_cmd_hdlr(phy, params,
9935 PHY84833_CMD_SET_EEE_MODE, &cmd_args, 1);
9936 if (rc) {
9937 DP(NETIF_MSG_LINK, "EEE disable failed.\n");
9938 return rc;
9939 }
9940
9941 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_EEE_ADV, 0);
9942 vars->eee_status &= ~SHMEM_EEE_ADV_STATUS_MASK;
9943
9944 return 0;
9945}
9946
9947static int bnx2x_8483x_enable_eee(struct bnx2x_phy *phy,
9948 struct link_params *params,
9949 struct link_vars *vars)
9950{
9951 int rc;
9952 struct bnx2x *bp = params->bp;
9953 u16 cmd_args = 1;
9954
9955 DP(NETIF_MSG_LINK, "Advertise 10GBase-T EEE\n");
9956
9957 rc = bnx2x_84833_cmd_hdlr(phy, params,
9958 PHY84833_CMD_SET_EEE_MODE, &cmd_args, 1);
9959 if (rc) {
9960 DP(NETIF_MSG_LINK, "EEE enable failed.\n");
9961 return rc;
9962 }
9963
9964 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_EEE_ADV, 0x8);
9965
9966 /* Mask events preventing LPI generation */
9967 REG_WR(bp, MISC_REG_CPMU_LP_MASK_EXT_P0 + (params->port << 2), 0xfc20);
9968
9969 vars->eee_status &= ~SHMEM_EEE_ADV_STATUS_MASK;
9970 vars->eee_status |= (SHMEM_EEE_10G_ADV << SHMEM_EEE_ADV_STATUS_SHIFT);
9971
9972 return 0;
9973}
9974
9743#define PHY84833_CONSTANT_LATENCY 1193 9975#define PHY84833_CONSTANT_LATENCY 1193
9744static int bnx2x_848x3_config_init(struct bnx2x_phy *phy, 9976static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
9745 struct link_params *params, 9977 struct link_params *params,
@@ -9752,7 +9984,7 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
9752 u16 cmd_args[PHY84833_CMDHDLR_MAX_ARGS]; 9984 u16 cmd_args[PHY84833_CMDHDLR_MAX_ARGS];
9753 int rc = 0; 9985 int rc = 0;
9754 9986
9755 msleep(1); 9987 usleep_range(1000, 2000);
9756 9988
9757 if (!(CHIP_IS_E1x(bp))) 9989 if (!(CHIP_IS_E1x(bp)))
9758 port = BP_PATH(bp); 9990 port = BP_PATH(bp);
@@ -9839,8 +10071,9 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
9839 cmd_args[2] = PHY84833_CONSTANT_LATENCY + 1; 10071 cmd_args[2] = PHY84833_CONSTANT_LATENCY + 1;
9840 cmd_args[3] = PHY84833_CONSTANT_LATENCY; 10072 cmd_args[3] = PHY84833_CONSTANT_LATENCY;
9841 rc = bnx2x_84833_cmd_hdlr(phy, params, 10073 rc = bnx2x_84833_cmd_hdlr(phy, params,
9842 PHY84833_CMD_SET_EEE_MODE, cmd_args); 10074 PHY84833_CMD_SET_EEE_MODE, cmd_args,
9843 if (rc != 0) 10075 PHY84833_CMDHDLR_MAX_ARGS);
10076 if (rc)
9844 DP(NETIF_MSG_LINK, "Cfg AutogrEEEn failed.\n"); 10077 DP(NETIF_MSG_LINK, "Cfg AutogrEEEn failed.\n");
9845 } 10078 }
9846 if (initialize) 10079 if (initialize)
@@ -9864,6 +10097,48 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
9864 MDIO_CTL_REG_84823_USER_CTRL_REG, val); 10097 MDIO_CTL_REG_84823_USER_CTRL_REG, val);
9865 } 10098 }
9866 10099
10100 bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
10101 MDIO_84833_TOP_CFG_FW_REV, &val);
10102
10103 /* Configure EEE support */
10104 if ((val >= MDIO_84833_TOP_CFG_FW_EEE) && bnx2x_eee_has_cap(params)) {
10105 phy->flags |= FLAGS_EEE_10GBT;
10106 vars->eee_status |= SHMEM_EEE_10G_ADV <<
10107 SHMEM_EEE_SUPPORTED_SHIFT;
10108 /* Propogate params' bits --> vars (for migration exposure) */
10109 if (params->eee_mode & EEE_MODE_ENABLE_LPI)
10110 vars->eee_status |= SHMEM_EEE_LPI_REQUESTED_BIT;
10111 else
10112 vars->eee_status &= ~SHMEM_EEE_LPI_REQUESTED_BIT;
10113
10114 if (params->eee_mode & EEE_MODE_ADV_LPI)
10115 vars->eee_status |= SHMEM_EEE_REQUESTED_BIT;
10116 else
10117 vars->eee_status &= ~SHMEM_EEE_REQUESTED_BIT;
10118
10119 rc = bnx2x_8483x_eee_timers(params, vars);
10120 if (rc) {
10121 DP(NETIF_MSG_LINK, "Failed to configure EEE timers\n");
10122 bnx2x_8483x_disable_eee(phy, params, vars);
10123 return rc;
10124 }
10125
10126 if ((params->req_duplex[actual_phy_selection] == DUPLEX_FULL) &&
10127 (params->eee_mode & EEE_MODE_ADV_LPI) &&
10128 (bnx2x_eee_calc_timer(params) ||
10129 !(params->eee_mode & EEE_MODE_ENABLE_LPI)))
10130 rc = bnx2x_8483x_enable_eee(phy, params, vars);
10131 else
10132 rc = bnx2x_8483x_disable_eee(phy, params, vars);
10133 if (rc) {
10134 DP(NETIF_MSG_LINK, "Failed to set EEE advertisment\n");
10135 return rc;
10136 }
10137 } else {
10138 phy->flags &= ~FLAGS_EEE_10GBT;
10139 vars->eee_status &= ~SHMEM_EEE_SUPPORTED_MASK;
10140 }
10141
9867 if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) { 10142 if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) {
9868 /* Bring PHY out of super isolate mode as the final step. */ 10143 /* Bring PHY out of super isolate mode as the final step. */
9869 bnx2x_cl45_read(bp, phy, 10144 bnx2x_cl45_read(bp, phy,
@@ -9918,17 +10193,19 @@ static u8 bnx2x_848xx_read_status(struct bnx2x_phy *phy,
9918 DP(NETIF_MSG_LINK, "Legacy speed status = 0x%x\n", 10193 DP(NETIF_MSG_LINK, "Legacy speed status = 0x%x\n",
9919 legacy_status); 10194 legacy_status);
9920 link_up = ((legacy_status & (1<<11)) == (1<<11)); 10195 link_up = ((legacy_status & (1<<11)) == (1<<11));
9921 if (link_up) { 10196 legacy_speed = (legacy_status & (3<<9));
9922 legacy_speed = (legacy_status & (3<<9)); 10197 if (legacy_speed == (0<<9))
9923 if (legacy_speed == (0<<9)) 10198 vars->line_speed = SPEED_10;
9924 vars->line_speed = SPEED_10; 10199 else if (legacy_speed == (1<<9))
9925 else if (legacy_speed == (1<<9)) 10200 vars->line_speed = SPEED_100;
9926 vars->line_speed = SPEED_100; 10201 else if (legacy_speed == (2<<9))
9927 else if (legacy_speed == (2<<9)) 10202 vars->line_speed = SPEED_1000;
9928 vars->line_speed = SPEED_1000; 10203 else { /* Should not happen: Treat as link down */
9929 else /* Should not happen */ 10204 vars->line_speed = 0;
9930 vars->line_speed = 0; 10205 link_up = 0;
10206 }
9931 10207
10208 if (link_up) {
9932 if (legacy_status & (1<<8)) 10209 if (legacy_status & (1<<8))
9933 vars->duplex = DUPLEX_FULL; 10210 vars->duplex = DUPLEX_FULL;
9934 else 10211 else
@@ -9956,7 +10233,7 @@ static u8 bnx2x_848xx_read_status(struct bnx2x_phy *phy,
9956 } 10233 }
9957 } 10234 }
9958 if (link_up) { 10235 if (link_up) {
9959 DP(NETIF_MSG_LINK, "BCM84823: link speed is %d\n", 10236 DP(NETIF_MSG_LINK, "BCM848x3: link speed is %d\n",
9960 vars->line_speed); 10237 vars->line_speed);
9961 bnx2x_ext_phy_resolve_fc(phy, params, vars); 10238 bnx2x_ext_phy_resolve_fc(phy, params, vars);
9962 10239
@@ -9995,6 +10272,31 @@ static u8 bnx2x_848xx_read_status(struct bnx2x_phy *phy,
9995 if (val & (1<<11)) 10272 if (val & (1<<11))
9996 vars->link_status |= 10273 vars->link_status |=
9997 LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE; 10274 LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE;
10275
10276 /* Determine if EEE was negotiated */
10277 if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) {
10278 u32 eee_shmem = 0;
10279
10280 bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
10281 MDIO_AN_REG_EEE_ADV, &val1);
10282 bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
10283 MDIO_AN_REG_LP_EEE_ADV, &val2);
10284 if ((val1 & val2) & 0x8) {
10285 DP(NETIF_MSG_LINK, "EEE negotiated\n");
10286 vars->eee_status |= SHMEM_EEE_ACTIVE_BIT;
10287 }
10288
10289 if (val2 & 0x12)
10290 eee_shmem |= SHMEM_EEE_100M_ADV;
10291 if (val2 & 0x4)
10292 eee_shmem |= SHMEM_EEE_1G_ADV;
10293 if (val2 & 0x68)
10294 eee_shmem |= SHMEM_EEE_10G_ADV;
10295
10296 vars->eee_status &= ~SHMEM_EEE_LP_ADV_STATUS_MASK;
10297 vars->eee_status |= (eee_shmem <<
10298 SHMEM_EEE_LP_ADV_STATUS_SHIFT);
10299 }
9998 } 10300 }
9999 10301
10000 return link_up; 10302 return link_up;
@@ -10273,7 +10575,7 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy,
10273 u32 cfg_pin; 10575 u32 cfg_pin;
10274 10576
10275 DP(NETIF_MSG_LINK, "54618SE cfg init\n"); 10577 DP(NETIF_MSG_LINK, "54618SE cfg init\n");
10276 usleep_range(1000, 1000); 10578 usleep_range(1000, 2000);
10277 10579
10278 /* This works with E3 only, no need to check the chip 10580 /* This works with E3 only, no need to check the chip
10279 * before determining the port. 10581 * before determining the port.
@@ -10342,7 +10644,7 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy,
10342 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) 10644 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH)
10343 fc_val |= MDIO_AN_REG_ADV_PAUSE_PAUSE; 10645 fc_val |= MDIO_AN_REG_ADV_PAUSE_PAUSE;
10344 10646
10345 /* read all advertisement */ 10647 /* Read all advertisement */
10346 bnx2x_cl22_read(bp, phy, 10648 bnx2x_cl22_read(bp, phy,
10347 0x09, 10649 0x09,
10348 &an_1000_val); 10650 &an_1000_val);
@@ -10379,7 +10681,7 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy,
10379 0x09, 10681 0x09,
10380 &an_1000_val); 10682 &an_1000_val);
10381 10683
10382 /* set 100 speed advertisement */ 10684 /* Set 100 speed advertisement */
10383 if (((phy->req_line_speed == SPEED_AUTO_NEG) && 10685 if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
10384 (phy->speed_cap_mask & 10686 (phy->speed_cap_mask &
10385 (PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL | 10687 (PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL |
@@ -10393,7 +10695,7 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy,
10393 DP(NETIF_MSG_LINK, "Advertising 100M\n"); 10695 DP(NETIF_MSG_LINK, "Advertising 100M\n");
10394 } 10696 }
10395 10697
10396 /* set 10 speed advertisement */ 10698 /* Set 10 speed advertisement */
10397 if (((phy->req_line_speed == SPEED_AUTO_NEG) && 10699 if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
10398 (phy->speed_cap_mask & 10700 (phy->speed_cap_mask &
10399 (PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL | 10701 (PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL |
@@ -10532,7 +10834,7 @@ static u8 bnx2x_54618se_read_status(struct bnx2x_phy *phy,
10532 10834
10533 /* Get speed operation status */ 10835 /* Get speed operation status */
10534 bnx2x_cl22_read(bp, phy, 10836 bnx2x_cl22_read(bp, phy,
10535 0x19, 10837 MDIO_REG_GPHY_AUX_STATUS,
10536 &legacy_status); 10838 &legacy_status);
10537 DP(NETIF_MSG_LINK, "54618SE read_status: 0x%x\n", legacy_status); 10839 DP(NETIF_MSG_LINK, "54618SE read_status: 0x%x\n", legacy_status);
10538 10840
@@ -10759,7 +11061,7 @@ static u8 bnx2x_7101_read_status(struct bnx2x_phy *phy,
10759 DP(NETIF_MSG_LINK, "10G-base-T PMA status 0x%x->0x%x\n", 11061 DP(NETIF_MSG_LINK, "10G-base-T PMA status 0x%x->0x%x\n",
10760 val2, val1); 11062 val2, val1);
10761 link_up = ((val1 & 4) == 4); 11063 link_up = ((val1 & 4) == 4);
10762 /* if link is up print the AN outcome of the SFX7101 PHY */ 11064 /* If link is up print the AN outcome of the SFX7101 PHY */
10763 if (link_up) { 11065 if (link_up) {
10764 bnx2x_cl45_read(bp, phy, 11066 bnx2x_cl45_read(bp, phy,
10765 MDIO_AN_DEVAD, MDIO_AN_REG_MASTER_STATUS, 11067 MDIO_AN_DEVAD, MDIO_AN_REG_MASTER_STATUS,
@@ -10771,7 +11073,7 @@ static u8 bnx2x_7101_read_status(struct bnx2x_phy *phy,
10771 bnx2x_ext_phy_10G_an_resolve(bp, phy, vars); 11073 bnx2x_ext_phy_10G_an_resolve(bp, phy, vars);
10772 bnx2x_ext_phy_resolve_fc(phy, params, vars); 11074 bnx2x_ext_phy_resolve_fc(phy, params, vars);
10773 11075
10774 /* read LP advertised speeds */ 11076 /* Read LP advertised speeds */
10775 if (val2 & (1<<11)) 11077 if (val2 & (1<<11))
10776 vars->link_status |= 11078 vars->link_status |=
10777 LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE; 11079 LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE;
@@ -11090,7 +11392,7 @@ static struct bnx2x_phy phy_8706 = {
11090 SUPPORTED_FIBRE | 11392 SUPPORTED_FIBRE |
11091 SUPPORTED_Pause | 11393 SUPPORTED_Pause |
11092 SUPPORTED_Asym_Pause), 11394 SUPPORTED_Asym_Pause),
11093 .media_type = ETH_PHY_SFP_FIBER, 11395 .media_type = ETH_PHY_SFPP_10G_FIBER,
11094 .ver_addr = 0, 11396 .ver_addr = 0,
11095 .req_flow_ctrl = 0, 11397 .req_flow_ctrl = 0,
11096 .req_line_speed = 0, 11398 .req_line_speed = 0,
@@ -11249,7 +11551,8 @@ static struct bnx2x_phy phy_84833 = {
11249 .def_md_devad = 0, 11551 .def_md_devad = 0,
11250 .flags = (FLAGS_FAN_FAILURE_DET_REQ | 11552 .flags = (FLAGS_FAN_FAILURE_DET_REQ |
11251 FLAGS_REARM_LATCH_SIGNAL | 11553 FLAGS_REARM_LATCH_SIGNAL |
11252 FLAGS_TX_ERROR_CHECK), 11554 FLAGS_TX_ERROR_CHECK |
11555 FLAGS_EEE_10GBT),
11253 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, 11556 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
11254 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, 11557 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
11255 .mdio_ctrl = 0, 11558 .mdio_ctrl = 0,
@@ -11428,7 +11731,7 @@ static int bnx2x_populate_int_phy(struct bnx2x *bp, u32 shmem_base, u8 port,
11428 SUPPORTED_FIBRE | 11731 SUPPORTED_FIBRE |
11429 SUPPORTED_Pause | 11732 SUPPORTED_Pause |
11430 SUPPORTED_Asym_Pause); 11733 SUPPORTED_Asym_Pause);
11431 phy->media_type = ETH_PHY_SFP_FIBER; 11734 phy->media_type = ETH_PHY_SFPP_10G_FIBER;
11432 break; 11735 break;
11433 case PORT_HW_CFG_NET_SERDES_IF_KR: 11736 case PORT_HW_CFG_NET_SERDES_IF_KR:
11434 phy->media_type = ETH_PHY_KR; 11737 phy->media_type = ETH_PHY_KR;
@@ -11968,7 +12271,7 @@ int bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
11968 vars->mac_type = MAC_TYPE_NONE; 12271 vars->mac_type = MAC_TYPE_NONE;
11969 vars->phy_flags = 0; 12272 vars->phy_flags = 0;
11970 12273
11971 /* disable attentions */ 12274 /* Disable attentions */
11972 bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + params->port*4, 12275 bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + params->port*4,
11973 (NIG_MASK_XGXS0_LINK_STATUS | 12276 (NIG_MASK_XGXS0_LINK_STATUS |
11974 NIG_MASK_XGXS0_LINK10G | 12277 NIG_MASK_XGXS0_LINK10G |
@@ -12017,6 +12320,8 @@ int bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
12017 break; 12320 break;
12018 } 12321 }
12019 bnx2x_update_mng(params, vars->link_status); 12322 bnx2x_update_mng(params, vars->link_status);
12323
12324 bnx2x_update_mng_eee(params, vars->eee_status);
12020 return 0; 12325 return 0;
12021} 12326}
12022 12327
@@ -12026,19 +12331,22 @@ int bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
12026 struct bnx2x *bp = params->bp; 12331 struct bnx2x *bp = params->bp;
12027 u8 phy_index, port = params->port, clear_latch_ind = 0; 12332 u8 phy_index, port = params->port, clear_latch_ind = 0;
12028 DP(NETIF_MSG_LINK, "Resetting the link of port %d\n", port); 12333 DP(NETIF_MSG_LINK, "Resetting the link of port %d\n", port);
12029 /* disable attentions */ 12334 /* Disable attentions */
12030 vars->link_status = 0; 12335 vars->link_status = 0;
12031 bnx2x_update_mng(params, vars->link_status); 12336 bnx2x_update_mng(params, vars->link_status);
12337 vars->eee_status &= ~(SHMEM_EEE_LP_ADV_STATUS_MASK |
12338 SHMEM_EEE_ACTIVE_BIT);
12339 bnx2x_update_mng_eee(params, vars->eee_status);
12032 bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 12340 bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
12033 (NIG_MASK_XGXS0_LINK_STATUS | 12341 (NIG_MASK_XGXS0_LINK_STATUS |
12034 NIG_MASK_XGXS0_LINK10G | 12342 NIG_MASK_XGXS0_LINK10G |
12035 NIG_MASK_SERDES0_LINK_STATUS | 12343 NIG_MASK_SERDES0_LINK_STATUS |
12036 NIG_MASK_MI_INT)); 12344 NIG_MASK_MI_INT));
12037 12345
12038 /* activate nig drain */ 12346 /* Activate nig drain */
12039 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1); 12347 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1);
12040 12348
12041 /* disable nig egress interface */ 12349 /* Disable nig egress interface */
12042 if (!CHIP_IS_E3(bp)) { 12350 if (!CHIP_IS_E3(bp)) {
12043 REG_WR(bp, NIG_REG_BMAC0_OUT_EN + port*4, 0); 12351 REG_WR(bp, NIG_REG_BMAC0_OUT_EN + port*4, 0);
12044 REG_WR(bp, NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0); 12352 REG_WR(bp, NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0);
@@ -12051,15 +12359,15 @@ int bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
12051 bnx2x_xmac_disable(params); 12359 bnx2x_xmac_disable(params);
12052 bnx2x_umac_disable(params); 12360 bnx2x_umac_disable(params);
12053 } 12361 }
12054 /* disable emac */ 12362 /* Disable emac */
12055 if (!CHIP_IS_E3(bp)) 12363 if (!CHIP_IS_E3(bp))
12056 REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0); 12364 REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0);
12057 12365
12058 msleep(10); 12366 usleep_range(10000, 20000);
12059 /* The PHY reset is controlled by GPIO 1 12367 /* The PHY reset is controlled by GPIO 1
12060 * Hold it as vars low 12368 * Hold it as vars low
12061 */ 12369 */
12062 /* clear link led */ 12370 /* Clear link led */
12063 bnx2x_set_mdio_clk(bp, params->chip_id, port); 12371 bnx2x_set_mdio_clk(bp, params->chip_id, port);
12064 bnx2x_set_led(params, vars, LED_MODE_OFF, 0); 12372 bnx2x_set_led(params, vars, LED_MODE_OFF, 0);
12065 12373
@@ -12089,9 +12397,9 @@ int bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
12089 params->phy[INT_PHY].link_reset( 12397 params->phy[INT_PHY].link_reset(
12090 &params->phy[INT_PHY], params); 12398 &params->phy[INT_PHY], params);
12091 12399
12092 /* disable nig ingress interface */ 12400 /* Disable nig ingress interface */
12093 if (!CHIP_IS_E3(bp)) { 12401 if (!CHIP_IS_E3(bp)) {
12094 /* reset BigMac */ 12402 /* Reset BigMac */
12095 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 12403 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
12096 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port)); 12404 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
12097 REG_WR(bp, NIG_REG_BMAC0_IN_EN + port*4, 0); 12405 REG_WR(bp, NIG_REG_BMAC0_IN_EN + port*4, 0);
@@ -12148,7 +12456,7 @@ static int bnx2x_8073_common_init_phy(struct bnx2x *bp,
12148 DP(NETIF_MSG_LINK, "populate_phy failed\n"); 12456 DP(NETIF_MSG_LINK, "populate_phy failed\n");
12149 return -EINVAL; 12457 return -EINVAL;
12150 } 12458 }
12151 /* disable attentions */ 12459 /* Disable attentions */
12152 bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + 12460 bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 +
12153 port_of_path*4, 12461 port_of_path*4,
12154 (NIG_MASK_XGXS0_LINK_STATUS | 12462 (NIG_MASK_XGXS0_LINK_STATUS |
@@ -12222,7 +12530,7 @@ static int bnx2x_8073_common_init_phy(struct bnx2x *bp,
12222 bnx2x_cl45_write(bp, phy_blk[port], 12530 bnx2x_cl45_write(bp, phy_blk[port],
12223 MDIO_PMA_DEVAD, 12531 MDIO_PMA_DEVAD,
12224 MDIO_PMA_REG_TX_POWER_DOWN, (val & (~(1<<10)))); 12532 MDIO_PMA_REG_TX_POWER_DOWN, (val & (~(1<<10))));
12225 msleep(15); 12533 usleep_range(15000, 30000);
12226 12534
12227 /* Read modify write the SPI-ROM version select register */ 12535 /* Read modify write the SPI-ROM version select register */
12228 bnx2x_cl45_read(bp, phy_blk[port], 12536 bnx2x_cl45_read(bp, phy_blk[port],
@@ -12254,7 +12562,7 @@ static int bnx2x_8726_common_init_phy(struct bnx2x *bp,
12254 REG_WR(bp, MISC_REG_GPIO_EVENT_EN, val); 12562 REG_WR(bp, MISC_REG_GPIO_EVENT_EN, val);
12255 12563
12256 bnx2x_ext_phy_hw_reset(bp, 0); 12564 bnx2x_ext_phy_hw_reset(bp, 0);
12257 msleep(5); 12565 usleep_range(5000, 10000);
12258 for (port = 0; port < PORT_MAX; port++) { 12566 for (port = 0; port < PORT_MAX; port++) {
12259 u32 shmem_base, shmem2_base; 12567 u32 shmem_base, shmem2_base;
12260 12568
@@ -12361,11 +12669,11 @@ static int bnx2x_8727_common_init_phy(struct bnx2x *bp,
12361 /* Initiate PHY reset*/ 12669 /* Initiate PHY reset*/
12362 bnx2x_set_gpio(bp, reset_gpio, MISC_REGISTERS_GPIO_OUTPUT_LOW, 12670 bnx2x_set_gpio(bp, reset_gpio, MISC_REGISTERS_GPIO_OUTPUT_LOW,
12363 port); 12671 port);
12364 msleep(1); 12672 usleep_range(1000, 2000);
12365 bnx2x_set_gpio(bp, reset_gpio, MISC_REGISTERS_GPIO_OUTPUT_HIGH, 12673 bnx2x_set_gpio(bp, reset_gpio, MISC_REGISTERS_GPIO_OUTPUT_HIGH,
12366 port); 12674 port);
12367 12675
12368 msleep(5); 12676 usleep_range(5000, 10000);
12369 12677
12370 /* PART1 - Reset both phys */ 12678 /* PART1 - Reset both phys */
12371 for (port = PORT_MAX - 1; port >= PORT_0; port--) { 12679 for (port = PORT_MAX - 1; port >= PORT_0; port--) {
@@ -12459,7 +12767,7 @@ static int bnx2x_84833_pre_init_phy(struct bnx2x *bp,
12459 MDIO_PMA_REG_CTRL, &val); 12767 MDIO_PMA_REG_CTRL, &val);
12460 if (!(val & (1<<15))) 12768 if (!(val & (1<<15)))
12461 break; 12769 break;
12462 msleep(1); 12770 usleep_range(1000, 2000);
12463 } 12771 }
12464 if (cnt >= 1500) { 12772 if (cnt >= 1500) {
12465 DP(NETIF_MSG_LINK, "84833 reset timeout\n"); 12773 DP(NETIF_MSG_LINK, "84833 reset timeout\n");
@@ -12549,7 +12857,7 @@ static int bnx2x_ext_phy_common_init(struct bnx2x *bp, u32 shmem_base_path[],
12549 break; 12857 break;
12550 } 12858 }
12551 12859
12552 if (rc != 0) 12860 if (rc)
12553 netdev_err(bp->dev, "Warning: PHY was not initialized," 12861 netdev_err(bp->dev, "Warning: PHY was not initialized,"
12554 " Port %d\n", 12862 " Port %d\n",
12555 0); 12863 0);
@@ -12630,30 +12938,41 @@ static void bnx2x_check_over_curr(struct link_params *params,
12630 vars->phy_flags &= ~PHY_OVER_CURRENT_FLAG; 12938 vars->phy_flags &= ~PHY_OVER_CURRENT_FLAG;
12631} 12939}
12632 12940
12633static void bnx2x_analyze_link_error(struct link_params *params, 12941/* Returns 0 if no change occured since last check; 1 otherwise. */
12634 struct link_vars *vars, u32 lss_status, 12942static u8 bnx2x_analyze_link_error(struct link_params *params,
12635 u8 notify) 12943 struct link_vars *vars, u32 status,
12944 u32 phy_flag, u32 link_flag, u8 notify)
12636{ 12945{
12637 struct bnx2x *bp = params->bp; 12946 struct bnx2x *bp = params->bp;
12638 /* Compare new value with previous value */ 12947 /* Compare new value with previous value */
12639 u8 led_mode; 12948 u8 led_mode;
12640 u32 half_open_conn = (vars->phy_flags & PHY_HALF_OPEN_CONN_FLAG) > 0; 12949 u32 old_status = (vars->phy_flags & phy_flag) ? 1 : 0;
12641 12950
12642 if ((lss_status ^ half_open_conn) == 0) 12951 if ((status ^ old_status) == 0)
12643 return; 12952 return 0;
12644 12953
12645 /* If values differ */ 12954 /* If values differ */
12646 DP(NETIF_MSG_LINK, "Link changed:%x %x->%x\n", vars->link_up, 12955 switch (phy_flag) {
12647 half_open_conn, lss_status); 12956 case PHY_HALF_OPEN_CONN_FLAG:
12957 DP(NETIF_MSG_LINK, "Analyze Remote Fault\n");
12958 break;
12959 case PHY_SFP_TX_FAULT_FLAG:
12960 DP(NETIF_MSG_LINK, "Analyze TX Fault\n");
12961 break;
12962 default:
12963 DP(NETIF_MSG_LINK, "Analyze UNKOWN\n");
12964 }
12965 DP(NETIF_MSG_LINK, "Link changed:[%x %x]->%x\n", vars->link_up,
12966 old_status, status);
12648 12967
12649 /* a. Update shmem->link_status accordingly 12968 /* a. Update shmem->link_status accordingly
12650 * b. Update link_vars->link_up 12969 * b. Update link_vars->link_up
12651 */ 12970 */
12652 if (lss_status) { 12971 if (status) {
12653 DP(NETIF_MSG_LINK, "Remote Fault detected !!!\n");
12654 vars->link_status &= ~LINK_STATUS_LINK_UP; 12972 vars->link_status &= ~LINK_STATUS_LINK_UP;
12973 vars->link_status |= link_flag;
12655 vars->link_up = 0; 12974 vars->link_up = 0;
12656 vars->phy_flags |= PHY_HALF_OPEN_CONN_FLAG; 12975 vars->phy_flags |= phy_flag;
12657 12976
12658 /* activate nig drain */ 12977 /* activate nig drain */
12659 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 1); 12978 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 1);
@@ -12662,10 +12981,10 @@ static void bnx2x_analyze_link_error(struct link_params *params,
12662 */ 12981 */
12663 led_mode = LED_MODE_OFF; 12982 led_mode = LED_MODE_OFF;
12664 } else { 12983 } else {
12665 DP(NETIF_MSG_LINK, "Remote Fault cleared\n");
12666 vars->link_status |= LINK_STATUS_LINK_UP; 12984 vars->link_status |= LINK_STATUS_LINK_UP;
12985 vars->link_status &= ~link_flag;
12667 vars->link_up = 1; 12986 vars->link_up = 1;
12668 vars->phy_flags &= ~PHY_HALF_OPEN_CONN_FLAG; 12987 vars->phy_flags &= ~phy_flag;
12669 led_mode = LED_MODE_OPER; 12988 led_mode = LED_MODE_OPER;
12670 12989
12671 /* Clear nig drain */ 12990 /* Clear nig drain */
@@ -12682,6 +13001,8 @@ static void bnx2x_analyze_link_error(struct link_params *params,
12682 vars->periodic_flags |= PERIODIC_FLAGS_LINK_EVENT; 13001 vars->periodic_flags |= PERIODIC_FLAGS_LINK_EVENT;
12683 if (notify) 13002 if (notify)
12684 bnx2x_notify_link_changed(bp); 13003 bnx2x_notify_link_changed(bp);
13004
13005 return 1;
12685} 13006}
12686 13007
12687/****************************************************************************** 13008/******************************************************************************
@@ -12723,7 +13044,9 @@ int bnx2x_check_half_open_conn(struct link_params *params,
12723 if (REG_RD(bp, mac_base + XMAC_REG_RX_LSS_STATUS)) 13044 if (REG_RD(bp, mac_base + XMAC_REG_RX_LSS_STATUS))
12724 lss_status = 1; 13045 lss_status = 1;
12725 13046
12726 bnx2x_analyze_link_error(params, vars, lss_status, notify); 13047 bnx2x_analyze_link_error(params, vars, lss_status,
13048 PHY_HALF_OPEN_CONN_FLAG,
13049 LINK_STATUS_NONE, notify);
12727 } else if (REG_RD(bp, MISC_REG_RESET_REG_2) & 13050 } else if (REG_RD(bp, MISC_REG_RESET_REG_2) &
12728 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << params->port)) { 13051 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << params->port)) {
12729 /* Check E1X / E2 BMAC */ 13052 /* Check E1X / E2 BMAC */
@@ -12740,11 +13063,55 @@ int bnx2x_check_half_open_conn(struct link_params *params,
12740 REG_RD_DMAE(bp, mac_base + lss_status_reg, wb_data, 2); 13063 REG_RD_DMAE(bp, mac_base + lss_status_reg, wb_data, 2);
12741 lss_status = (wb_data[0] > 0); 13064 lss_status = (wb_data[0] > 0);
12742 13065
12743 bnx2x_analyze_link_error(params, vars, lss_status, notify); 13066 bnx2x_analyze_link_error(params, vars, lss_status,
13067 PHY_HALF_OPEN_CONN_FLAG,
13068 LINK_STATUS_NONE, notify);
12744 } 13069 }
12745 return 0; 13070 return 0;
12746} 13071}
13072static void bnx2x_sfp_tx_fault_detection(struct bnx2x_phy *phy,
13073 struct link_params *params,
13074 struct link_vars *vars)
13075{
13076 struct bnx2x *bp = params->bp;
13077 u32 cfg_pin, value = 0;
13078 u8 led_change, port = params->port;
12747 13079
13080 /* Get The SFP+ TX_Fault controlling pin ([eg]pio) */
13081 cfg_pin = (REG_RD(bp, params->shmem_base + offsetof(struct shmem_region,
13082 dev_info.port_hw_config[port].e3_cmn_pin_cfg)) &
13083 PORT_HW_CFG_E3_TX_FAULT_MASK) >>
13084 PORT_HW_CFG_E3_TX_FAULT_SHIFT;
13085
13086 if (bnx2x_get_cfg_pin(bp, cfg_pin, &value)) {
13087 DP(NETIF_MSG_LINK, "Failed to read pin 0x%02x\n", cfg_pin);
13088 return;
13089 }
13090
13091 led_change = bnx2x_analyze_link_error(params, vars, value,
13092 PHY_SFP_TX_FAULT_FLAG,
13093 LINK_STATUS_SFP_TX_FAULT, 1);
13094
13095 if (led_change) {
13096 /* Change TX_Fault led, set link status for further syncs */
13097 u8 led_mode;
13098
13099 if (vars->phy_flags & PHY_SFP_TX_FAULT_FLAG) {
13100 led_mode = MISC_REGISTERS_GPIO_HIGH;
13101 vars->link_status |= LINK_STATUS_SFP_TX_FAULT;
13102 } else {
13103 led_mode = MISC_REGISTERS_GPIO_LOW;
13104 vars->link_status &= ~LINK_STATUS_SFP_TX_FAULT;
13105 }
13106
13107 /* If module is unapproved, led should be on regardless */
13108 if (!(phy->flags & FLAGS_SFP_NOT_APPROVED)) {
13109 DP(NETIF_MSG_LINK, "Change TX_Fault LED: ->%x\n",
13110 led_mode);
13111 bnx2x_set_e3_module_fault_led(params, led_mode);
13112 }
13113 }
13114}
12748void bnx2x_period_func(struct link_params *params, struct link_vars *vars) 13115void bnx2x_period_func(struct link_params *params, struct link_vars *vars)
12749{ 13116{
12750 u16 phy_idx; 13117 u16 phy_idx;
@@ -12763,7 +13130,26 @@ void bnx2x_period_func(struct link_params *params, struct link_vars *vars)
12763 struct bnx2x_phy *phy = &params->phy[INT_PHY]; 13130 struct bnx2x_phy *phy = &params->phy[INT_PHY];
12764 bnx2x_set_aer_mmd(params, phy); 13131 bnx2x_set_aer_mmd(params, phy);
12765 bnx2x_check_over_curr(params, vars); 13132 bnx2x_check_over_curr(params, vars);
12766 bnx2x_warpcore_config_runtime(phy, params, vars); 13133 if (vars->rx_tx_asic_rst)
13134 bnx2x_warpcore_config_runtime(phy, params, vars);
13135
13136 if ((REG_RD(bp, params->shmem_base +
13137 offsetof(struct shmem_region, dev_info.
13138 port_hw_config[params->port].default_cfg))
13139 & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
13140 PORT_HW_CFG_NET_SERDES_IF_SFI) {
13141 if (bnx2x_is_sfp_module_plugged(phy, params)) {
13142 bnx2x_sfp_tx_fault_detection(phy, params, vars);
13143 } else if (vars->link_status &
13144 LINK_STATUS_SFP_TX_FAULT) {
13145 /* Clean trail, interrupt corrects the leds */
13146 vars->link_status &= ~LINK_STATUS_SFP_TX_FAULT;
13147 vars->phy_flags &= ~PHY_SFP_TX_FAULT_FLAG;
13148 /* Update link status in the shared memory */
13149 bnx2x_update_mng(params, vars->link_status);
13150 }
13151 }
13152
12767 } 13153 }
12768 13154
12769} 13155}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
index ea4371f4335..51cac813005 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
@@ -41,6 +41,7 @@
41#define SPEED_AUTO_NEG 0 41#define SPEED_AUTO_NEG 0
42#define SPEED_20000 20000 42#define SPEED_20000 20000
43 43
44#define SFP_EEPROM_PAGE_SIZE 16
44#define SFP_EEPROM_VENDOR_NAME_ADDR 0x14 45#define SFP_EEPROM_VENDOR_NAME_ADDR 0x14
45#define SFP_EEPROM_VENDOR_NAME_SIZE 16 46#define SFP_EEPROM_VENDOR_NAME_SIZE 16
46#define SFP_EEPROM_VENDOR_OUI_ADDR 0x25 47#define SFP_EEPROM_VENDOR_OUI_ADDR 0x25
@@ -125,6 +126,11 @@ typedef void (*set_link_led_t)(struct bnx2x_phy *phy,
125 struct link_params *params, u8 mode); 126 struct link_params *params, u8 mode);
126typedef void (*phy_specific_func_t)(struct bnx2x_phy *phy, 127typedef void (*phy_specific_func_t)(struct bnx2x_phy *phy,
127 struct link_params *params, u32 action); 128 struct link_params *params, u32 action);
129struct bnx2x_reg_set {
130 u8 devad;
131 u16 reg;
132 u16 val;
133};
128 134
129struct bnx2x_phy { 135struct bnx2x_phy {
130 u32 type; 136 u32 type;
@@ -149,6 +155,7 @@ struct bnx2x_phy {
149#define FLAGS_DUMMY_READ (1<<9) 155#define FLAGS_DUMMY_READ (1<<9)
150#define FLAGS_MDC_MDIO_WA_B0 (1<<10) 156#define FLAGS_MDC_MDIO_WA_B0 (1<<10)
151#define FLAGS_TX_ERROR_CHECK (1<<12) 157#define FLAGS_TX_ERROR_CHECK (1<<12)
158#define FLAGS_EEE_10GBT (1<<13)
152 159
153 /* preemphasis values for the rx side */ 160 /* preemphasis values for the rx side */
154 u16 rx_preemphasis[4]; 161 u16 rx_preemphasis[4];
@@ -162,14 +169,15 @@ struct bnx2x_phy {
162 u32 supported; 169 u32 supported;
163 170
164 u32 media_type; 171 u32 media_type;
165#define ETH_PHY_UNSPECIFIED 0x0 172#define ETH_PHY_UNSPECIFIED 0x0
166#define ETH_PHY_SFP_FIBER 0x1 173#define ETH_PHY_SFPP_10G_FIBER 0x1
167#define ETH_PHY_XFP_FIBER 0x2 174#define ETH_PHY_XFP_FIBER 0x2
168#define ETH_PHY_DA_TWINAX 0x3 175#define ETH_PHY_DA_TWINAX 0x3
169#define ETH_PHY_BASE_T 0x4 176#define ETH_PHY_BASE_T 0x4
170#define ETH_PHY_KR 0xf0 177#define ETH_PHY_SFP_1G_FIBER 0x5
171#define ETH_PHY_CX4 0xf1 178#define ETH_PHY_KR 0xf0
172#define ETH_PHY_NOT_PRESENT 0xff 179#define ETH_PHY_CX4 0xf1
180#define ETH_PHY_NOT_PRESENT 0xff
173 181
174 /* The address in which version is located*/ 182 /* The address in which version is located*/
175 u32 ver_addr; 183 u32 ver_addr;
@@ -265,6 +273,30 @@ struct link_params {
265 u8 num_phys; 273 u8 num_phys;
266 274
267 u8 rsrv; 275 u8 rsrv;
276
277 /* Used to configure the EEE Tx LPI timer, has several modes of
278 * operation, according to bits 29:28 -
279 * 2'b00: Timer will be configured by nvram, output will be the value
280 * from nvram.
281 * 2'b01: Timer will be configured by nvram, output will be in
282 * microseconds.
283 * 2'b10: bits 1:0 contain an nvram value which will be used instead
284 * of the one located in the nvram. Output will be that value.
285 * 2'b11: bits 19:0 contain the idle timer in microseconds; output
286 * will be in microseconds.
287 * Bits 31:30 should be 2'b11 in order for EEE to be enabled.
288 */
289 u32 eee_mode;
290#define EEE_MODE_NVRAM_BALANCED_TIME (0xa00)
291#define EEE_MODE_NVRAM_AGGRESSIVE_TIME (0x100)
292#define EEE_MODE_NVRAM_LATENCY_TIME (0x6000)
293#define EEE_MODE_NVRAM_MASK (0x3)
294#define EEE_MODE_TIMER_MASK (0xfffff)
295#define EEE_MODE_OUTPUT_TIME (1<<28)
296#define EEE_MODE_OVERRIDE_NVRAM (1<<29)
297#define EEE_MODE_ENABLE_LPI (1<<30)
298#define EEE_MODE_ADV_LPI (1<<31)
299
268 u16 hw_led_mode; /* part of the hw_config read from the shmem */ 300 u16 hw_led_mode; /* part of the hw_config read from the shmem */
269 u32 multi_phy_config; 301 u32 multi_phy_config;
270 302
@@ -282,6 +314,7 @@ struct link_vars {
282#define PHY_PHYSICAL_LINK_FLAG (1<<2) 314#define PHY_PHYSICAL_LINK_FLAG (1<<2)
283#define PHY_HALF_OPEN_CONN_FLAG (1<<3) 315#define PHY_HALF_OPEN_CONN_FLAG (1<<3)
284#define PHY_OVER_CURRENT_FLAG (1<<4) 316#define PHY_OVER_CURRENT_FLAG (1<<4)
317#define PHY_SFP_TX_FAULT_FLAG (1<<5)
285 318
286 u8 mac_type; 319 u8 mac_type;
287#define MAC_TYPE_NONE 0 320#define MAC_TYPE_NONE 0
@@ -301,6 +334,7 @@ struct link_vars {
301 334
302 /* The same definitions as the shmem parameter */ 335 /* The same definitions as the shmem parameter */
303 u32 link_status; 336 u32 link_status;
337 u32 eee_status;
304 u8 fault_detected; 338 u8 fault_detected;
305 u8 rsrv1; 339 u8 rsrv1;
306 u16 periodic_flags; 340 u16 periodic_flags;
@@ -459,8 +493,7 @@ struct bnx2x_ets_params {
459 struct bnx2x_ets_cos_params cos[DCBX_MAX_NUM_COS]; 493 struct bnx2x_ets_cos_params cos[DCBX_MAX_NUM_COS];
460}; 494};
461 495
462/** 496/* Used to update the PFC attributes in EMAC, BMAC, NIG and BRB
463 * Used to update the PFC attributes in EMAC, BMAC, NIG and BRB
464 * when link is already up 497 * when link is already up
465 */ 498 */
466int bnx2x_update_pfc(struct link_params *params, 499int bnx2x_update_pfc(struct link_params *params,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index f755a665dab..08eca3ff7db 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -74,6 +74,8 @@
74#define FW_FILE_NAME_E1H "bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw" 74#define FW_FILE_NAME_E1H "bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw"
75#define FW_FILE_NAME_E2 "bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw" 75#define FW_FILE_NAME_E2 "bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw"
76 76
77#define MAC_LEADING_ZERO_CNT (ALIGN(ETH_ALEN, sizeof(u32)) - ETH_ALEN)
78
77/* Time in jiffies before concluding the transmitter is hung */ 79/* Time in jiffies before concluding the transmitter is hung */
78#define TX_TIMEOUT (5*HZ) 80#define TX_TIMEOUT (5*HZ)
79 81
@@ -104,7 +106,7 @@ MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
104 106
105#define INT_MODE_INTx 1 107#define INT_MODE_INTx 1
106#define INT_MODE_MSI 2 108#define INT_MODE_MSI 2
107static int int_mode; 109int int_mode;
108module_param(int_mode, int, 0); 110module_param(int_mode, int, 0);
109MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X " 111MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
110 "(1 INT#x; 2 MSI)"); 112 "(1 INT#x; 2 MSI)");
@@ -758,7 +760,7 @@ void bnx2x_panic_dump(struct bnx2x *bp)
758 /* Tx */ 760 /* Tx */
759 for_each_cos_in_tx_queue(fp, cos) 761 for_each_cos_in_tx_queue(fp, cos)
760 { 762 {
761 txdata = fp->txdata[cos]; 763 txdata = *fp->txdata_ptr[cos];
762 BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x) tx_bd_prod(0x%x) tx_bd_cons(0x%x) *tx_cons_sb(0x%x)\n", 764 BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x) tx_bd_prod(0x%x) tx_bd_cons(0x%x) *tx_cons_sb(0x%x)\n",
763 i, txdata.tx_pkt_prod, 765 i, txdata.tx_pkt_prod,
764 txdata.tx_pkt_cons, txdata.tx_bd_prod, 766 txdata.tx_pkt_cons, txdata.tx_bd_prod,
@@ -876,7 +878,7 @@ void bnx2x_panic_dump(struct bnx2x *bp)
876 for_each_tx_queue(bp, i) { 878 for_each_tx_queue(bp, i) {
877 struct bnx2x_fastpath *fp = &bp->fp[i]; 879 struct bnx2x_fastpath *fp = &bp->fp[i];
878 for_each_cos_in_tx_queue(fp, cos) { 880 for_each_cos_in_tx_queue(fp, cos) {
879 struct bnx2x_fp_txdata *txdata = &fp->txdata[cos]; 881 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
880 882
881 start = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) - 10); 883 start = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) - 10);
882 end = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) + 245); 884 end = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) + 245);
@@ -1583,7 +1585,7 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
1583 int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data); 1585 int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
1584 int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data); 1586 int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
1585 enum bnx2x_queue_cmd drv_cmd = BNX2X_Q_CMD_MAX; 1587 enum bnx2x_queue_cmd drv_cmd = BNX2X_Q_CMD_MAX;
1586 struct bnx2x_queue_sp_obj *q_obj = &fp->q_obj; 1588 struct bnx2x_queue_sp_obj *q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
1587 1589
1588 DP(BNX2X_MSG_SP, 1590 DP(BNX2X_MSG_SP,
1589 "fp %d cid %d got ramrod #%d state is %x type is %d\n", 1591 "fp %d cid %d got ramrod #%d state is %x type is %d\n",
@@ -1710,7 +1712,7 @@ irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1710 /* Handle Rx or Tx according to SB id */ 1712 /* Handle Rx or Tx according to SB id */
1711 prefetch(fp->rx_cons_sb); 1713 prefetch(fp->rx_cons_sb);
1712 for_each_cos_in_tx_queue(fp, cos) 1714 for_each_cos_in_tx_queue(fp, cos)
1713 prefetch(fp->txdata[cos].tx_cons_sb); 1715 prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
1714 prefetch(&fp->sb_running_index[SM_RX_ID]); 1716 prefetch(&fp->sb_running_index[SM_RX_ID]);
1715 napi_schedule(&bnx2x_fp(bp, fp->index, napi)); 1717 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1716 status &= ~mask; 1718 status &= ~mask;
@@ -2124,6 +2126,11 @@ u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
2124 } 2126 }
2125 } 2127 }
2126 2128
2129 if (load_mode == LOAD_LOOPBACK_EXT) {
2130 struct link_params *lp = &bp->link_params;
2131 lp->loopback_mode = LOOPBACK_EXT;
2132 }
2133
2127 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars); 2134 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2128 2135
2129 bnx2x_release_phy_lock(bp); 2136 bnx2x_release_phy_lock(bp);
@@ -2916,7 +2923,7 @@ static void bnx2x_pf_tx_q_prep(struct bnx2x *bp,
2916 struct bnx2x_fastpath *fp, struct bnx2x_txq_setup_params *txq_init, 2923 struct bnx2x_fastpath *fp, struct bnx2x_txq_setup_params *txq_init,
2917 u8 cos) 2924 u8 cos)
2918{ 2925{
2919 txq_init->dscr_map = fp->txdata[cos].tx_desc_mapping; 2926 txq_init->dscr_map = fp->txdata_ptr[cos]->tx_desc_mapping;
2920 txq_init->sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS + cos; 2927 txq_init->sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS + cos;
2921 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW; 2928 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
2922 txq_init->fw_sb_id = fp->fw_sb_id; 2929 txq_init->fw_sb_id = fp->fw_sb_id;
@@ -3030,9 +3037,9 @@ static void bnx2x_drv_info_ether_stat(struct bnx2x *bp)
3030 memcpy(ether_stat->version, DRV_MODULE_VERSION, 3037 memcpy(ether_stat->version, DRV_MODULE_VERSION,
3031 ETH_STAT_INFO_VERSION_LEN - 1); 3038 ETH_STAT_INFO_VERSION_LEN - 1);
3032 3039
3033 bp->fp[0].mac_obj.get_n_elements(bp, &bp->fp[0].mac_obj, 3040 bp->sp_objs[0].mac_obj.get_n_elements(bp, &bp->sp_objs[0].mac_obj,
3034 DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED, 3041 DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED,
3035 ether_stat->mac_local); 3042 ether_stat->mac_local);
3036 3043
3037 ether_stat->mtu_size = bp->dev->mtu; 3044 ether_stat->mtu_size = bp->dev->mtu;
3038 3045
@@ -3055,7 +3062,8 @@ static void bnx2x_drv_info_fcoe_stat(struct bnx2x *bp)
3055 struct fcoe_stats_info *fcoe_stat = 3062 struct fcoe_stats_info *fcoe_stat =
3056 &bp->slowpath->drv_info_to_mcp.fcoe_stat; 3063 &bp->slowpath->drv_info_to_mcp.fcoe_stat;
3057 3064
3058 memcpy(fcoe_stat->mac_local, bp->fip_mac, ETH_ALEN); 3065 memcpy(fcoe_stat->mac_local + MAC_LEADING_ZERO_CNT,
3066 bp->fip_mac, ETH_ALEN);
3059 3067
3060 fcoe_stat->qos_priority = 3068 fcoe_stat->qos_priority =
3061 app->traffic_type_priority[LLFC_TRAFFIC_TYPE_FCOE]; 3069 app->traffic_type_priority[LLFC_TRAFFIC_TYPE_FCOE];
@@ -3063,11 +3071,11 @@ static void bnx2x_drv_info_fcoe_stat(struct bnx2x *bp)
3063 /* insert FCoE stats from ramrod response */ 3071 /* insert FCoE stats from ramrod response */
3064 if (!NO_FCOE(bp)) { 3072 if (!NO_FCOE(bp)) {
3065 struct tstorm_per_queue_stats *fcoe_q_tstorm_stats = 3073 struct tstorm_per_queue_stats *fcoe_q_tstorm_stats =
3066 &bp->fw_stats_data->queue_stats[FCOE_IDX]. 3074 &bp->fw_stats_data->queue_stats[FCOE_IDX(bp)].
3067 tstorm_queue_statistics; 3075 tstorm_queue_statistics;
3068 3076
3069 struct xstorm_per_queue_stats *fcoe_q_xstorm_stats = 3077 struct xstorm_per_queue_stats *fcoe_q_xstorm_stats =
3070 &bp->fw_stats_data->queue_stats[FCOE_IDX]. 3078 &bp->fw_stats_data->queue_stats[FCOE_IDX(bp)].
3071 xstorm_queue_statistics; 3079 xstorm_queue_statistics;
3072 3080
3073 struct fcoe_statistics_params *fw_fcoe_stat = 3081 struct fcoe_statistics_params *fw_fcoe_stat =
@@ -3146,7 +3154,8 @@ static void bnx2x_drv_info_iscsi_stat(struct bnx2x *bp)
3146 struct iscsi_stats_info *iscsi_stat = 3154 struct iscsi_stats_info *iscsi_stat =
3147 &bp->slowpath->drv_info_to_mcp.iscsi_stat; 3155 &bp->slowpath->drv_info_to_mcp.iscsi_stat;
3148 3156
3149 memcpy(iscsi_stat->mac_local, bp->cnic_eth_dev.iscsi_mac, ETH_ALEN); 3157 memcpy(iscsi_stat->mac_local + MAC_LEADING_ZERO_CNT,
3158 bp->cnic_eth_dev.iscsi_mac, ETH_ALEN);
3150 3159
3151 iscsi_stat->qos_priority = 3160 iscsi_stat->qos_priority =
3152 app->traffic_type_priority[LLFC_TRAFFIC_TYPE_ISCSI]; 3161 app->traffic_type_priority[LLFC_TRAFFIC_TYPE_ISCSI];
@@ -3176,6 +3185,12 @@ static void bnx2x_set_mf_bw(struct bnx2x *bp)
3176 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW_ACK, 0); 3185 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW_ACK, 0);
3177} 3186}
3178 3187
3188static void bnx2x_handle_eee_event(struct bnx2x *bp)
3189{
3190 DP(BNX2X_MSG_MCP, "EEE - LLDP event\n");
3191 bnx2x_fw_command(bp, DRV_MSG_CODE_EEE_RESULTS_ACK, 0);
3192}
3193
3179static void bnx2x_handle_drv_info_req(struct bnx2x *bp) 3194static void bnx2x_handle_drv_info_req(struct bnx2x *bp)
3180{ 3195{
3181 enum drv_info_opcode op_code; 3196 enum drv_info_opcode op_code;
@@ -3742,6 +3757,8 @@ static void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
3742 if (val & DRV_STATUS_AFEX_EVENT_MASK) 3757 if (val & DRV_STATUS_AFEX_EVENT_MASK)
3743 bnx2x_handle_afex_cmd(bp, 3758 bnx2x_handle_afex_cmd(bp,
3744 val & DRV_STATUS_AFEX_EVENT_MASK); 3759 val & DRV_STATUS_AFEX_EVENT_MASK);
3760 if (val & DRV_STATUS_EEE_NEGOTIATION_RESULTS)
3761 bnx2x_handle_eee_event(bp);
3745 if (bp->link_vars.periodic_flags & 3762 if (bp->link_vars.periodic_flags &
3746 PERIODIC_FLAGS_LINK_EVENT) { 3763 PERIODIC_FLAGS_LINK_EVENT) {
3747 /* sync with link */ 3764 /* sync with link */
@@ -4615,11 +4632,11 @@ static void bnx2x_handle_classification_eqe(struct bnx2x *bp,
4615 case BNX2X_FILTER_MAC_PENDING: 4632 case BNX2X_FILTER_MAC_PENDING:
4616 DP(BNX2X_MSG_SP, "Got SETUP_MAC completions\n"); 4633 DP(BNX2X_MSG_SP, "Got SETUP_MAC completions\n");
4617#ifdef BCM_CNIC 4634#ifdef BCM_CNIC
4618 if (cid == BNX2X_ISCSI_ETH_CID) 4635 if (cid == BNX2X_ISCSI_ETH_CID(bp))
4619 vlan_mac_obj = &bp->iscsi_l2_mac_obj; 4636 vlan_mac_obj = &bp->iscsi_l2_mac_obj;
4620 else 4637 else
4621#endif 4638#endif
4622 vlan_mac_obj = &bp->fp[cid].mac_obj; 4639 vlan_mac_obj = &bp->sp_objs[cid].mac_obj;
4623 4640
4624 break; 4641 break;
4625 case BNX2X_FILTER_MCAST_PENDING: 4642 case BNX2X_FILTER_MCAST_PENDING:
@@ -4717,7 +4734,7 @@ static void bnx2x_after_function_update(struct bnx2x *bp)
4717 for_each_eth_queue(bp, q) { 4734 for_each_eth_queue(bp, q) {
4718 /* Set the appropriate Queue object */ 4735 /* Set the appropriate Queue object */
4719 fp = &bp->fp[q]; 4736 fp = &bp->fp[q];
4720 queue_params.q_obj = &fp->q_obj; 4737 queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
4721 4738
4722 /* send the ramrod */ 4739 /* send the ramrod */
4723 rc = bnx2x_queue_state_change(bp, &queue_params); 4740 rc = bnx2x_queue_state_change(bp, &queue_params);
@@ -4728,8 +4745,8 @@ static void bnx2x_after_function_update(struct bnx2x *bp)
4728 4745
4729#ifdef BCM_CNIC 4746#ifdef BCM_CNIC
4730 if (!NO_FCOE(bp)) { 4747 if (!NO_FCOE(bp)) {
4731 fp = &bp->fp[FCOE_IDX]; 4748 fp = &bp->fp[FCOE_IDX(bp)];
4732 queue_params.q_obj = &fp->q_obj; 4749 queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
4733 4750
4734 /* clear pending completion bit */ 4751 /* clear pending completion bit */
4735 __clear_bit(RAMROD_COMP_WAIT, &queue_params.ramrod_flags); 4752 __clear_bit(RAMROD_COMP_WAIT, &queue_params.ramrod_flags);
@@ -4761,11 +4778,11 @@ static struct bnx2x_queue_sp_obj *bnx2x_cid_to_q_obj(
4761{ 4778{
4762 DP(BNX2X_MSG_SP, "retrieving fp from cid %d\n", cid); 4779 DP(BNX2X_MSG_SP, "retrieving fp from cid %d\n", cid);
4763#ifdef BCM_CNIC 4780#ifdef BCM_CNIC
4764 if (cid == BNX2X_FCOE_ETH_CID) 4781 if (cid == BNX2X_FCOE_ETH_CID(bp))
4765 return &bnx2x_fcoe(bp, q_obj); 4782 return &bnx2x_fcoe_sp_obj(bp, q_obj);
4766 else 4783 else
4767#endif 4784#endif
4768 return &bnx2x_fp(bp, CID_TO_FP(cid), q_obj); 4785 return &bp->sp_objs[CID_TO_FP(cid, bp)].q_obj;
4769} 4786}
4770 4787
4771static void bnx2x_eq_int(struct bnx2x *bp) 4788static void bnx2x_eq_int(struct bnx2x *bp)
@@ -5647,15 +5664,15 @@ static void bnx2x_init_eth_fp(struct bnx2x *bp, int fp_idx)
5647 5664
5648 /* init tx data */ 5665 /* init tx data */
5649 for_each_cos_in_tx_queue(fp, cos) { 5666 for_each_cos_in_tx_queue(fp, cos) {
5650 bnx2x_init_txdata(bp, &fp->txdata[cos], 5667 bnx2x_init_txdata(bp, fp->txdata_ptr[cos],
5651 CID_COS_TO_TX_ONLY_CID(fp->cid, cos), 5668 CID_COS_TO_TX_ONLY_CID(fp->cid, cos, bp),
5652 FP_COS_TO_TXQ(fp, cos), 5669 FP_COS_TO_TXQ(fp, cos, bp),
5653 BNX2X_TX_SB_INDEX_BASE + cos); 5670 BNX2X_TX_SB_INDEX_BASE + cos, fp);
5654 cids[cos] = fp->txdata[cos].cid; 5671 cids[cos] = fp->txdata_ptr[cos]->cid;
5655 } 5672 }
5656 5673
5657 bnx2x_init_queue_obj(bp, &fp->q_obj, fp->cl_id, cids, fp->max_cos, 5674 bnx2x_init_queue_obj(bp, &bnx2x_sp_obj(bp, fp).q_obj, fp->cl_id, cids,
5658 BP_FUNC(bp), bnx2x_sp(bp, q_rdata), 5675 fp->max_cos, BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
5659 bnx2x_sp_mapping(bp, q_rdata), q_type); 5676 bnx2x_sp_mapping(bp, q_rdata), q_type);
5660 5677
5661 /** 5678 /**
@@ -5706,7 +5723,7 @@ static void bnx2x_init_tx_rings(struct bnx2x *bp)
5706 5723
5707 for_each_tx_queue(bp, i) 5724 for_each_tx_queue(bp, i)
5708 for_each_cos_in_tx_queue(&bp->fp[i], cos) 5725 for_each_cos_in_tx_queue(&bp->fp[i], cos)
5709 bnx2x_init_tx_ring_one(&bp->fp[i].txdata[cos]); 5726 bnx2x_init_tx_ring_one(bp->fp[i].txdata_ptr[cos]);
5710} 5727}
5711 5728
5712void bnx2x_nic_init(struct bnx2x *bp, u32 load_code) 5729void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
@@ -7055,12 +7072,10 @@ static int bnx2x_init_hw_func(struct bnx2x *bp)
7055 cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start; 7072 cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
7056 7073
7057 for (i = 0; i < L2_ILT_LINES(bp); i++) { 7074 for (i = 0; i < L2_ILT_LINES(bp); i++) {
7058 ilt->lines[cdu_ilt_start + i].page = 7075 ilt->lines[cdu_ilt_start + i].page = bp->context[i].vcxt;
7059 bp->context.vcxt + (ILT_PAGE_CIDS * i);
7060 ilt->lines[cdu_ilt_start + i].page_mapping = 7076 ilt->lines[cdu_ilt_start + i].page_mapping =
7061 bp->context.cxt_mapping + (CDU_ILT_PAGE_SZ * i); 7077 bp->context[i].cxt_mapping;
7062 /* cdu ilt pages are allocated manually so there's no need to 7078 ilt->lines[cdu_ilt_start + i].size = bp->context[i].size;
7063 set the size */
7064 } 7079 }
7065 bnx2x_ilt_init_op(bp, INITOP_SET); 7080 bnx2x_ilt_init_op(bp, INITOP_SET);
7066 7081
@@ -7327,6 +7342,8 @@ static int bnx2x_init_hw_func(struct bnx2x *bp)
7327 7342
7328void bnx2x_free_mem(struct bnx2x *bp) 7343void bnx2x_free_mem(struct bnx2x *bp)
7329{ 7344{
7345 int i;
7346
7330 /* fastpath */ 7347 /* fastpath */
7331 bnx2x_free_fp_mem(bp); 7348 bnx2x_free_fp_mem(bp);
7332 /* end of fastpath */ 7349 /* end of fastpath */
@@ -7340,9 +7357,9 @@ void bnx2x_free_mem(struct bnx2x *bp)
7340 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping, 7357 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
7341 sizeof(struct bnx2x_slowpath)); 7358 sizeof(struct bnx2x_slowpath));
7342 7359
7343 BNX2X_PCI_FREE(bp->context.vcxt, bp->context.cxt_mapping, 7360 for (i = 0; i < L2_ILT_LINES(bp); i++)
7344 bp->context.size); 7361 BNX2X_PCI_FREE(bp->context[i].vcxt, bp->context[i].cxt_mapping,
7345 7362 bp->context[i].size);
7346 bnx2x_ilt_mem_op(bp, ILT_MEMOP_FREE); 7363 bnx2x_ilt_mem_op(bp, ILT_MEMOP_FREE);
7347 7364
7348 BNX2X_FREE(bp->ilt->lines); 7365 BNX2X_FREE(bp->ilt->lines);
@@ -7428,6 +7445,8 @@ alloc_mem_err:
7428 7445
7429int bnx2x_alloc_mem(struct bnx2x *bp) 7446int bnx2x_alloc_mem(struct bnx2x *bp)
7430{ 7447{
7448 int i, allocated, context_size;
7449
7431#ifdef BCM_CNIC 7450#ifdef BCM_CNIC
7432 if (!CHIP_IS_E1x(bp)) 7451 if (!CHIP_IS_E1x(bp))
7433 /* size = the status block + ramrod buffers */ 7452 /* size = the status block + ramrod buffers */
@@ -7457,11 +7476,29 @@ int bnx2x_alloc_mem(struct bnx2x *bp)
7457 if (bnx2x_alloc_fw_stats_mem(bp)) 7476 if (bnx2x_alloc_fw_stats_mem(bp))
7458 goto alloc_mem_err; 7477 goto alloc_mem_err;
7459 7478
7460 bp->context.size = sizeof(union cdu_context) * BNX2X_L2_CID_COUNT(bp); 7479 /* Allocate memory for CDU context:
7461 7480 * This memory is allocated separately and not in the generic ILT
7462 BNX2X_PCI_ALLOC(bp->context.vcxt, &bp->context.cxt_mapping, 7481 * functions because CDU differs in few aspects:
7463 bp->context.size); 7482 * 1. There are multiple entities allocating memory for context -
7483 * 'regular' driver, CNIC and SRIOV driver. Each separately controls
7484 * its own ILT lines.
7485 * 2. Since CDU page-size is not a single 4KB page (which is the case
7486 * for the other ILT clients), to be efficient we want to support
7487 * allocation of sub-page-size in the last entry.
7488 * 3. Context pointers are used by the driver to pass to FW / update
7489 * the context (for the other ILT clients the pointers are used just to
7490 * free the memory during unload).
7491 */
7492 context_size = sizeof(union cdu_context) * BNX2X_L2_CID_COUNT(bp);
7464 7493
7494 for (i = 0, allocated = 0; allocated < context_size; i++) {
7495 bp->context[i].size = min(CDU_ILT_PAGE_SZ,
7496 (context_size - allocated));
7497 BNX2X_PCI_ALLOC(bp->context[i].vcxt,
7498 &bp->context[i].cxt_mapping,
7499 bp->context[i].size);
7500 allocated += bp->context[i].size;
7501 }
7465 BNX2X_ALLOC(bp->ilt->lines, sizeof(struct ilt_line) * ILT_MAX_LINES); 7502 BNX2X_ALLOC(bp->ilt->lines, sizeof(struct ilt_line) * ILT_MAX_LINES);
7466 7503
7467 if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC)) 7504 if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC))
@@ -7563,8 +7600,8 @@ int bnx2x_set_eth_mac(struct bnx2x *bp, bool set)
7563 7600
7564 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); 7601 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
7565 /* Eth MAC is set on RSS leading client (fp[0]) */ 7602 /* Eth MAC is set on RSS leading client (fp[0]) */
7566 return bnx2x_set_mac_one(bp, bp->dev->dev_addr, &bp->fp->mac_obj, set, 7603 return bnx2x_set_mac_one(bp, bp->dev->dev_addr, &bp->sp_objs->mac_obj,
7567 BNX2X_ETH_MAC, &ramrod_flags); 7604 set, BNX2X_ETH_MAC, &ramrod_flags);
7568} 7605}
7569 7606
7570int bnx2x_setup_leading(struct bnx2x *bp) 7607int bnx2x_setup_leading(struct bnx2x *bp)
@@ -7579,7 +7616,7 @@ int bnx2x_setup_leading(struct bnx2x *bp)
7579 * 7616 *
7580 * In case of MSI-X it will also try to enable MSI-X. 7617 * In case of MSI-X it will also try to enable MSI-X.
7581 */ 7618 */
7582static void __devinit bnx2x_set_int_mode(struct bnx2x *bp) 7619void bnx2x_set_int_mode(struct bnx2x *bp)
7583{ 7620{
7584 switch (int_mode) { 7621 switch (int_mode) {
7585 case INT_MODE_MSI: 7622 case INT_MODE_MSI:
@@ -7590,11 +7627,6 @@ static void __devinit bnx2x_set_int_mode(struct bnx2x *bp)
7590 BNX2X_DEV_INFO("set number of queues to 1\n"); 7627 BNX2X_DEV_INFO("set number of queues to 1\n");
7591 break; 7628 break;
7592 default: 7629 default:
7593 /* Set number of queues for MSI-X mode */
7594 bnx2x_set_num_queues(bp);
7595
7596 BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
7597
7598 /* if we can't use MSI-X we only need one fp, 7630 /* if we can't use MSI-X we only need one fp,
7599 * so try to enable MSI-X with the requested number of fp's 7631 * so try to enable MSI-X with the requested number of fp's
7600 * and fallback to MSI or legacy INTx with one fp 7632 * and fallback to MSI or legacy INTx with one fp
@@ -7735,6 +7767,8 @@ static void bnx2x_pf_q_prep_init(struct bnx2x *bp,
7735{ 7767{
7736 7768
7737 u8 cos; 7769 u8 cos;
7770 int cxt_index, cxt_offset;
7771
7738 /* FCoE Queue uses Default SB, thus has no HC capabilities */ 7772 /* FCoE Queue uses Default SB, thus has no HC capabilities */
7739 if (!IS_FCOE_FP(fp)) { 7773 if (!IS_FCOE_FP(fp)) {
7740 __set_bit(BNX2X_Q_FLG_HC, &init_params->rx.flags); 7774 __set_bit(BNX2X_Q_FLG_HC, &init_params->rx.flags);
@@ -7771,9 +7805,13 @@ static void bnx2x_pf_q_prep_init(struct bnx2x *bp,
7771 fp->index, init_params->max_cos); 7805 fp->index, init_params->max_cos);
7772 7806
7773 /* set the context pointers queue object */ 7807 /* set the context pointers queue object */
7774 for (cos = FIRST_TX_COS_INDEX; cos < init_params->max_cos; cos++) 7808 for (cos = FIRST_TX_COS_INDEX; cos < init_params->max_cos; cos++) {
7809 cxt_index = fp->txdata_ptr[cos]->cid / ILT_PAGE_CIDS;
7810 cxt_offset = fp->txdata_ptr[cos]->cid - (cxt_index *
7811 ILT_PAGE_CIDS);
7775 init_params->cxts[cos] = 7812 init_params->cxts[cos] =
7776 &bp->context.vcxt[fp->txdata[cos].cid].eth; 7813 &bp->context[cxt_index].vcxt[cxt_offset].eth;
7814 }
7777} 7815}
7778 7816
7779int bnx2x_setup_tx_only(struct bnx2x *bp, struct bnx2x_fastpath *fp, 7817int bnx2x_setup_tx_only(struct bnx2x *bp, struct bnx2x_fastpath *fp,
@@ -7838,7 +7876,7 @@ int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp,
7838 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, 7876 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0,
7839 IGU_INT_ENABLE, 0); 7877 IGU_INT_ENABLE, 0);
7840 7878
7841 q_params.q_obj = &fp->q_obj; 7879 q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
7842 /* We want to wait for completion in this context */ 7880 /* We want to wait for completion in this context */
7843 __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); 7881 __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
7844 7882
@@ -7911,7 +7949,7 @@ static int bnx2x_stop_queue(struct bnx2x *bp, int index)
7911 7949
7912 DP(NETIF_MSG_IFDOWN, "stopping queue %d cid %d\n", index, fp->cid); 7950 DP(NETIF_MSG_IFDOWN, "stopping queue %d cid %d\n", index, fp->cid);
7913 7951
7914 q_params.q_obj = &fp->q_obj; 7952 q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
7915 /* We want to wait for completion in this context */ 7953 /* We want to wait for completion in this context */
7916 __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); 7954 __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
7917 7955
@@ -7922,7 +7960,7 @@ static int bnx2x_stop_queue(struct bnx2x *bp, int index)
7922 tx_index++){ 7960 tx_index++){
7923 7961
7924 /* ascertain this is a normal queue*/ 7962 /* ascertain this is a normal queue*/
7925 txdata = &fp->txdata[tx_index]; 7963 txdata = fp->txdata_ptr[tx_index];
7926 7964
7927 DP(NETIF_MSG_IFDOWN, "stopping tx-only queue %d\n", 7965 DP(NETIF_MSG_IFDOWN, "stopping tx-only queue %d\n",
7928 txdata->txq_index); 7966 txdata->txq_index);
@@ -8289,7 +8327,7 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
8289 struct bnx2x_fastpath *fp = &bp->fp[i]; 8327 struct bnx2x_fastpath *fp = &bp->fp[i];
8290 8328
8291 for_each_cos_in_tx_queue(fp, cos) 8329 for_each_cos_in_tx_queue(fp, cos)
8292 rc = bnx2x_clean_tx_queue(bp, &fp->txdata[cos]); 8330 rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
8293#ifdef BNX2X_STOP_ON_ERROR 8331#ifdef BNX2X_STOP_ON_ERROR
8294 if (rc) 8332 if (rc)
8295 return; 8333 return;
@@ -8300,12 +8338,13 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
8300 usleep_range(1000, 1000); 8338 usleep_range(1000, 1000);
8301 8339
8302 /* Clean all ETH MACs */ 8340 /* Clean all ETH MACs */
8303 rc = bnx2x_del_all_macs(bp, &bp->fp[0].mac_obj, BNX2X_ETH_MAC, false); 8341 rc = bnx2x_del_all_macs(bp, &bp->sp_objs[0].mac_obj, BNX2X_ETH_MAC,
8342 false);
8304 if (rc < 0) 8343 if (rc < 0)
8305 BNX2X_ERR("Failed to delete all ETH macs: %d\n", rc); 8344 BNX2X_ERR("Failed to delete all ETH macs: %d\n", rc);
8306 8345
8307 /* Clean up UC list */ 8346 /* Clean up UC list */
8308 rc = bnx2x_del_all_macs(bp, &bp->fp[0].mac_obj, BNX2X_UC_LIST_MAC, 8347 rc = bnx2x_del_all_macs(bp, &bp->sp_objs[0].mac_obj, BNX2X_UC_LIST_MAC,
8309 true); 8348 true);
8310 if (rc < 0) 8349 if (rc < 0)
8311 BNX2X_ERR("Failed to schedule DEL commands for UC MACs list: %d\n", 8350 BNX2X_ERR("Failed to schedule DEL commands for UC MACs list: %d\n",
@@ -9697,6 +9736,11 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
9697 bp->flags |= (val >= REQ_BC_VER_4_PFC_STATS_SUPPORTED) ? 9736 bp->flags |= (val >= REQ_BC_VER_4_PFC_STATS_SUPPORTED) ?
9698 BC_SUPPORTS_PFC_STATS : 0; 9737 BC_SUPPORTS_PFC_STATS : 0;
9699 9738
9739 bp->flags |= (val >= REQ_BC_VER_4_FCOE_FEATURES) ?
9740 BC_SUPPORTS_FCOE_FEATURES : 0;
9741
9742 bp->flags |= (val >= REQ_BC_VER_4_DCBX_ADMIN_MSG_NON_PMF) ?
9743 BC_SUPPORTS_DCBX_MSG_NON_PMF : 0;
9700 boot_mode = SHMEM_RD(bp, 9744 boot_mode = SHMEM_RD(bp,
9701 dev_info.port_feature_config[BP_PORT(bp)].mba_config) & 9745 dev_info.port_feature_config[BP_PORT(bp)].mba_config) &
9702 PORT_FEATURE_MBA_BOOT_AGENT_TYPE_MASK; 9746 PORT_FEATURE_MBA_BOOT_AGENT_TYPE_MASK;
@@ -10082,7 +10126,7 @@ static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
10082{ 10126{
10083 int port = BP_PORT(bp); 10127 int port = BP_PORT(bp);
10084 u32 config; 10128 u32 config;
10085 u32 ext_phy_type, ext_phy_config; 10129 u32 ext_phy_type, ext_phy_config, eee_mode;
10086 10130
10087 bp->link_params.bp = bp; 10131 bp->link_params.bp = bp;
10088 bp->link_params.port = port; 10132 bp->link_params.port = port;
@@ -10149,6 +10193,19 @@ static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
10149 bp->port.need_hw_lock = bnx2x_hw_lock_required(bp, 10193 bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
10150 bp->common.shmem_base, 10194 bp->common.shmem_base,
10151 bp->common.shmem2_base); 10195 bp->common.shmem2_base);
10196
10197 /* Configure link feature according to nvram value */
10198 eee_mode = (((SHMEM_RD(bp, dev_info.
10199 port_feature_config[port].eee_power_mode)) &
10200 PORT_FEAT_CFG_EEE_POWER_MODE_MASK) >>
10201 PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT);
10202 if (eee_mode != PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED) {
10203 bp->link_params.eee_mode = EEE_MODE_ADV_LPI |
10204 EEE_MODE_ENABLE_LPI |
10205 EEE_MODE_OUTPUT_TIME;
10206 } else {
10207 bp->link_params.eee_mode = 0;
10208 }
10152} 10209}
10153 10210
10154void bnx2x_get_iscsi_info(struct bnx2x *bp) 10211void bnx2x_get_iscsi_info(struct bnx2x *bp)
@@ -10997,7 +11054,7 @@ static int bnx2x_set_uc_list(struct bnx2x *bp)
10997 int rc; 11054 int rc;
10998 struct net_device *dev = bp->dev; 11055 struct net_device *dev = bp->dev;
10999 struct netdev_hw_addr *ha; 11056 struct netdev_hw_addr *ha;
11000 struct bnx2x_vlan_mac_obj *mac_obj = &bp->fp->mac_obj; 11057 struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
11001 unsigned long ramrod_flags = 0; 11058 unsigned long ramrod_flags = 0;
11002 11059
11003 /* First schedule a cleanup up of old configuration */ 11060 /* First schedule a cleanup up of old configuration */
@@ -11503,8 +11560,7 @@ static void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
11503 } 11560 }
11504} 11561}
11505 11562
11506/** 11563/* IRO array is stored in the following format:
11507 * IRO array is stored in the following format:
11508 * {base(24bit), m1(16bit), m2(16bit), m3(16bit), size(16bit) } 11564 * {base(24bit), m1(16bit), m2(16bit), m3(16bit), size(16bit) }
11509 */ 11565 */
11510static void bnx2x_prep_iro(const u8 *_source, u8 *_target, u32 n) 11566static void bnx2x_prep_iro(const u8 *_source, u8 *_target, u32 n)
@@ -11672,7 +11728,7 @@ void bnx2x__init_func_obj(struct bnx2x *bp)
11672/* must be called after sriov-enable */ 11728/* must be called after sriov-enable */
11673static int bnx2x_set_qm_cid_count(struct bnx2x *bp) 11729static int bnx2x_set_qm_cid_count(struct bnx2x *bp)
11674{ 11730{
11675 int cid_count = BNX2X_L2_CID_COUNT(bp); 11731 int cid_count = BNX2X_L2_MAX_CID(bp);
11676 11732
11677#ifdef BCM_CNIC 11733#ifdef BCM_CNIC
11678 cid_count += CNIC_CID_MAX; 11734 cid_count += CNIC_CID_MAX;
@@ -11717,7 +11773,7 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
11717 struct bnx2x *bp; 11773 struct bnx2x *bp;
11718 int pcie_width, pcie_speed; 11774 int pcie_width, pcie_speed;
11719 int rc, max_non_def_sbs; 11775 int rc, max_non_def_sbs;
11720 int rx_count, tx_count, rss_count; 11776 int rx_count, tx_count, rss_count, doorbell_size;
11721 /* 11777 /*
11722 * An estimated maximum supported CoS number according to the chip 11778 * An estimated maximum supported CoS number according to the chip
11723 * version. 11779 * version.
@@ -11760,13 +11816,6 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
11760 11816
11761 max_non_def_sbs = bnx2x_get_num_non_def_sbs(pdev); 11817 max_non_def_sbs = bnx2x_get_num_non_def_sbs(pdev);
11762 11818
11763 /* !!! FIXME !!!
11764 * Do not allow the maximum SB count to grow above 16
11765 * since Special CIDs starts from 16*BNX2X_MULTI_TX_COS=48.
11766 * We will use the FP_SB_MAX_E1x macro for this matter.
11767 */
11768 max_non_def_sbs = min_t(int, FP_SB_MAX_E1x, max_non_def_sbs);
11769
11770 WARN_ON(!max_non_def_sbs); 11819 WARN_ON(!max_non_def_sbs);
11771 11820
11772 /* Maximum number of RSS queues: one IGU SB goes to CNIC */ 11821 /* Maximum number of RSS queues: one IGU SB goes to CNIC */
@@ -11777,9 +11826,9 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
11777 11826
11778 /* 11827 /*
11779 * Maximum number of netdev Tx queues: 11828 * Maximum number of netdev Tx queues:
11780 * Maximum TSS queues * Maximum supported number of CoS + FCoE L2 11829 * Maximum TSS queues * Maximum supported number of CoS + FCoE L2
11781 */ 11830 */
11782 tx_count = MAX_TXQS_PER_COS * max_cos_est + FCOE_PRESENT; 11831 tx_count = rss_count * max_cos_est + FCOE_PRESENT;
11783 11832
11784 /* dev zeroed in init_etherdev */ 11833 /* dev zeroed in init_etherdev */
11785 dev = alloc_etherdev_mqs(sizeof(*bp), tx_count, rx_count); 11834 dev = alloc_etherdev_mqs(sizeof(*bp), tx_count, rx_count);
@@ -11788,9 +11837,6 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
11788 11837
11789 bp = netdev_priv(dev); 11838 bp = netdev_priv(dev);
11790 11839
11791 BNX2X_DEV_INFO("Allocated netdev with %d tx and %d rx queues\n",
11792 tx_count, rx_count);
11793
11794 bp->igu_sb_cnt = max_non_def_sbs; 11840 bp->igu_sb_cnt = max_non_def_sbs;
11795 bp->msg_enable = debug; 11841 bp->msg_enable = debug;
11796 pci_set_drvdata(pdev, dev); 11842 pci_set_drvdata(pdev, dev);
@@ -11803,6 +11849,9 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
11803 11849
11804 BNX2X_DEV_INFO("max_non_def_sbs %d\n", max_non_def_sbs); 11850 BNX2X_DEV_INFO("max_non_def_sbs %d\n", max_non_def_sbs);
11805 11851
11852 BNX2X_DEV_INFO("Allocated netdev with %d tx and %d rx queues\n",
11853 tx_count, rx_count);
11854
11806 rc = bnx2x_init_bp(bp); 11855 rc = bnx2x_init_bp(bp);
11807 if (rc) 11856 if (rc)
11808 goto init_one_exit; 11857 goto init_one_exit;
@@ -11811,9 +11860,15 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
11811 * Map doorbels here as we need the real value of bp->max_cos which 11860 * Map doorbels here as we need the real value of bp->max_cos which
11812 * is initialized in bnx2x_init_bp(). 11861 * is initialized in bnx2x_init_bp().
11813 */ 11862 */
11863 doorbell_size = BNX2X_L2_MAX_CID(bp) * (1 << BNX2X_DB_SHIFT);
11864 if (doorbell_size > pci_resource_len(pdev, 2)) {
11865 dev_err(&bp->pdev->dev,
11866 "Cannot map doorbells, bar size too small, aborting\n");
11867 rc = -ENOMEM;
11868 goto init_one_exit;
11869 }
11814 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2), 11870 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
11815 min_t(u64, BNX2X_DB_SIZE(bp), 11871 doorbell_size);
11816 pci_resource_len(pdev, 2)));
11817 if (!bp->doorbells) { 11872 if (!bp->doorbells) {
11818 dev_err(&bp->pdev->dev, 11873 dev_err(&bp->pdev->dev,
11819 "Cannot map doorbell space, aborting\n"); 11874 "Cannot map doorbell space, aborting\n");
@@ -11831,8 +11886,12 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
11831 11886
11832#endif 11887#endif
11833 11888
11889
11890 /* Set bp->num_queues for MSI-X mode*/
11891 bnx2x_set_num_queues(bp);
11892
11834 /* Configure interrupt mode: try to enable MSI-X/MSI if 11893 /* Configure interrupt mode: try to enable MSI-X/MSI if
11835 * needed, set bp->num_queues appropriately. 11894 * needed.
11836 */ 11895 */
11837 bnx2x_set_int_mode(bp); 11896 bnx2x_set_int_mode(bp);
11838 11897
@@ -12176,6 +12235,7 @@ static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp)
12176static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count) 12235static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
12177{ 12236{
12178 struct eth_spe *spe; 12237 struct eth_spe *spe;
12238 int cxt_index, cxt_offset;
12179 12239
12180#ifdef BNX2X_STOP_ON_ERROR 12240#ifdef BNX2X_STOP_ON_ERROR
12181 if (unlikely(bp->panic)) 12241 if (unlikely(bp->panic))
@@ -12198,10 +12258,16 @@ static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
12198 * ramrod 12258 * ramrod
12199 */ 12259 */
12200 if (type == ETH_CONNECTION_TYPE) { 12260 if (type == ETH_CONNECTION_TYPE) {
12201 if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP) 12261 if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP) {
12202 bnx2x_set_ctx_validation(bp, &bp->context. 12262 cxt_index = BNX2X_ISCSI_ETH_CID(bp) /
12203 vcxt[BNX2X_ISCSI_ETH_CID].eth, 12263 ILT_PAGE_CIDS;
12204 BNX2X_ISCSI_ETH_CID); 12264 cxt_offset = BNX2X_ISCSI_ETH_CID(bp) -
12265 (cxt_index * ILT_PAGE_CIDS);
12266 bnx2x_set_ctx_validation(bp,
12267 &bp->context[cxt_index].
12268 vcxt[cxt_offset].eth,
12269 BNX2X_ISCSI_ETH_CID(bp));
12270 }
12205 } 12271 }
12206 12272
12207 /* 12273 /*
@@ -12488,21 +12554,45 @@ static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
12488 break; 12554 break;
12489 } 12555 }
12490 case DRV_CTL_ULP_REGISTER_CMD: { 12556 case DRV_CTL_ULP_REGISTER_CMD: {
12491 int ulp_type = ctl->data.ulp_type; 12557 int ulp_type = ctl->data.register_data.ulp_type;
12492 12558
12493 if (CHIP_IS_E3(bp)) { 12559 if (CHIP_IS_E3(bp)) {
12494 int idx = BP_FW_MB_IDX(bp); 12560 int idx = BP_FW_MB_IDX(bp);
12495 u32 cap; 12561 u32 cap = SHMEM2_RD(bp, drv_capabilities_flag[idx]);
12562 int path = BP_PATH(bp);
12563 int port = BP_PORT(bp);
12564 int i;
12565 u32 scratch_offset;
12566 u32 *host_addr;
12496 12567
12497 cap = SHMEM2_RD(bp, drv_capabilities_flag[idx]); 12568 /* first write capability to shmem2 */
12498 if (ulp_type == CNIC_ULP_ISCSI) 12569 if (ulp_type == CNIC_ULP_ISCSI)
12499 cap |= DRV_FLAGS_CAPABILITIES_LOADED_ISCSI; 12570 cap |= DRV_FLAGS_CAPABILITIES_LOADED_ISCSI;
12500 else if (ulp_type == CNIC_ULP_FCOE) 12571 else if (ulp_type == CNIC_ULP_FCOE)
12501 cap |= DRV_FLAGS_CAPABILITIES_LOADED_FCOE; 12572 cap |= DRV_FLAGS_CAPABILITIES_LOADED_FCOE;
12502 SHMEM2_WR(bp, drv_capabilities_flag[idx], cap); 12573 SHMEM2_WR(bp, drv_capabilities_flag[idx], cap);
12574
12575 if ((ulp_type != CNIC_ULP_FCOE) ||
12576 (!SHMEM2_HAS(bp, ncsi_oem_data_addr)) ||
12577 (!(bp->flags & BC_SUPPORTS_FCOE_FEATURES)))
12578 break;
12579
12580 /* if reached here - should write fcoe capabilities */
12581 scratch_offset = SHMEM2_RD(bp, ncsi_oem_data_addr);
12582 if (!scratch_offset)
12583 break;
12584 scratch_offset += offsetof(struct glob_ncsi_oem_data,
12585 fcoe_features[path][port]);
12586 host_addr = (u32 *) &(ctl->data.register_data.
12587 fcoe_features);
12588 for (i = 0; i < sizeof(struct fcoe_capabilities);
12589 i += 4)
12590 REG_WR(bp, scratch_offset + i,
12591 *(host_addr + i/4));
12503 } 12592 }
12504 break; 12593 break;
12505 } 12594 }
12595
12506 case DRV_CTL_ULP_UNREGISTER_CMD: { 12596 case DRV_CTL_ULP_UNREGISTER_CMD: {
12507 int ulp_type = ctl->data.ulp_type; 12597 int ulp_type = ctl->data.ulp_type;
12508 12598
@@ -12554,6 +12644,21 @@ void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
12554 cp->num_irq = 2; 12644 cp->num_irq = 2;
12555} 12645}
12556 12646
12647void bnx2x_setup_cnic_info(struct bnx2x *bp)
12648{
12649 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
12650
12651
12652 cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) +
12653 bnx2x_cid_ilt_lines(bp);
12654 cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS;
12655 cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID(bp);
12656 cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID(bp);
12657
12658 if (NO_ISCSI_OOO(bp))
12659 cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO;
12660}
12661
12557static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops, 12662static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
12558 void *data) 12663 void *data)
12559{ 12664{
@@ -12632,10 +12737,10 @@ struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
12632 cp->drv_ctl = bnx2x_drv_ctl; 12737 cp->drv_ctl = bnx2x_drv_ctl;
12633 cp->drv_register_cnic = bnx2x_register_cnic; 12738 cp->drv_register_cnic = bnx2x_register_cnic;
12634 cp->drv_unregister_cnic = bnx2x_unregister_cnic; 12739 cp->drv_unregister_cnic = bnx2x_unregister_cnic;
12635 cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID; 12740 cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID(bp);
12636 cp->iscsi_l2_client_id = 12741 cp->iscsi_l2_client_id =
12637 bnx2x_cnic_eth_cl_id(bp, BNX2X_ISCSI_ETH_CL_ID_IDX); 12742 bnx2x_cnic_eth_cl_id(bp, BNX2X_ISCSI_ETH_CL_ID_IDX);
12638 cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID; 12743 cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID(bp);
12639 12744
12640 if (NO_ISCSI_OOO(bp)) 12745 if (NO_ISCSI_OOO(bp))
12641 cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO; 12746 cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h
new file mode 100644
index 00000000000..ddd5106ad2f
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h
@@ -0,0 +1,168 @@
1/* bnx2x_mfw_req.h: Broadcom Everest network driver.
2 *
3 * Copyright (c) 2012 Broadcom Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 */
9
10#ifndef BNX2X_MFW_REQ_H
11#define BNX2X_MFW_REQ_H
12
13#define PORT_0 0
14#define PORT_1 1
15#define PORT_MAX 2
16#define NVM_PATH_MAX 2
17
18/* FCoE capabilities required from the driver */
19struct fcoe_capabilities {
20 u32 capability1;
21 /* Maximum number of I/Os per connection */
22 #define FCOE_IOS_PER_CONNECTION_MASK 0x0000ffff
23 #define FCOE_IOS_PER_CONNECTION_SHIFT 0
24 /* Maximum number of Logins per port */
25 #define FCOE_LOGINS_PER_PORT_MASK 0xffff0000
26 #define FCOE_LOGINS_PER_PORT_SHIFT 16
27
28 u32 capability2;
29 /* Maximum number of exchanges */
30 #define FCOE_NUMBER_OF_EXCHANGES_MASK 0x0000ffff
31 #define FCOE_NUMBER_OF_EXCHANGES_SHIFT 0
32 /* Maximum NPIV WWN per port */
33 #define FCOE_NPIV_WWN_PER_PORT_MASK 0xffff0000
34 #define FCOE_NPIV_WWN_PER_PORT_SHIFT 16
35
36 u32 capability3;
37 /* Maximum number of targets supported */
38 #define FCOE_TARGETS_SUPPORTED_MASK 0x0000ffff
39 #define FCOE_TARGETS_SUPPORTED_SHIFT 0
40 /* Maximum number of outstanding commands across all connections */
41 #define FCOE_OUTSTANDING_COMMANDS_MASK 0xffff0000
42 #define FCOE_OUTSTANDING_COMMANDS_SHIFT 16
43
44 u32 capability4;
45 #define FCOE_CAPABILITY4_STATEFUL 0x00000001
46 #define FCOE_CAPABILITY4_STATELESS 0x00000002
47 #define FCOE_CAPABILITY4_CAPABILITIES_REPORTED_VALID 0x00000004
48};
49
50struct glob_ncsi_oem_data {
51 u32 driver_version;
52 u32 unused[3];
53 struct fcoe_capabilities fcoe_features[NVM_PATH_MAX][PORT_MAX];
54};
55
56/* current drv_info version */
57#define DRV_INFO_CUR_VER 2
58
59/* drv_info op codes supported */
60enum drv_info_opcode {
61 ETH_STATS_OPCODE,
62 FCOE_STATS_OPCODE,
63 ISCSI_STATS_OPCODE
64};
65
66#define ETH_STAT_INFO_VERSION_LEN 12
67/* Per PCI Function Ethernet Statistics required from the driver */
68struct eth_stats_info {
69 /* Function's Driver Version. padded to 12 */
70 u8 version[ETH_STAT_INFO_VERSION_LEN];
71 /* Locally Admin Addr. BigEndian EIU48. Actual size is 6 bytes */
72 u8 mac_local[8];
73 u8 mac_add1[8]; /* Additional Programmed MAC Addr 1. */
74 u8 mac_add2[8]; /* Additional Programmed MAC Addr 2. */
75 u32 mtu_size; /* MTU Size. Note : Negotiated MTU */
76 u32 feature_flags; /* Feature_Flags. */
77#define FEATURE_ETH_CHKSUM_OFFLOAD_MASK 0x01
78#define FEATURE_ETH_LSO_MASK 0x02
79#define FEATURE_ETH_BOOTMODE_MASK 0x1C
80#define FEATURE_ETH_BOOTMODE_SHIFT 2
81#define FEATURE_ETH_BOOTMODE_NONE (0x0 << 2)
82#define FEATURE_ETH_BOOTMODE_PXE (0x1 << 2)
83#define FEATURE_ETH_BOOTMODE_ISCSI (0x2 << 2)
84#define FEATURE_ETH_BOOTMODE_FCOE (0x3 << 2)
85#define FEATURE_ETH_TOE_MASK 0x20
86 u32 lso_max_size; /* LSO MaxOffloadSize. */
87 u32 lso_min_seg_cnt; /* LSO MinSegmentCount. */
88 /* Num Offloaded Connections TCP_IPv4. */
89 u32 ipv4_ofld_cnt;
90 /* Num Offloaded Connections TCP_IPv6. */
91 u32 ipv6_ofld_cnt;
92 u32 promiscuous_mode; /* Promiscuous Mode. non-zero true */
93 u32 txq_size; /* TX Descriptors Queue Size */
94 u32 rxq_size; /* RX Descriptors Queue Size */
95 /* TX Descriptor Queue Avg Depth. % Avg Queue Depth since last poll */
96 u32 txq_avg_depth;
97 /* RX Descriptors Queue Avg Depth. % Avg Queue Depth since last poll */
98 u32 rxq_avg_depth;
99 /* IOV_Offload. 0=none; 1=MultiQueue, 2=VEB 3= VEPA*/
100 u32 iov_offload;
101 /* Number of NetQueue/VMQ Config'd. */
102 u32 netq_cnt;
103 u32 vf_cnt; /* Num VF assigned to this PF. */
104};
105
106/* Per PCI Function FCOE Statistics required from the driver */
107struct fcoe_stats_info {
108 u8 version[12]; /* Function's Driver Version. */
109 u8 mac_local[8]; /* Locally Admin Addr. */
110 u8 mac_add1[8]; /* Additional Programmed MAC Addr 1. */
111 u8 mac_add2[8]; /* Additional Programmed MAC Addr 2. */
112 /* QoS Priority (per 802.1p). 0-7255 */
113 u32 qos_priority;
114 u32 txq_size; /* FCoE TX Descriptors Queue Size. */
115 u32 rxq_size; /* FCoE RX Descriptors Queue Size. */
116 /* FCoE TX Descriptor Queue Avg Depth. */
117 u32 txq_avg_depth;
118 /* FCoE RX Descriptors Queue Avg Depth. */
119 u32 rxq_avg_depth;
120 u32 rx_frames_lo; /* FCoE RX Frames received. */
121 u32 rx_frames_hi; /* FCoE RX Frames received. */
122 u32 rx_bytes_lo; /* FCoE RX Bytes received. */
123 u32 rx_bytes_hi; /* FCoE RX Bytes received. */
124 u32 tx_frames_lo; /* FCoE TX Frames sent. */
125 u32 tx_frames_hi; /* FCoE TX Frames sent. */
126 u32 tx_bytes_lo; /* FCoE TX Bytes sent. */
127 u32 tx_bytes_hi; /* FCoE TX Bytes sent. */
128};
129
130/* Per PCI Function iSCSI Statistics required from the driver*/
131struct iscsi_stats_info {
132 u8 version[12]; /* Function's Driver Version. */
133 u8 mac_local[8]; /* Locally Admin iSCSI MAC Addr. */
134 u8 mac_add1[8]; /* Additional Programmed MAC Addr 1. */
135 /* QoS Priority (per 802.1p). 0-7255 */
136 u32 qos_priority;
137 u8 initiator_name[64]; /* iSCSI Boot Initiator Node name. */
138 u8 ww_port_name[64]; /* iSCSI World wide port name */
139 u8 boot_target_name[64];/* iSCSI Boot Target Name. */
140 u8 boot_target_ip[16]; /* iSCSI Boot Target IP. */
141 u32 boot_target_portal; /* iSCSI Boot Target Portal. */
142 u8 boot_init_ip[16]; /* iSCSI Boot Initiator IP Address. */
143 u32 max_frame_size; /* Max Frame Size. bytes */
144 u32 txq_size; /* PDU TX Descriptors Queue Size. */
145 u32 rxq_size; /* PDU RX Descriptors Queue Size. */
146 u32 txq_avg_depth; /* PDU TX Descriptor Queue Avg Depth. */
147 u32 rxq_avg_depth; /* PDU RX Descriptors Queue Avg Depth. */
148 u32 rx_pdus_lo; /* iSCSI PDUs received. */
149 u32 rx_pdus_hi; /* iSCSI PDUs received. */
150 u32 rx_bytes_lo; /* iSCSI RX Bytes received. */
151 u32 rx_bytes_hi; /* iSCSI RX Bytes received. */
152 u32 tx_pdus_lo; /* iSCSI PDUs sent. */
153 u32 tx_pdus_hi; /* iSCSI PDUs sent. */
154 u32 tx_bytes_lo; /* iSCSI PDU TX Bytes sent. */
155 u32 tx_bytes_hi; /* iSCSI PDU TX Bytes sent. */
156 u32 pcp_prior_map_tbl; /* C-PCP to S-PCP Priority MapTable.
157 * 9 nibbles, the position of each nibble
158 * represents the C-PCP value, the value
159 * of the nibble = S-PCP value.
160 */
161};
162
163union drv_info_to_mcp {
164 struct eth_stats_info ether_stat;
165 struct fcoe_stats_info fcoe_stat;
166 struct iscsi_stats_info iscsi_stat;
167};
168#endif /* BNX2X_MFW_REQ_H */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
index bbd387492a8..ec62a5c8bd3 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
@@ -1488,6 +1488,121 @@
1488 * 2:1 - otp_misc_do[51:50]; 0 - otp_misc_do[1]. */ 1488 * 2:1 - otp_misc_do[51:50]; 0 - otp_misc_do[1]. */
1489#define MISC_REG_CHIP_TYPE 0xac60 1489#define MISC_REG_CHIP_TYPE 0xac60
1490#define MISC_REG_CHIP_TYPE_57811_MASK (1<<1) 1490#define MISC_REG_CHIP_TYPE_57811_MASK (1<<1)
1491#define MISC_REG_CPMU_LP_DR_ENABLE 0xa858
1492/* [RW 1] FW EEE LPI Enable. When 1 indicates that EEE LPI mode is enabled
1493 * by FW. When 0 indicates that the EEE LPI mode is disabled by FW. Clk
1494 * 25MHz. Reset on hard reset. */
1495#define MISC_REG_CPMU_LP_FW_ENABLE_P0 0xa84c
1496/* [RW 32] EEE LPI Idle Threshold. The threshold value for the idle EEE LPI
1497 * counter. Timer tick is 1 us. Clock 25MHz. Reset on hard reset. */
1498#define MISC_REG_CPMU_LP_IDLE_THR_P0 0xa8a0
1499/* [RW 18] LPI entry events mask. [0] - Vmain SM Mask. When 1 indicates that
1500 * the Vmain SM end state is disabled. When 0 indicates that the Vmain SM
1501 * end state is enabled. [1] - FW Queues Empty Mask. When 1 indicates that
1502 * the FW command that all Queues are empty is disabled. When 0 indicates
1503 * that the FW command that all Queues are empty is enabled. [2] - FW Early
1504 * Exit Mask / Reserved (Entry mask). When 1 indicates that the FW Early
1505 * Exit command is disabled. When 0 indicates that the FW Early Exit command
1506 * is enabled. This bit applicable only in the EXIT Events Mask registers.
1507 * [3] - PBF Request Mask. When 1 indicates that the PBF Request indication
1508 * is disabled. When 0 indicates that the PBF Request indication is enabled.
1509 * [4] - Tx Request Mask. When =1 indicates that the Tx other Than PBF
1510 * Request indication is disabled. When 0 indicates that the Tx Other Than
1511 * PBF Request indication is enabled. [5] - Rx EEE LPI Status Mask. When 1
1512 * indicates that the RX EEE LPI Status indication is disabled. When 0
1513 * indicates that the RX EEE LPI Status indication is enabled. In the EXIT
1514 * Events Masks registers; this bit masks the falling edge detect of the LPI
1515 * Status (Rx LPI is on - off). [6] - Tx Pause Mask. When 1 indicates that
1516 * the Tx Pause indication is disabled. When 0 indicates that the Tx Pause
1517 * indication is enabled. [7] - BRB1 Empty Mask. When 1 indicates that the
1518 * BRB1 EMPTY indication is disabled. When 0 indicates that the BRB1 EMPTY
1519 * indication is enabled. [8] - QM Idle Mask. When 1 indicates that the QM
1520 * IDLE indication is disabled. When 0 indicates that the QM IDLE indication
1521 * is enabled. (One bit for both VOQ0 and VOQ1). [9] - QM LB Idle Mask. When
1522 * 1 indicates that the QM IDLE indication for LOOPBACK is disabled. When 0
1523 * indicates that the QM IDLE indication for LOOPBACK is enabled. [10] - L1
1524 * Status Mask. When 1 indicates that the L1 Status indication from the PCIE
1525 * CORE is disabled. When 0 indicates that the RX EEE LPI Status indication
1526 * from the PCIE CORE is enabled. In the EXIT Events Masks registers; this
1527 * bit masks the falling edge detect of the L1 status (L1 is on - off). [11]
1528 * - P0 E0 EEE EEE LPI REQ Mask. When =1 indicates that the P0 E0 EEE EEE
1529 * LPI REQ indication is disabled. When =0 indicates that the P0 E0 EEE LPI
1530 * REQ indication is enabled. [12] - P1 E0 EEE LPI REQ Mask. When =1
1531 * indicates that the P0 EEE LPI REQ indication is disabled. When =0
1532 * indicates that the P0 EEE LPI REQ indication is enabled. [13] - P0 E1 EEE
1533 * LPI REQ Mask. When =1 indicates that the P0 EEE LPI REQ indication is
1534 * disabled. When =0 indicates that the P0 EEE LPI REQ indication is
1535 * enabled. [14] - P1 E1 EEE LPI REQ Mask. When =1 indicates that the P0 EEE
1536 * LPI REQ indication is disabled. When =0 indicates that the P0 EEE LPI REQ
1537 * indication is enabled. [15] - L1 REQ Mask. When =1 indicates that the L1
1538 * REQ indication is disabled. When =0 indicates that the L1 indication is
1539 * enabled. [16] - Rx EEE LPI Status Edge Detect Mask. When =1 indicates
1540 * that the RX EEE LPI Status Falling Edge Detect indication is disabled (Rx
1541 * EEE LPI is on - off). When =0 indicates that the RX EEE LPI Status
1542 * Falling Edge Detec indication is enabled (Rx EEE LPI is on - off). This
1543 * bit is applicable only in the EXIT Events Masks registers. [17] - L1
1544 * Status Edge Detect Mask. When =1 indicates that the L1 Status Falling
1545 * Edge Detect indication from the PCIE CORE is disabled (L1 is on - off).
1546 * When =0 indicates that the L1 Status Falling Edge Detect indication from
1547 * the PCIE CORE is enabled (L1 is on - off). This bit is applicable only in
1548 * the EXIT Events Masks registers. Clock 25MHz. Reset on hard reset. */
1549#define MISC_REG_CPMU_LP_MASK_ENT_P0 0xa880
1550/* [RW 18] EEE LPI exit events mask. [0] - Vmain SM Mask. When 1 indicates
1551 * that the Vmain SM end state is disabled. When 0 indicates that the Vmain
1552 * SM end state is enabled. [1] - FW Queues Empty Mask. When 1 indicates
1553 * that the FW command that all Queues are empty is disabled. When 0
1554 * indicates that the FW command that all Queues are empty is enabled. [2] -
1555 * FW Early Exit Mask / Reserved (Entry mask). When 1 indicates that the FW
1556 * Early Exit command is disabled. When 0 indicates that the FW Early Exit
1557 * command is enabled. This bit applicable only in the EXIT Events Mask
1558 * registers. [3] - PBF Request Mask. When 1 indicates that the PBF Request
1559 * indication is disabled. When 0 indicates that the PBF Request indication
1560 * is enabled. [4] - Tx Request Mask. When =1 indicates that the Tx other
1561 * Than PBF Request indication is disabled. When 0 indicates that the Tx
1562 * Other Than PBF Request indication is enabled. [5] - Rx EEE LPI Status
1563 * Mask. When 1 indicates that the RX EEE LPI Status indication is disabled.
1564 * When 0 indicates that the RX LPI Status indication is enabled. In the
1565 * EXIT Events Masks registers; this bit masks the falling edge detect of
1566 * the EEE LPI Status (Rx EEE LPI is on - off). [6] - Tx Pause Mask. When 1
1567 * indicates that the Tx Pause indication is disabled. When 0 indicates that
1568 * the Tx Pause indication is enabled. [7] - BRB1 Empty Mask. When 1
1569 * indicates that the BRB1 EMPTY indication is disabled. When 0 indicates
1570 * that the BRB1 EMPTY indication is enabled. [8] - QM Idle Mask. When 1
1571 * indicates that the QM IDLE indication is disabled. When 0 indicates that
1572 * the QM IDLE indication is enabled. (One bit for both VOQ0 and VOQ1). [9]
1573 * - QM LB Idle Mask. When 1 indicates that the QM IDLE indication for
1574 * LOOPBACK is disabled. When 0 indicates that the QM IDLE indication for
1575 * LOOPBACK is enabled. [10] - L1 Status Mask. When 1 indicates that the L1
1576 * Status indication from the PCIE CORE is disabled. When 0 indicates that
1577 * the RX EEE LPI Status indication from the PCIE CORE is enabled. In the
1578 * EXIT Events Masks registers; this bit masks the falling edge detect of
1579 * the L1 status (L1 is on - off). [11] - P0 E0 EEE EEE LPI REQ Mask. When
1580 * =1 indicates that the P0 E0 EEE EEE LPI REQ indication is disabled. When
1581 * =0 indicates that the P0 E0 EEE LPI REQ indication is enabled. [12] - P1
1582 * E0 EEE LPI REQ Mask. When =1 indicates that the P0 EEE LPI REQ indication
1583 * is disabled. When =0 indicates that the P0 EEE LPI REQ indication is
1584 * enabled. [13] - P0 E1 EEE LPI REQ Mask. When =1 indicates that the P0 EEE
1585 * LPI REQ indication is disabled. When =0 indicates that the P0 EEE LPI REQ
1586 * indication is enabled. [14] - P1 E1 EEE LPI REQ Mask. When =1 indicates
1587 * that the P0 EEE LPI REQ indication is disabled. When =0 indicates that
1588 * the P0 EEE LPI REQ indication is enabled. [15] - L1 REQ Mask. When =1
1589 * indicates that the L1 REQ indication is disabled. When =0 indicates that
1590 * the L1 indication is enabled. [16] - Rx EEE LPI Status Edge Detect Mask.
1591 * When =1 indicates that the RX EEE LPI Status Falling Edge Detect
1592 * indication is disabled (Rx EEE LPI is on - off). When =0 indicates that
1593 * the RX EEE LPI Status Falling Edge Detec indication is enabled (Rx EEE
1594 * LPI is on - off). This bit is applicable only in the EXIT Events Masks
1595 * registers. [17] - L1 Status Edge Detect Mask. When =1 indicates that the
1596 * L1 Status Falling Edge Detect indication from the PCIE CORE is disabled
1597 * (L1 is on - off). When =0 indicates that the L1 Status Falling Edge
1598 * Detect indication from the PCIE CORE is enabled (L1 is on - off). This
1599 * bit is applicable only in the EXIT Events Masks registers.Clock 25MHz.
1600 * Reset on hard reset. */
1601#define MISC_REG_CPMU_LP_MASK_EXT_P0 0xa888
1602/* [RW 16] EEE LPI Entry Events Counter. A statistic counter with the number
1603 * of counts that the SM entered the EEE LPI state. Clock 25MHz. Read only
1604 * register. Reset on hard reset. */
1605#define MISC_REG_CPMU_LP_SM_ENT_CNT_P0 0xa8b8
1491/* [RW 32] The following driver registers(1...16) represent 16 drivers and 1606/* [RW 32] The following driver registers(1...16) represent 16 drivers and
1492 32 clients. Each client can be controlled by one driver only. One in each 1607 32 clients. Each client can be controlled by one driver only. One in each
1493 bit represent that this driver control the appropriate client (Ex: bit 5 1608 bit represent that this driver control the appropriate client (Ex: bit 5
@@ -5372,6 +5487,8 @@
5372/* [RW 32] Lower 48 bits of ctrl_sa register. Used as the SA in PAUSE/PFC 5487/* [RW 32] Lower 48 bits of ctrl_sa register. Used as the SA in PAUSE/PFC
5373 * packets transmitted by the MAC */ 5488 * packets transmitted by the MAC */
5374#define XMAC_REG_CTRL_SA_LO 0x28 5489#define XMAC_REG_CTRL_SA_LO 0x28
5490#define XMAC_REG_EEE_CTRL 0xd8
5491#define XMAC_REG_EEE_TIMERS_HI 0xe4
5375#define XMAC_REG_PAUSE_CTRL 0x68 5492#define XMAC_REG_PAUSE_CTRL 0x68
5376#define XMAC_REG_PFC_CTRL 0x70 5493#define XMAC_REG_PFC_CTRL 0x70
5377#define XMAC_REG_PFC_CTRL_HI 0x74 5494#define XMAC_REG_PFC_CTRL_HI 0x74
@@ -5796,6 +5913,7 @@
5796#define MISC_REGISTERS_SPIO_OUTPUT_LOW 0 5913#define MISC_REGISTERS_SPIO_OUTPUT_LOW 0
5797#define MISC_REGISTERS_SPIO_SET_POS 8 5914#define MISC_REGISTERS_SPIO_SET_POS 8
5798#define HW_LOCK_MAX_RESOURCE_VALUE 31 5915#define HW_LOCK_MAX_RESOURCE_VALUE 31
5916#define HW_LOCK_RESOURCE_DCBX_ADMIN_MIB 13
5799#define HW_LOCK_RESOURCE_DRV_FLAGS 10 5917#define HW_LOCK_RESOURCE_DRV_FLAGS 10
5800#define HW_LOCK_RESOURCE_GPIO 1 5918#define HW_LOCK_RESOURCE_GPIO 1
5801#define HW_LOCK_RESOURCE_MDIO 0 5919#define HW_LOCK_RESOURCE_MDIO 0
@@ -6813,6 +6931,8 @@ Theotherbitsarereservedandshouldbezero*/
6813#define MDIO_AN_REG_LP_AUTO_NEG 0x0013 6931#define MDIO_AN_REG_LP_AUTO_NEG 0x0013
6814#define MDIO_AN_REG_LP_AUTO_NEG2 0x0014 6932#define MDIO_AN_REG_LP_AUTO_NEG2 0x0014
6815#define MDIO_AN_REG_MASTER_STATUS 0x0021 6933#define MDIO_AN_REG_MASTER_STATUS 0x0021
6934#define MDIO_AN_REG_EEE_ADV 0x003c
6935#define MDIO_AN_REG_LP_EEE_ADV 0x003d
6816/*bcm*/ 6936/*bcm*/
6817#define MDIO_AN_REG_LINK_STATUS 0x8304 6937#define MDIO_AN_REG_LINK_STATUS 0x8304
6818#define MDIO_AN_REG_CL37_CL73 0x8370 6938#define MDIO_AN_REG_CL37_CL73 0x8370
@@ -6866,6 +6986,8 @@ Theotherbitsarereservedandshouldbezero*/
6866#define MDIO_PMA_REG_84823_LED3_STRETCH_EN 0x0080 6986#define MDIO_PMA_REG_84823_LED3_STRETCH_EN 0x0080
6867 6987
6868/* BCM84833 only */ 6988/* BCM84833 only */
6989#define MDIO_84833_TOP_CFG_FW_REV 0x400f
6990#define MDIO_84833_TOP_CFG_FW_EEE 0x10b1
6869#define MDIO_84833_TOP_CFG_XGPHY_STRAP1 0x401a 6991#define MDIO_84833_TOP_CFG_XGPHY_STRAP1 0x401a
6870#define MDIO_84833_SUPER_ISOLATE 0x8000 6992#define MDIO_84833_SUPER_ISOLATE 0x8000
6871/* These are mailbox register set used by 84833. */ 6993/* These are mailbox register set used by 84833. */
@@ -6993,11 +7115,13 @@ Theotherbitsarereservedandshouldbezero*/
6993#define MDIO_WC_REG_DIGITAL3_UP1 0x8329 7115#define MDIO_WC_REG_DIGITAL3_UP1 0x8329
6994#define MDIO_WC_REG_DIGITAL3_LP_UP1 0x832c 7116#define MDIO_WC_REG_DIGITAL3_LP_UP1 0x832c
6995#define MDIO_WC_REG_DIGITAL4_MISC3 0x833c 7117#define MDIO_WC_REG_DIGITAL4_MISC3 0x833c
7118#define MDIO_WC_REG_DIGITAL4_MISC5 0x833e
6996#define MDIO_WC_REG_DIGITAL5_MISC6 0x8345 7119#define MDIO_WC_REG_DIGITAL5_MISC6 0x8345
6997#define MDIO_WC_REG_DIGITAL5_MISC7 0x8349 7120#define MDIO_WC_REG_DIGITAL5_MISC7 0x8349
6998#define MDIO_WC_REG_DIGITAL5_ACTUAL_SPEED 0x834e 7121#define MDIO_WC_REG_DIGITAL5_ACTUAL_SPEED 0x834e
6999#define MDIO_WC_REG_DIGITAL6_MP5_NEXTPAGECTRL 0x8350 7122#define MDIO_WC_REG_DIGITAL6_MP5_NEXTPAGECTRL 0x8350
7000#define MDIO_WC_REG_CL49_USERB0_CTRL 0x8368 7123#define MDIO_WC_REG_CL49_USERB0_CTRL 0x8368
7124#define MDIO_WC_REG_EEE_COMBO_CONTROL0 0x8390
7001#define MDIO_WC_REG_TX66_CONTROL 0x83b0 7125#define MDIO_WC_REG_TX66_CONTROL 0x83b0
7002#define MDIO_WC_REG_RX66_CONTROL 0x83c0 7126#define MDIO_WC_REG_RX66_CONTROL 0x83c0
7003#define MDIO_WC_REG_RX66_SCW0 0x83c2 7127#define MDIO_WC_REG_RX66_SCW0 0x83c2
@@ -7036,6 +7160,7 @@ Theotherbitsarereservedandshouldbezero*/
7036#define MDIO_REG_GPHY_EEE_1G (0x1 << 2) 7160#define MDIO_REG_GPHY_EEE_1G (0x1 << 2)
7037#define MDIO_REG_GPHY_EEE_100 (0x1 << 1) 7161#define MDIO_REG_GPHY_EEE_100 (0x1 << 1)
7038#define MDIO_REG_GPHY_EEE_RESOLVED 0x803e 7162#define MDIO_REG_GPHY_EEE_RESOLVED 0x803e
7163#define MDIO_REG_GPHY_AUX_STATUS 0x19
7039#define MDIO_REG_INTR_STATUS 0x1a 7164#define MDIO_REG_INTR_STATUS 0x1a
7040#define MDIO_REG_INTR_MASK 0x1b 7165#define MDIO_REG_INTR_MASK 0x1b
7041#define MDIO_REG_INTR_MASK_LINK_STATUS (0x1 << 1) 7166#define MDIO_REG_INTR_MASK_LINK_STATUS (0x1 << 1)
@@ -7150,8 +7275,7 @@ Theotherbitsarereservedandshouldbezero*/
7150#define CDU_REGION_NUMBER_UCM_AG 4 7275#define CDU_REGION_NUMBER_UCM_AG 4
7151 7276
7152 7277
7153/** 7278/* String-to-compress [31:8] = CID (all 24 bits)
7154 * String-to-compress [31:8] = CID (all 24 bits)
7155 * String-to-compress [7:4] = Region 7279 * String-to-compress [7:4] = Region
7156 * String-to-compress [3:0] = Type 7280 * String-to-compress [3:0] = Type
7157 */ 7281 */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index 6c14b4a4e82..734fd87cd99 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -4107,6 +4107,10 @@ static int bnx2x_setup_rss(struct bnx2x *bp,
4107 data->capabilities |= 4107 data->capabilities |=
4108 ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY; 4108 ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY;
4109 4109
4110 if (test_bit(BNX2X_RSS_IPV4_UDP, &p->rss_flags))
4111 data->capabilities |=
4112 ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY;
4113
4110 if (test_bit(BNX2X_RSS_IPV6, &p->rss_flags)) 4114 if (test_bit(BNX2X_RSS_IPV6, &p->rss_flags))
4111 data->capabilities |= 4115 data->capabilities |=
4112 ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY; 4116 ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY;
@@ -4115,6 +4119,10 @@ static int bnx2x_setup_rss(struct bnx2x *bp,
4115 data->capabilities |= 4119 data->capabilities |=
4116 ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY; 4120 ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY;
4117 4121
4122 if (test_bit(BNX2X_RSS_IPV6_UDP, &p->rss_flags))
4123 data->capabilities |=
4124 ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY;
4125
4118 /* Hashing mask */ 4126 /* Hashing mask */
4119 data->rss_result_mask = p->rss_result_mask; 4127 data->rss_result_mask = p->rss_result_mask;
4120 4128
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
index efd80bdd0df..f83e033da6d 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
@@ -167,9 +167,8 @@ typedef int (*exe_q_remove)(struct bnx2x *bp,
167 union bnx2x_qable_obj *o, 167 union bnx2x_qable_obj *o,
168 struct bnx2x_exeq_elem *elem); 168 struct bnx2x_exeq_elem *elem);
169 169
170/** 170/* Return positive if entry was optimized, 0 - if not, negative
171 * @return positive is entry was optimized, 0 - if not, negative 171 * in case of an error.
172 * in case of an error.
173 */ 172 */
174typedef int (*exe_q_optimize)(struct bnx2x *bp, 173typedef int (*exe_q_optimize)(struct bnx2x *bp,
175 union bnx2x_qable_obj *o, 174 union bnx2x_qable_obj *o,
@@ -694,8 +693,10 @@ enum {
694 693
695 BNX2X_RSS_IPV4, 694 BNX2X_RSS_IPV4,
696 BNX2X_RSS_IPV4_TCP, 695 BNX2X_RSS_IPV4_TCP,
696 BNX2X_RSS_IPV4_UDP,
697 BNX2X_RSS_IPV6, 697 BNX2X_RSS_IPV6,
698 BNX2X_RSS_IPV6_TCP, 698 BNX2X_RSS_IPV6_TCP,
699 BNX2X_RSS_IPV6_UDP,
699}; 700};
700 701
701struct bnx2x_config_rss_params { 702struct bnx2x_config_rss_params {
@@ -729,6 +730,10 @@ struct bnx2x_rss_config_obj {
729 /* Last configured indirection table */ 730 /* Last configured indirection table */
730 u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE]; 731 u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE];
731 732
733 /* flags for enabling 4-tupple hash on UDP */
734 u8 udp_rss_v4;
735 u8 udp_rss_v6;
736
732 int (*config_rss)(struct bnx2x *bp, 737 int (*config_rss)(struct bnx2x *bp,
733 struct bnx2x_config_rss_params *p); 738 struct bnx2x_config_rss_params *p);
734}; 739};
@@ -1280,12 +1285,11 @@ void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
1280 struct bnx2x_rx_mode_obj *o); 1285 struct bnx2x_rx_mode_obj *o);
1281 1286
1282/** 1287/**
1283 * Send and RX_MODE ramrod according to the provided parameters. 1288 * bnx2x_config_rx_mode - Send and RX_MODE ramrod according to the provided parameters.
1284 * 1289 *
1285 * @param bp 1290 * @p: Command parameters
1286 * @param p Command parameters
1287 * 1291 *
1288 * @return 0 - if operation was successfull and there is no pending completions, 1292 * Return: 0 - if operation was successfull and there is no pending completions,
1289 * positive number - if there are pending completions, 1293 * positive number - if there are pending completions,
1290 * negative - if there were errors 1294 * negative - if there were errors
1291 */ 1295 */
@@ -1302,7 +1306,11 @@ void bnx2x_init_mcast_obj(struct bnx2x *bp,
1302 bnx2x_obj_type type); 1306 bnx2x_obj_type type);
1303 1307
1304/** 1308/**
1305 * Configure multicast MACs list. May configure a new list 1309 * bnx2x_config_mcast - Configure multicast MACs list.
1310 *
1311 * @cmd: command to execute: BNX2X_MCAST_CMD_X
1312 *
1313 * May configure a new list
1306 * provided in p->mcast_list (BNX2X_MCAST_CMD_ADD), clean up 1314 * provided in p->mcast_list (BNX2X_MCAST_CMD_ADD), clean up
1307 * (BNX2X_MCAST_CMD_DEL) or restore (BNX2X_MCAST_CMD_RESTORE) a current 1315 * (BNX2X_MCAST_CMD_DEL) or restore (BNX2X_MCAST_CMD_RESTORE) a current
1308 * configuration, continue to execute the pending commands 1316 * configuration, continue to execute the pending commands
@@ -1313,11 +1321,7 @@ void bnx2x_init_mcast_obj(struct bnx2x *bp,
1313 * the current command will be enqueued to the tail of the 1321 * the current command will be enqueued to the tail of the
1314 * pending commands list. 1322 * pending commands list.
1315 * 1323 *
1316 * @param bp 1324 * Return: 0 is operation was sucessfull and there are no pending completions,
1317 * @param p
1318 * @param command to execute: BNX2X_MCAST_CMD_X
1319 *
1320 * @return 0 is operation was sucessfull and there are no pending completions,
1321 * negative if there were errors, positive if there are pending 1325 * negative if there were errors, positive if there are pending
1322 * completions. 1326 * completions.
1323 */ 1327 */
@@ -1342,21 +1346,17 @@ void bnx2x_init_rss_config_obj(struct bnx2x *bp,
1342 bnx2x_obj_type type); 1346 bnx2x_obj_type type);
1343 1347
1344/** 1348/**
1345 * Updates RSS configuration according to provided parameters. 1349 * bnx2x_config_rss - Updates RSS configuration according to provided parameters
1346 *
1347 * @param bp
1348 * @param p
1349 * 1350 *
1350 * @return 0 in case of success 1351 * Return: 0 in case of success
1351 */ 1352 */
1352int bnx2x_config_rss(struct bnx2x *bp, 1353int bnx2x_config_rss(struct bnx2x *bp,
1353 struct bnx2x_config_rss_params *p); 1354 struct bnx2x_config_rss_params *p);
1354 1355
1355/** 1356/**
1356 * Return the current ind_table configuration. 1357 * bnx2x_get_rss_ind_table - Return the current ind_table configuration.
1357 * 1358 *
1358 * @param bp 1359 * @ind_table: buffer to fill with the current indirection
1359 * @param ind_table buffer to fill with the current indirection
1360 * table content. Should be at least 1360 * table content. Should be at least
1361 * T_ETH_INDIRECTION_TABLE_SIZE bytes long. 1361 * T_ETH_INDIRECTION_TABLE_SIZE bytes long.
1362 */ 1362 */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
index 1e2785cd11d..667d89042d3 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
@@ -785,6 +785,10 @@ static int bnx2x_hw_stats_update(struct bnx2x *bp)
785 785
786 pstats->host_port_stats_counter++; 786 pstats->host_port_stats_counter++;
787 787
788 if (CHIP_IS_E3(bp))
789 estats->eee_tx_lpi += REG_RD(bp,
790 MISC_REG_CPMU_LP_SM_ENT_CNT_P0);
791
788 if (!BP_NOMCP(bp)) { 792 if (!BP_NOMCP(bp)) {
789 u32 nig_timer_max = 793 u32 nig_timer_max =
790 SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer); 794 SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
@@ -855,17 +859,22 @@ static int bnx2x_storm_stats_update(struct bnx2x *bp)
855 struct tstorm_per_queue_stats *tclient = 859 struct tstorm_per_queue_stats *tclient =
856 &bp->fw_stats_data->queue_stats[i]. 860 &bp->fw_stats_data->queue_stats[i].
857 tstorm_queue_statistics; 861 tstorm_queue_statistics;
858 struct tstorm_per_queue_stats *old_tclient = &fp->old_tclient; 862 struct tstorm_per_queue_stats *old_tclient =
863 &bnx2x_fp_stats(bp, fp)->old_tclient;
859 struct ustorm_per_queue_stats *uclient = 864 struct ustorm_per_queue_stats *uclient =
860 &bp->fw_stats_data->queue_stats[i]. 865 &bp->fw_stats_data->queue_stats[i].
861 ustorm_queue_statistics; 866 ustorm_queue_statistics;
862 struct ustorm_per_queue_stats *old_uclient = &fp->old_uclient; 867 struct ustorm_per_queue_stats *old_uclient =
868 &bnx2x_fp_stats(bp, fp)->old_uclient;
863 struct xstorm_per_queue_stats *xclient = 869 struct xstorm_per_queue_stats *xclient =
864 &bp->fw_stats_data->queue_stats[i]. 870 &bp->fw_stats_data->queue_stats[i].
865 xstorm_queue_statistics; 871 xstorm_queue_statistics;
866 struct xstorm_per_queue_stats *old_xclient = &fp->old_xclient; 872 struct xstorm_per_queue_stats *old_xclient =
867 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats; 873 &bnx2x_fp_stats(bp, fp)->old_xclient;
868 struct bnx2x_eth_q_stats_old *qstats_old = &fp->eth_q_stats_old; 874 struct bnx2x_eth_q_stats *qstats =
875 &bnx2x_fp_stats(bp, fp)->eth_q_stats;
876 struct bnx2x_eth_q_stats_old *qstats_old =
877 &bnx2x_fp_stats(bp, fp)->eth_q_stats_old;
869 878
870 u32 diff; 879 u32 diff;
871 880
@@ -1048,8 +1057,11 @@ static void bnx2x_net_stats_update(struct bnx2x *bp)
1048 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi); 1057 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
1049 1058
1050 tmp = estats->mac_discard; 1059 tmp = estats->mac_discard;
1051 for_each_rx_queue(bp, i) 1060 for_each_rx_queue(bp, i) {
1052 tmp += le32_to_cpu(bp->fp[i].old_tclient.checksum_discard); 1061 struct tstorm_per_queue_stats *old_tclient =
1062 &bp->fp_stats[i].old_tclient;
1063 tmp += le32_to_cpu(old_tclient->checksum_discard);
1064 }
1053 nstats->rx_dropped = tmp + bp->net_stats_old.rx_dropped; 1065 nstats->rx_dropped = tmp + bp->net_stats_old.rx_dropped;
1054 1066
1055 nstats->tx_dropped = 0; 1067 nstats->tx_dropped = 0;
@@ -1099,9 +1111,9 @@ static void bnx2x_drv_stats_update(struct bnx2x *bp)
1099 int i; 1111 int i;
1100 1112
1101 for_each_queue(bp, i) { 1113 for_each_queue(bp, i) {
1102 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats; 1114 struct bnx2x_eth_q_stats *qstats = &bp->fp_stats[i].eth_q_stats;
1103 struct bnx2x_eth_q_stats_old *qstats_old = 1115 struct bnx2x_eth_q_stats_old *qstats_old =
1104 &bp->fp[i].eth_q_stats_old; 1116 &bp->fp_stats[i].eth_q_stats_old;
1105 1117
1106 UPDATE_ESTAT_QSTAT(driver_xoff); 1118 UPDATE_ESTAT_QSTAT(driver_xoff);
1107 UPDATE_ESTAT_QSTAT(rx_err_discard_pkt); 1119 UPDATE_ESTAT_QSTAT(rx_err_discard_pkt);
@@ -1309,12 +1321,9 @@ static void bnx2x_port_stats_base_init(struct bnx2x *bp)
1309 bnx2x_stats_comp(bp); 1321 bnx2x_stats_comp(bp);
1310} 1322}
1311 1323
1312/** 1324/* This function will prepare the statistics ramrod data the way
1313 * This function will prepare the statistics ramrod data the way
1314 * we will only have to increment the statistics counter and 1325 * we will only have to increment the statistics counter and
1315 * send the ramrod each time we have to. 1326 * send the ramrod each time we have to.
1316 *
1317 * @param bp
1318 */ 1327 */
1319static void bnx2x_prep_fw_stats_req(struct bnx2x *bp) 1328static void bnx2x_prep_fw_stats_req(struct bnx2x *bp)
1320{ 1329{
@@ -1428,7 +1437,7 @@ static void bnx2x_prep_fw_stats_req(struct bnx2x *bp)
1428 query[first_queue_query_index + i]; 1437 query[first_queue_query_index + i];
1429 1438
1430 cur_query_entry->kind = STATS_TYPE_QUEUE; 1439 cur_query_entry->kind = STATS_TYPE_QUEUE;
1431 cur_query_entry->index = bnx2x_stats_id(&bp->fp[FCOE_IDX]); 1440 cur_query_entry->index = bnx2x_stats_id(&bp->fp[FCOE_IDX(bp)]);
1432 cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp)); 1441 cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
1433 cur_query_entry->address.hi = 1442 cur_query_entry->address.hi =
1434 cpu_to_le32(U64_HI(cur_data_offset)); 1443 cpu_to_le32(U64_HI(cur_data_offset));
@@ -1479,15 +1488,19 @@ void bnx2x_stats_init(struct bnx2x *bp)
1479 1488
1480 /* function stats */ 1489 /* function stats */
1481 for_each_queue(bp, i) { 1490 for_each_queue(bp, i) {
1482 struct bnx2x_fastpath *fp = &bp->fp[i]; 1491 struct bnx2x_fp_stats *fp_stats = &bp->fp_stats[i];
1483 1492
1484 memset(&fp->old_tclient, 0, sizeof(fp->old_tclient)); 1493 memset(&fp_stats->old_tclient, 0,
1485 memset(&fp->old_uclient, 0, sizeof(fp->old_uclient)); 1494 sizeof(fp_stats->old_tclient));
1486 memset(&fp->old_xclient, 0, sizeof(fp->old_xclient)); 1495 memset(&fp_stats->old_uclient, 0,
1496 sizeof(fp_stats->old_uclient));
1497 memset(&fp_stats->old_xclient, 0,
1498 sizeof(fp_stats->old_xclient));
1487 if (bp->stats_init) { 1499 if (bp->stats_init) {
1488 memset(&fp->eth_q_stats, 0, sizeof(fp->eth_q_stats)); 1500 memset(&fp_stats->eth_q_stats, 0,
1489 memset(&fp->eth_q_stats_old, 0, 1501 sizeof(fp_stats->eth_q_stats));
1490 sizeof(fp->eth_q_stats_old)); 1502 memset(&fp_stats->eth_q_stats_old, 0,
1503 sizeof(fp_stats->eth_q_stats_old));
1491 } 1504 }
1492 } 1505 }
1493 1506
@@ -1529,8 +1542,10 @@ void bnx2x_save_statistics(struct bnx2x *bp)
1529 /* save queue statistics */ 1542 /* save queue statistics */
1530 for_each_eth_queue(bp, i) { 1543 for_each_eth_queue(bp, i) {
1531 struct bnx2x_fastpath *fp = &bp->fp[i]; 1544 struct bnx2x_fastpath *fp = &bp->fp[i];
1532 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats; 1545 struct bnx2x_eth_q_stats *qstats =
1533 struct bnx2x_eth_q_stats_old *qstats_old = &fp->eth_q_stats_old; 1546 &bnx2x_fp_stats(bp, fp)->eth_q_stats;
1547 struct bnx2x_eth_q_stats_old *qstats_old =
1548 &bnx2x_fp_stats(bp, fp)->eth_q_stats_old;
1534 1549
1535 UPDATE_QSTAT_OLD(total_unicast_bytes_received_hi); 1550 UPDATE_QSTAT_OLD(total_unicast_bytes_received_hi);
1536 UPDATE_QSTAT_OLD(total_unicast_bytes_received_lo); 1551 UPDATE_QSTAT_OLD(total_unicast_bytes_received_lo);
@@ -1569,7 +1584,7 @@ void bnx2x_afex_collect_stats(struct bnx2x *bp, void *void_afex_stats,
1569 struct afex_stats *afex_stats = (struct afex_stats *)void_afex_stats; 1584 struct afex_stats *afex_stats = (struct afex_stats *)void_afex_stats;
1570 struct bnx2x_eth_stats *estats = &bp->eth_stats; 1585 struct bnx2x_eth_stats *estats = &bp->eth_stats;
1571 struct per_queue_stats *fcoe_q_stats = 1586 struct per_queue_stats *fcoe_q_stats =
1572 &bp->fw_stats_data->queue_stats[FCOE_IDX]; 1587 &bp->fw_stats_data->queue_stats[FCOE_IDX(bp)];
1573 1588
1574 struct tstorm_per_queue_stats *fcoe_q_tstorm_stats = 1589 struct tstorm_per_queue_stats *fcoe_q_tstorm_stats =
1575 &fcoe_q_stats->tstorm_queue_statistics; 1590 &fcoe_q_stats->tstorm_queue_statistics;
@@ -1586,8 +1601,7 @@ void bnx2x_afex_collect_stats(struct bnx2x *bp, void *void_afex_stats,
1586 memset(afex_stats, 0, sizeof(struct afex_stats)); 1601 memset(afex_stats, 0, sizeof(struct afex_stats));
1587 1602
1588 for_each_eth_queue(bp, i) { 1603 for_each_eth_queue(bp, i) {
1589 struct bnx2x_fastpath *fp = &bp->fp[i]; 1604 struct bnx2x_eth_q_stats *qstats = &bp->fp_stats[i].eth_q_stats;
1590 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
1591 1605
1592 ADD_64(afex_stats->rx_unicast_bytes_hi, 1606 ADD_64(afex_stats->rx_unicast_bytes_hi,
1593 qstats->total_unicast_bytes_received_hi, 1607 qstats->total_unicast_bytes_received_hi,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
index 93e689fdfed..24b8e505b60 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
@@ -203,6 +203,8 @@ struct bnx2x_eth_stats {
203 /* Recovery */ 203 /* Recovery */
204 u32 recoverable_error; 204 u32 recoverable_error;
205 u32 unrecoverable_error; 205 u32 unrecoverable_error;
206 /* src: Clear-on-Read register; Will not survive PMF Migration */
207 u32 eee_tx_lpi;
206}; 208};
207 209
208 210
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index 2c89d17cbb2..3b4fc61f24c 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -256,11 +256,16 @@ static void cnic_ulp_ctl(struct cnic_dev *dev, int ulp_type, bool reg)
256 struct cnic_local *cp = dev->cnic_priv; 256 struct cnic_local *cp = dev->cnic_priv;
257 struct cnic_eth_dev *ethdev = cp->ethdev; 257 struct cnic_eth_dev *ethdev = cp->ethdev;
258 struct drv_ctl_info info; 258 struct drv_ctl_info info;
259 struct fcoe_capabilities *fcoe_cap =
260 &info.data.register_data.fcoe_features;
259 261
260 if (reg) 262 if (reg) {
261 info.cmd = DRV_CTL_ULP_REGISTER_CMD; 263 info.cmd = DRV_CTL_ULP_REGISTER_CMD;
262 else 264 if (ulp_type == CNIC_ULP_FCOE && dev->fcoe_cap)
265 memcpy(fcoe_cap, dev->fcoe_cap, sizeof(*fcoe_cap));
266 } else {
263 info.cmd = DRV_CTL_ULP_UNREGISTER_CMD; 267 info.cmd = DRV_CTL_ULP_UNREGISTER_CMD;
268 }
264 269
265 info.data.ulp_type = ulp_type; 270 info.data.ulp_type = ulp_type;
266 ethdev->drv_ctl(dev->netdev, &info); 271 ethdev->drv_ctl(dev->netdev, &info);
@@ -286,6 +291,9 @@ static int cnic_get_l5_cid(struct cnic_local *cp, u32 cid, u32 *l5_cid)
286{ 291{
287 u32 i; 292 u32 i;
288 293
294 if (!cp->ctx_tbl)
295 return -EINVAL;
296
289 for (i = 0; i < cp->max_cid_space; i++) { 297 for (i = 0; i < cp->max_cid_space; i++) {
290 if (cp->ctx_tbl[i].cid == cid) { 298 if (cp->ctx_tbl[i].cid == cid) {
291 *l5_cid = i; 299 *l5_cid = i;
@@ -612,6 +620,8 @@ static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
612 620
613 if (ulp_type == CNIC_ULP_ISCSI) 621 if (ulp_type == CNIC_ULP_ISCSI)
614 cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL); 622 cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
623 else if (ulp_type == CNIC_ULP_FCOE)
624 dev->fcoe_cap = NULL;
615 625
616 synchronize_rcu(); 626 synchronize_rcu();
617 627
@@ -2589,7 +2599,7 @@ static void cnic_bnx2x_kwqe_err(struct cnic_dev *dev, struct kwqe *kwqe)
2589 return; 2599 return;
2590 } 2600 }
2591 2601
2592 cqes[0] = (struct kcqe *) &kcqe; 2602 cqes[0] = &kcqe;
2593 cnic_reply_bnx2x_kcqes(dev, ulp_type, cqes, 1); 2603 cnic_reply_bnx2x_kcqes(dev, ulp_type, cqes, 1);
2594} 2604}
2595 2605
@@ -3217,6 +3227,9 @@ static int cnic_ctl(void *data, struct cnic_ctl_info *info)
3217 u32 l5_cid; 3227 u32 l5_cid;
3218 struct cnic_local *cp = dev->cnic_priv; 3228 struct cnic_local *cp = dev->cnic_priv;
3219 3229
3230 if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
3231 break;
3232
3220 if (cnic_get_l5_cid(cp, cid, &l5_cid) == 0) { 3233 if (cnic_get_l5_cid(cp, cid, &l5_cid) == 0) {
3221 struct cnic_context *ctx = &cp->ctx_tbl[l5_cid]; 3234 struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
3222 3235
@@ -3947,6 +3960,15 @@ static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe)
3947 cnic_cm_upcall(cp, csk, opcode); 3960 cnic_cm_upcall(cp, csk, opcode);
3948 break; 3961 break;
3949 3962
3963 case L5CM_RAMROD_CMD_ID_CLOSE:
3964 if (l4kcqe->status != 0) {
3965 netdev_warn(dev->netdev, "RAMROD CLOSE compl with "
3966 "status 0x%x\n", l4kcqe->status);
3967 opcode = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
3968 /* Fall through */
3969 } else {
3970 break;
3971 }
3950 case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED: 3972 case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
3951 case L4_KCQE_OPCODE_VALUE_CLOSE_COMP: 3973 case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
3952 case L4_KCQE_OPCODE_VALUE_RESET_COMP: 3974 case L4_KCQE_OPCODE_VALUE_RESET_COMP:
@@ -4250,8 +4272,6 @@ static int cnic_cm_shutdown(struct cnic_dev *dev)
4250 struct cnic_local *cp = dev->cnic_priv; 4272 struct cnic_local *cp = dev->cnic_priv;
4251 int i; 4273 int i;
4252 4274
4253 cp->stop_cm(dev);
4254
4255 if (!cp->csk_tbl) 4275 if (!cp->csk_tbl)
4256 return 0; 4276 return 0;
4257 4277
@@ -4669,9 +4689,9 @@ static int cnic_start_bnx2_hw(struct cnic_dev *dev)
4669 4689
4670 cp->kcq1.sw_prod_idx = 0; 4690 cp->kcq1.sw_prod_idx = 0;
4671 cp->kcq1.hw_prod_idx_ptr = 4691 cp->kcq1.hw_prod_idx_ptr =
4672 (u16 *) &sblk->status_completion_producer_index; 4692 &sblk->status_completion_producer_index;
4673 4693
4674 cp->kcq1.status_idx_ptr = (u16 *) &sblk->status_idx; 4694 cp->kcq1.status_idx_ptr = &sblk->status_idx;
4675 4695
4676 /* Initialize the kernel complete queue context. */ 4696 /* Initialize the kernel complete queue context. */
4677 val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE | 4697 val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
@@ -4697,9 +4717,9 @@ static int cnic_start_bnx2_hw(struct cnic_dev *dev)
4697 u32 sb = BNX2_L2CTX_L5_STATUSB_NUM(sb_id); 4717 u32 sb = BNX2_L2CTX_L5_STATUSB_NUM(sb_id);
4698 4718
4699 cp->kcq1.hw_prod_idx_ptr = 4719 cp->kcq1.hw_prod_idx_ptr =
4700 (u16 *) &msblk->status_completion_producer_index; 4720 &msblk->status_completion_producer_index;
4701 cp->kcq1.status_idx_ptr = (u16 *) &msblk->status_idx; 4721 cp->kcq1.status_idx_ptr = &msblk->status_idx;
4702 cp->kwq_con_idx_ptr = (u16 *) &msblk->status_cmd_consumer_index; 4722 cp->kwq_con_idx_ptr = &msblk->status_cmd_consumer_index;
4703 cp->int_num = sb_id << BNX2_PCICFG_INT_ACK_CMD_INT_NUM_SHIFT; 4723 cp->int_num = sb_id << BNX2_PCICFG_INT_ACK_CMD_INT_NUM_SHIFT;
4704 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_HOST_QIDX, sb); 4724 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
4705 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_HOST_QIDX, sb); 4725 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
@@ -4981,8 +5001,14 @@ static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
4981 cp->port_mode = CHIP_PORT_MODE_NONE; 5001 cp->port_mode = CHIP_PORT_MODE_NONE;
4982 5002
4983 if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) { 5003 if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
4984 u32 val = CNIC_RD(dev, MISC_REG_PORT4MODE_EN_OVWR); 5004 u32 val;
4985 5005
5006 pci_read_config_dword(dev->pcidev, PCICFG_ME_REGISTER, &val);
5007 cp->func = (u8) ((val & ME_REG_ABS_PF_NUM) >>
5008 ME_REG_ABS_PF_NUM_SHIFT);
5009 func = CNIC_FUNC(cp);
5010
5011 val = CNIC_RD(dev, MISC_REG_PORT4MODE_EN_OVWR);
4986 if (!(val & 1)) 5012 if (!(val & 1))
4987 val = CNIC_RD(dev, MISC_REG_PORT4MODE_EN); 5013 val = CNIC_RD(dev, MISC_REG_PORT4MODE_EN);
4988 else 5014 else
@@ -5287,6 +5313,7 @@ static void cnic_stop_hw(struct cnic_dev *dev)
5287 i++; 5313 i++;
5288 } 5314 }
5289 cnic_shutdown_rings(dev); 5315 cnic_shutdown_rings(dev);
5316 cp->stop_cm(dev);
5290 clear_bit(CNIC_F_CNIC_UP, &dev->flags); 5317 clear_bit(CNIC_F_CNIC_UP, &dev->flags);
5291 RCU_INIT_POINTER(cp->ulp_ops[CNIC_ULP_L4], NULL); 5318 RCU_INIT_POINTER(cp->ulp_ops[CNIC_ULP_L4], NULL);
5292 synchronize_rcu(); 5319 synchronize_rcu();
@@ -5516,9 +5543,7 @@ static void cnic_rcv_netevent(struct cnic_local *cp, unsigned long event,
5516 rcu_read_unlock(); 5543 rcu_read_unlock();
5517} 5544}
5518 5545
5519/** 5546/* netdev event handler */
5520 * netdev event handler
5521 */
5522static int cnic_netdev_event(struct notifier_block *this, unsigned long event, 5547static int cnic_netdev_event(struct notifier_block *this, unsigned long event,
5523 void *ptr) 5548 void *ptr)
5524{ 5549{
diff --git a/drivers/net/ethernet/broadcom/cnic_if.h b/drivers/net/ethernet/broadcom/cnic_if.h
index 289274e546b..5cb88881bba 100644
--- a/drivers/net/ethernet/broadcom/cnic_if.h
+++ b/drivers/net/ethernet/broadcom/cnic_if.h
@@ -12,8 +12,10 @@
12#ifndef CNIC_IF_H 12#ifndef CNIC_IF_H
13#define CNIC_IF_H 13#define CNIC_IF_H
14 14
15#define CNIC_MODULE_VERSION "2.5.10" 15#include "bnx2x/bnx2x_mfw_req.h"
16#define CNIC_MODULE_RELDATE "March 21, 2012" 16
17#define CNIC_MODULE_VERSION "2.5.12"
18#define CNIC_MODULE_RELDATE "June 29, 2012"
17 19
18#define CNIC_ULP_RDMA 0 20#define CNIC_ULP_RDMA 0
19#define CNIC_ULP_ISCSI 1 21#define CNIC_ULP_ISCSI 1
@@ -131,6 +133,11 @@ struct drv_ctl_l2_ring {
131 u32 cid; 133 u32 cid;
132}; 134};
133 135
136struct drv_ctl_register_data {
137 int ulp_type;
138 struct fcoe_capabilities fcoe_features;
139};
140
134struct drv_ctl_info { 141struct drv_ctl_info {
135 int cmd; 142 int cmd;
136 union { 143 union {
@@ -138,6 +145,7 @@ struct drv_ctl_info {
138 struct drv_ctl_io io; 145 struct drv_ctl_io io;
139 struct drv_ctl_l2_ring ring; 146 struct drv_ctl_l2_ring ring;
140 int ulp_type; 147 int ulp_type;
148 struct drv_ctl_register_data register_data;
141 char bytes[MAX_DRV_CTL_DATA]; 149 char bytes[MAX_DRV_CTL_DATA];
142 } data; 150 } data;
143}; 151};
@@ -305,6 +313,7 @@ struct cnic_dev {
305 int max_rdma_conn; 313 int max_rdma_conn;
306 314
307 union drv_info_to_mcp *stats_addr; 315 union drv_info_to_mcp *stats_addr;
316 struct fcoe_capabilities *fcoe_cap;
308 317
309 void *cnic_priv; 318 void *cnic_priv;
310}; 319};
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index e47ff8be1d7..6cbab036973 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -9908,7 +9908,7 @@ static bool tg3_enable_msix(struct tg3 *tp)
9908 int i, rc; 9908 int i, rc;
9909 struct msix_entry msix_ent[tp->irq_max]; 9909 struct msix_entry msix_ent[tp->irq_max];
9910 9910
9911 tp->irq_cnt = num_online_cpus(); 9911 tp->irq_cnt = netif_get_num_default_rss_queues();
9912 if (tp->irq_cnt > 1) { 9912 if (tp->irq_cnt > 1) {
9913 /* We want as many rx rings enabled as there are cpus. 9913 /* We want as many rx rings enabled as there are cpus.
9914 * In multiqueue MSI-X mode, the first MSI-X vector 9914 * In multiqueue MSI-X mode, the first MSI-X vector
diff --git a/drivers/net/ethernet/brocade/bna/bfa_cee.c b/drivers/net/ethernet/brocade/bna/bfa_cee.c
index 689e5e19cc0..550d2521ba7 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_cee.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_cee.c
@@ -52,13 +52,7 @@ bfa_cee_format_lldp_cfg(struct bfa_cee_lldp_cfg *lldp_cfg)
52} 52}
53 53
54/** 54/**
55 * bfa_cee_attr_meminfo() 55 * bfa_cee_attr_meminfo - Returns the size of the DMA memory needed by CEE attributes
56 *
57 * @brief Returns the size of the DMA memory needed by CEE attributes
58 *
59 * @param[in] void
60 *
61 * @return Size of DMA region
62 */ 56 */
63static u32 57static u32
64bfa_cee_attr_meminfo(void) 58bfa_cee_attr_meminfo(void)
@@ -66,13 +60,7 @@ bfa_cee_attr_meminfo(void)
66 return roundup(sizeof(struct bfa_cee_attr), BFA_DMA_ALIGN_SZ); 60 return roundup(sizeof(struct bfa_cee_attr), BFA_DMA_ALIGN_SZ);
67} 61}
68/** 62/**
69 * bfa_cee_stats_meminfo() 63 * bfa_cee_stats_meminfo - Returns the size of the DMA memory needed by CEE stats
70 *
71 * @brief Returns the size of the DMA memory needed by CEE stats
72 *
73 * @param[in] void
74 *
75 * @return Size of DMA region
76 */ 64 */
77static u32 65static u32
78bfa_cee_stats_meminfo(void) 66bfa_cee_stats_meminfo(void)
@@ -81,14 +69,10 @@ bfa_cee_stats_meminfo(void)
81} 69}
82 70
83/** 71/**
84 * bfa_cee_get_attr_isr() 72 * bfa_cee_get_attr_isr - CEE ISR for get-attributes responses from f/w
85 *
86 * @brief CEE ISR for get-attributes responses from f/w
87 *
88 * @param[in] cee - Pointer to the CEE module
89 * status - Return status from the f/w
90 * 73 *
91 * @return void 74 * @cee: Pointer to the CEE module
75 * @status: Return status from the f/w
92 */ 76 */
93static void 77static void
94bfa_cee_get_attr_isr(struct bfa_cee *cee, enum bfa_status status) 78bfa_cee_get_attr_isr(struct bfa_cee *cee, enum bfa_status status)
@@ -105,14 +89,10 @@ bfa_cee_get_attr_isr(struct bfa_cee *cee, enum bfa_status status)
105} 89}
106 90
107/** 91/**
108 * bfa_cee_get_attr_isr() 92 * bfa_cee_get_attr_isr - CEE ISR for get-stats responses from f/w
109 *
110 * @brief CEE ISR for get-stats responses from f/w
111 * 93 *
112 * @param[in] cee - Pointer to the CEE module 94 * @cee: Pointer to the CEE module
113 * status - Return status from the f/w 95 * @status: Return status from the f/w
114 *
115 * @return void
116 */ 96 */
117static void 97static void
118bfa_cee_get_stats_isr(struct bfa_cee *cee, enum bfa_status status) 98bfa_cee_get_stats_isr(struct bfa_cee *cee, enum bfa_status status)
@@ -147,13 +127,7 @@ bfa_cee_reset_stats_isr(struct bfa_cee *cee, enum bfa_status status)
147 cee->cbfn.reset_stats_cbfn(cee->cbfn.reset_stats_cbarg, status); 127 cee->cbfn.reset_stats_cbfn(cee->cbfn.reset_stats_cbarg, status);
148} 128}
149/** 129/**
150 * bfa_nw_cee_meminfo() 130 * bfa_nw_cee_meminfo - Returns the size of the DMA memory needed by CEE module
151 *
152 * @brief Returns the size of the DMA memory needed by CEE module
153 *
154 * @param[in] void
155 *
156 * @return Size of DMA region
157 */ 131 */
158u32 132u32
159bfa_nw_cee_meminfo(void) 133bfa_nw_cee_meminfo(void)
@@ -162,15 +136,11 @@ bfa_nw_cee_meminfo(void)
162} 136}
163 137
164/** 138/**
165 * bfa_nw_cee_mem_claim() 139 * bfa_nw_cee_mem_claim - Initialized CEE DMA Memory
166 *
167 * @brief Initialized CEE DMA Memory
168 *
169 * @param[in] cee CEE module pointer
170 * dma_kva Kernel Virtual Address of CEE DMA Memory
171 * dma_pa Physical Address of CEE DMA Memory
172 * 140 *
173 * @return void 141 * @cee: CEE module pointer
142 * @dma_kva: Kernel Virtual Address of CEE DMA Memory
143 * @dma_pa: Physical Address of CEE DMA Memory
174 */ 144 */
175void 145void
176bfa_nw_cee_mem_claim(struct bfa_cee *cee, u8 *dma_kva, u64 dma_pa) 146bfa_nw_cee_mem_claim(struct bfa_cee *cee, u8 *dma_kva, u64 dma_pa)
@@ -185,13 +155,11 @@ bfa_nw_cee_mem_claim(struct bfa_cee *cee, u8 *dma_kva, u64 dma_pa)
185} 155}
186 156
187/** 157/**
188 * bfa_cee_get_attr() 158 * bfa_cee_get_attr - Send the request to the f/w to fetch CEE attributes.
189 *
190 * @brief Send the request to the f/w to fetch CEE attributes.
191 * 159 *
192 * @param[in] Pointer to the CEE module data structure. 160 * @cee: Pointer to the CEE module data structure.
193 * 161 *
194 * @return Status 162 * Return: status
195 */ 163 */
196enum bfa_status 164enum bfa_status
197bfa_nw_cee_get_attr(struct bfa_cee *cee, struct bfa_cee_attr *attr, 165bfa_nw_cee_get_attr(struct bfa_cee *cee, struct bfa_cee_attr *attr,
@@ -220,13 +188,7 @@ bfa_nw_cee_get_attr(struct bfa_cee *cee, struct bfa_cee_attr *attr,
220} 188}
221 189
222/** 190/**
223 * bfa_cee_isrs() 191 * bfa_cee_isrs - Handles Mail-box interrupts for CEE module.
224 *
225 * @brief Handles Mail-box interrupts for CEE module.
226 *
227 * @param[in] Pointer to the CEE module data structure.
228 *
229 * @return void
230 */ 192 */
231 193
232static void 194static void
@@ -253,14 +215,9 @@ bfa_cee_isr(void *cbarg, struct bfi_mbmsg *m)
253} 215}
254 216
255/** 217/**
256 * bfa_cee_notify() 218 * bfa_cee_notify - CEE module heart-beat failure handler.
257 *
258 * @brief CEE module heart-beat failure handler.
259 * @brief CEE module IOC event handler.
260 *
261 * @param[in] IOC event type
262 * 219 *
263 * @return void 220 * @event: IOC event type
264 */ 221 */
265 222
266static void 223static void
@@ -307,17 +264,13 @@ bfa_cee_notify(void *arg, enum bfa_ioc_event event)
307} 264}
308 265
309/** 266/**
310 * bfa_nw_cee_attach() 267 * bfa_nw_cee_attach - CEE module-attach API
311 *
312 * @brief CEE module-attach API
313 * 268 *
314 * @param[in] cee - Pointer to the CEE module data structure 269 * @cee: Pointer to the CEE module data structure
315 * ioc - Pointer to the ioc module data structure 270 * @ioc: Pointer to the ioc module data structure
316 * dev - Pointer to the device driver module data structure 271 * @dev: Pointer to the device driver module data structure.
317 * The device driver specific mbox ISR functions have 272 * The device driver specific mbox ISR functions have
318 * this pointer as one of the parameters. 273 * this pointer as one of the parameters.
319 *
320 * @return void
321 */ 274 */
322void 275void
323bfa_nw_cee_attach(struct bfa_cee *cee, struct bfa_ioc *ioc, 276bfa_nw_cee_attach(struct bfa_cee *cee, struct bfa_ioc *ioc,
diff --git a/drivers/net/ethernet/brocade/bna/bfa_cs.h b/drivers/net/ethernet/brocade/bna/bfa_cs.h
index 3da1a946ccd..ad004a4c389 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_cs.h
+++ b/drivers/net/ethernet/brocade/bna/bfa_cs.h
@@ -16,23 +16,18 @@
16 * www.brocade.com 16 * www.brocade.com
17 */ 17 */
18 18
19/** 19/* BFA common services */
20 * @file bfa_cs.h BFA common services
21 */
22 20
23#ifndef __BFA_CS_H__ 21#ifndef __BFA_CS_H__
24#define __BFA_CS_H__ 22#define __BFA_CS_H__
25 23
26#include "cna.h" 24#include "cna.h"
27 25
28/** 26/* BFA state machine interfaces */
29 * @ BFA state machine interfaces
30 */
31 27
32typedef void (*bfa_sm_t)(void *sm, int event); 28typedef void (*bfa_sm_t)(void *sm, int event);
33 29
34/** 30/* oc - object class eg. bfa_ioc
35 * oc - object class eg. bfa_ioc
36 * st - state, eg. reset 31 * st - state, eg. reset
37 * otype - object type, eg. struct bfa_ioc 32 * otype - object type, eg. struct bfa_ioc
38 * etype - object type, eg. enum ioc_event 33 * etype - object type, eg. enum ioc_event
@@ -45,9 +40,7 @@ typedef void (*bfa_sm_t)(void *sm, int event);
45#define bfa_sm_get_state(_sm) ((_sm)->sm) 40#define bfa_sm_get_state(_sm) ((_sm)->sm)
46#define bfa_sm_cmp_state(_sm, _state) ((_sm)->sm == (bfa_sm_t)(_state)) 41#define bfa_sm_cmp_state(_sm, _state) ((_sm)->sm == (bfa_sm_t)(_state))
47 42
48/** 43/* For converting from state machine function to state encoding. */
49 * For converting from state machine function to state encoding.
50 */
51struct bfa_sm_table { 44struct bfa_sm_table {
52 bfa_sm_t sm; /*!< state machine function */ 45 bfa_sm_t sm; /*!< state machine function */
53 int state; /*!< state machine encoding */ 46 int state; /*!< state machine encoding */
@@ -55,13 +48,10 @@ struct bfa_sm_table {
55}; 48};
56#define BFA_SM(_sm) ((bfa_sm_t)(_sm)) 49#define BFA_SM(_sm) ((bfa_sm_t)(_sm))
57 50
58/** 51/* State machine with entry actions. */
59 * State machine with entry actions.
60 */
61typedef void (*bfa_fsm_t)(void *fsm, int event); 52typedef void (*bfa_fsm_t)(void *fsm, int event);
62 53
63/** 54/* oc - object class eg. bfa_ioc
64 * oc - object class eg. bfa_ioc
65 * st - state, eg. reset 55 * st - state, eg. reset
66 * otype - object type, eg. struct bfa_ioc 56 * otype - object type, eg. struct bfa_ioc
67 * etype - object type, eg. enum ioc_event 57 * etype - object type, eg. enum ioc_event
@@ -90,9 +80,7 @@ bfa_sm_to_state(const struct bfa_sm_table *smt, bfa_sm_t sm)
90 return smt[i].state; 80 return smt[i].state;
91} 81}
92 82
93/** 83/* Generic wait counter. */
94 * @ Generic wait counter.
95 */
96 84
97typedef void (*bfa_wc_resume_t) (void *cbarg); 85typedef void (*bfa_wc_resume_t) (void *cbarg);
98 86
@@ -116,9 +104,7 @@ bfa_wc_down(struct bfa_wc *wc)
116 wc->wc_resume(wc->wc_cbarg); 104 wc->wc_resume(wc->wc_cbarg);
117} 105}
118 106
119/** 107/* Initialize a waiting counter. */
120 * Initialize a waiting counter.
121 */
122static inline void 108static inline void
123bfa_wc_init(struct bfa_wc *wc, bfa_wc_resume_t wc_resume, void *wc_cbarg) 109bfa_wc_init(struct bfa_wc *wc, bfa_wc_resume_t wc_resume, void *wc_cbarg)
124{ 110{
@@ -128,9 +114,7 @@ bfa_wc_init(struct bfa_wc *wc, bfa_wc_resume_t wc_resume, void *wc_cbarg)
128 bfa_wc_up(wc); 114 bfa_wc_up(wc);
129} 115}
130 116
131/** 117/* Wait for counter to reach zero */
132 * Wait for counter to reach zero
133 */
134static inline void 118static inline void
135bfa_wc_wait(struct bfa_wc *wc) 119bfa_wc_wait(struct bfa_wc *wc)
136{ 120{
diff --git a/drivers/net/ethernet/brocade/bna/bfa_defs.h b/drivers/net/ethernet/brocade/bna/bfa_defs.h
index 48f87733739..e423f82da49 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_defs.h
+++ b/drivers/net/ethernet/brocade/bna/bfa_defs.h
@@ -26,13 +26,9 @@
26#define BFA_STRING_32 32 26#define BFA_STRING_32 32
27#define BFA_VERSION_LEN 64 27#define BFA_VERSION_LEN 64
28 28
29/** 29/* ---------------------- adapter definitions ------------ */
30 * ---------------------- adapter definitions ------------
31 */
32 30
33/** 31/* BFA adapter level attributes. */
34 * BFA adapter level attributes.
35 */
36enum { 32enum {
37 BFA_ADAPTER_SERIAL_NUM_LEN = STRSZ(BFA_MFG_SERIALNUM_SIZE), 33 BFA_ADAPTER_SERIAL_NUM_LEN = STRSZ(BFA_MFG_SERIALNUM_SIZE),
38 /* 34 /*
@@ -74,18 +70,14 @@ struct bfa_adapter_attr {
74 u8 trunk_capable; 70 u8 trunk_capable;
75}; 71};
76 72
77/** 73/* ---------------------- IOC definitions ------------ */
78 * ---------------------- IOC definitions ------------
79 */
80 74
81enum { 75enum {
82 BFA_IOC_DRIVER_LEN = 16, 76 BFA_IOC_DRIVER_LEN = 16,
83 BFA_IOC_CHIP_REV_LEN = 8, 77 BFA_IOC_CHIP_REV_LEN = 8,
84}; 78};
85 79
86/** 80/* Driver and firmware versions. */
87 * Driver and firmware versions.
88 */
89struct bfa_ioc_driver_attr { 81struct bfa_ioc_driver_attr {
90 char driver[BFA_IOC_DRIVER_LEN]; /*!< driver name */ 82 char driver[BFA_IOC_DRIVER_LEN]; /*!< driver name */
91 char driver_ver[BFA_VERSION_LEN]; /*!< driver version */ 83 char driver_ver[BFA_VERSION_LEN]; /*!< driver version */
@@ -95,9 +87,7 @@ struct bfa_ioc_driver_attr {
95 char ob_ver[BFA_VERSION_LEN]; /*!< openboot version */ 87 char ob_ver[BFA_VERSION_LEN]; /*!< openboot version */
96}; 88};
97 89
98/** 90/* IOC PCI device attributes */
99 * IOC PCI device attributes
100 */
101struct bfa_ioc_pci_attr { 91struct bfa_ioc_pci_attr {
102 u16 vendor_id; /*!< PCI vendor ID */ 92 u16 vendor_id; /*!< PCI vendor ID */
103 u16 device_id; /*!< PCI device ID */ 93 u16 device_id; /*!< PCI device ID */
@@ -108,9 +98,7 @@ struct bfa_ioc_pci_attr {
108 char chip_rev[BFA_IOC_CHIP_REV_LEN]; /*!< chip revision */ 98 char chip_rev[BFA_IOC_CHIP_REV_LEN]; /*!< chip revision */
109}; 99};
110 100
111/** 101/* IOC states */
112 * IOC states
113 */
114enum bfa_ioc_state { 102enum bfa_ioc_state {
115 BFA_IOC_UNINIT = 1, /*!< IOC is in uninit state */ 103 BFA_IOC_UNINIT = 1, /*!< IOC is in uninit state */
116 BFA_IOC_RESET = 2, /*!< IOC is in reset state */ 104 BFA_IOC_RESET = 2, /*!< IOC is in reset state */
@@ -127,9 +115,7 @@ enum bfa_ioc_state {
127 BFA_IOC_HWFAIL = 13, /*!< PCI mapping doesn't exist */ 115 BFA_IOC_HWFAIL = 13, /*!< PCI mapping doesn't exist */
128}; 116};
129 117
130/** 118/* IOC firmware stats */
131 * IOC firmware stats
132 */
133struct bfa_fw_ioc_stats { 119struct bfa_fw_ioc_stats {
134 u32 enable_reqs; 120 u32 enable_reqs;
135 u32 disable_reqs; 121 u32 disable_reqs;
@@ -139,9 +125,7 @@ struct bfa_fw_ioc_stats {
139 u32 unknown_reqs; 125 u32 unknown_reqs;
140}; 126};
141 127
142/** 128/* IOC driver stats */
143 * IOC driver stats
144 */
145struct bfa_ioc_drv_stats { 129struct bfa_ioc_drv_stats {
146 u32 ioc_isrs; 130 u32 ioc_isrs;
147 u32 ioc_enables; 131 u32 ioc_enables;
@@ -157,9 +141,7 @@ struct bfa_ioc_drv_stats {
157 u32 rsvd; 141 u32 rsvd;
158}; 142};
159 143
160/** 144/* IOC statistics */
161 * IOC statistics
162 */
163struct bfa_ioc_stats { 145struct bfa_ioc_stats {
164 struct bfa_ioc_drv_stats drv_stats; /*!< driver IOC stats */ 146 struct bfa_ioc_drv_stats drv_stats; /*!< driver IOC stats */
165 struct bfa_fw_ioc_stats fw_stats; /*!< firmware IOC stats */ 147 struct bfa_fw_ioc_stats fw_stats; /*!< firmware IOC stats */
@@ -171,9 +153,7 @@ enum bfa_ioc_type {
171 BFA_IOC_TYPE_LL = 3, 153 BFA_IOC_TYPE_LL = 3,
172}; 154};
173 155
174/** 156/* IOC attributes returned in queries */
175 * IOC attributes returned in queries
176 */
177struct bfa_ioc_attr { 157struct bfa_ioc_attr {
178 enum bfa_ioc_type ioc_type; 158 enum bfa_ioc_type ioc_type;
179 enum bfa_ioc_state state; /*!< IOC state */ 159 enum bfa_ioc_state state; /*!< IOC state */
@@ -187,22 +167,16 @@ struct bfa_ioc_attr {
187 u8 rsvd[4]; /*!< 64bit align */ 167 u8 rsvd[4]; /*!< 64bit align */
188}; 168};
189 169
190/** 170/* Adapter capability mask definition */
191 * Adapter capability mask definition
192 */
193enum { 171enum {
194 BFA_CM_HBA = 0x01, 172 BFA_CM_HBA = 0x01,
195 BFA_CM_CNA = 0x02, 173 BFA_CM_CNA = 0x02,
196 BFA_CM_NIC = 0x04, 174 BFA_CM_NIC = 0x04,
197}; 175};
198 176
199/** 177/* ---------------------- mfg definitions ------------ */
200 * ---------------------- mfg definitions ------------
201 */
202 178
203/** 179/* Checksum size */
204 * Checksum size
205 */
206#define BFA_MFG_CHKSUM_SIZE 16 180#define BFA_MFG_CHKSUM_SIZE 16
207 181
208#define BFA_MFG_PARTNUM_SIZE 14 182#define BFA_MFG_PARTNUM_SIZE 14
@@ -213,8 +187,7 @@ enum {
213 187
214#pragma pack(1) 188#pragma pack(1)
215 189
216/** 190/* BFA adapter manufacturing block definition.
217 * @brief BFA adapter manufacturing block definition.
218 * 191 *
219 * All numerical fields are in big-endian format. 192 * All numerical fields are in big-endian format.
220 */ 193 */
@@ -256,9 +229,7 @@ struct bfa_mfg_block {
256 229
257#pragma pack() 230#pragma pack()
258 231
259/** 232/* ---------------------- pci definitions ------------ */
260 * ---------------------- pci definitions ------------
261 */
262 233
263/* 234/*
264 * PCI device ID information 235 * PCI device ID information
@@ -275,9 +246,7 @@ enum {
275#define bfa_asic_id_ctc(device) \ 246#define bfa_asic_id_ctc(device) \
276 (bfa_asic_id_ct(device) || bfa_asic_id_ct2(device)) 247 (bfa_asic_id_ct(device) || bfa_asic_id_ct2(device))
277 248
278/** 249/* PCI sub-system device and vendor ID information */
279 * PCI sub-system device and vendor ID information
280 */
281enum { 250enum {
282 BFA_PCI_FCOE_SSDEVICE_ID = 0x14, 251 BFA_PCI_FCOE_SSDEVICE_ID = 0x14,
283 BFA_PCI_CT2_SSID_FCoE = 0x22, 252 BFA_PCI_CT2_SSID_FCoE = 0x22,
diff --git a/drivers/net/ethernet/brocade/bna/bfa_defs_cna.h b/drivers/net/ethernet/brocade/bna/bfa_defs_cna.h
index 8ab33ee2c2b..b39c5f23974 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_defs_cna.h
+++ b/drivers/net/ethernet/brocade/bna/bfa_defs_cna.h
@@ -20,10 +20,7 @@
20 20
21#include "bfa_defs.h" 21#include "bfa_defs.h"
22 22
23/** 23/* FC physical port statistics. */
24 * @brief
25 * FC physical port statistics.
26 */
27struct bfa_port_fc_stats { 24struct bfa_port_fc_stats {
28 u64 secs_reset; /*!< Seconds since stats is reset */ 25 u64 secs_reset; /*!< Seconds since stats is reset */
29 u64 tx_frames; /*!< Tx frames */ 26 u64 tx_frames; /*!< Tx frames */
@@ -59,10 +56,7 @@ struct bfa_port_fc_stats {
59 u64 bbsc_link_resets; /*!< Credit Recovery-Link Resets */ 56 u64 bbsc_link_resets; /*!< Credit Recovery-Link Resets */
60}; 57};
61 58
62/** 59/* Eth Physical Port statistics. */
63 * @brief
64 * Eth Physical Port statistics.
65 */
66struct bfa_port_eth_stats { 60struct bfa_port_eth_stats {
67 u64 secs_reset; /*!< Seconds since stats is reset */ 61 u64 secs_reset; /*!< Seconds since stats is reset */
68 u64 frame_64; /*!< Frames 64 bytes */ 62 u64 frame_64; /*!< Frames 64 bytes */
@@ -108,10 +102,7 @@ struct bfa_port_eth_stats {
108 u64 tx_iscsi_zero_pause; /*!< Tx iSCSI zero pause */ 102 u64 tx_iscsi_zero_pause; /*!< Tx iSCSI zero pause */
109}; 103};
110 104
111/** 105/* Port statistics. */
112 * @brief
113 * Port statistics.
114 */
115union bfa_port_stats_u { 106union bfa_port_stats_u {
116 struct bfa_port_fc_stats fc; 107 struct bfa_port_fc_stats fc;
117 struct bfa_port_eth_stats eth; 108 struct bfa_port_eth_stats eth;
diff --git a/drivers/net/ethernet/brocade/bna/bfa_defs_mfg_comm.h b/drivers/net/ethernet/brocade/bna/bfa_defs_mfg_comm.h
index 6681fe87c1e..7fb396fe679 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_defs_mfg_comm.h
+++ b/drivers/net/ethernet/brocade/bna/bfa_defs_mfg_comm.h
@@ -20,33 +20,23 @@
20 20
21#include "bfa_defs.h" 21#include "bfa_defs.h"
22 22
23/** 23/* Manufacturing block version */
24 * Manufacturing block version
25 */
26#define BFA_MFG_VERSION 3 24#define BFA_MFG_VERSION 3
27#define BFA_MFG_VERSION_UNINIT 0xFF 25#define BFA_MFG_VERSION_UNINIT 0xFF
28 26
29/** 27/* Manufacturing block encrypted version */
30 * Manufacturing block encrypted version
31 */
32#define BFA_MFG_ENC_VER 2 28#define BFA_MFG_ENC_VER 2
33 29
34/** 30/* Manufacturing block version 1 length */
35 * Manufacturing block version 1 length
36 */
37#define BFA_MFG_VER1_LEN 128 31#define BFA_MFG_VER1_LEN 128
38 32
39/** 33/* Manufacturing block header length */
40 * Manufacturing block header length
41 */
42#define BFA_MFG_HDR_LEN 4 34#define BFA_MFG_HDR_LEN 4
43 35
44#define BFA_MFG_SERIALNUM_SIZE 11 36#define BFA_MFG_SERIALNUM_SIZE 11
45#define STRSZ(_n) (((_n) + 4) & ~3) 37#define STRSZ(_n) (((_n) + 4) & ~3)
46 38
47/** 39/* Manufacturing card type */
48 * Manufacturing card type
49 */
50enum { 40enum {
51 BFA_MFG_TYPE_CB_MAX = 825, /*!< Crossbow card type max */ 41 BFA_MFG_TYPE_CB_MAX = 825, /*!< Crossbow card type max */
52 BFA_MFG_TYPE_FC8P2 = 825, /*!< 8G 2port FC card */ 42 BFA_MFG_TYPE_FC8P2 = 825, /*!< 8G 2port FC card */
@@ -70,9 +60,7 @@ enum {
70 60
71#pragma pack(1) 61#pragma pack(1)
72 62
73/** 63/* Check if Mezz card */
74 * Check if Mezz card
75 */
76#define bfa_mfg_is_mezz(type) (( \ 64#define bfa_mfg_is_mezz(type) (( \
77 (type) == BFA_MFG_TYPE_JAYHAWK || \ 65 (type) == BFA_MFG_TYPE_JAYHAWK || \
78 (type) == BFA_MFG_TYPE_WANCHESE || \ 66 (type) == BFA_MFG_TYPE_WANCHESE || \
@@ -127,9 +115,7 @@ do { \
127 } \ 115 } \
128} while (0) 116} while (0)
129 117
130/** 118/* VPD data length */
131 * VPD data length
132 */
133#define BFA_MFG_VPD_LEN 512 119#define BFA_MFG_VPD_LEN 512
134#define BFA_MFG_VPD_LEN_INVALID 0 120#define BFA_MFG_VPD_LEN_INVALID 0
135 121
@@ -137,9 +123,7 @@ do { \
137#define BFA_MFG_VPD_PCI_VER_MASK 0x07 /*!< version mask 3 bits */ 123#define BFA_MFG_VPD_PCI_VER_MASK 0x07 /*!< version mask 3 bits */
138#define BFA_MFG_VPD_PCI_VDR_MASK 0xf8 /*!< vendor mask 5 bits */ 124#define BFA_MFG_VPD_PCI_VDR_MASK 0xf8 /*!< vendor mask 5 bits */
139 125
140/** 126/* VPD vendor tag */
141 * VPD vendor tag
142 */
143enum { 127enum {
144 BFA_MFG_VPD_UNKNOWN = 0, /*!< vendor unknown */ 128 BFA_MFG_VPD_UNKNOWN = 0, /*!< vendor unknown */
145 BFA_MFG_VPD_IBM = 1, /*!< vendor IBM */ 129 BFA_MFG_VPD_IBM = 1, /*!< vendor IBM */
@@ -151,8 +135,7 @@ enum {
151 BFA_MFG_VPD_PCI_BRCD = 0xf8, /*!< PCI VPD Brocade */ 135 BFA_MFG_VPD_PCI_BRCD = 0xf8, /*!< PCI VPD Brocade */
152}; 136};
153 137
154/** 138/* BFA adapter flash vpd data definition.
155 * @brief BFA adapter flash vpd data definition.
156 * 139 *
157 * All numerical fields are in big-endian format. 140 * All numerical fields are in big-endian format.
158 */ 141 */
diff --git a/drivers/net/ethernet/brocade/bna/bfa_defs_status.h b/drivers/net/ethernet/brocade/bna/bfa_defs_status.h
index 7c5fe6c2e80..ea9af9ae754 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_defs_status.h
+++ b/drivers/net/ethernet/brocade/bna/bfa_defs_status.h
@@ -18,8 +18,7 @@
18#ifndef __BFA_DEFS_STATUS_H__ 18#ifndef __BFA_DEFS_STATUS_H__
19#define __BFA_DEFS_STATUS_H__ 19#define __BFA_DEFS_STATUS_H__
20 20
21/** 21/* API status return values
22 * API status return values
23 * 22 *
24 * NOTE: The error msgs are auto generated from the comments. Only singe line 23 * NOTE: The error msgs are auto generated from the comments. Only singe line
25 * comments are supported 24 * comments are supported
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
index 0b640fafbda..959c58ef972 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
@@ -20,13 +20,9 @@
20#include "bfi_reg.h" 20#include "bfi_reg.h"
21#include "bfa_defs.h" 21#include "bfa_defs.h"
22 22
23/** 23/* IOC local definitions */
24 * IOC local definitions
25 */
26 24
27/** 25/* Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details. */
28 * Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details.
29 */
30 26
31#define bfa_ioc_firmware_lock(__ioc) \ 27#define bfa_ioc_firmware_lock(__ioc) \
32 ((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc)) 28 ((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc))
@@ -96,9 +92,7 @@ static void bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc,
96static void bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model); 92static void bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model);
97static u64 bfa_ioc_get_pwwn(struct bfa_ioc *ioc); 93static u64 bfa_ioc_get_pwwn(struct bfa_ioc *ioc);
98 94
99/** 95/* IOC state machine definitions/declarations */
100 * IOC state machine definitions/declarations
101 */
102enum ioc_event { 96enum ioc_event {
103 IOC_E_RESET = 1, /*!< IOC reset request */ 97 IOC_E_RESET = 1, /*!< IOC reset request */
104 IOC_E_ENABLE = 2, /*!< IOC enable request */ 98 IOC_E_ENABLE = 2, /*!< IOC enable request */
@@ -148,9 +142,7 @@ static void bfa_iocpf_initfail(struct bfa_ioc *ioc);
148static void bfa_iocpf_getattrfail(struct bfa_ioc *ioc); 142static void bfa_iocpf_getattrfail(struct bfa_ioc *ioc);
149static void bfa_iocpf_stop(struct bfa_ioc *ioc); 143static void bfa_iocpf_stop(struct bfa_ioc *ioc);
150 144
151/** 145/* IOCPF state machine events */
152 * IOCPF state machine events
153 */
154enum iocpf_event { 146enum iocpf_event {
155 IOCPF_E_ENABLE = 1, /*!< IOCPF enable request */ 147 IOCPF_E_ENABLE = 1, /*!< IOCPF enable request */
156 IOCPF_E_DISABLE = 2, /*!< IOCPF disable request */ 148 IOCPF_E_DISABLE = 2, /*!< IOCPF disable request */
@@ -166,9 +158,7 @@ enum iocpf_event {
166 IOCPF_E_SEM_ERROR = 12, /*!< h/w sem mapping error */ 158 IOCPF_E_SEM_ERROR = 12, /*!< h/w sem mapping error */
167}; 159};
168 160
169/** 161/* IOCPF states */
170 * IOCPF states
171 */
172enum bfa_iocpf_state { 162enum bfa_iocpf_state {
173 BFA_IOCPF_RESET = 1, /*!< IOC is in reset state */ 163 BFA_IOCPF_RESET = 1, /*!< IOC is in reset state */
174 BFA_IOCPF_SEMWAIT = 2, /*!< Waiting for IOC h/w semaphore */ 164 BFA_IOCPF_SEMWAIT = 2, /*!< Waiting for IOC h/w semaphore */
@@ -215,21 +205,15 @@ static struct bfa_sm_table iocpf_sm_table[] = {
215 {BFA_SM(bfa_iocpf_sm_disabled), BFA_IOCPF_DISABLED}, 205 {BFA_SM(bfa_iocpf_sm_disabled), BFA_IOCPF_DISABLED},
216}; 206};
217 207
218/** 208/* IOC State Machine */
219 * IOC State Machine
220 */
221 209
222/** 210/* Beginning state. IOC uninit state. */
223 * Beginning state. IOC uninit state.
224 */
225static void 211static void
226bfa_ioc_sm_uninit_entry(struct bfa_ioc *ioc) 212bfa_ioc_sm_uninit_entry(struct bfa_ioc *ioc)
227{ 213{
228} 214}
229 215
230/** 216/* IOC is in uninit state. */
231 * IOC is in uninit state.
232 */
233static void 217static void
234bfa_ioc_sm_uninit(struct bfa_ioc *ioc, enum ioc_event event) 218bfa_ioc_sm_uninit(struct bfa_ioc *ioc, enum ioc_event event)
235{ 219{
@@ -243,18 +227,14 @@ bfa_ioc_sm_uninit(struct bfa_ioc *ioc, enum ioc_event event)
243 } 227 }
244} 228}
245 229
246/** 230/* Reset entry actions -- initialize state machine */
247 * Reset entry actions -- initialize state machine
248 */
249static void 231static void
250bfa_ioc_sm_reset_entry(struct bfa_ioc *ioc) 232bfa_ioc_sm_reset_entry(struct bfa_ioc *ioc)
251{ 233{
252 bfa_fsm_set_state(&ioc->iocpf, bfa_iocpf_sm_reset); 234 bfa_fsm_set_state(&ioc->iocpf, bfa_iocpf_sm_reset);
253} 235}
254 236
255/** 237/* IOC is in reset state. */
256 * IOC is in reset state.
257 */
258static void 238static void
259bfa_ioc_sm_reset(struct bfa_ioc *ioc, enum ioc_event event) 239bfa_ioc_sm_reset(struct bfa_ioc *ioc, enum ioc_event event)
260{ 240{
@@ -282,8 +262,7 @@ bfa_ioc_sm_enabling_entry(struct bfa_ioc *ioc)
282 bfa_iocpf_enable(ioc); 262 bfa_iocpf_enable(ioc);
283} 263}
284 264
285/** 265/* Host IOC function is being enabled, awaiting response from firmware.
286 * Host IOC function is being enabled, awaiting response from firmware.
287 * Semaphore is acquired. 266 * Semaphore is acquired.
288 */ 267 */
289static void 268static void
@@ -325,9 +304,7 @@ bfa_ioc_sm_enabling(struct bfa_ioc *ioc, enum ioc_event event)
325 } 304 }
326} 305}
327 306
328/** 307/* Semaphore should be acquired for version check. */
329 * Semaphore should be acquired for version check.
330 */
331static void 308static void
332bfa_ioc_sm_getattr_entry(struct bfa_ioc *ioc) 309bfa_ioc_sm_getattr_entry(struct bfa_ioc *ioc)
333{ 310{
@@ -336,9 +313,7 @@ bfa_ioc_sm_getattr_entry(struct bfa_ioc *ioc)
336 bfa_ioc_send_getattr(ioc); 313 bfa_ioc_send_getattr(ioc);
337} 314}
338 315
339/** 316/* IOC configuration in progress. Timer is active. */
340 * IOC configuration in progress. Timer is active.
341 */
342static void 317static void
343bfa_ioc_sm_getattr(struct bfa_ioc *ioc, enum ioc_event event) 318bfa_ioc_sm_getattr(struct bfa_ioc *ioc, enum ioc_event event)
344{ 319{
@@ -419,9 +394,7 @@ bfa_ioc_sm_disabling_entry(struct bfa_ioc *ioc)
419 bfa_iocpf_disable(ioc); 394 bfa_iocpf_disable(ioc);
420} 395}
421 396
422/** 397/* IOC is being disabled */
423 * IOC is being disabled
424 */
425static void 398static void
426bfa_ioc_sm_disabling(struct bfa_ioc *ioc, enum ioc_event event) 399bfa_ioc_sm_disabling(struct bfa_ioc *ioc, enum ioc_event event)
427{ 400{
@@ -449,9 +422,7 @@ bfa_ioc_sm_disabling(struct bfa_ioc *ioc, enum ioc_event event)
449 } 422 }
450} 423}
451 424
452/** 425/* IOC disable completion entry. */
453 * IOC disable completion entry.
454 */
455static void 426static void
456bfa_ioc_sm_disabled_entry(struct bfa_ioc *ioc) 427bfa_ioc_sm_disabled_entry(struct bfa_ioc *ioc)
457{ 428{
@@ -485,9 +456,7 @@ bfa_ioc_sm_fail_retry_entry(struct bfa_ioc *ioc)
485{ 456{
486} 457}
487 458
488/** 459/* Hardware initialization retry. */
489 * Hardware initialization retry.
490 */
491static void 460static void
492bfa_ioc_sm_fail_retry(struct bfa_ioc *ioc, enum ioc_event event) 461bfa_ioc_sm_fail_retry(struct bfa_ioc *ioc, enum ioc_event event)
493{ 462{
@@ -534,9 +503,7 @@ bfa_ioc_sm_fail_entry(struct bfa_ioc *ioc)
534{ 503{
535} 504}
536 505
537/** 506/* IOC failure. */
538 * IOC failure.
539 */
540static void 507static void
541bfa_ioc_sm_fail(struct bfa_ioc *ioc, enum ioc_event event) 508bfa_ioc_sm_fail(struct bfa_ioc *ioc, enum ioc_event event)
542{ 509{
@@ -568,9 +535,7 @@ bfa_ioc_sm_hwfail_entry(struct bfa_ioc *ioc)
568{ 535{
569} 536}
570 537
571/** 538/* IOC failure. */
572 * IOC failure.
573 */
574static void 539static void
575bfa_ioc_sm_hwfail(struct bfa_ioc *ioc, enum ioc_event event) 540bfa_ioc_sm_hwfail(struct bfa_ioc *ioc, enum ioc_event event)
576{ 541{
@@ -593,13 +558,9 @@ bfa_ioc_sm_hwfail(struct bfa_ioc *ioc, enum ioc_event event)
593 } 558 }
594} 559}
595 560
596/** 561/* IOCPF State Machine */
597 * IOCPF State Machine
598 */
599 562
600/** 563/* Reset entry actions -- initialize state machine */
601 * Reset entry actions -- initialize state machine
602 */
603static void 564static void
604bfa_iocpf_sm_reset_entry(struct bfa_iocpf *iocpf) 565bfa_iocpf_sm_reset_entry(struct bfa_iocpf *iocpf)
605{ 566{
@@ -607,9 +568,7 @@ bfa_iocpf_sm_reset_entry(struct bfa_iocpf *iocpf)
607 iocpf->auto_recover = bfa_nw_auto_recover; 568 iocpf->auto_recover = bfa_nw_auto_recover;
608} 569}
609 570
610/** 571/* Beginning state. IOC is in reset state. */
611 * Beginning state. IOC is in reset state.
612 */
613static void 572static void
614bfa_iocpf_sm_reset(struct bfa_iocpf *iocpf, enum iocpf_event event) 573bfa_iocpf_sm_reset(struct bfa_iocpf *iocpf, enum iocpf_event event)
615{ 574{
@@ -626,9 +585,7 @@ bfa_iocpf_sm_reset(struct bfa_iocpf *iocpf, enum iocpf_event event)
626 } 585 }
627} 586}
628 587
629/** 588/* Semaphore should be acquired for version check. */
630 * Semaphore should be acquired for version check.
631 */
632static void 589static void
633bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf *iocpf) 590bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf *iocpf)
634{ 591{
@@ -636,9 +593,7 @@ bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf *iocpf)
636 bfa_ioc_hw_sem_get(iocpf->ioc); 593 bfa_ioc_hw_sem_get(iocpf->ioc);
637} 594}
638 595
639/** 596/* Awaiting h/w semaphore to continue with version check. */
640 * Awaiting h/w semaphore to continue with version check.
641 */
642static void 597static void
643bfa_iocpf_sm_fwcheck(struct bfa_iocpf *iocpf, enum iocpf_event event) 598bfa_iocpf_sm_fwcheck(struct bfa_iocpf *iocpf, enum iocpf_event event)
644{ 599{
@@ -683,9 +638,7 @@ bfa_iocpf_sm_fwcheck(struct bfa_iocpf *iocpf, enum iocpf_event event)
683 } 638 }
684} 639}
685 640
686/** 641/* Notify enable completion callback */
687 * Notify enable completion callback
688 */
689static void 642static void
690bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf *iocpf) 643bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf *iocpf)
691{ 644{
@@ -698,9 +651,7 @@ bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf *iocpf)
698 msecs_to_jiffies(BFA_IOC_TOV)); 651 msecs_to_jiffies(BFA_IOC_TOV));
699} 652}
700 653
701/** 654/* Awaiting firmware version match. */
702 * Awaiting firmware version match.
703 */
704static void 655static void
705bfa_iocpf_sm_mismatch(struct bfa_iocpf *iocpf, enum iocpf_event event) 656bfa_iocpf_sm_mismatch(struct bfa_iocpf *iocpf, enum iocpf_event event)
706{ 657{
@@ -727,18 +678,14 @@ bfa_iocpf_sm_mismatch(struct bfa_iocpf *iocpf, enum iocpf_event event)
727 } 678 }
728} 679}
729 680
730/** 681/* Request for semaphore. */
731 * Request for semaphore.
732 */
733static void 682static void
734bfa_iocpf_sm_semwait_entry(struct bfa_iocpf *iocpf) 683bfa_iocpf_sm_semwait_entry(struct bfa_iocpf *iocpf)
735{ 684{
736 bfa_ioc_hw_sem_get(iocpf->ioc); 685 bfa_ioc_hw_sem_get(iocpf->ioc);
737} 686}
738 687
739/** 688/* Awaiting semaphore for h/w initialzation. */
740 * Awaiting semaphore for h/w initialzation.
741 */
742static void 689static void
743bfa_iocpf_sm_semwait(struct bfa_iocpf *iocpf, enum iocpf_event event) 690bfa_iocpf_sm_semwait(struct bfa_iocpf *iocpf, enum iocpf_event event)
744{ 691{
@@ -778,8 +725,7 @@ bfa_iocpf_sm_hwinit_entry(struct bfa_iocpf *iocpf)
778 bfa_ioc_reset(iocpf->ioc, false); 725 bfa_ioc_reset(iocpf->ioc, false);
779} 726}
780 727
781/** 728/* Hardware is being initialized. Interrupts are enabled.
782 * Hardware is being initialized. Interrupts are enabled.
783 * Holding hardware semaphore lock. 729 * Holding hardware semaphore lock.
784 */ 730 */
785static void 731static void
@@ -822,8 +768,7 @@ bfa_iocpf_sm_enabling_entry(struct bfa_iocpf *iocpf)
822 bfa_ioc_send_enable(iocpf->ioc); 768 bfa_ioc_send_enable(iocpf->ioc);
823} 769}
824 770
825/** 771/* Host IOC function is being enabled, awaiting response from firmware.
826 * Host IOC function is being enabled, awaiting response from firmware.
827 * Semaphore is acquired. 772 * Semaphore is acquired.
828 */ 773 */
829static void 774static void
@@ -896,9 +841,7 @@ bfa_iocpf_sm_disabling_entry(struct bfa_iocpf *iocpf)
896 bfa_ioc_send_disable(iocpf->ioc); 841 bfa_ioc_send_disable(iocpf->ioc);
897} 842}
898 843
899/** 844/* IOC is being disabled */
900 * IOC is being disabled
901 */
902static void 845static void
903bfa_iocpf_sm_disabling(struct bfa_iocpf *iocpf, enum iocpf_event event) 846bfa_iocpf_sm_disabling(struct bfa_iocpf *iocpf, enum iocpf_event event)
904{ 847{
@@ -935,9 +878,7 @@ bfa_iocpf_sm_disabling_sync_entry(struct bfa_iocpf *iocpf)
935 bfa_ioc_hw_sem_get(iocpf->ioc); 878 bfa_ioc_hw_sem_get(iocpf->ioc);
936} 879}
937 880
938/** 881/* IOC hb ack request is being removed. */
939 * IOC hb ack request is being removed.
940 */
941static void 882static void
942bfa_iocpf_sm_disabling_sync(struct bfa_iocpf *iocpf, enum iocpf_event event) 883bfa_iocpf_sm_disabling_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
943{ 884{
@@ -963,9 +904,7 @@ bfa_iocpf_sm_disabling_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
963 } 904 }
964} 905}
965 906
966/** 907/* IOC disable completion entry. */
967 * IOC disable completion entry.
968 */
969static void 908static void
970bfa_iocpf_sm_disabled_entry(struct bfa_iocpf *iocpf) 909bfa_iocpf_sm_disabled_entry(struct bfa_iocpf *iocpf)
971{ 910{
@@ -1000,9 +939,7 @@ bfa_iocpf_sm_initfail_sync_entry(struct bfa_iocpf *iocpf)
1000 bfa_ioc_hw_sem_get(iocpf->ioc); 939 bfa_ioc_hw_sem_get(iocpf->ioc);
1001} 940}
1002 941
1003/** 942/* Hardware initialization failed. */
1004 * Hardware initialization failed.
1005 */
1006static void 943static void
1007bfa_iocpf_sm_initfail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event) 944bfa_iocpf_sm_initfail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
1008{ 945{
@@ -1046,9 +983,7 @@ bfa_iocpf_sm_initfail_entry(struct bfa_iocpf *iocpf)
1046{ 983{
1047} 984}
1048 985
1049/** 986/* Hardware initialization failed. */
1050 * Hardware initialization failed.
1051 */
1052static void 987static void
1053bfa_iocpf_sm_initfail(struct bfa_iocpf *iocpf, enum iocpf_event event) 988bfa_iocpf_sm_initfail(struct bfa_iocpf *iocpf, enum iocpf_event event)
1054{ 989{
@@ -1084,9 +1019,7 @@ bfa_iocpf_sm_fail_sync_entry(struct bfa_iocpf *iocpf)
1084 bfa_ioc_hw_sem_get(iocpf->ioc); 1019 bfa_ioc_hw_sem_get(iocpf->ioc);
1085} 1020}
1086 1021
1087/** 1022/* IOC is in failed state. */
1088 * IOC is in failed state.
1089 */
1090static void 1023static void
1091bfa_iocpf_sm_fail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event) 1024bfa_iocpf_sm_fail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
1092{ 1025{
@@ -1134,10 +1067,7 @@ bfa_iocpf_sm_fail_entry(struct bfa_iocpf *iocpf)
1134{ 1067{
1135} 1068}
1136 1069
1137/** 1070/* IOC is in failed state. */
1138 * @brief
1139 * IOC is in failed state.
1140 */
1141static void 1071static void
1142bfa_iocpf_sm_fail(struct bfa_iocpf *iocpf, enum iocpf_event event) 1072bfa_iocpf_sm_fail(struct bfa_iocpf *iocpf, enum iocpf_event event)
1143{ 1073{
@@ -1151,13 +1081,9 @@ bfa_iocpf_sm_fail(struct bfa_iocpf *iocpf, enum iocpf_event event)
1151 } 1081 }
1152} 1082}
1153 1083
1154/** 1084/* BFA IOC private functions */
1155 * BFA IOC private functions
1156 */
1157 1085
1158/** 1086/* Notify common modules registered for notification. */
1159 * Notify common modules registered for notification.
1160 */
1161static void 1087static void
1162bfa_ioc_event_notify(struct bfa_ioc *ioc, enum bfa_ioc_event event) 1088bfa_ioc_event_notify(struct bfa_ioc *ioc, enum bfa_ioc_event event)
1163{ 1089{
@@ -1298,10 +1224,7 @@ bfa_ioc_hw_sem_get_cancel(struct bfa_ioc *ioc)
1298 del_timer(&ioc->sem_timer); 1224 del_timer(&ioc->sem_timer);
1299} 1225}
1300 1226
1301/** 1227/* Initialize LPU local memory (aka secondary memory / SRAM) */
1302 * @brief
1303 * Initialize LPU local memory (aka secondary memory / SRAM)
1304 */
1305static void 1228static void
1306bfa_ioc_lmem_init(struct bfa_ioc *ioc) 1229bfa_ioc_lmem_init(struct bfa_ioc *ioc)
1307{ 1230{
@@ -1366,9 +1289,7 @@ bfa_ioc_lpu_stop(struct bfa_ioc *ioc)
1366 writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg); 1289 writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
1367} 1290}
1368 1291
1369/** 1292/* Get driver and firmware versions. */
1370 * Get driver and firmware versions.
1371 */
1372void 1293void
1373bfa_nw_ioc_fwver_get(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr) 1294bfa_nw_ioc_fwver_get(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
1374{ 1295{
@@ -1388,9 +1309,7 @@ bfa_nw_ioc_fwver_get(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
1388 } 1309 }
1389} 1310}
1390 1311
1391/** 1312/* Returns TRUE if same. */
1392 * Returns TRUE if same.
1393 */
1394bool 1313bool
1395bfa_nw_ioc_fwver_cmp(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr) 1314bfa_nw_ioc_fwver_cmp(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
1396{ 1315{
@@ -1408,8 +1327,7 @@ bfa_nw_ioc_fwver_cmp(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
1408 return true; 1327 return true;
1409} 1328}
1410 1329
1411/** 1330/* Return true if current running version is valid. Firmware signature and
1412 * Return true if current running version is valid. Firmware signature and
1413 * execution context (driver/bios) must match. 1331 * execution context (driver/bios) must match.
1414 */ 1332 */
1415static bool 1333static bool
@@ -1430,9 +1348,7 @@ bfa_ioc_fwver_valid(struct bfa_ioc *ioc, u32 boot_env)
1430 return bfa_nw_ioc_fwver_cmp(ioc, &fwhdr); 1348 return bfa_nw_ioc_fwver_cmp(ioc, &fwhdr);
1431} 1349}
1432 1350
1433/** 1351/* Conditionally flush any pending message from firmware at start. */
1434 * Conditionally flush any pending message from firmware at start.
1435 */
1436static void 1352static void
1437bfa_ioc_msgflush(struct bfa_ioc *ioc) 1353bfa_ioc_msgflush(struct bfa_ioc *ioc)
1438{ 1354{
@@ -1443,9 +1359,6 @@ bfa_ioc_msgflush(struct bfa_ioc *ioc)
1443 writel(1, ioc->ioc_regs.lpu_mbox_cmd); 1359 writel(1, ioc->ioc_regs.lpu_mbox_cmd);
1444} 1360}
1445 1361
1446/**
1447 * @img ioc_init_logic.jpg
1448 */
1449static void 1362static void
1450bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force) 1363bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force)
1451{ 1364{
@@ -1603,10 +1516,7 @@ bfa_ioc_hb_stop(struct bfa_ioc *ioc)
1603 del_timer(&ioc->hb_timer); 1516 del_timer(&ioc->hb_timer);
1604} 1517}
1605 1518
1606/** 1519/* Initiate a full firmware download. */
1607 * @brief
1608 * Initiate a full firmware download.
1609 */
1610static void 1520static void
1611bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type, 1521bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type,
1612 u32 boot_env) 1522 u32 boot_env)
@@ -1672,9 +1582,7 @@ bfa_ioc_reset(struct bfa_ioc *ioc, bool force)
1672 bfa_ioc_hwinit(ioc, force); 1582 bfa_ioc_hwinit(ioc, force);
1673} 1583}
1674 1584
1675/** 1585/* BFA ioc enable reply by firmware */
1676 * BFA ioc enable reply by firmware
1677 */
1678static void 1586static void
1679bfa_ioc_enable_reply(struct bfa_ioc *ioc, enum bfa_mode port_mode, 1587bfa_ioc_enable_reply(struct bfa_ioc *ioc, enum bfa_mode port_mode,
1680 u8 cap_bm) 1588 u8 cap_bm)
@@ -1686,10 +1594,7 @@ bfa_ioc_enable_reply(struct bfa_ioc *ioc, enum bfa_mode port_mode,
1686 bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_ENABLE); 1594 bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_ENABLE);
1687} 1595}
1688 1596
1689/** 1597/* Update BFA configuration from firmware configuration. */
1690 * @brief
1691 * Update BFA configuration from firmware configuration.
1692 */
1693static void 1598static void
1694bfa_ioc_getattr_reply(struct bfa_ioc *ioc) 1599bfa_ioc_getattr_reply(struct bfa_ioc *ioc)
1695{ 1600{
@@ -1702,9 +1607,7 @@ bfa_ioc_getattr_reply(struct bfa_ioc *ioc)
1702 bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR); 1607 bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR);
1703} 1608}
1704 1609
1705/** 1610/* Attach time initialization of mbox logic. */
1706 * Attach time initialization of mbox logic.
1707 */
1708static void 1611static void
1709bfa_ioc_mbox_attach(struct bfa_ioc *ioc) 1612bfa_ioc_mbox_attach(struct bfa_ioc *ioc)
1710{ 1613{
@@ -1718,9 +1621,7 @@ bfa_ioc_mbox_attach(struct bfa_ioc *ioc)
1718 } 1621 }
1719} 1622}
1720 1623
1721/** 1624/* Mbox poll timer -- restarts any pending mailbox requests. */
1722 * Mbox poll timer -- restarts any pending mailbox requests.
1723 */
1724static void 1625static void
1725bfa_ioc_mbox_poll(struct bfa_ioc *ioc) 1626bfa_ioc_mbox_poll(struct bfa_ioc *ioc)
1726{ 1627{
@@ -1760,9 +1661,7 @@ bfa_ioc_mbox_poll(struct bfa_ioc *ioc)
1760 } 1661 }
1761} 1662}
1762 1663
1763/** 1664/* Cleanup any pending requests. */
1764 * Cleanup any pending requests.
1765 */
1766static void 1665static void
1767bfa_ioc_mbox_flush(struct bfa_ioc *ioc) 1666bfa_ioc_mbox_flush(struct bfa_ioc *ioc)
1768{ 1667{
@@ -1774,12 +1673,12 @@ bfa_ioc_mbox_flush(struct bfa_ioc *ioc)
1774} 1673}
1775 1674
1776/** 1675/**
1777 * Read data from SMEM to host through PCI memmap 1676 * bfa_nw_ioc_smem_read - Read data from SMEM to host through PCI memmap
1778 * 1677 *
1779 * @param[in] ioc memory for IOC 1678 * @ioc: memory for IOC
1780 * @param[in] tbuf app memory to store data from smem 1679 * @tbuf: app memory to store data from smem
1781 * @param[in] soff smem offset 1680 * @soff: smem offset
1782 * @param[in] sz size of smem in bytes 1681 * @sz: size of smem in bytes
1783 */ 1682 */
1784static int 1683static int
1785bfa_nw_ioc_smem_read(struct bfa_ioc *ioc, void *tbuf, u32 soff, u32 sz) 1684bfa_nw_ioc_smem_read(struct bfa_ioc *ioc, void *tbuf, u32 soff, u32 sz)
@@ -1826,9 +1725,7 @@ bfa_nw_ioc_smem_read(struct bfa_ioc *ioc, void *tbuf, u32 soff, u32 sz)
1826 return 0; 1725 return 0;
1827} 1726}
1828 1727
1829/** 1728/* Retrieve saved firmware trace from a prior IOC failure. */
1830 * Retrieve saved firmware trace from a prior IOC failure.
1831 */
1832int 1729int
1833bfa_nw_ioc_debug_fwtrc(struct bfa_ioc *ioc, void *trcdata, int *trclen) 1730bfa_nw_ioc_debug_fwtrc(struct bfa_ioc *ioc, void *trcdata, int *trclen)
1834{ 1731{
@@ -1844,9 +1741,7 @@ bfa_nw_ioc_debug_fwtrc(struct bfa_ioc *ioc, void *trcdata, int *trclen)
1844 return status; 1741 return status;
1845} 1742}
1846 1743
1847/** 1744/* Save firmware trace if configured. */
1848 * Save firmware trace if configured.
1849 */
1850static void 1745static void
1851bfa_nw_ioc_debug_save_ftrc(struct bfa_ioc *ioc) 1746bfa_nw_ioc_debug_save_ftrc(struct bfa_ioc *ioc)
1852{ 1747{
@@ -1861,9 +1756,7 @@ bfa_nw_ioc_debug_save_ftrc(struct bfa_ioc *ioc)
1861 } 1756 }
1862} 1757}
1863 1758
1864/** 1759/* Retrieve saved firmware trace from a prior IOC failure. */
1865 * Retrieve saved firmware trace from a prior IOC failure.
1866 */
1867int 1760int
1868bfa_nw_ioc_debug_fwsave(struct bfa_ioc *ioc, void *trcdata, int *trclen) 1761bfa_nw_ioc_debug_fwsave(struct bfa_ioc *ioc, void *trcdata, int *trclen)
1869{ 1762{
@@ -1892,9 +1785,7 @@ bfa_ioc_fail_notify(struct bfa_ioc *ioc)
1892 bfa_nw_ioc_debug_save_ftrc(ioc); 1785 bfa_nw_ioc_debug_save_ftrc(ioc);
1893} 1786}
1894 1787
1895/** 1788/* IOCPF to IOC interface */
1896 * IOCPF to IOC interface
1897 */
1898static void 1789static void
1899bfa_ioc_pf_enabled(struct bfa_ioc *ioc) 1790bfa_ioc_pf_enabled(struct bfa_ioc *ioc)
1900{ 1791{
@@ -1928,9 +1819,7 @@ bfa_ioc_pf_fwmismatch(struct bfa_ioc *ioc)
1928 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); 1819 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
1929} 1820}
1930 1821
1931/** 1822/* IOC public */
1932 * IOC public
1933 */
1934static enum bfa_status 1823static enum bfa_status
1935bfa_ioc_pll_init(struct bfa_ioc *ioc) 1824bfa_ioc_pll_init(struct bfa_ioc *ioc)
1936{ 1825{
@@ -1954,8 +1843,7 @@ bfa_ioc_pll_init(struct bfa_ioc *ioc)
1954 return BFA_STATUS_OK; 1843 return BFA_STATUS_OK;
1955} 1844}
1956 1845
1957/** 1846/* Interface used by diag module to do firmware boot with memory test
1958 * Interface used by diag module to do firmware boot with memory test
1959 * as the entry vector. 1847 * as the entry vector.
1960 */ 1848 */
1961static void 1849static void
@@ -1983,9 +1871,7 @@ bfa_ioc_boot(struct bfa_ioc *ioc, enum bfi_fwboot_type boot_type,
1983 bfa_ioc_lpu_start(ioc); 1871 bfa_ioc_lpu_start(ioc);
1984} 1872}
1985 1873
1986/** 1874/* Enable/disable IOC failure auto recovery. */
1987 * Enable/disable IOC failure auto recovery.
1988 */
1989void 1875void
1990bfa_nw_ioc_auto_recover(bool auto_recover) 1876bfa_nw_ioc_auto_recover(bool auto_recover)
1991{ 1877{
@@ -2056,10 +1942,10 @@ bfa_ioc_isr(struct bfa_ioc *ioc, struct bfi_mbmsg *m)
2056} 1942}
2057 1943
2058/** 1944/**
2059 * IOC attach time initialization and setup. 1945 * bfa_nw_ioc_attach - IOC attach time initialization and setup.
2060 * 1946 *
2061 * @param[in] ioc memory for IOC 1947 * @ioc: memory for IOC
2062 * @param[in] bfa driver instance structure 1948 * @bfa: driver instance structure
2063 */ 1949 */
2064void 1950void
2065bfa_nw_ioc_attach(struct bfa_ioc *ioc, void *bfa, struct bfa_ioc_cbfn *cbfn) 1951bfa_nw_ioc_attach(struct bfa_ioc *ioc, void *bfa, struct bfa_ioc_cbfn *cbfn)
@@ -2078,9 +1964,7 @@ bfa_nw_ioc_attach(struct bfa_ioc *ioc, void *bfa, struct bfa_ioc_cbfn *cbfn)
2078 bfa_fsm_send_event(ioc, IOC_E_RESET); 1964 bfa_fsm_send_event(ioc, IOC_E_RESET);
2079} 1965}
2080 1966
2081/** 1967/* Driver detach time IOC cleanup. */
2082 * Driver detach time IOC cleanup.
2083 */
2084void 1968void
2085bfa_nw_ioc_detach(struct bfa_ioc *ioc) 1969bfa_nw_ioc_detach(struct bfa_ioc *ioc)
2086{ 1970{
@@ -2091,9 +1975,9 @@ bfa_nw_ioc_detach(struct bfa_ioc *ioc)
2091} 1975}
2092 1976
2093/** 1977/**
2094 * Setup IOC PCI properties. 1978 * bfa_nw_ioc_pci_init - Setup IOC PCI properties.
2095 * 1979 *
2096 * @param[in] pcidev PCI device information for this IOC 1980 * @pcidev: PCI device information for this IOC
2097 */ 1981 */
2098void 1982void
2099bfa_nw_ioc_pci_init(struct bfa_ioc *ioc, struct bfa_pcidev *pcidev, 1983bfa_nw_ioc_pci_init(struct bfa_ioc *ioc, struct bfa_pcidev *pcidev,
@@ -2160,10 +2044,10 @@ bfa_nw_ioc_pci_init(struct bfa_ioc *ioc, struct bfa_pcidev *pcidev,
2160} 2044}
2161 2045
2162/** 2046/**
2163 * Initialize IOC dma memory 2047 * bfa_nw_ioc_mem_claim - Initialize IOC dma memory
2164 * 2048 *
2165 * @param[in] dm_kva kernel virtual address of IOC dma memory 2049 * @dm_kva: kernel virtual address of IOC dma memory
2166 * @param[in] dm_pa physical address of IOC dma memory 2050 * @dm_pa: physical address of IOC dma memory
2167 */ 2051 */
2168void 2052void
2169bfa_nw_ioc_mem_claim(struct bfa_ioc *ioc, u8 *dm_kva, u64 dm_pa) 2053bfa_nw_ioc_mem_claim(struct bfa_ioc *ioc, u8 *dm_kva, u64 dm_pa)
@@ -2176,9 +2060,7 @@ bfa_nw_ioc_mem_claim(struct bfa_ioc *ioc, u8 *dm_kva, u64 dm_pa)
2176 ioc->attr = (struct bfi_ioc_attr *) dm_kva; 2060 ioc->attr = (struct bfi_ioc_attr *) dm_kva;
2177} 2061}
2178 2062
2179/** 2063/* Return size of dma memory required. */
2180 * Return size of dma memory required.
2181 */
2182u32 2064u32
2183bfa_nw_ioc_meminfo(void) 2065bfa_nw_ioc_meminfo(void)
2184{ 2066{
@@ -2201,9 +2083,7 @@ bfa_nw_ioc_disable(struct bfa_ioc *ioc)
2201 bfa_fsm_send_event(ioc, IOC_E_DISABLE); 2083 bfa_fsm_send_event(ioc, IOC_E_DISABLE);
2202} 2084}
2203 2085
2204/** 2086/* Initialize memory for saving firmware trace. */
2205 * Initialize memory for saving firmware trace.
2206 */
2207void 2087void
2208bfa_nw_ioc_debug_memclaim(struct bfa_ioc *ioc, void *dbg_fwsave) 2088bfa_nw_ioc_debug_memclaim(struct bfa_ioc *ioc, void *dbg_fwsave)
2209{ 2089{
@@ -2217,9 +2097,7 @@ bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr)
2217 return PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, fmaddr); 2097 return PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, fmaddr);
2218} 2098}
2219 2099
2220/** 2100/* Register mailbox message handler function, to be called by common modules */
2221 * Register mailbox message handler function, to be called by common modules
2222 */
2223void 2101void
2224bfa_nw_ioc_mbox_regisr(struct bfa_ioc *ioc, enum bfi_mclass mc, 2102bfa_nw_ioc_mbox_regisr(struct bfa_ioc *ioc, enum bfi_mclass mc,
2225 bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg) 2103 bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg)
@@ -2231,11 +2109,12 @@ bfa_nw_ioc_mbox_regisr(struct bfa_ioc *ioc, enum bfi_mclass mc,
2231} 2109}
2232 2110
2233/** 2111/**
2234 * Queue a mailbox command request to firmware. Waits if mailbox is busy. 2112 * bfa_nw_ioc_mbox_queue - Queue a mailbox command request to firmware.
2235 * Responsibility of caller to serialize
2236 * 2113 *
2237 * @param[in] ioc IOC instance 2114 * @ioc: IOC instance
2238 * @param[i] cmd Mailbox command 2115 * @cmd: Mailbox command
2116 *
2117 * Waits if mailbox is busy. Responsibility of caller to serialize
2239 */ 2118 */
2240bool 2119bool
2241bfa_nw_ioc_mbox_queue(struct bfa_ioc *ioc, struct bfa_mbox_cmd *cmd, 2120bfa_nw_ioc_mbox_queue(struct bfa_ioc *ioc, struct bfa_mbox_cmd *cmd,
@@ -2272,9 +2151,7 @@ bfa_nw_ioc_mbox_queue(struct bfa_ioc *ioc, struct bfa_mbox_cmd *cmd,
2272 return false; 2151 return false;
2273} 2152}
2274 2153
2275/** 2154/* Handle mailbox interrupts */
2276 * Handle mailbox interrupts
2277 */
2278void 2155void
2279bfa_nw_ioc_mbox_isr(struct bfa_ioc *ioc) 2156bfa_nw_ioc_mbox_isr(struct bfa_ioc *ioc)
2280{ 2157{
@@ -2314,9 +2191,7 @@ bfa_nw_ioc_error_isr(struct bfa_ioc *ioc)
2314 bfa_fsm_send_event(ioc, IOC_E_HWERROR); 2191 bfa_fsm_send_event(ioc, IOC_E_HWERROR);
2315} 2192}
2316 2193
2317/** 2194/* return true if IOC is disabled */
2318 * return true if IOC is disabled
2319 */
2320bool 2195bool
2321bfa_nw_ioc_is_disabled(struct bfa_ioc *ioc) 2196bfa_nw_ioc_is_disabled(struct bfa_ioc *ioc)
2322{ 2197{
@@ -2324,17 +2199,14 @@ bfa_nw_ioc_is_disabled(struct bfa_ioc *ioc)
2324 bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled); 2199 bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled);
2325} 2200}
2326 2201
2327/** 2202/* return true if IOC is operational */
2328 * return true if IOC is operational
2329 */
2330bool 2203bool
2331bfa_nw_ioc_is_operational(struct bfa_ioc *ioc) 2204bfa_nw_ioc_is_operational(struct bfa_ioc *ioc)
2332{ 2205{
2333 return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op); 2206 return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op);
2334} 2207}
2335 2208
2336/** 2209/* Add to IOC heartbeat failure notification queue. To be used by common
2337 * Add to IOC heartbeat failure notification queue. To be used by common
2338 * modules such as cee, port, diag. 2210 * modules such as cee, port, diag.
2339 */ 2211 */
2340void 2212void
@@ -2518,9 +2390,7 @@ bfa_nw_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr)
2518 bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev); 2390 bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev);
2519} 2391}
2520 2392
2521/** 2393/* WWN public */
2522 * WWN public
2523 */
2524static u64 2394static u64
2525bfa_ioc_get_pwwn(struct bfa_ioc *ioc) 2395bfa_ioc_get_pwwn(struct bfa_ioc *ioc)
2526{ 2396{
@@ -2533,9 +2403,7 @@ bfa_nw_ioc_get_mac(struct bfa_ioc *ioc)
2533 return ioc->attr->mac; 2403 return ioc->attr->mac;
2534} 2404}
2535 2405
2536/** 2406/* Firmware failure detected. Start recovery actions. */
2537 * Firmware failure detected. Start recovery actions.
2538 */
2539static void 2407static void
2540bfa_ioc_recover(struct bfa_ioc *ioc) 2408bfa_ioc_recover(struct bfa_ioc *ioc)
2541{ 2409{
@@ -2545,10 +2413,7 @@ bfa_ioc_recover(struct bfa_ioc *ioc)
2545 bfa_fsm_send_event(ioc, IOC_E_HBFAIL); 2413 bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
2546} 2414}
2547 2415
2548/** 2416/* BFA IOC PF private functions */
2549 * @dg hal_iocpf_pvt BFA IOC PF private functions
2550 * @{
2551 */
2552 2417
2553static void 2418static void
2554bfa_iocpf_enable(struct bfa_ioc *ioc) 2419bfa_iocpf_enable(struct bfa_ioc *ioc)
@@ -2669,8 +2534,6 @@ bfa_flash_notify(void *cbarg, enum bfa_ioc_event event)
2669 2534
2670/* 2535/*
2671 * Send flash write request. 2536 * Send flash write request.
2672 *
2673 * @param[in] cbarg - callback argument
2674 */ 2537 */
2675static void 2538static void
2676bfa_flash_write_send(struct bfa_flash *flash) 2539bfa_flash_write_send(struct bfa_flash *flash)
@@ -2699,10 +2562,10 @@ bfa_flash_write_send(struct bfa_flash *flash)
2699 flash->offset += len; 2562 flash->offset += len;
2700} 2563}
2701 2564
2702/* 2565/**
2703 * Send flash read request. 2566 * bfa_flash_read_send - Send flash read request.
2704 * 2567 *
2705 * @param[in] cbarg - callback argument 2568 * @cbarg: callback argument
2706 */ 2569 */
2707static void 2570static void
2708bfa_flash_read_send(void *cbarg) 2571bfa_flash_read_send(void *cbarg)
@@ -2724,11 +2587,11 @@ bfa_flash_read_send(void *cbarg)
2724 bfa_nw_ioc_mbox_queue(flash->ioc, &flash->mb, NULL, NULL); 2587 bfa_nw_ioc_mbox_queue(flash->ioc, &flash->mb, NULL, NULL);
2725} 2588}
2726 2589
2727/* 2590/**
2728 * Process flash response messages upon receiving interrupts. 2591 * bfa_flash_intr - Process flash response messages upon receiving interrupts.
2729 * 2592 *
2730 * @param[in] flasharg - flash structure 2593 * @flasharg: flash structure
2731 * @param[in] msg - message structure 2594 * @msg: message structure
2732 */ 2595 */
2733static void 2596static void
2734bfa_flash_intr(void *flasharg, struct bfi_mbmsg *msg) 2597bfa_flash_intr(void *flasharg, struct bfi_mbmsg *msg)
@@ -2821,12 +2684,12 @@ bfa_nw_flash_meminfo(void)
2821 return roundup(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ); 2684 return roundup(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
2822} 2685}
2823 2686
2824/* 2687/**
2825 * Flash attach API. 2688 * bfa_nw_flash_attach - Flash attach API.
2826 * 2689 *
2827 * @param[in] flash - flash structure 2690 * @flash: flash structure
2828 * @param[in] ioc - ioc structure 2691 * @ioc: ioc structure
2829 * @param[in] dev - device structure 2692 * @dev: device structure
2830 */ 2693 */
2831void 2694void
2832bfa_nw_flash_attach(struct bfa_flash *flash, struct bfa_ioc *ioc, void *dev) 2695bfa_nw_flash_attach(struct bfa_flash *flash, struct bfa_ioc *ioc, void *dev)
@@ -2842,12 +2705,12 @@ bfa_nw_flash_attach(struct bfa_flash *flash, struct bfa_ioc *ioc, void *dev)
2842 list_add_tail(&flash->ioc_notify.qe, &flash->ioc->notify_q); 2705 list_add_tail(&flash->ioc_notify.qe, &flash->ioc->notify_q);
2843} 2706}
2844 2707
2845/* 2708/**
2846 * Claim memory for flash 2709 * bfa_nw_flash_memclaim - Claim memory for flash
2847 * 2710 *
2848 * @param[in] flash - flash structure 2711 * @flash: flash structure
2849 * @param[in] dm_kva - pointer to virtual memory address 2712 * @dm_kva: pointer to virtual memory address
2850 * @param[in] dm_pa - physical memory address 2713 * @dm_pa: physical memory address
2851 */ 2714 */
2852void 2715void
2853bfa_nw_flash_memclaim(struct bfa_flash *flash, u8 *dm_kva, u64 dm_pa) 2716bfa_nw_flash_memclaim(struct bfa_flash *flash, u8 *dm_kva, u64 dm_pa)
@@ -2859,13 +2722,13 @@ bfa_nw_flash_memclaim(struct bfa_flash *flash, u8 *dm_kva, u64 dm_pa)
2859 dm_pa += roundup(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ); 2722 dm_pa += roundup(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
2860} 2723}
2861 2724
2862/* 2725/**
2863 * Get flash attribute. 2726 * bfa_nw_flash_get_attr - Get flash attribute.
2864 * 2727 *
2865 * @param[in] flash - flash structure 2728 * @flash: flash structure
2866 * @param[in] attr - flash attribute structure 2729 * @attr: flash attribute structure
2867 * @param[in] cbfn - callback function 2730 * @cbfn: callback function
2868 * @param[in] cbarg - callback argument 2731 * @cbarg: callback argument
2869 * 2732 *
2870 * Return status. 2733 * Return status.
2871 */ 2734 */
@@ -2895,17 +2758,17 @@ bfa_nw_flash_get_attr(struct bfa_flash *flash, struct bfa_flash_attr *attr,
2895 return BFA_STATUS_OK; 2758 return BFA_STATUS_OK;
2896} 2759}
2897 2760
2898/* 2761/**
2899 * Update flash partition. 2762 * bfa_nw_flash_update_part - Update flash partition.
2900 * 2763 *
2901 * @param[in] flash - flash structure 2764 * @flash: flash structure
2902 * @param[in] type - flash partition type 2765 * @type: flash partition type
2903 * @param[in] instance - flash partition instance 2766 * @instance: flash partition instance
2904 * @param[in] buf - update data buffer 2767 * @buf: update data buffer
2905 * @param[in] len - data buffer length 2768 * @len: data buffer length
2906 * @param[in] offset - offset relative to the partition starting address 2769 * @offset: offset relative to the partition starting address
2907 * @param[in] cbfn - callback function 2770 * @cbfn: callback function
2908 * @param[in] cbarg - callback argument 2771 * @cbarg: callback argument
2909 * 2772 *
2910 * Return status. 2773 * Return status.
2911 */ 2774 */
@@ -2944,17 +2807,17 @@ bfa_nw_flash_update_part(struct bfa_flash *flash, u32 type, u8 instance,
2944 return BFA_STATUS_OK; 2807 return BFA_STATUS_OK;
2945} 2808}
2946 2809
2947/* 2810/**
2948 * Read flash partition. 2811 * bfa_nw_flash_read_part - Read flash partition.
2949 * 2812 *
2950 * @param[in] flash - flash structure 2813 * @flash: flash structure
2951 * @param[in] type - flash partition type 2814 * @type: flash partition type
2952 * @param[in] instance - flash partition instance 2815 * @instance: flash partition instance
2953 * @param[in] buf - read data buffer 2816 * @buf: read data buffer
2954 * @param[in] len - data buffer length 2817 * @len: data buffer length
2955 * @param[in] offset - offset relative to the partition starting address 2818 * @offset: offset relative to the partition starting address
2956 * @param[in] cbfn - callback function 2819 * @cbfn: callback function
2957 * @param[in] cbarg - callback argument 2820 * @cbarg: callback argument
2958 * 2821 *
2959 * Return status. 2822 * Return status.
2960 */ 2823 */
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.h b/drivers/net/ethernet/brocade/bna/bfa_ioc.h
index 3b4460fdc14..63a85e555df 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc.h
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.h
@@ -30,9 +30,7 @@
30#define BNA_DBG_FWTRC_LEN (BFI_IOC_TRC_ENTS * BFI_IOC_TRC_ENT_SZ + \ 30#define BNA_DBG_FWTRC_LEN (BFI_IOC_TRC_ENTS * BFI_IOC_TRC_ENT_SZ + \
31 BFI_IOC_TRC_HDR_SZ) 31 BFI_IOC_TRC_HDR_SZ)
32 32
33/** 33/* PCI device information required by IOC */
34 * PCI device information required by IOC
35 */
36struct bfa_pcidev { 34struct bfa_pcidev {
37 int pci_slot; 35 int pci_slot;
38 u8 pci_func; 36 u8 pci_func;
@@ -41,8 +39,7 @@ struct bfa_pcidev {
41 void __iomem *pci_bar_kva; 39 void __iomem *pci_bar_kva;
42}; 40};
43 41
44/** 42/* Structure used to remember the DMA-able memory block's KVA and Physical
45 * Structure used to remember the DMA-able memory block's KVA and Physical
46 * Address 43 * Address
47 */ 44 */
48struct bfa_dma { 45struct bfa_dma {
@@ -52,15 +49,11 @@ struct bfa_dma {
52 49
53#define BFA_DMA_ALIGN_SZ 256 50#define BFA_DMA_ALIGN_SZ 256
54 51
55/** 52/* smem size for Crossbow and Catapult */
56 * smem size for Crossbow and Catapult
57 */
58#define BFI_SMEM_CB_SIZE 0x200000U /* ! 2MB for crossbow */ 53#define BFI_SMEM_CB_SIZE 0x200000U /* ! 2MB for crossbow */
59#define BFI_SMEM_CT_SIZE 0x280000U /* ! 2.5MB for catapult */ 54#define BFI_SMEM_CT_SIZE 0x280000U /* ! 2.5MB for catapult */
60 55
61/** 56/* BFA dma address assignment macro. (big endian format) */
62 * @brief BFA dma address assignment macro. (big endian format)
63 */
64#define bfa_dma_be_addr_set(dma_addr, pa) \ 57#define bfa_dma_be_addr_set(dma_addr, pa) \
65 __bfa_dma_be_addr_set(&dma_addr, (u64)pa) 58 __bfa_dma_be_addr_set(&dma_addr, (u64)pa)
66static inline void 59static inline void
@@ -108,9 +101,7 @@ struct bfa_ioc_regs {
108 u32 smem_pg0; 101 u32 smem_pg0;
109}; 102};
110 103
111/** 104/* IOC Mailbox structures */
112 * IOC Mailbox structures
113 */
114typedef void (*bfa_mbox_cmd_cbfn_t)(void *cbarg); 105typedef void (*bfa_mbox_cmd_cbfn_t)(void *cbarg);
115struct bfa_mbox_cmd { 106struct bfa_mbox_cmd {
116 struct list_head qe; 107 struct list_head qe;
@@ -119,9 +110,7 @@ struct bfa_mbox_cmd {
119 u32 msg[BFI_IOC_MSGSZ]; 110 u32 msg[BFI_IOC_MSGSZ];
120}; 111};
121 112
122/** 113/* IOC mailbox module */
123 * IOC mailbox module
124 */
125typedef void (*bfa_ioc_mbox_mcfunc_t)(void *cbarg, struct bfi_mbmsg *m); 114typedef void (*bfa_ioc_mbox_mcfunc_t)(void *cbarg, struct bfi_mbmsg *m);
126struct bfa_ioc_mbox_mod { 115struct bfa_ioc_mbox_mod {
127 struct list_head cmd_q; /*!< pending mbox queue */ 116 struct list_head cmd_q; /*!< pending mbox queue */
@@ -132,9 +121,7 @@ struct bfa_ioc_mbox_mod {
132 } mbhdlr[BFI_MC_MAX]; 121 } mbhdlr[BFI_MC_MAX];
133}; 122};
134 123
135/** 124/* IOC callback function interfaces */
136 * IOC callback function interfaces
137 */
138typedef void (*bfa_ioc_enable_cbfn_t)(void *bfa, enum bfa_status status); 125typedef void (*bfa_ioc_enable_cbfn_t)(void *bfa, enum bfa_status status);
139typedef void (*bfa_ioc_disable_cbfn_t)(void *bfa); 126typedef void (*bfa_ioc_disable_cbfn_t)(void *bfa);
140typedef void (*bfa_ioc_hbfail_cbfn_t)(void *bfa); 127typedef void (*bfa_ioc_hbfail_cbfn_t)(void *bfa);
@@ -146,9 +133,7 @@ struct bfa_ioc_cbfn {
146 bfa_ioc_reset_cbfn_t reset_cbfn; 133 bfa_ioc_reset_cbfn_t reset_cbfn;
147}; 134};
148 135
149/** 136/* IOC event notification mechanism. */
150 * IOC event notification mechanism.
151 */
152enum bfa_ioc_event { 137enum bfa_ioc_event {
153 BFA_IOC_E_ENABLED = 1, 138 BFA_IOC_E_ENABLED = 1,
154 BFA_IOC_E_DISABLED = 2, 139 BFA_IOC_E_DISABLED = 2,
@@ -163,9 +148,7 @@ struct bfa_ioc_notify {
163 void *cbarg; 148 void *cbarg;
164}; 149};
165 150
166/** 151/* Initialize a IOC event notification structure */
167 * Initialize a IOC event notification structure
168 */
169#define bfa_ioc_notify_init(__notify, __cbfn, __cbarg) do { \ 152#define bfa_ioc_notify_init(__notify, __cbfn, __cbarg) do { \
170 (__notify)->cbfn = (__cbfn); \ 153 (__notify)->cbfn = (__cbfn); \
171 (__notify)->cbarg = (__cbarg); \ 154 (__notify)->cbarg = (__cbarg); \
@@ -261,9 +244,7 @@ struct bfa_ioc_hwif {
261#define BFA_IOC_FLASH_OFFSET_IN_CHUNK(off) (off % BFI_FLASH_CHUNK_SZ_WORDS) 244#define BFA_IOC_FLASH_OFFSET_IN_CHUNK(off) (off % BFI_FLASH_CHUNK_SZ_WORDS)
262#define BFA_IOC_FLASH_CHUNK_ADDR(chunkno) (chunkno * BFI_FLASH_CHUNK_SZ_WORDS) 245#define BFA_IOC_FLASH_CHUNK_ADDR(chunkno) (chunkno * BFI_FLASH_CHUNK_SZ_WORDS)
263 246
264/** 247/* IOC mailbox interface */
265 * IOC mailbox interface
266 */
267bool bfa_nw_ioc_mbox_queue(struct bfa_ioc *ioc, 248bool bfa_nw_ioc_mbox_queue(struct bfa_ioc *ioc,
268 struct bfa_mbox_cmd *cmd, 249 struct bfa_mbox_cmd *cmd,
269 bfa_mbox_cmd_cbfn_t cbfn, void *cbarg); 250 bfa_mbox_cmd_cbfn_t cbfn, void *cbarg);
@@ -271,9 +252,7 @@ void bfa_nw_ioc_mbox_isr(struct bfa_ioc *ioc);
271void bfa_nw_ioc_mbox_regisr(struct bfa_ioc *ioc, enum bfi_mclass mc, 252void bfa_nw_ioc_mbox_regisr(struct bfa_ioc *ioc, enum bfi_mclass mc,
272 bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg); 253 bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg);
273 254
274/** 255/* IOC interfaces */
275 * IOC interfaces
276 */
277 256
278#define bfa_ioc_pll_init_asic(__ioc) \ 257#define bfa_ioc_pll_init_asic(__ioc) \
279 ((__ioc)->ioc_hwif->ioc_pll_init((__ioc)->pcidev.pci_bar_kva, \ 258 ((__ioc)->ioc_hwif->ioc_pll_init((__ioc)->pcidev.pci_bar_kva, \
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c b/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
index b6b036a143a..5df0b0c68c5 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
@@ -87,9 +87,7 @@ static const struct bfa_ioc_hwif nw_hwif_ct2 = {
87 .ioc_sync_complete = bfa_ioc_ct_sync_complete, 87 .ioc_sync_complete = bfa_ioc_ct_sync_complete,
88}; 88};
89 89
90/** 90/* Called from bfa_ioc_attach() to map asic specific calls. */
91 * Called from bfa_ioc_attach() to map asic specific calls.
92 */
93void 91void
94bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc) 92bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc)
95{ 93{
@@ -102,9 +100,7 @@ bfa_nw_ioc_set_ct2_hwif(struct bfa_ioc *ioc)
102 ioc->ioc_hwif = &nw_hwif_ct2; 100 ioc->ioc_hwif = &nw_hwif_ct2;
103} 101}
104 102
105/** 103/* Return true if firmware of current driver matches the running firmware. */
106 * Return true if firmware of current driver matches the running firmware.
107 */
108static bool 104static bool
109bfa_ioc_ct_firmware_lock(struct bfa_ioc *ioc) 105bfa_ioc_ct_firmware_lock(struct bfa_ioc *ioc)
110{ 106{
@@ -182,9 +178,7 @@ bfa_ioc_ct_firmware_unlock(struct bfa_ioc *ioc)
182 bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg); 178 bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
183} 179}
184 180
185/** 181/* Notify other functions on HB failure. */
186 * Notify other functions on HB failure.
187 */
188static void 182static void
189bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc) 183bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc)
190{ 184{
@@ -195,9 +189,7 @@ bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc)
195 readl(ioc->ioc_regs.alt_ll_halt); 189 readl(ioc->ioc_regs.alt_ll_halt);
196} 190}
197 191
198/** 192/* Host to LPU mailbox message addresses */
199 * Host to LPU mailbox message addresses
200 */
201static const struct { 193static const struct {
202 u32 hfn_mbox; 194 u32 hfn_mbox;
203 u32 lpu_mbox; 195 u32 lpu_mbox;
@@ -209,9 +201,7 @@ static const struct {
209 { HOSTFN3_LPU_MBOX0_8, LPU_HOSTFN3_MBOX0_8, HOST_PAGE_NUM_FN3 } 201 { HOSTFN3_LPU_MBOX0_8, LPU_HOSTFN3_MBOX0_8, HOST_PAGE_NUM_FN3 }
210}; 202};
211 203
212/** 204/* Host <-> LPU mailbox command/status registers - port 0 */
213 * Host <-> LPU mailbox command/status registers - port 0
214 */
215static const struct { 205static const struct {
216 u32 hfn; 206 u32 hfn;
217 u32 lpu; 207 u32 lpu;
@@ -222,9 +212,7 @@ static const struct {
222 { HOSTFN3_LPU0_CMD_STAT, LPU0_HOSTFN3_CMD_STAT } 212 { HOSTFN3_LPU0_CMD_STAT, LPU0_HOSTFN3_CMD_STAT }
223}; 213};
224 214
225/** 215/* Host <-> LPU mailbox command/status registers - port 1 */
226 * Host <-> LPU mailbox command/status registers - port 1
227 */
228static const struct { 216static const struct {
229 u32 hfn; 217 u32 hfn;
230 u32 lpu; 218 u32 lpu;
@@ -368,9 +356,7 @@ bfa_ioc_ct2_reg_init(struct bfa_ioc *ioc)
368 ioc->ioc_regs.err_set = rb + ERR_SET_REG; 356 ioc->ioc_regs.err_set = rb + ERR_SET_REG;
369} 357}
370 358
371/** 359/* Initialize IOC to port mapping. */
372 * Initialize IOC to port mapping.
373 */
374 360
375#define FNC_PERS_FN_SHIFT(__fn) ((__fn) * 8) 361#define FNC_PERS_FN_SHIFT(__fn) ((__fn) * 8)
376static void 362static void
@@ -398,9 +384,7 @@ bfa_ioc_ct2_map_port(struct bfa_ioc *ioc)
398 ioc->port_id = ((r32 & __FC_LL_PORT_MAP__MK) >> __FC_LL_PORT_MAP__SH); 384 ioc->port_id = ((r32 & __FC_LL_PORT_MAP__MK) >> __FC_LL_PORT_MAP__SH);
399} 385}
400 386
401/** 387/* Set interrupt mode for a function: INTX or MSIX */
402 * Set interrupt mode for a function: INTX or MSIX
403 */
404static void 388static void
405bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix) 389bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix)
406{ 390{
@@ -443,9 +427,7 @@ bfa_ioc_ct2_lpu_read_stat(struct bfa_ioc *ioc)
443 return false; 427 return false;
444} 428}
445 429
446/** 430/* MSI-X resource allocation for 1860 with no asic block */
447 * MSI-X resource allocation for 1860 with no asic block
448 */
449#define HOSTFN_MSIX_DEFAULT 64 431#define HOSTFN_MSIX_DEFAULT 64
450#define HOSTFN_MSIX_VT_INDEX_MBOX_ERR 0x30138 432#define HOSTFN_MSIX_VT_INDEX_MBOX_ERR 0x30138
451#define HOSTFN_MSIX_VT_OFST_NUMVT 0x3013c 433#define HOSTFN_MSIX_VT_OFST_NUMVT 0x3013c
@@ -473,9 +455,7 @@ bfa_nw_ioc_ct2_poweron(struct bfa_ioc *ioc)
473 rb + HOSTFN_MSIX_VT_INDEX_MBOX_ERR); 455 rb + HOSTFN_MSIX_VT_INDEX_MBOX_ERR);
474} 456}
475 457
476/** 458/* Cleanup hw semaphore and usecnt registers */
477 * Cleanup hw semaphore and usecnt registers
478 */
479static void 459static void
480bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc) 460bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc)
481{ 461{
@@ -492,9 +472,7 @@ bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc)
492 bfa_nw_ioc_hw_sem_release(ioc); 472 bfa_nw_ioc_hw_sem_release(ioc);
493} 473}
494 474
495/** 475/* Synchronized IOC failure processing routines */
496 * Synchronized IOC failure processing routines
497 */
498static bool 476static bool
499bfa_ioc_ct_sync_start(struct bfa_ioc *ioc) 477bfa_ioc_ct_sync_start(struct bfa_ioc *ioc)
500{ 478{
@@ -518,9 +496,7 @@ bfa_ioc_ct_sync_start(struct bfa_ioc *ioc)
518 496
519 return bfa_ioc_ct_sync_complete(ioc); 497 return bfa_ioc_ct_sync_complete(ioc);
520} 498}
521/** 499/* Synchronized IOC failure processing routines */
522 * Synchronized IOC failure processing routines
523 */
524static void 500static void
525bfa_ioc_ct_sync_join(struct bfa_ioc *ioc) 501bfa_ioc_ct_sync_join(struct bfa_ioc *ioc)
526{ 502{
diff --git a/drivers/net/ethernet/brocade/bna/bfa_msgq.c b/drivers/net/ethernet/brocade/bna/bfa_msgq.c
index dd36427f475..55067d0d25c 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_msgq.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_msgq.c
@@ -16,9 +16,7 @@
16 * www.brocade.com 16 * www.brocade.com
17 */ 17 */
18 18
19/** 19/* MSGQ module source file. */
20 * @file bfa_msgq.c MSGQ module source file.
21 */
22 20
23#include "bfi.h" 21#include "bfi.h"
24#include "bfa_msgq.h" 22#include "bfa_msgq.h"
diff --git a/drivers/net/ethernet/brocade/bna/bfi.h b/drivers/net/ethernet/brocade/bna/bfi.h
index 0d9df695397..1f24c23dc78 100644
--- a/drivers/net/ethernet/brocade/bna/bfi.h
+++ b/drivers/net/ethernet/brocade/bna/bfi.h
@@ -22,15 +22,11 @@
22 22
23#pragma pack(1) 23#pragma pack(1)
24 24
25/** 25/* BFI FW image type */
26 * BFI FW image type
27 */
28#define BFI_FLASH_CHUNK_SZ 256 /*!< Flash chunk size */ 26#define BFI_FLASH_CHUNK_SZ 256 /*!< Flash chunk size */
29#define BFI_FLASH_CHUNK_SZ_WORDS (BFI_FLASH_CHUNK_SZ/sizeof(u32)) 27#define BFI_FLASH_CHUNK_SZ_WORDS (BFI_FLASH_CHUNK_SZ/sizeof(u32))
30 28
31/** 29/* Msg header common to all msgs */
32 * Msg header common to all msgs
33 */
34struct bfi_mhdr { 30struct bfi_mhdr {
35 u8 msg_class; /*!< @ref enum bfi_mclass */ 31 u8 msg_class; /*!< @ref enum bfi_mclass */
36 u8 msg_id; /*!< msg opcode with in the class */ 32 u8 msg_id; /*!< msg opcode with in the class */
@@ -65,17 +61,14 @@ struct bfi_mhdr {
65#define BFI_I2H_OPCODE_BASE 128 61#define BFI_I2H_OPCODE_BASE 128
66#define BFA_I2HM(_x) ((_x) + BFI_I2H_OPCODE_BASE) 62#define BFA_I2HM(_x) ((_x) + BFI_I2H_OPCODE_BASE)
67 63
68/** 64/****************************************************************************
69 ****************************************************************************
70 * 65 *
71 * Scatter Gather Element and Page definition 66 * Scatter Gather Element and Page definition
72 * 67 *
73 **************************************************************************** 68 ****************************************************************************
74 */ 69 */
75 70
76/** 71/* DMA addresses */
77 * DMA addresses
78 */
79union bfi_addr_u { 72union bfi_addr_u {
80 struct { 73 struct {
81 u32 addr_lo; 74 u32 addr_lo;
@@ -83,9 +76,7 @@ union bfi_addr_u {
83 } a32; 76 } a32;
84}; 77};
85 78
86/** 79/* Generic DMA addr-len pair. */
87 * Generic DMA addr-len pair.
88 */
89struct bfi_alen { 80struct bfi_alen {
90 union bfi_addr_u al_addr; /* DMA addr of buffer */ 81 union bfi_addr_u al_addr; /* DMA addr of buffer */
91 u32 al_len; /* length of buffer */ 82 u32 al_len; /* length of buffer */
@@ -98,26 +89,20 @@ struct bfi_alen {
98#define BFI_LMSG_PL_WSZ \ 89#define BFI_LMSG_PL_WSZ \
99 ((BFI_LMSG_SZ - sizeof(struct bfi_mhdr)) / 4) 90 ((BFI_LMSG_SZ - sizeof(struct bfi_mhdr)) / 4)
100 91
101/** 92/* Mailbox message structure */
102 * Mailbox message structure
103 */
104#define BFI_MBMSG_SZ 7 93#define BFI_MBMSG_SZ 7
105struct bfi_mbmsg { 94struct bfi_mbmsg {
106 struct bfi_mhdr mh; 95 struct bfi_mhdr mh;
107 u32 pl[BFI_MBMSG_SZ]; 96 u32 pl[BFI_MBMSG_SZ];
108}; 97};
109 98
110/** 99/* Supported PCI function class codes (personality) */
111 * Supported PCI function class codes (personality)
112 */
113enum bfi_pcifn_class { 100enum bfi_pcifn_class {
114 BFI_PCIFN_CLASS_FC = 0x0c04, 101 BFI_PCIFN_CLASS_FC = 0x0c04,
115 BFI_PCIFN_CLASS_ETH = 0x0200, 102 BFI_PCIFN_CLASS_ETH = 0x0200,
116}; 103};
117 104
118/** 105/* Message Classes */
119 * Message Classes
120 */
121enum bfi_mclass { 106enum bfi_mclass {
122 BFI_MC_IOC = 1, /*!< IO Controller (IOC) */ 107 BFI_MC_IOC = 1, /*!< IO Controller (IOC) */
123 BFI_MC_DIAG = 2, /*!< Diagnostic Msgs */ 108 BFI_MC_DIAG = 2, /*!< Diagnostic Msgs */
@@ -159,15 +144,12 @@ enum bfi_mclass {
159 144
160#define BFI_FWBOOT_ENV_OS 0 145#define BFI_FWBOOT_ENV_OS 0
161 146
162/** 147/*----------------------------------------------------------------------
163 *----------------------------------------------------------------------
164 * IOC 148 * IOC
165 *---------------------------------------------------------------------- 149 *----------------------------------------------------------------------
166 */ 150 */
167 151
168/** 152/* Different asic generations */
169 * Different asic generations
170 */
171enum bfi_asic_gen { 153enum bfi_asic_gen {
172 BFI_ASIC_GEN_CB = 1, 154 BFI_ASIC_GEN_CB = 1,
173 BFI_ASIC_GEN_CT = 2, 155 BFI_ASIC_GEN_CT = 2,
@@ -196,9 +178,7 @@ enum bfi_ioc_i2h_msgs {
196 BFI_IOC_I2H_HBEAT = BFA_I2HM(4), 178 BFI_IOC_I2H_HBEAT = BFA_I2HM(4),
197}; 179};
198 180
199/** 181/* BFI_IOC_H2I_GETATTR_REQ message */
200 * BFI_IOC_H2I_GETATTR_REQ message
201 */
202struct bfi_ioc_getattr_req { 182struct bfi_ioc_getattr_req {
203 struct bfi_mhdr mh; 183 struct bfi_mhdr mh;
204 union bfi_addr_u attr_addr; 184 union bfi_addr_u attr_addr;
@@ -231,30 +211,22 @@ struct bfi_ioc_attr {
231 u32 card_type; /*!< card type */ 211 u32 card_type; /*!< card type */
232}; 212};
233 213
234/** 214/* BFI_IOC_I2H_GETATTR_REPLY message */
235 * BFI_IOC_I2H_GETATTR_REPLY message
236 */
237struct bfi_ioc_getattr_reply { 215struct bfi_ioc_getattr_reply {
238 struct bfi_mhdr mh; /*!< Common msg header */ 216 struct bfi_mhdr mh; /*!< Common msg header */
239 u8 status; /*!< cfg reply status */ 217 u8 status; /*!< cfg reply status */
240 u8 rsvd[3]; 218 u8 rsvd[3];
241}; 219};
242 220
243/** 221/* Firmware memory page offsets */
244 * Firmware memory page offsets
245 */
246#define BFI_IOC_SMEM_PG0_CB (0x40) 222#define BFI_IOC_SMEM_PG0_CB (0x40)
247#define BFI_IOC_SMEM_PG0_CT (0x180) 223#define BFI_IOC_SMEM_PG0_CT (0x180)
248 224
249/** 225/* Firmware statistic offset */
250 * Firmware statistic offset
251 */
252#define BFI_IOC_FWSTATS_OFF (0x6B40) 226#define BFI_IOC_FWSTATS_OFF (0x6B40)
253#define BFI_IOC_FWSTATS_SZ (4096) 227#define BFI_IOC_FWSTATS_SZ (4096)
254 228
255/** 229/* Firmware trace offset */
256 * Firmware trace offset
257 */
258#define BFI_IOC_TRC_OFF (0x4b00) 230#define BFI_IOC_TRC_OFF (0x4b00)
259#define BFI_IOC_TRC_ENTS 256 231#define BFI_IOC_TRC_ENTS 256
260#define BFI_IOC_TRC_ENT_SZ 16 232#define BFI_IOC_TRC_ENT_SZ 16
@@ -299,9 +271,7 @@ struct bfi_ioc_hbeat {
299 u32 hb_count; /*!< current heart beat count */ 271 u32 hb_count; /*!< current heart beat count */
300}; 272};
301 273
302/** 274/* IOC hardware/firmware state */
303 * IOC hardware/firmware state
304 */
305enum bfi_ioc_state { 275enum bfi_ioc_state {
306 BFI_IOC_UNINIT = 0, /*!< not initialized */ 276 BFI_IOC_UNINIT = 0, /*!< not initialized */
307 BFI_IOC_INITING = 1, /*!< h/w is being initialized */ 277 BFI_IOC_INITING = 1, /*!< h/w is being initialized */
@@ -345,9 +315,7 @@ enum {
345 ((__adap_type) & (BFI_ADAPTER_TTV | BFI_ADAPTER_PROTO | \ 315 ((__adap_type) & (BFI_ADAPTER_TTV | BFI_ADAPTER_PROTO | \
346 BFI_ADAPTER_UNSUPP)) 316 BFI_ADAPTER_UNSUPP))
347 317
348/** 318/* BFI_IOC_H2I_ENABLE_REQ & BFI_IOC_H2I_DISABLE_REQ messages */
349 * BFI_IOC_H2I_ENABLE_REQ & BFI_IOC_H2I_DISABLE_REQ messages
350 */
351struct bfi_ioc_ctrl_req { 319struct bfi_ioc_ctrl_req {
352 struct bfi_mhdr mh; 320 struct bfi_mhdr mh;
353 u16 clscode; 321 u16 clscode;
@@ -355,9 +323,7 @@ struct bfi_ioc_ctrl_req {
355 u32 tv_sec; 323 u32 tv_sec;
356}; 324};
357 325
358/** 326/* BFI_IOC_I2H_ENABLE_REPLY & BFI_IOC_I2H_DISABLE_REPLY messages */
359 * BFI_IOC_I2H_ENABLE_REPLY & BFI_IOC_I2H_DISABLE_REPLY messages
360 */
361struct bfi_ioc_ctrl_reply { 327struct bfi_ioc_ctrl_reply {
362 struct bfi_mhdr mh; /*!< Common msg header */ 328 struct bfi_mhdr mh; /*!< Common msg header */
363 u8 status; /*!< enable/disable status */ 329 u8 status; /*!< enable/disable status */
@@ -367,9 +333,7 @@ struct bfi_ioc_ctrl_reply {
367}; 333};
368 334
369#define BFI_IOC_MSGSZ 8 335#define BFI_IOC_MSGSZ 8
370/** 336/* H2I Messages */
371 * H2I Messages
372 */
373union bfi_ioc_h2i_msg_u { 337union bfi_ioc_h2i_msg_u {
374 struct bfi_mhdr mh; 338 struct bfi_mhdr mh;
375 struct bfi_ioc_ctrl_req enable_req; 339 struct bfi_ioc_ctrl_req enable_req;
@@ -378,17 +342,14 @@ union bfi_ioc_h2i_msg_u {
378 u32 mboxmsg[BFI_IOC_MSGSZ]; 342 u32 mboxmsg[BFI_IOC_MSGSZ];
379}; 343};
380 344
381/** 345/* I2H Messages */
382 * I2H Messages
383 */
384union bfi_ioc_i2h_msg_u { 346union bfi_ioc_i2h_msg_u {
385 struct bfi_mhdr mh; 347 struct bfi_mhdr mh;
386 struct bfi_ioc_ctrl_reply fw_event; 348 struct bfi_ioc_ctrl_reply fw_event;
387 u32 mboxmsg[BFI_IOC_MSGSZ]; 349 u32 mboxmsg[BFI_IOC_MSGSZ];
388}; 350};
389 351
390/** 352/*----------------------------------------------------------------------
391 *----------------------------------------------------------------------
392 * MSGQ 353 * MSGQ
393 *---------------------------------------------------------------------- 354 *----------------------------------------------------------------------
394 */ 355 */
diff --git a/drivers/net/ethernet/brocade/bna/bfi_cna.h b/drivers/net/ethernet/brocade/bna/bfi_cna.h
index 4eecabea397..6704a439297 100644
--- a/drivers/net/ethernet/brocade/bna/bfi_cna.h
+++ b/drivers/net/ethernet/brocade/bna/bfi_cna.h
@@ -37,18 +37,14 @@ enum bfi_port_i2h {
37 BFI_PORT_I2H_CLEAR_STATS_RSP = BFA_I2HM(4), 37 BFI_PORT_I2H_CLEAR_STATS_RSP = BFA_I2HM(4),
38}; 38};
39 39
40/** 40/* Generic REQ type */
41 * Generic REQ type
42 */
43struct bfi_port_generic_req { 41struct bfi_port_generic_req {
44 struct bfi_mhdr mh; /*!< msg header */ 42 struct bfi_mhdr mh; /*!< msg header */
45 u32 msgtag; /*!< msgtag for reply */ 43 u32 msgtag; /*!< msgtag for reply */
46 u32 rsvd; 44 u32 rsvd;
47}; 45};
48 46
49/** 47/* Generic RSP type */
50 * Generic RSP type
51 */
52struct bfi_port_generic_rsp { 48struct bfi_port_generic_rsp {
53 struct bfi_mhdr mh; /*!< common msg header */ 49 struct bfi_mhdr mh; /*!< common msg header */
54 u8 status; /*!< port enable status */ 50 u8 status; /*!< port enable status */
@@ -56,44 +52,12 @@ struct bfi_port_generic_rsp {
56 u32 msgtag; /*!< msgtag for reply */ 52 u32 msgtag; /*!< msgtag for reply */
57}; 53};
58 54
59/** 55/* BFI_PORT_H2I_GET_STATS_REQ */
60 * @todo
61 * BFI_PORT_H2I_ENABLE_REQ
62 */
63
64/**
65 * @todo
66 * BFI_PORT_I2H_ENABLE_RSP
67 */
68
69/**
70 * BFI_PORT_H2I_DISABLE_REQ
71 */
72
73/**
74 * BFI_PORT_I2H_DISABLE_RSP
75 */
76
77/**
78 * BFI_PORT_H2I_GET_STATS_REQ
79 */
80struct bfi_port_get_stats_req { 56struct bfi_port_get_stats_req {
81 struct bfi_mhdr mh; /*!< common msg header */ 57 struct bfi_mhdr mh; /*!< common msg header */
82 union bfi_addr_u dma_addr; 58 union bfi_addr_u dma_addr;
83}; 59};
84 60
85/**
86 * BFI_PORT_I2H_GET_STATS_RSP
87 */
88
89/**
90 * BFI_PORT_H2I_CLEAR_STATS_REQ
91 */
92
93/**
94 * BFI_PORT_I2H_CLEAR_STATS_RSP
95 */
96
97union bfi_port_h2i_msg_u { 61union bfi_port_h2i_msg_u {
98 struct bfi_mhdr mh; 62 struct bfi_mhdr mh;
99 struct bfi_port_generic_req enable_req; 63 struct bfi_port_generic_req enable_req;
diff --git a/drivers/net/ethernet/brocade/bna/bfi_enet.h b/drivers/net/ethernet/brocade/bna/bfi_enet.h
index a90f1cf46b4..eef6e1f8aec 100644
--- a/drivers/net/ethernet/brocade/bna/bfi_enet.h
+++ b/drivers/net/ethernet/brocade/bna/bfi_enet.h
@@ -16,12 +16,9 @@
16 * www.brocade.com 16 * www.brocade.com
17 */ 17 */
18 18
19/** 19/* BNA Hardware and Firmware Interface */
20 * @file bfi_enet.h BNA Hardware and Firmware Interface
21 */
22 20
23/** 21/* Skipping statistics collection to avoid clutter.
24 * Skipping statistics collection to avoid clutter.
25 * Command is no longer needed: 22 * Command is no longer needed:
26 * MTU 23 * MTU
27 * TxQ Stop 24 * TxQ Stop
@@ -64,9 +61,7 @@ union bfi_addr_be_u {
64 } a32; 61 } a32;
65}; 62};
66 63
67/** 64/* T X Q U E U E D E F I N E S */
68 * T X Q U E U E D E F I N E S
69 */
70/* TxQ Vector (a.k.a. Tx-Buffer Descriptor) */ 65/* TxQ Vector (a.k.a. Tx-Buffer Descriptor) */
71/* TxQ Entry Opcodes */ 66/* TxQ Entry Opcodes */
72#define BFI_ENET_TXQ_WI_SEND (0x402) /* Single Frame Transmission */ 67#define BFI_ENET_TXQ_WI_SEND (0x402) /* Single Frame Transmission */
@@ -106,10 +101,7 @@ struct bfi_enet_txq_wi_vector { /* Tx Buffer Descriptor */
106 union bfi_addr_be_u addr; 101 union bfi_addr_be_u addr;
107}; 102};
108 103
109/** 104/* TxQ Entry Structure */
110 * TxQ Entry Structure
111 *
112 */
113struct bfi_enet_txq_entry { 105struct bfi_enet_txq_entry {
114 union { 106 union {
115 struct bfi_enet_txq_wi_base base; 107 struct bfi_enet_txq_wi_base base;
@@ -124,16 +116,12 @@ struct bfi_enet_txq_entry {
124#define BFI_ENET_TXQ_WI_L4_HDR_N_OFFSET(_hdr_size, _offset) \ 116#define BFI_ENET_TXQ_WI_L4_HDR_N_OFFSET(_hdr_size, _offset) \
125 (((_hdr_size) << 10) | ((_offset) & 0x3FF)) 117 (((_hdr_size) << 10) | ((_offset) & 0x3FF))
126 118
127/** 119/* R X Q U E U E D E F I N E S */
128 * R X Q U E U E D E F I N E S
129 */
130struct bfi_enet_rxq_entry { 120struct bfi_enet_rxq_entry {
131 union bfi_addr_be_u rx_buffer; 121 union bfi_addr_be_u rx_buffer;
132}; 122};
133 123
134/** 124/* R X C O M P L E T I O N Q U E U E D E F I N E S */
135 * R X C O M P L E T I O N Q U E U E D E F I N E S
136 */
137/* CQ Entry Flags */ 125/* CQ Entry Flags */
138#define BFI_ENET_CQ_EF_MAC_ERROR (1 << 0) 126#define BFI_ENET_CQ_EF_MAC_ERROR (1 << 0)
139#define BFI_ENET_CQ_EF_FCS_ERROR (1 << 1) 127#define BFI_ENET_CQ_EF_FCS_ERROR (1 << 1)
@@ -174,9 +162,7 @@ struct bfi_enet_cq_entry {
174 u8 rxq_id; 162 u8 rxq_id;
175}; 163};
176 164
177/** 165/* E N E T C O N T R O L P A T H C O M M A N D S */
178 * E N E T C O N T R O L P A T H C O M M A N D S
179 */
180struct bfi_enet_q { 166struct bfi_enet_q {
181 union bfi_addr_u pg_tbl; 167 union bfi_addr_u pg_tbl;
182 union bfi_addr_u first_entry; 168 union bfi_addr_u first_entry;
@@ -222,9 +208,7 @@ struct bfi_enet_ib {
222 u16 rsvd; 208 u16 rsvd;
223}; 209};
224 210
225/** 211/* ENET command messages */
226 * ENET command messages
227 */
228enum bfi_enet_h2i_msgs { 212enum bfi_enet_h2i_msgs {
229 /* Rx Commands */ 213 /* Rx Commands */
230 BFI_ENET_H2I_RX_CFG_SET_REQ = 1, 214 BFI_ENET_H2I_RX_CFG_SET_REQ = 1,
@@ -350,9 +334,7 @@ enum bfi_enet_i2h_msgs {
350 BFI_ENET_I2H_BW_UPDATE_AEN = BFA_I2HM(BFI_ENET_H2I_MAX + 4), 334 BFI_ENET_I2H_BW_UPDATE_AEN = BFA_I2HM(BFI_ENET_H2I_MAX + 4),
351}; 335};
352 336
353/** 337/* The following error codes can be returned by the enet commands */
354 * The following error codes can be returned by the enet commands
355 */
356enum bfi_enet_err { 338enum bfi_enet_err {
357 BFI_ENET_CMD_OK = 0, 339 BFI_ENET_CMD_OK = 0,
358 BFI_ENET_CMD_FAIL = 1, 340 BFI_ENET_CMD_FAIL = 1,
@@ -364,8 +346,7 @@ enum bfi_enet_err {
364 BFI_ENET_CMD_PORT_DISABLED = 7, /* !< port in disabled state */ 346 BFI_ENET_CMD_PORT_DISABLED = 7, /* !< port in disabled state */
365}; 347};
366 348
367/** 349/* Generic Request
368 * Generic Request
369 * 350 *
370 * bfi_enet_req is used by: 351 * bfi_enet_req is used by:
371 * BFI_ENET_H2I_RX_CFG_CLR_REQ 352 * BFI_ENET_H2I_RX_CFG_CLR_REQ
@@ -375,8 +356,7 @@ struct bfi_enet_req {
375 struct bfi_msgq_mhdr mh; 356 struct bfi_msgq_mhdr mh;
376}; 357};
377 358
378/** 359/* Enable/Disable Request
379 * Enable/Disable Request
380 * 360 *
381 * bfi_enet_enable_req is used by: 361 * bfi_enet_enable_req is used by:
382 * BFI_ENET_H2I_RSS_ENABLE_REQ (enet_id must be zero) 362 * BFI_ENET_H2I_RSS_ENABLE_REQ (enet_id must be zero)
@@ -391,9 +371,7 @@ struct bfi_enet_enable_req {
391 u8 rsvd[3]; 371 u8 rsvd[3];
392}; 372};
393 373
394/** 374/* Generic Response */
395 * Generic Response
396 */
397struct bfi_enet_rsp { 375struct bfi_enet_rsp {
398 struct bfi_msgq_mhdr mh; 376 struct bfi_msgq_mhdr mh;
399 u8 error; /*!< if error see cmd_offset */ 377 u8 error; /*!< if error see cmd_offset */
@@ -401,20 +379,16 @@ struct bfi_enet_rsp {
401 u16 cmd_offset; /*!< offset to invalid parameter */ 379 u16 cmd_offset; /*!< offset to invalid parameter */
402}; 380};
403 381
404/** 382/* GLOBAL CONFIGURATION */
405 * GLOBAL CONFIGURATION
406 */
407 383
408/** 384/* bfi_enet_attr_req is used by:
409 * bfi_enet_attr_req is used by:
410 * BFI_ENET_H2I_GET_ATTR_REQ 385 * BFI_ENET_H2I_GET_ATTR_REQ
411 */ 386 */
412struct bfi_enet_attr_req { 387struct bfi_enet_attr_req {
413 struct bfi_msgq_mhdr mh; 388 struct bfi_msgq_mhdr mh;
414}; 389};
415 390
416/** 391/* bfi_enet_attr_rsp is used by:
417 * bfi_enet_attr_rsp is used by:
418 * BFI_ENET_I2H_GET_ATTR_RSP 392 * BFI_ENET_I2H_GET_ATTR_RSP
419 */ 393 */
420struct bfi_enet_attr_rsp { 394struct bfi_enet_attr_rsp {
@@ -427,8 +401,7 @@ struct bfi_enet_attr_rsp {
427 u32 rit_size; 401 u32 rit_size;
428}; 402};
429 403
430/** 404/* Tx Configuration
431 * Tx Configuration
432 * 405 *
433 * bfi_enet_tx_cfg is used by: 406 * bfi_enet_tx_cfg is used by:
434 * BFI_ENET_H2I_TX_CFG_SET_REQ 407 * BFI_ENET_H2I_TX_CFG_SET_REQ
@@ -477,8 +450,7 @@ struct bfi_enet_tx_cfg_rsp {
477 } q_handles[BFI_ENET_TXQ_PRIO_MAX]; 450 } q_handles[BFI_ENET_TXQ_PRIO_MAX];
478}; 451};
479 452
480/** 453/* Rx Configuration
481 * Rx Configuration
482 * 454 *
483 * bfi_enet_rx_cfg is used by: 455 * bfi_enet_rx_cfg is used by:
484 * BFI_ENET_H2I_RX_CFG_SET_REQ 456 * BFI_ENET_H2I_RX_CFG_SET_REQ
@@ -553,8 +525,7 @@ struct bfi_enet_rx_cfg_rsp {
553 } q_handles[BFI_ENET_RX_QSET_MAX]; 525 } q_handles[BFI_ENET_RX_QSET_MAX];
554}; 526};
555 527
556/** 528/* RIT
557 * RIT
558 * 529 *
559 * bfi_enet_rit_req is used by: 530 * bfi_enet_rit_req is used by:
560 * BFI_ENET_H2I_RIT_CFG_REQ 531 * BFI_ENET_H2I_RIT_CFG_REQ
@@ -566,8 +537,7 @@ struct bfi_enet_rit_req {
566 u8 table[BFI_ENET_RSS_RIT_MAX]; 537 u8 table[BFI_ENET_RSS_RIT_MAX];
567}; 538};
568 539
569/** 540/* RSS
570 * RSS
571 * 541 *
572 * bfi_enet_rss_cfg_req is used by: 542 * bfi_enet_rss_cfg_req is used by:
573 * BFI_ENET_H2I_RSS_CFG_REQ 543 * BFI_ENET_H2I_RSS_CFG_REQ
@@ -591,8 +561,7 @@ struct bfi_enet_rss_cfg_req {
591 struct bfi_enet_rss_cfg cfg; 561 struct bfi_enet_rss_cfg cfg;
592}; 562};
593 563
594/** 564/* MAC Unicast
595 * MAC Unicast
596 * 565 *
597 * bfi_enet_rx_vlan_req is used by: 566 * bfi_enet_rx_vlan_req is used by:
598 * BFI_ENET_H2I_MAC_UCAST_SET_REQ 567 * BFI_ENET_H2I_MAC_UCAST_SET_REQ
@@ -606,17 +575,14 @@ struct bfi_enet_ucast_req {
606 u8 rsvd[2]; 575 u8 rsvd[2];
607}; 576};
608 577
609/** 578/* MAC Unicast + VLAN */
610 * MAC Unicast + VLAN
611 */
612struct bfi_enet_mac_n_vlan_req { 579struct bfi_enet_mac_n_vlan_req {
613 struct bfi_msgq_mhdr mh; 580 struct bfi_msgq_mhdr mh;
614 u16 vlan_id; 581 u16 vlan_id;
615 mac_t mac_addr; 582 mac_t mac_addr;
616}; 583};
617 584
618/** 585/* MAC Multicast
619 * MAC Multicast
620 * 586 *
621 * bfi_enet_mac_mfilter_add_req is used by: 587 * bfi_enet_mac_mfilter_add_req is used by:
622 * BFI_ENET_H2I_MAC_MCAST_ADD_REQ 588 * BFI_ENET_H2I_MAC_MCAST_ADD_REQ
@@ -627,8 +593,7 @@ struct bfi_enet_mcast_add_req {
627 u8 rsvd[2]; 593 u8 rsvd[2];
628}; 594};
629 595
630/** 596/* bfi_enet_mac_mfilter_add_rsp is used by:
631 * bfi_enet_mac_mfilter_add_rsp is used by:
632 * BFI_ENET_I2H_MAC_MCAST_ADD_RSP 597 * BFI_ENET_I2H_MAC_MCAST_ADD_RSP
633 */ 598 */
634struct bfi_enet_mcast_add_rsp { 599struct bfi_enet_mcast_add_rsp {
@@ -640,8 +605,7 @@ struct bfi_enet_mcast_add_rsp {
640 u8 rsvd1[2]; 605 u8 rsvd1[2];
641}; 606};
642 607
643/** 608/* bfi_enet_mac_mfilter_del_req is used by:
644 * bfi_enet_mac_mfilter_del_req is used by:
645 * BFI_ENET_H2I_MAC_MCAST_DEL_REQ 609 * BFI_ENET_H2I_MAC_MCAST_DEL_REQ
646 */ 610 */
647struct bfi_enet_mcast_del_req { 611struct bfi_enet_mcast_del_req {
@@ -650,8 +614,7 @@ struct bfi_enet_mcast_del_req {
650 u8 rsvd[2]; 614 u8 rsvd[2];
651}; 615};
652 616
653/** 617/* VLAN
654 * VLAN
655 * 618 *
656 * bfi_enet_rx_vlan_req is used by: 619 * bfi_enet_rx_vlan_req is used by:
657 * BFI_ENET_H2I_RX_VLAN_SET_REQ 620 * BFI_ENET_H2I_RX_VLAN_SET_REQ
@@ -663,8 +626,7 @@ struct bfi_enet_rx_vlan_req {
663 u32 bit_mask[BFI_ENET_VLAN_WORDS_MAX]; 626 u32 bit_mask[BFI_ENET_VLAN_WORDS_MAX];
664}; 627};
665 628
666/** 629/* PAUSE
667 * PAUSE
668 * 630 *
669 * bfi_enet_set_pause_req is used by: 631 * bfi_enet_set_pause_req is used by:
670 * BFI_ENET_H2I_SET_PAUSE_REQ 632 * BFI_ENET_H2I_SET_PAUSE_REQ
@@ -676,8 +638,7 @@ struct bfi_enet_set_pause_req {
676 u8 rx_pause; /* 1 = enable; 0 = disable */ 638 u8 rx_pause; /* 1 = enable; 0 = disable */
677}; 639};
678 640
679/** 641/* DIAGNOSTICS
680 * DIAGNOSTICS
681 * 642 *
682 * bfi_enet_diag_lb_req is used by: 643 * bfi_enet_diag_lb_req is used by:
683 * BFI_ENET_H2I_DIAG_LOOPBACK 644 * BFI_ENET_H2I_DIAG_LOOPBACK
@@ -689,16 +650,13 @@ struct bfi_enet_diag_lb_req {
689 u8 enable; /* 1 = enable; 0 = disable */ 650 u8 enable; /* 1 = enable; 0 = disable */
690}; 651};
691 652
692/** 653/* enum for Loopback opmodes */
693 * enum for Loopback opmodes
694 */
695enum { 654enum {
696 BFI_ENET_DIAG_LB_OPMODE_EXT = 0, 655 BFI_ENET_DIAG_LB_OPMODE_EXT = 0,
697 BFI_ENET_DIAG_LB_OPMODE_CBL = 1, 656 BFI_ENET_DIAG_LB_OPMODE_CBL = 1,
698}; 657};
699 658
700/** 659/* STATISTICS
701 * STATISTICS
702 * 660 *
703 * bfi_enet_stats_req is used by: 661 * bfi_enet_stats_req is used by:
704 * BFI_ENET_H2I_STATS_GET_REQ 662 * BFI_ENET_H2I_STATS_GET_REQ
@@ -713,9 +671,7 @@ struct bfi_enet_stats_req {
713 union bfi_addr_u host_buffer; 671 union bfi_addr_u host_buffer;
714}; 672};
715 673
716/** 674/* defines for "stats_mask" above. */
717 * defines for "stats_mask" above.
718 */
719#define BFI_ENET_STATS_MAC (1 << 0) /* !< MAC Statistics */ 675#define BFI_ENET_STATS_MAC (1 << 0) /* !< MAC Statistics */
720#define BFI_ENET_STATS_BPC (1 << 1) /* !< Pause Stats from BPC */ 676#define BFI_ENET_STATS_BPC (1 << 1) /* !< Pause Stats from BPC */
721#define BFI_ENET_STATS_RAD (1 << 2) /* !< Rx Admission Statistics */ 677#define BFI_ENET_STATS_RAD (1 << 2) /* !< Rx Admission Statistics */
@@ -881,8 +837,7 @@ struct bfi_enet_stats_mac {
881 u64 tx_fragments; 837 u64 tx_fragments;
882}; 838};
883 839
884/** 840/* Complete statistics, DMAed from fw to host followed by
885 * Complete statistics, DMAed from fw to host followed by
886 * BFI_ENET_I2H_STATS_GET_RSP 841 * BFI_ENET_I2H_STATS_GET_RSP
887 */ 842 */
888struct bfi_enet_stats { 843struct bfi_enet_stats {
diff --git a/drivers/net/ethernet/brocade/bna/bfi_reg.h b/drivers/net/ethernet/brocade/bna/bfi_reg.h
index 0e094fe46df..c49fa312ddb 100644
--- a/drivers/net/ethernet/brocade/bna/bfi_reg.h
+++ b/drivers/net/ethernet/brocade/bna/bfi_reg.h
@@ -221,9 +221,7 @@ enum {
221#define __PMM_1T_RESET_P 0x00000001 221#define __PMM_1T_RESET_P 0x00000001
222#define PMM_1T_RESET_REG_P1 0x00023c1c 222#define PMM_1T_RESET_REG_P1 0x00023c1c
223 223
224/** 224/* Brocade 1860 Adapter specific defines */
225 * Brocade 1860 Adapter specific defines
226 */
227#define CT2_PCI_CPQ_BASE 0x00030000 225#define CT2_PCI_CPQ_BASE 0x00030000
228#define CT2_PCI_APP_BASE 0x00030100 226#define CT2_PCI_APP_BASE 0x00030100
229#define CT2_PCI_ETH_BASE 0x00030400 227#define CT2_PCI_ETH_BASE 0x00030400
diff --git a/drivers/net/ethernet/brocade/bna/bna.h b/drivers/net/ethernet/brocade/bna/bna.h
index 4d7a5de08e1..ede532b4e9d 100644
--- a/drivers/net/ethernet/brocade/bna/bna.h
+++ b/drivers/net/ethernet/brocade/bna/bna.h
@@ -25,11 +25,7 @@
25 25
26extern const u32 bna_napi_dim_vector[][BNA_BIAS_T_MAX]; 26extern const u32 bna_napi_dim_vector[][BNA_BIAS_T_MAX];
27 27
28/** 28/* Macros and constants */
29 *
30 * Macros and constants
31 *
32 */
33 29
34#define BNA_IOC_TIMER_FREQ 200 30#define BNA_IOC_TIMER_FREQ 200
35 31
@@ -356,11 +352,7 @@ do { \
356 } \ 352 } \
357} while (0) 353} while (0)
358 354
359/** 355/* Inline functions */
360 *
361 * Inline functions
362 *
363 */
364 356
365static inline struct bna_mac *bna_mac_find(struct list_head *q, u8 *addr) 357static inline struct bna_mac *bna_mac_find(struct list_head *q, u8 *addr)
366{ 358{
@@ -377,15 +369,9 @@ static inline struct bna_mac *bna_mac_find(struct list_head *q, u8 *addr)
377 369
378#define bna_attr(_bna) (&(_bna)->ioceth.attr) 370#define bna_attr(_bna) (&(_bna)->ioceth.attr)
379 371
380/** 372/* Function prototypes */
381 *
382 * Function prototypes
383 *
384 */
385 373
386/** 374/* BNA */
387 * BNA
388 */
389 375
390/* FW response handlers */ 376/* FW response handlers */
391void bna_bfi_stats_clr_rsp(struct bna *bna, struct bfi_msgq_mhdr *msghdr); 377void bna_bfi_stats_clr_rsp(struct bna *bna, struct bfi_msgq_mhdr *msghdr);
@@ -413,24 +399,19 @@ struct bna_mcam_handle *bna_mcam_mod_handle_get(struct bna_mcam_mod *mod);
413void bna_mcam_mod_handle_put(struct bna_mcam_mod *mcam_mod, 399void bna_mcam_mod_handle_put(struct bna_mcam_mod *mcam_mod,
414 struct bna_mcam_handle *handle); 400 struct bna_mcam_handle *handle);
415 401
416/** 402/* MBOX */
417 * MBOX
418 */
419 403
420/* API for BNAD */ 404/* API for BNAD */
421void bna_mbox_handler(struct bna *bna, u32 intr_status); 405void bna_mbox_handler(struct bna *bna, u32 intr_status);
422 406
423/** 407/* ETHPORT */
424 * ETHPORT
425 */
426 408
427/* Callbacks for RX */ 409/* Callbacks for RX */
428void bna_ethport_cb_rx_started(struct bna_ethport *ethport); 410void bna_ethport_cb_rx_started(struct bna_ethport *ethport);
429void bna_ethport_cb_rx_stopped(struct bna_ethport *ethport); 411void bna_ethport_cb_rx_stopped(struct bna_ethport *ethport);
430 412
431/** 413/* TX MODULE AND TX */
432 * TX MODULE AND TX 414
433 */
434/* FW response handelrs */ 415/* FW response handelrs */
435void bna_bfi_tx_enet_start_rsp(struct bna_tx *tx, 416void bna_bfi_tx_enet_start_rsp(struct bna_tx *tx,
436 struct bfi_msgq_mhdr *msghdr); 417 struct bfi_msgq_mhdr *msghdr);
@@ -462,9 +443,7 @@ void bna_tx_disable(struct bna_tx *tx, enum bna_cleanup_type type,
462void bna_tx_cleanup_complete(struct bna_tx *tx); 443void bna_tx_cleanup_complete(struct bna_tx *tx);
463void bna_tx_coalescing_timeo_set(struct bna_tx *tx, int coalescing_timeo); 444void bna_tx_coalescing_timeo_set(struct bna_tx *tx, int coalescing_timeo);
464 445
465/** 446/* RX MODULE, RX, RXF */
466 * RX MODULE, RX, RXF
467 */
468 447
469/* FW response handlers */ 448/* FW response handlers */
470void bna_bfi_rx_enet_start_rsp(struct bna_rx *rx, 449void bna_bfi_rx_enet_start_rsp(struct bna_rx *rx,
@@ -522,9 +501,7 @@ bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode rxmode,
522void bna_rx_vlan_add(struct bna_rx *rx, int vlan_id); 501void bna_rx_vlan_add(struct bna_rx *rx, int vlan_id);
523void bna_rx_vlan_del(struct bna_rx *rx, int vlan_id); 502void bna_rx_vlan_del(struct bna_rx *rx, int vlan_id);
524void bna_rx_vlanfilter_enable(struct bna_rx *rx); 503void bna_rx_vlanfilter_enable(struct bna_rx *rx);
525/** 504/* ENET */
526 * ENET
527 */
528 505
529/* API for RX */ 506/* API for RX */
530int bna_enet_mtu_get(struct bna_enet *enet); 507int bna_enet_mtu_get(struct bna_enet *enet);
@@ -544,18 +521,14 @@ void bna_enet_mtu_set(struct bna_enet *enet, int mtu,
544 void (*cbfn)(struct bnad *)); 521 void (*cbfn)(struct bnad *));
545void bna_enet_perm_mac_get(struct bna_enet *enet, mac_t *mac); 522void bna_enet_perm_mac_get(struct bna_enet *enet, mac_t *mac);
546 523
547/** 524/* IOCETH */
548 * IOCETH
549 */
550 525
551/* APIs for BNAD */ 526/* APIs for BNAD */
552void bna_ioceth_enable(struct bna_ioceth *ioceth); 527void bna_ioceth_enable(struct bna_ioceth *ioceth);
553void bna_ioceth_disable(struct bna_ioceth *ioceth, 528void bna_ioceth_disable(struct bna_ioceth *ioceth,
554 enum bna_cleanup_type type); 529 enum bna_cleanup_type type);
555 530
556/** 531/* BNAD */
557 * BNAD
558 */
559 532
560/* Callbacks for ENET */ 533/* Callbacks for ENET */
561void bnad_cb_ethport_link_status(struct bnad *bnad, 534void bnad_cb_ethport_link_status(struct bnad *bnad,
diff --git a/drivers/net/ethernet/brocade/bna/bna_enet.c b/drivers/net/ethernet/brocade/bna/bna_enet.c
index 9ccc586e376..db14f69d63b 100644
--- a/drivers/net/ethernet/brocade/bna/bna_enet.c
+++ b/drivers/net/ethernet/brocade/bna/bna_enet.c
@@ -378,9 +378,8 @@ bna_msgq_rsp_handler(void *arg, struct bfi_msgq_mhdr *msghdr)
378 } 378 }
379} 379}
380 380
381/** 381/* ETHPORT */
382 * ETHPORT 382
383 */
384#define call_ethport_stop_cbfn(_ethport) \ 383#define call_ethport_stop_cbfn(_ethport) \
385do { \ 384do { \
386 if ((_ethport)->stop_cbfn) { \ 385 if ((_ethport)->stop_cbfn) { \
@@ -804,9 +803,8 @@ bna_ethport_cb_rx_stopped(struct bna_ethport *ethport)
804 } 803 }
805} 804}
806 805
807/** 806/* ENET */
808 * ENET 807
809 */
810#define bna_enet_chld_start(enet) \ 808#define bna_enet_chld_start(enet) \
811do { \ 809do { \
812 enum bna_tx_type tx_type = \ 810 enum bna_tx_type tx_type = \
@@ -1328,9 +1326,8 @@ bna_enet_perm_mac_get(struct bna_enet *enet, mac_t *mac)
1328 *mac = bfa_nw_ioc_get_mac(&enet->bna->ioceth.ioc); 1326 *mac = bfa_nw_ioc_get_mac(&enet->bna->ioceth.ioc);
1329} 1327}
1330 1328
1331/** 1329/* IOCETH */
1332 * IOCETH 1330
1333 */
1334#define enable_mbox_intr(_ioceth) \ 1331#define enable_mbox_intr(_ioceth) \
1335do { \ 1332do { \
1336 u32 intr_status; \ 1333 u32 intr_status; \
diff --git a/drivers/net/ethernet/brocade/bna/bna_hw_defs.h b/drivers/net/ethernet/brocade/bna/bna_hw_defs.h
index 4c6aab2a953..b8c4e21fbf4 100644
--- a/drivers/net/ethernet/brocade/bna/bna_hw_defs.h
+++ b/drivers/net/ethernet/brocade/bna/bna_hw_defs.h
@@ -16,20 +16,15 @@
16 * www.brocade.com 16 * www.brocade.com
17 */ 17 */
18 18
19/** 19/* File for interrupt macros and functions */
20 * File for interrupt macros and functions
21 */
22 20
23#ifndef __BNA_HW_DEFS_H__ 21#ifndef __BNA_HW_DEFS_H__
24#define __BNA_HW_DEFS_H__ 22#define __BNA_HW_DEFS_H__
25 23
26#include "bfi_reg.h" 24#include "bfi_reg.h"
27 25
28/** 26/* SW imposed limits */
29 * 27
30 * SW imposed limits
31 *
32 */
33#define BFI_ENET_DEF_TXQ 1 28#define BFI_ENET_DEF_TXQ 1
34#define BFI_ENET_DEF_RXP 1 29#define BFI_ENET_DEF_RXP 1
35#define BFI_ENET_DEF_UCAM 1 30#define BFI_ENET_DEF_UCAM 1
@@ -141,11 +136,8 @@
141} 136}
142 137
143#define bna_port_id_get(_bna) ((_bna)->ioceth.ioc.port_id) 138#define bna_port_id_get(_bna) ((_bna)->ioceth.ioc.port_id)
144/** 139
145 * 140/* Interrupt related bits, flags and macros */
146 * Interrupt related bits, flags and macros
147 *
148 */
149 141
150#define IB_STATUS_BITS 0x0000ffff 142#define IB_STATUS_BITS 0x0000ffff
151 143
@@ -280,11 +272,7 @@ do { \
280 (writel(BNA_DOORBELL_Q_PRD_IDX((_rcb)->producer_index), \ 272 (writel(BNA_DOORBELL_Q_PRD_IDX((_rcb)->producer_index), \
281 (_rcb)->q_dbell)); 273 (_rcb)->q_dbell));
282 274
283/** 275/* TxQ, RxQ, CQ related bits, offsets, macros */
284 *
285 * TxQ, RxQ, CQ related bits, offsets, macros
286 *
287 */
288 276
289/* TxQ Entry Opcodes */ 277/* TxQ Entry Opcodes */
290#define BNA_TXQ_WI_SEND (0x402) /* Single Frame Transmission */ 278#define BNA_TXQ_WI_SEND (0x402) /* Single Frame Transmission */
@@ -334,11 +322,7 @@ do { \
334 322
335#define BNA_CQ_EF_LOCAL (1 << 20) 323#define BNA_CQ_EF_LOCAL (1 << 20)
336 324
337/** 325/* Data structures */
338 *
339 * Data structures
340 *
341 */
342 326
343struct bna_reg_offset { 327struct bna_reg_offset {
344 u32 fn_int_status; 328 u32 fn_int_status;
@@ -371,8 +355,7 @@ struct bna_txq_wi_vector {
371 struct bna_dma_addr host_addr; /* Tx-Buf DMA addr */ 355 struct bna_dma_addr host_addr; /* Tx-Buf DMA addr */
372}; 356};
373 357
374/** 358/* TxQ Entry Structure
375 * TxQ Entry Structure
376 * 359 *
377 * BEWARE: Load values into this structure with correct endianess. 360 * BEWARE: Load values into this structure with correct endianess.
378 */ 361 */
diff --git a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
index 276fcb589f4..71144b396e0 100644
--- a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
+++ b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
@@ -18,9 +18,7 @@
18#include "bna.h" 18#include "bna.h"
19#include "bfi.h" 19#include "bfi.h"
20 20
21/** 21/* IB */
22 * IB
23 */
24static void 22static void
25bna_ib_coalescing_timeo_set(struct bna_ib *ib, u8 coalescing_timeo) 23bna_ib_coalescing_timeo_set(struct bna_ib *ib, u8 coalescing_timeo)
26{ 24{
@@ -29,9 +27,7 @@ bna_ib_coalescing_timeo_set(struct bna_ib *ib, u8 coalescing_timeo)
29 (u32)ib->coalescing_timeo, 0); 27 (u32)ib->coalescing_timeo, 0);
30} 28}
31 29
32/** 30/* RXF */
33 * RXF
34 */
35 31
36#define bna_rxf_vlan_cfg_soft_reset(rxf) \ 32#define bna_rxf_vlan_cfg_soft_reset(rxf) \
37do { \ 33do { \
@@ -1312,9 +1308,7 @@ bna_rxf_vlan_strip_cfg_apply(struct bna_rxf *rxf)
1312 return 0; 1308 return 0;
1313} 1309}
1314 1310
1315/** 1311/* RX */
1316 * RX
1317 */
1318 1312
1319#define BNA_GET_RXQS(qcfg) (((qcfg)->rxp_type == BNA_RXP_SINGLE) ? \ 1313#define BNA_GET_RXQS(qcfg) (((qcfg)->rxp_type == BNA_RXP_SINGLE) ? \
1320 (qcfg)->num_paths : ((qcfg)->num_paths * 2)) 1314 (qcfg)->num_paths : ((qcfg)->num_paths * 2))
@@ -2791,9 +2785,8 @@ const u32 bna_napi_dim_vector[BNA_LOAD_T_MAX][BNA_BIAS_T_MAX] = {
2791 {1, 2}, 2785 {1, 2},
2792}; 2786};
2793 2787
2794/** 2788/* TX */
2795 * TX 2789
2796 */
2797#define call_tx_stop_cbfn(tx) \ 2790#define call_tx_stop_cbfn(tx) \
2798do { \ 2791do { \
2799 if ((tx)->stop_cbfn) { \ 2792 if ((tx)->stop_cbfn) { \
diff --git a/drivers/net/ethernet/brocade/bna/bna_types.h b/drivers/net/ethernet/brocade/bna/bna_types.h
index e8d3ab7ea6c..d3eb8bddfb2 100644
--- a/drivers/net/ethernet/brocade/bna/bna_types.h
+++ b/drivers/net/ethernet/brocade/bna/bna_types.h
@@ -23,11 +23,7 @@
23#include "bfa_cee.h" 23#include "bfa_cee.h"
24#include "bfa_msgq.h" 24#include "bfa_msgq.h"
25 25
26/** 26/* Forward declarations */
27 *
28 * Forward declarations
29 *
30 */
31 27
32struct bna_mcam_handle; 28struct bna_mcam_handle;
33struct bna_txq; 29struct bna_txq;
@@ -40,11 +36,7 @@ struct bna_enet;
40struct bna; 36struct bna;
41struct bnad; 37struct bnad;
42 38
43/** 39/* Enums, primitive data types */
44 *
45 * Enums, primitive data types
46 *
47 */
48 40
49enum bna_status { 41enum bna_status {
50 BNA_STATUS_T_DISABLED = 0, 42 BNA_STATUS_T_DISABLED = 0,
@@ -331,11 +323,7 @@ struct bna_attr {
331 int max_rit_size; 323 int max_rit_size;
332}; 324};
333 325
334/** 326/* IOCEth */
335 *
336 * IOCEth
337 *
338 */
339 327
340struct bna_ioceth { 328struct bna_ioceth {
341 bfa_fsm_t fsm; 329 bfa_fsm_t fsm;
@@ -351,11 +339,7 @@ struct bna_ioceth {
351 struct bna *bna; 339 struct bna *bna;
352}; 340};
353 341
354/** 342/* Enet */
355 *
356 * Enet
357 *
358 */
359 343
360/* Pause configuration */ 344/* Pause configuration */
361struct bna_pause_config { 345struct bna_pause_config {
@@ -390,11 +374,7 @@ struct bna_enet {
390 struct bna *bna; 374 struct bna *bna;
391}; 375};
392 376
393/** 377/* Ethport */
394 *
395 * Ethport
396 *
397 */
398 378
399struct bna_ethport { 379struct bna_ethport {
400 bfa_fsm_t fsm; 380 bfa_fsm_t fsm;
@@ -419,11 +399,7 @@ struct bna_ethport {
419 struct bna *bna; 399 struct bna *bna;
420}; 400};
421 401
422/** 402/* Interrupt Block */
423 *
424 * Interrupt Block
425 *
426 */
427 403
428/* Doorbell structure */ 404/* Doorbell structure */
429struct bna_ib_dbell { 405struct bna_ib_dbell {
@@ -447,11 +423,7 @@ struct bna_ib {
447 int interpkt_timeo; 423 int interpkt_timeo;
448}; 424};
449 425
450/** 426/* Tx object */
451 *
452 * Tx object
453 *
454 */
455 427
456/* Tx datapath control structure */ 428/* Tx datapath control structure */
457#define BNA_Q_NAME_SIZE 16 429#define BNA_Q_NAME_SIZE 16
@@ -585,11 +557,7 @@ struct bna_tx_mod {
585 struct bna *bna; 557 struct bna *bna;
586}; 558};
587 559
588/** 560/* Rx object */
589 *
590 * Rx object
591 *
592 */
593 561
594/* Rx datapath control structure */ 562/* Rx datapath control structure */
595struct bna_rcb { 563struct bna_rcb {
@@ -898,11 +866,7 @@ struct bna_rx_mod {
898 u32 rid_mask; 866 u32 rid_mask;
899}; 867};
900 868
901/** 869/* CAM */
902 *
903 * CAM
904 *
905 */
906 870
907struct bna_ucam_mod { 871struct bna_ucam_mod {
908 struct bna_mac *ucmac; /* BFI_MAX_UCMAC entries */ 872 struct bna_mac *ucmac; /* BFI_MAX_UCMAC entries */
@@ -927,11 +891,7 @@ struct bna_mcam_mod {
927 struct bna *bna; 891 struct bna *bna;
928}; 892};
929 893
930/** 894/* Statistics */
931 *
932 * Statistics
933 *
934 */
935 895
936struct bna_stats { 896struct bna_stats {
937 struct bna_dma_addr hw_stats_dma; 897 struct bna_dma_addr hw_stats_dma;
@@ -949,11 +909,7 @@ struct bna_stats_mod {
949 struct bfi_enet_stats_req stats_clr; 909 struct bfi_enet_stats_req stats_clr;
950}; 910};
951 911
952/** 912/* BNA */
953 *
954 * BNA
955 *
956 */
957 913
958struct bna { 914struct bna {
959 struct bna_ident ident; 915 struct bna_ident ident;
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index 67cd2ed0306..b441f33258e 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -1302,8 +1302,7 @@ bnad_txrx_irq_alloc(struct bnad *bnad, enum bnad_intr_source src,
1302 return 0; 1302 return 0;
1303} 1303}
1304 1304
1305/** 1305/* NOTE: Should be called for MSIX only
1306 * NOTE: Should be called for MSIX only
1307 * Unregisters Tx MSIX vector(s) from the kernel 1306 * Unregisters Tx MSIX vector(s) from the kernel
1308 */ 1307 */
1309static void 1308static void
@@ -1322,8 +1321,7 @@ bnad_tx_msix_unregister(struct bnad *bnad, struct bnad_tx_info *tx_info,
1322 } 1321 }
1323} 1322}
1324 1323
1325/** 1324/* NOTE: Should be called for MSIX only
1326 * NOTE: Should be called for MSIX only
1327 * Registers Tx MSIX vector(s) and ISR(s), cookie with the kernel 1325 * Registers Tx MSIX vector(s) and ISR(s), cookie with the kernel
1328 */ 1326 */
1329static int 1327static int
@@ -1354,8 +1352,7 @@ err_return:
1354 return -1; 1352 return -1;
1355} 1353}
1356 1354
1357/** 1355/* NOTE: Should be called for MSIX only
1358 * NOTE: Should be called for MSIX only
1359 * Unregisters Rx MSIX vector(s) from the kernel 1356 * Unregisters Rx MSIX vector(s) from the kernel
1360 */ 1357 */
1361static void 1358static void
@@ -1375,8 +1372,7 @@ bnad_rx_msix_unregister(struct bnad *bnad, struct bnad_rx_info *rx_info,
1375 } 1372 }
1376} 1373}
1377 1374
1378/** 1375/* NOTE: Should be called for MSIX only
1379 * NOTE: Should be called for MSIX only
1380 * Registers Tx MSIX vector(s) and ISR(s), cookie with the kernel 1376 * Registers Tx MSIX vector(s) and ISR(s), cookie with the kernel
1381 */ 1377 */
1382static int 1378static int
diff --git a/drivers/net/ethernet/brocade/bna/bnad.h b/drivers/net/ethernet/brocade/bna/bnad.h
index 72742be1127..d7833922475 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.h
+++ b/drivers/net/ethernet/brocade/bna/bnad.h
@@ -389,9 +389,7 @@ extern void bnad_netdev_hwstats_fill(struct bnad *bnad,
389void bnad_debugfs_init(struct bnad *bnad); 389void bnad_debugfs_init(struct bnad *bnad);
390void bnad_debugfs_uninit(struct bnad *bnad); 390void bnad_debugfs_uninit(struct bnad *bnad);
391 391
392/** 392/* MACROS */
393 * MACROS
394 */
395/* To set & get the stats counters */ 393/* To set & get the stats counters */
396#define BNAD_UPDATE_CTR(_bnad, _ctr) \ 394#define BNAD_UPDATE_CTR(_bnad, _ctr) \
397 (((_bnad)->stats.drv_stats._ctr)++) 395 (((_bnad)->stats.drv_stats._ctr)++)
diff --git a/drivers/net/ethernet/brocade/bna/cna_fwimg.c b/drivers/net/ethernet/brocade/bna/cna_fwimg.c
index cfc22a64157..6a68e8d9330 100644
--- a/drivers/net/ethernet/brocade/bna/cna_fwimg.c
+++ b/drivers/net/ethernet/brocade/bna/cna_fwimg.c
@@ -67,10 +67,10 @@ bfa_cb_image_get_chunk(enum bfi_asic_gen asic_gen, u32 off)
67{ 67{
68 switch (asic_gen) { 68 switch (asic_gen) {
69 case BFI_ASIC_GEN_CT: 69 case BFI_ASIC_GEN_CT:
70 return (u32 *)(bfi_image_ct_cna + off); 70 return (bfi_image_ct_cna + off);
71 break; 71 break;
72 case BFI_ASIC_GEN_CT2: 72 case BFI_ASIC_GEN_CT2:
73 return (u32 *)(bfi_image_ct2_cna + off); 73 return (bfi_image_ct2_cna + off);
74 break; 74 break;
75 default: 75 default:
76 return NULL; 76 return NULL;
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index 1466bc4e3dd..033064b7b57 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -179,13 +179,16 @@ static void macb_handle_link_change(struct net_device *dev)
179 spin_unlock_irqrestore(&bp->lock, flags); 179 spin_unlock_irqrestore(&bp->lock, flags);
180 180
181 if (status_change) { 181 if (status_change) {
182 if (phydev->link) 182 if (phydev->link) {
183 netif_carrier_on(dev);
183 netdev_info(dev, "link up (%d/%s)\n", 184 netdev_info(dev, "link up (%d/%s)\n",
184 phydev->speed, 185 phydev->speed,
185 phydev->duplex == DUPLEX_FULL ? 186 phydev->duplex == DUPLEX_FULL ?
186 "Full" : "Half"); 187 "Full" : "Half");
187 else 188 } else {
189 netif_carrier_off(dev);
188 netdev_info(dev, "link down\n"); 190 netdev_info(dev, "link down\n");
191 }
189 } 192 }
190} 193}
191 194
@@ -1033,6 +1036,9 @@ static int macb_open(struct net_device *dev)
1033 1036
1034 netdev_dbg(bp->dev, "open\n"); 1037 netdev_dbg(bp->dev, "open\n");
1035 1038
1039 /* carrier starts down */
1040 netif_carrier_off(dev);
1041
1036 /* if the phy is not yet register, retry later*/ 1042 /* if the phy is not yet register, retry later*/
1037 if (!bp->phy_dev) 1043 if (!bp->phy_dev)
1038 return -EAGAIN; 1044 return -EAGAIN;
@@ -1406,6 +1412,8 @@ static int __init macb_probe(struct platform_device *pdev)
1406 1412
1407 platform_set_drvdata(pdev, dev); 1413 platform_set_drvdata(pdev, dev);
1408 1414
1415 netif_carrier_off(dev);
1416
1409 netdev_info(dev, "Cadence %s at 0x%08lx irq %d (%pM)\n", 1417 netdev_info(dev, "Cadence %s at 0x%08lx irq %d (%pM)\n",
1410 macb_is_gem(bp) ? "GEM" : "MACB", dev->base_addr, 1418 macb_is_gem(bp) ? "GEM" : "MACB", dev->base_addr,
1411 dev->irq, dev->dev_addr); 1419 dev->irq, dev->dev_addr);
@@ -1469,6 +1477,7 @@ static int macb_suspend(struct platform_device *pdev, pm_message_t state)
1469 struct net_device *netdev = platform_get_drvdata(pdev); 1477 struct net_device *netdev = platform_get_drvdata(pdev);
1470 struct macb *bp = netdev_priv(netdev); 1478 struct macb *bp = netdev_priv(netdev);
1471 1479
1480 netif_carrier_off(netdev);
1472 netif_device_detach(netdev); 1481 netif_device_detach(netdev);
1473 1482
1474 clk_disable(bp->hclk); 1483 clk_disable(bp->hclk);
diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c
index 11f667f6131..2b4b4f529ab 100644
--- a/drivers/net/ethernet/calxeda/xgmac.c
+++ b/drivers/net/ethernet/calxeda/xgmac.c
@@ -264,7 +264,7 @@
264#define XGMAC_OMR_FEF 0x00000080 /* Forward Error Frames */ 264#define XGMAC_OMR_FEF 0x00000080 /* Forward Error Frames */
265#define XGMAC_OMR_DT 0x00000040 /* Drop TCP/IP csum Errors */ 265#define XGMAC_OMR_DT 0x00000040 /* Drop TCP/IP csum Errors */
266#define XGMAC_OMR_RSF 0x00000020 /* RX FIFO Store and Forward */ 266#define XGMAC_OMR_RSF 0x00000020 /* RX FIFO Store and Forward */
267#define XGMAC_OMR_RTC 0x00000010 /* RX Threshhold Ctrl */ 267#define XGMAC_OMR_RTC_256 0x00000018 /* RX Threshhold Ctrl */
268#define XGMAC_OMR_RTC_MASK 0x00000018 /* RX Threshhold Ctrl MASK */ 268#define XGMAC_OMR_RTC_MASK 0x00000018 /* RX Threshhold Ctrl MASK */
269 269
270/* XGMAC HW Features Register */ 270/* XGMAC HW Features Register */
@@ -671,26 +671,23 @@ static void xgmac_rx_refill(struct xgmac_priv *priv)
671 671
672 p = priv->dma_rx + entry; 672 p = priv->dma_rx + entry;
673 673
674 if (priv->rx_skbuff[entry] != NULL) 674 if (priv->rx_skbuff[entry] == NULL) {
675 continue; 675 skb = __skb_dequeue(&priv->rx_recycle);
676 676 if (skb == NULL)
677 skb = __skb_dequeue(&priv->rx_recycle); 677 skb = netdev_alloc_skb(priv->dev, priv->dma_buf_sz);
678 if (skb == NULL) 678 if (unlikely(skb == NULL))
679 skb = netdev_alloc_skb(priv->dev, priv->dma_buf_sz); 679 break;
680 if (unlikely(skb == NULL)) 680
681 break; 681 priv->rx_skbuff[entry] = skb;
682 682 paddr = dma_map_single(priv->device, skb->data,
683 priv->rx_skbuff[entry] = skb; 683 priv->dma_buf_sz, DMA_FROM_DEVICE);
684 paddr = dma_map_single(priv->device, skb->data, 684 desc_set_buf_addr(p, paddr, priv->dma_buf_sz);
685 priv->dma_buf_sz, DMA_FROM_DEVICE); 685 }
686 desc_set_buf_addr(p, paddr, priv->dma_buf_sz);
687 686
688 netdev_dbg(priv->dev, "rx ring: head %d, tail %d\n", 687 netdev_dbg(priv->dev, "rx ring: head %d, tail %d\n",
689 priv->rx_head, priv->rx_tail); 688 priv->rx_head, priv->rx_tail);
690 689
691 priv->rx_head = dma_ring_incr(priv->rx_head, DMA_RX_RING_SZ); 690 priv->rx_head = dma_ring_incr(priv->rx_head, DMA_RX_RING_SZ);
692 /* Ensure descriptor is in memory before handing to h/w */
693 wmb();
694 desc_set_rx_owner(p); 691 desc_set_rx_owner(p);
695 } 692 }
696} 693}
@@ -933,6 +930,7 @@ static void xgmac_tx_err(struct xgmac_priv *priv)
933 desc_init_tx_desc(priv->dma_tx, DMA_TX_RING_SZ); 930 desc_init_tx_desc(priv->dma_tx, DMA_TX_RING_SZ);
934 priv->tx_tail = 0; 931 priv->tx_tail = 0;
935 priv->tx_head = 0; 932 priv->tx_head = 0;
933 writel(priv->dma_tx_phy, priv->base + XGMAC_DMA_TX_BASE_ADDR);
936 writel(reg | DMA_CONTROL_ST, priv->base + XGMAC_DMA_CONTROL); 934 writel(reg | DMA_CONTROL_ST, priv->base + XGMAC_DMA_CONTROL);
937 935
938 writel(DMA_STATUS_TU | DMA_STATUS_TPS | DMA_STATUS_NIS | DMA_STATUS_AIS, 936 writel(DMA_STATUS_TU | DMA_STATUS_TPS | DMA_STATUS_NIS | DMA_STATUS_AIS,
@@ -972,7 +970,7 @@ static int xgmac_hw_init(struct net_device *dev)
972 writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_INTR_ENA); 970 writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_INTR_ENA);
973 971
974 /* XGMAC requires AXI bus init. This is a 'magic number' for now */ 972 /* XGMAC requires AXI bus init. This is a 'magic number' for now */
975 writel(0x000100E, ioaddr + XGMAC_DMA_AXI_BUS); 973 writel(0x0077000E, ioaddr + XGMAC_DMA_AXI_BUS);
976 974
977 ctrl |= XGMAC_CONTROL_DDIC | XGMAC_CONTROL_JE | XGMAC_CONTROL_ACS | 975 ctrl |= XGMAC_CONTROL_DDIC | XGMAC_CONTROL_JE | XGMAC_CONTROL_ACS |
978 XGMAC_CONTROL_CAR; 976 XGMAC_CONTROL_CAR;
@@ -984,7 +982,8 @@ static int xgmac_hw_init(struct net_device *dev)
984 writel(value, ioaddr + XGMAC_DMA_CONTROL); 982 writel(value, ioaddr + XGMAC_DMA_CONTROL);
985 983
986 /* Set the HW DMA mode and the COE */ 984 /* Set the HW DMA mode and the COE */
987 writel(XGMAC_OMR_TSF | XGMAC_OMR_RSF | XGMAC_OMR_RFD | XGMAC_OMR_RFA, 985 writel(XGMAC_OMR_TSF | XGMAC_OMR_RFD | XGMAC_OMR_RFA |
986 XGMAC_OMR_RTC_256,
988 ioaddr + XGMAC_OMR); 987 ioaddr + XGMAC_OMR);
989 988
990 /* Reset the MMC counters */ 989 /* Reset the MMC counters */
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index abb6ce7c1b7..9b0874957df 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -3050,7 +3050,7 @@ static struct pci_error_handlers t3_err_handler = {
3050static void set_nqsets(struct adapter *adap) 3050static void set_nqsets(struct adapter *adap)
3051{ 3051{
3052 int i, j = 0; 3052 int i, j = 0;
3053 int num_cpus = num_online_cpus(); 3053 int num_cpus = netif_get_num_default_rss_queues();
3054 int hwports = adap->params.nports; 3054 int hwports = adap->params.nports;
3055 int nqsets = adap->msix_nvectors - 1; 3055 int nqsets = adap->msix_nvectors - 1;
3056 3056
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
index 65e4b280619..2dbbcbb450d 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
@@ -62,7 +62,9 @@ static const unsigned int MAX_ATIDS = 64 * 1024;
62static const unsigned int ATID_BASE = 0x10000; 62static const unsigned int ATID_BASE = 0x10000;
63 63
64static void cxgb_neigh_update(struct neighbour *neigh); 64static void cxgb_neigh_update(struct neighbour *neigh);
65static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new); 65static void cxgb_redirect(struct dst_entry *old, struct neighbour *old_neigh,
66 struct dst_entry *new, struct neighbour *new_neigh,
67 const void *daddr);
66 68
67static inline int offload_activated(struct t3cdev *tdev) 69static inline int offload_activated(struct t3cdev *tdev)
68{ 70{
@@ -575,7 +577,7 @@ static void t3_process_tid_release_list(struct work_struct *work)
575 if (!skb) { 577 if (!skb) {
576 spin_lock_bh(&td->tid_release_lock); 578 spin_lock_bh(&td->tid_release_lock);
577 p->ctx = (void *)td->tid_release_list; 579 p->ctx = (void *)td->tid_release_list;
578 td->tid_release_list = (struct t3c_tid_entry *)p; 580 td->tid_release_list = p;
579 break; 581 break;
580 } 582 }
581 mk_tid_release(skb, p - td->tid_maps.tid_tab); 583 mk_tid_release(skb, p - td->tid_maps.tid_tab);
@@ -968,8 +970,10 @@ static int nb_callback(struct notifier_block *self, unsigned long event,
968 } 970 }
969 case (NETEVENT_REDIRECT):{ 971 case (NETEVENT_REDIRECT):{
970 struct netevent_redirect *nr = ctx; 972 struct netevent_redirect *nr = ctx;
971 cxgb_redirect(nr->old, nr->new); 973 cxgb_redirect(nr->old, nr->old_neigh,
972 cxgb_neigh_update(dst_get_neighbour_noref(nr->new)); 974 nr->new, nr->new_neigh,
975 nr->daddr);
976 cxgb_neigh_update(nr->new_neigh);
973 break; 977 break;
974 } 978 }
975 default: 979 default:
@@ -1107,10 +1111,11 @@ static void set_l2t_ix(struct t3cdev *tdev, u32 tid, struct l2t_entry *e)
1107 tdev->send(tdev, skb); 1111 tdev->send(tdev, skb);
1108} 1112}
1109 1113
1110static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new) 1114static void cxgb_redirect(struct dst_entry *old, struct neighbour *old_neigh,
1115 struct dst_entry *new, struct neighbour *new_neigh,
1116 const void *daddr)
1111{ 1117{
1112 struct net_device *olddev, *newdev; 1118 struct net_device *olddev, *newdev;
1113 struct neighbour *n;
1114 struct tid_info *ti; 1119 struct tid_info *ti;
1115 struct t3cdev *tdev; 1120 struct t3cdev *tdev;
1116 u32 tid; 1121 u32 tid;
@@ -1118,15 +1123,8 @@ static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new)
1118 struct l2t_entry *e; 1123 struct l2t_entry *e;
1119 struct t3c_tid_entry *te; 1124 struct t3c_tid_entry *te;
1120 1125
1121 n = dst_get_neighbour_noref(old); 1126 olddev = old_neigh->dev;
1122 if (!n) 1127 newdev = new_neigh->dev;
1123 return;
1124 olddev = n->dev;
1125
1126 n = dst_get_neighbour_noref(new);
1127 if (!n)
1128 return;
1129 newdev = n->dev;
1130 1128
1131 if (!is_offloading(olddev)) 1129 if (!is_offloading(olddev))
1132 return; 1130 return;
@@ -1144,7 +1142,7 @@ static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new)
1144 } 1142 }
1145 1143
1146 /* Add new L2T entry */ 1144 /* Add new L2T entry */
1147 e = t3_l2t_get(tdev, new, newdev); 1145 e = t3_l2t_get(tdev, new, newdev, daddr);
1148 if (!e) { 1146 if (!e) {
1149 printk(KERN_ERR "%s: couldn't allocate new l2t entry!\n", 1147 printk(KERN_ERR "%s: couldn't allocate new l2t entry!\n",
1150 __func__); 1148 __func__);
diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.c b/drivers/net/ethernet/chelsio/cxgb3/l2t.c
index 3fa3c8833ed..8d53438638b 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.c
@@ -299,7 +299,7 @@ static inline void reuse_entry(struct l2t_entry *e, struct neighbour *neigh)
299} 299}
300 300
301struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct dst_entry *dst, 301struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct dst_entry *dst,
302 struct net_device *dev) 302 struct net_device *dev, const void *daddr)
303{ 303{
304 struct l2t_entry *e = NULL; 304 struct l2t_entry *e = NULL;
305 struct neighbour *neigh; 305 struct neighbour *neigh;
@@ -311,7 +311,7 @@ struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct dst_entry *dst,
311 int smt_idx; 311 int smt_idx;
312 312
313 rcu_read_lock(); 313 rcu_read_lock();
314 neigh = dst_get_neighbour_noref(dst); 314 neigh = dst_neigh_lookup(dst, daddr);
315 if (!neigh) 315 if (!neigh)
316 goto done_rcu; 316 goto done_rcu;
317 317
@@ -360,6 +360,8 @@ struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct dst_entry *dst,
360done_unlock: 360done_unlock:
361 write_unlock_bh(&d->lock); 361 write_unlock_bh(&d->lock);
362done_rcu: 362done_rcu:
363 if (neigh)
364 neigh_release(neigh);
363 rcu_read_unlock(); 365 rcu_read_unlock();
364 return e; 366 return e;
365} 367}
diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
index c4e86436975..8cffcdfd567 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h
+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
@@ -110,7 +110,7 @@ static inline void set_arp_failure_handler(struct sk_buff *skb,
110void t3_l2e_free(struct l2t_data *d, struct l2t_entry *e); 110void t3_l2e_free(struct l2t_data *d, struct l2t_entry *e);
111void t3_l2t_update(struct t3cdev *dev, struct neighbour *neigh); 111void t3_l2t_update(struct t3cdev *dev, struct neighbour *neigh);
112struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct dst_entry *dst, 112struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct dst_entry *dst,
113 struct net_device *dev); 113 struct net_device *dev, const void *daddr);
114int t3_l2t_send_slow(struct t3cdev *dev, struct sk_buff *skb, 114int t3_l2t_send_slow(struct t3cdev *dev, struct sk_buff *skb,
115 struct l2t_entry *e); 115 struct l2t_entry *e);
116void t3_l2t_send_event(struct t3cdev *dev, struct l2t_entry *e); 116void t3_l2t_send_event(struct t3cdev *dev, struct l2t_entry *e);
diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c
index cfb60e1f51d..dd901c5061b 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c
@@ -2877,7 +2877,7 @@ static void sge_timer_tx(unsigned long data)
2877 mod_timer(&qs->tx_reclaim_timer, jiffies + next_period); 2877 mod_timer(&qs->tx_reclaim_timer, jiffies + next_period);
2878} 2878}
2879 2879
2880/* 2880/**
2881 * sge_timer_rx - perform periodic maintenance of an SGE qset 2881 * sge_timer_rx - perform periodic maintenance of an SGE qset
2882 * @data: the SGE queue set to maintain 2882 * @data: the SGE queue set to maintain
2883 * 2883 *
diff --git a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
index 44ac2f40b64..bff8a3cdd3d 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
@@ -1076,7 +1076,7 @@ static int t3_flash_erase_sectors(struct adapter *adapter, int start, int end)
1076 return 0; 1076 return 0;
1077} 1077}
1078 1078
1079/* 1079/**
1080 * t3_load_fw - download firmware 1080 * t3_load_fw - download firmware
1081 * @adapter: the adapter 1081 * @adapter: the adapter
1082 * @fw_data: the firmware image to write 1082 * @fw_data: the firmware image to write
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index e1f96fbb48c..5ed49af23d6 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -3493,8 +3493,8 @@ static void __devinit cfg_queues(struct adapter *adap)
3493 */ 3493 */
3494 if (n10g) 3494 if (n10g)
3495 q10g = (MAX_ETH_QSETS - (adap->params.nports - n10g)) / n10g; 3495 q10g = (MAX_ETH_QSETS - (adap->params.nports - n10g)) / n10g;
3496 if (q10g > num_online_cpus()) 3496 if (q10g > netif_get_num_default_rss_queues())
3497 q10g = num_online_cpus(); 3497 q10g = netif_get_num_default_rss_queues();
3498 3498
3499 for_each_port(adap, i) { 3499 for_each_port(adap, i) {
3500 struct port_info *pi = adap2pinfo(adap, i); 3500 struct port_info *pi = adap2pinfo(adap, i);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index e111d974afd..8596acaa402 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -753,7 +753,7 @@ static void write_sgl(const struct sk_buff *skb, struct sge_txq *q,
753 end = (void *)q->desc + part1; 753 end = (void *)q->desc + part1;
754 } 754 }
755 if ((uintptr_t)end & 8) /* 0-pad to multiple of 16 */ 755 if ((uintptr_t)end & 8) /* 0-pad to multiple of 16 */
756 *(u64 *)end = 0; 756 *end = 0;
757} 757}
758 758
759/** 759/**
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 32e1dd566a1..fa947dfa4c3 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -2010,7 +2010,7 @@ int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
2010 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 2010 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2011} 2011}
2012 2012
2013/* 2013/**
2014 * t4_mem_win_read_len - read memory through PCIE memory window 2014 * t4_mem_win_read_len - read memory through PCIE memory window
2015 * @adap: the adapter 2015 * @adap: the adapter
2016 * @addr: address of first byte requested aligned on 32b. 2016 * @addr: address of first byte requested aligned on 32b.
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index 25e3308fc9d..9dad56101e2 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -418,7 +418,7 @@ static int fwevtq_handler(struct sge_rspq *rspq, const __be64 *rsp,
418 * restart a TX Ethernet Queue which was stopped for lack of 418 * restart a TX Ethernet Queue which was stopped for lack of
419 * free TX Queue Descriptors ... 419 * free TX Queue Descriptors ...
420 */ 420 */
421 const struct cpl_sge_egr_update *p = (void *)cpl; 421 const struct cpl_sge_egr_update *p = cpl;
422 unsigned int qid = EGR_QID(be32_to_cpu(p->opcode_qid)); 422 unsigned int qid = EGR_QID(be32_to_cpu(p->opcode_qid));
423 struct sge *s = &adapter->sge; 423 struct sge *s = &adapter->sge;
424 struct sge_txq *tq; 424 struct sge_txq *tq;
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index 0bd585bba39..f2d1ecdcaf9 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -934,7 +934,7 @@ static void write_sgl(const struct sk_buff *skb, struct sge_txq *tq,
934 end = (void *)tq->desc + part1; 934 end = (void *)tq->desc + part1;
935 } 935 }
936 if ((uintptr_t)end & 8) /* 0-pad to multiple of 16 */ 936 if ((uintptr_t)end & 8) /* 0-pad to multiple of 16 */
937 *(u64 *)end = 0; 937 *end = 0;
938} 938}
939 939
940/** 940/**
@@ -1323,8 +1323,7 @@ int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev)
1323 */ 1323 */
1324 if (unlikely((void *)sgl == (void *)tq->stat)) { 1324 if (unlikely((void *)sgl == (void *)tq->stat)) {
1325 sgl = (void *)tq->desc; 1325 sgl = (void *)tq->desc;
1326 end = (void *)((void *)tq->desc + 1326 end = ((void *)tq->desc + ((void *)end - (void *)tq->stat));
1327 ((void *)end - (void *)tq->stat));
1328 } 1327 }
1329 1328
1330 write_sgl(skb, tq, sgl, end, 0, addr); 1329 write_sgl(skb, tq, sgl, end, 0, addr);
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 8132c785cea..ad1468b3ab9 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -1300,8 +1300,6 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
1300 skb->ip_summed = CHECKSUM_COMPLETE; 1300 skb->ip_summed = CHECKSUM_COMPLETE;
1301 } 1301 }
1302 1302
1303 skb->dev = netdev;
1304
1305 if (vlan_stripped) 1303 if (vlan_stripped)
1306 __vlan_hwaccel_put_tag(skb, vlan_tci); 1304 __vlan_hwaccel_put_tag(skb, vlan_tci);
1307 1305
diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
index d3cd489d11a..f879e922484 100644
--- a/drivers/net/ethernet/dec/tulip/de4x5.c
+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
@@ -3973,7 +3973,7 @@ DevicePresent(struct net_device *dev, u_long aprom_addr)
3973 tmp = srom_rd(aprom_addr, i); 3973 tmp = srom_rd(aprom_addr, i);
3974 *p++ = cpu_to_le16(tmp); 3974 *p++ = cpu_to_le16(tmp);
3975 } 3975 }
3976 de4x5_dbg_srom((struct de4x5_srom *)&lp->srom); 3976 de4x5_dbg_srom(&lp->srom);
3977 } 3977 }
3978} 3978}
3979 3979
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index c5c4c0e83bd..0490a04ca0b 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -34,7 +34,7 @@
34#include "be_hw.h" 34#include "be_hw.h"
35#include "be_roce.h" 35#include "be_roce.h"
36 36
37#define DRV_VER "4.2.220u" 37#define DRV_VER "4.2.248.0u"
38#define DRV_NAME "be2net" 38#define DRV_NAME "be2net"
39#define BE_NAME "ServerEngines BladeEngine2 10Gbps NIC" 39#define BE_NAME "ServerEngines BladeEngine2 10Gbps NIC"
40#define BE3_NAME "ServerEngines BladeEngine3 10Gbps NIC" 40#define BE3_NAME "ServerEngines BladeEngine3 10Gbps NIC"
@@ -573,6 +573,11 @@ static inline u8 is_udp_pkt(struct sk_buff *skb)
573 return val; 573 return val;
574} 574}
575 575
576static inline bool is_ipv4_pkt(struct sk_buff *skb)
577{
578 return skb->protocol == htons(ETH_P_IP) && ip_hdr(skb)->version == 4;
579}
580
576static inline void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac) 581static inline void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
577{ 582{
578 u32 addr; 583 u32 addr;
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 921c2082af4..5eab791b716 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -1132,7 +1132,7 @@ err:
1132 * Uses MCCQ 1132 * Uses MCCQ
1133 */ 1133 */
1134int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags, 1134int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
1135 u8 *mac, u32 *if_handle, u32 *pmac_id, u32 domain) 1135 u32 *if_handle, u32 domain)
1136{ 1136{
1137 struct be_mcc_wrb *wrb; 1137 struct be_mcc_wrb *wrb;
1138 struct be_cmd_req_if_create *req; 1138 struct be_cmd_req_if_create *req;
@@ -1152,17 +1152,13 @@ int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
1152 req->hdr.domain = domain; 1152 req->hdr.domain = domain;
1153 req->capability_flags = cpu_to_le32(cap_flags); 1153 req->capability_flags = cpu_to_le32(cap_flags);
1154 req->enable_flags = cpu_to_le32(en_flags); 1154 req->enable_flags = cpu_to_le32(en_flags);
1155 if (mac) 1155
1156 memcpy(req->mac_addr, mac, ETH_ALEN); 1156 req->pmac_invalid = true;
1157 else
1158 req->pmac_invalid = true;
1159 1157
1160 status = be_mcc_notify_wait(adapter); 1158 status = be_mcc_notify_wait(adapter);
1161 if (!status) { 1159 if (!status) {
1162 struct be_cmd_resp_if_create *resp = embedded_payload(wrb); 1160 struct be_cmd_resp_if_create *resp = embedded_payload(wrb);
1163 *if_handle = le32_to_cpu(resp->interface_id); 1161 *if_handle = le32_to_cpu(resp->interface_id);
1164 if (mac)
1165 *pmac_id = le32_to_cpu(resp->pmac_id);
1166 } 1162 }
1167 1163
1168err: 1164err:
@@ -2330,8 +2326,8 @@ err:
2330} 2326}
2331 2327
2332/* Uses synchronous MCCQ */ 2328/* Uses synchronous MCCQ */
2333int be_cmd_get_mac_from_list(struct be_adapter *adapter, u32 domain, 2329int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
2334 bool *pmac_id_active, u32 *pmac_id, u8 *mac) 2330 bool *pmac_id_active, u32 *pmac_id, u8 domain)
2335{ 2331{
2336 struct be_mcc_wrb *wrb; 2332 struct be_mcc_wrb *wrb;
2337 struct be_cmd_req_get_mac_list *req; 2333 struct be_cmd_req_get_mac_list *req;
@@ -2376,8 +2372,9 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u32 domain,
2376 get_mac_list_cmd.va; 2372 get_mac_list_cmd.va;
2377 mac_count = resp->true_mac_count + resp->pseudo_mac_count; 2373 mac_count = resp->true_mac_count + resp->pseudo_mac_count;
2378 /* Mac list returned could contain one or more active mac_ids 2374 /* Mac list returned could contain one or more active mac_ids
2379 * or one or more pseudo permanant mac addresses. If an active 2375 * or one or more true or pseudo permanant mac addresses.
2380 * mac_id is present, return first active mac_id found 2376 * If an active mac_id is present, return first active mac_id
2377 * found.
2381 */ 2378 */
2382 for (i = 0; i < mac_count; i++) { 2379 for (i = 0; i < mac_count; i++) {
2383 struct get_list_macaddr *mac_entry; 2380 struct get_list_macaddr *mac_entry;
@@ -2396,7 +2393,7 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u32 domain,
2396 goto out; 2393 goto out;
2397 } 2394 }
2398 } 2395 }
2399 /* If no active mac_id found, return first pseudo mac addr */ 2396 /* If no active mac_id found, return first mac addr */
2400 *pmac_id_active = false; 2397 *pmac_id_active = false;
2401 memcpy(mac, resp->macaddr_list[0].mac_addr_id.macaddr, 2398 memcpy(mac, resp->macaddr_list[0].mac_addr_id.macaddr,
2402 ETH_ALEN); 2399 ETH_ALEN);
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index b3f3fc3d132..3c938f55c00 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -1664,8 +1664,7 @@ extern int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
1664extern int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, 1664extern int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id,
1665 int pmac_id, u32 domain); 1665 int pmac_id, u32 domain);
1666extern int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, 1666extern int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags,
1667 u32 en_flags, u8 *mac, u32 *if_handle, u32 *pmac_id, 1667 u32 en_flags, u32 *if_handle, u32 domain);
1668 u32 domain);
1669extern int be_cmd_if_destroy(struct be_adapter *adapter, int if_handle, 1668extern int be_cmd_if_destroy(struct be_adapter *adapter, int if_handle,
1670 u32 domain); 1669 u32 domain);
1671extern int be_cmd_eq_create(struct be_adapter *adapter, 1670extern int be_cmd_eq_create(struct be_adapter *adapter,
@@ -1751,8 +1750,9 @@ extern int be_cmd_get_cntl_attributes(struct be_adapter *adapter);
1751extern int be_cmd_req_native_mode(struct be_adapter *adapter); 1750extern int be_cmd_req_native_mode(struct be_adapter *adapter);
1752extern int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size); 1751extern int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size);
1753extern void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf); 1752extern void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf);
1754extern int be_cmd_get_mac_from_list(struct be_adapter *adapter, u32 domain, 1753extern int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
1755 bool *pmac_id_active, u32 *pmac_id, u8 *mac); 1754 bool *pmac_id_active, u32 *pmac_id,
1755 u8 domain);
1756extern int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array, 1756extern int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
1757 u8 mac_count, u32 domain); 1757 u8 mac_count, u32 domain);
1758extern int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid, 1758extern int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
diff --git a/drivers/net/ethernet/emulex/benet/be_hw.h b/drivers/net/ethernet/emulex/benet/be_hw.h
index d9fb0c501fa..7c8a710eac2 100644
--- a/drivers/net/ethernet/emulex/benet/be_hw.h
+++ b/drivers/net/ethernet/emulex/benet/be_hw.h
@@ -58,8 +58,6 @@
58 58
59#define SLI_PORT_CONTROL_IP_MASK 0x08000000 59#define SLI_PORT_CONTROL_IP_MASK 0x08000000
60 60
61#define PCICFG_CUST_SCRATCHPAD_CSR 0x1EC
62
63/********* Memory BAR register ************/ 61/********* Memory BAR register ************/
64#define PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET 0xfc 62#define PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET 0xfc
65/* Host Interrupt Enable, if set interrupts are enabled although "PCI Interrupt 63/* Host Interrupt Enable, if set interrupts are enabled although "PCI Interrupt
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 501dfa9c88e..2141bd78475 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -558,6 +558,7 @@ static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
558 wrb->frag_pa_hi = upper_32_bits(addr); 558 wrb->frag_pa_hi = upper_32_bits(addr);
559 wrb->frag_pa_lo = addr & 0xFFFFFFFF; 559 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
560 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK; 560 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
561 wrb->rsvd0 = 0;
561} 562}
562 563
563static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter, 564static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
@@ -576,6 +577,11 @@ static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
576 return vlan_tag; 577 return vlan_tag;
577} 578}
578 579
580static int be_vlan_tag_chk(struct be_adapter *adapter, struct sk_buff *skb)
581{
582 return vlan_tx_tag_present(skb) || adapter->pvid;
583}
584
579static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr, 585static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
580 struct sk_buff *skb, u32 wrb_cnt, u32 len) 586 struct sk_buff *skb, u32 wrb_cnt, u32 len)
581{ 587{
@@ -703,33 +709,56 @@ dma_err:
703 return 0; 709 return 0;
704} 710}
705 711
712static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
713 struct sk_buff *skb)
714{
715 u16 vlan_tag = 0;
716
717 skb = skb_share_check(skb, GFP_ATOMIC);
718 if (unlikely(!skb))
719 return skb;
720
721 if (vlan_tx_tag_present(skb)) {
722 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
723 __vlan_put_tag(skb, vlan_tag);
724 skb->vlan_tci = 0;
725 }
726
727 return skb;
728}
729
706static netdev_tx_t be_xmit(struct sk_buff *skb, 730static netdev_tx_t be_xmit(struct sk_buff *skb,
707 struct net_device *netdev) 731 struct net_device *netdev)
708{ 732{
709 struct be_adapter *adapter = netdev_priv(netdev); 733 struct be_adapter *adapter = netdev_priv(netdev);
710 struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)]; 734 struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
711 struct be_queue_info *txq = &txo->q; 735 struct be_queue_info *txq = &txo->q;
736 struct iphdr *ip = NULL;
712 u32 wrb_cnt = 0, copied = 0; 737 u32 wrb_cnt = 0, copied = 0;
713 u32 start = txq->head; 738 u32 start = txq->head, eth_hdr_len;
714 bool dummy_wrb, stopped = false; 739 bool dummy_wrb, stopped = false;
715 740
716 /* For vlan tagged pkts, BE 741 eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
717 * 1) calculates checksum even when CSO is not requested 742 VLAN_ETH_HLEN : ETH_HLEN;
718 * 2) calculates checksum wrongly for padded pkt less than 743
719 * 60 bytes long. 744 /* HW has a bug which considers padding bytes as legal
720 * As a workaround disable TX vlan offloading in such cases. 745 * and modifies the IPv4 hdr's 'tot_len' field
721 */ 746 */
722 if (unlikely(vlan_tx_tag_present(skb) && 747 if (skb->len <= 60 && be_vlan_tag_chk(adapter, skb) &&
723 (skb->ip_summed != CHECKSUM_PARTIAL || skb->len <= 60))) { 748 is_ipv4_pkt(skb)) {
724 skb = skb_share_check(skb, GFP_ATOMIC); 749 ip = (struct iphdr *)ip_hdr(skb);
725 if (unlikely(!skb)) 750 pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
726 goto tx_drop; 751 }
727 752
728 skb = __vlan_put_tag(skb, be_get_tx_vlan_tag(adapter, skb)); 753 /* HW has a bug wherein it will calculate CSUM for VLAN
754 * pkts even though it is disabled.
755 * Manually insert VLAN in pkt.
756 */
757 if (skb->ip_summed != CHECKSUM_PARTIAL &&
758 be_vlan_tag_chk(adapter, skb)) {
759 skb = be_insert_vlan_in_pkt(adapter, skb);
729 if (unlikely(!skb)) 760 if (unlikely(!skb))
730 goto tx_drop; 761 goto tx_drop;
731
732 skb->vlan_tci = 0;
733 } 762 }
734 763
735 wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb); 764 wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
@@ -786,19 +815,12 @@ static int be_change_mtu(struct net_device *netdev, int new_mtu)
786 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE. 815 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
787 * If the user configures more, place BE in vlan promiscuous mode. 816 * If the user configures more, place BE in vlan promiscuous mode.
788 */ 817 */
789static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num) 818static int be_vid_config(struct be_adapter *adapter)
790{ 819{
791 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf_num]; 820 u16 vids[BE_NUM_VLANS_SUPPORTED];
792 u16 vtag[BE_NUM_VLANS_SUPPORTED]; 821 u16 num = 0, i;
793 u16 ntags = 0, i;
794 int status = 0; 822 int status = 0;
795 823
796 if (vf) {
797 vtag[0] = cpu_to_le16(vf_cfg->vlan_tag);
798 status = be_cmd_vlan_config(adapter, vf_cfg->if_handle, vtag,
799 1, 1, 0);
800 }
801
802 /* No need to further configure vids if in promiscuous mode */ 824 /* No need to further configure vids if in promiscuous mode */
803 if (adapter->promiscuous) 825 if (adapter->promiscuous)
804 return 0; 826 return 0;
@@ -809,10 +831,10 @@ static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
809 /* Construct VLAN Table to give to HW */ 831 /* Construct VLAN Table to give to HW */
810 for (i = 0; i < VLAN_N_VID; i++) 832 for (i = 0; i < VLAN_N_VID; i++)
811 if (adapter->vlan_tag[i]) 833 if (adapter->vlan_tag[i])
812 vtag[ntags++] = cpu_to_le16(i); 834 vids[num++] = cpu_to_le16(i);
813 835
814 status = be_cmd_vlan_config(adapter, adapter->if_handle, 836 status = be_cmd_vlan_config(adapter, adapter->if_handle,
815 vtag, ntags, 1, 0); 837 vids, num, 1, 0);
816 838
817 /* Set to VLAN promisc mode as setting VLAN filter failed */ 839 /* Set to VLAN promisc mode as setting VLAN filter failed */
818 if (status) { 840 if (status) {
@@ -841,7 +863,7 @@ static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
841 863
842 adapter->vlan_tag[vid] = 1; 864 adapter->vlan_tag[vid] = 1;
843 if (adapter->vlans_added <= (adapter->max_vlans + 1)) 865 if (adapter->vlans_added <= (adapter->max_vlans + 1))
844 status = be_vid_config(adapter, false, 0); 866 status = be_vid_config(adapter);
845 867
846 if (!status) 868 if (!status)
847 adapter->vlans_added++; 869 adapter->vlans_added++;
@@ -863,7 +885,7 @@ static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
863 885
864 adapter->vlan_tag[vid] = 0; 886 adapter->vlan_tag[vid] = 0;
865 if (adapter->vlans_added <= adapter->max_vlans) 887 if (adapter->vlans_added <= adapter->max_vlans)
866 status = be_vid_config(adapter, false, 0); 888 status = be_vid_config(adapter);
867 889
868 if (!status) 890 if (!status)
869 adapter->vlans_added--; 891 adapter->vlans_added--;
@@ -890,7 +912,7 @@ static void be_set_rx_mode(struct net_device *netdev)
890 be_cmd_rx_filter(adapter, IFF_PROMISC, OFF); 912 be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
891 913
892 if (adapter->vlans_added) 914 if (adapter->vlans_added)
893 be_vid_config(adapter, false, 0); 915 be_vid_config(adapter);
894 } 916 }
895 917
896 /* Enable multicast promisc if num configured exceeds what we support */ 918 /* Enable multicast promisc if num configured exceeds what we support */
@@ -1057,13 +1079,16 @@ static int be_find_vfs(struct be_adapter *adapter, int vf_state)
1057 u16 offset, stride; 1079 u16 offset, stride;
1058 1080
1059 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV); 1081 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
1082 if (!pos)
1083 return 0;
1060 pci_read_config_word(pdev, pos + PCI_SRIOV_VF_OFFSET, &offset); 1084 pci_read_config_word(pdev, pos + PCI_SRIOV_VF_OFFSET, &offset);
1061 pci_read_config_word(pdev, pos + PCI_SRIOV_VF_STRIDE, &stride); 1085 pci_read_config_word(pdev, pos + PCI_SRIOV_VF_STRIDE, &stride);
1062 1086
1063 dev = pci_get_device(pdev->vendor, PCI_ANY_ID, NULL); 1087 dev = pci_get_device(pdev->vendor, PCI_ANY_ID, NULL);
1064 while (dev) { 1088 while (dev) {
1065 vf_fn = (pdev->devfn + offset + stride * vfs) & 0xFFFF; 1089 vf_fn = (pdev->devfn + offset + stride * vfs) & 0xFFFF;
1066 if (dev->is_virtfn && dev->devfn == vf_fn) { 1090 if (dev->is_virtfn && dev->devfn == vf_fn &&
1091 dev->bus->number == pdev->bus->number) {
1067 vfs++; 1092 vfs++;
1068 if (dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED) 1093 if (dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
1069 assigned_vfs++; 1094 assigned_vfs++;
@@ -1898,6 +1923,12 @@ static int be_rx_cqs_create(struct be_adapter *adapter)
1898 */ 1923 */
1899 adapter->num_rx_qs = (num_irqs(adapter) > 1) ? 1924 adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
1900 num_irqs(adapter) + 1 : 1; 1925 num_irqs(adapter) + 1 : 1;
1926 if (adapter->num_rx_qs != MAX_RX_QS) {
1927 rtnl_lock();
1928 netif_set_real_num_rx_queues(adapter->netdev,
1929 adapter->num_rx_qs);
1930 rtnl_unlock();
1931 }
1901 1932
1902 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE; 1933 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
1903 for_all_rx_queues(adapter, rxo, i) { 1934 for_all_rx_queues(adapter, rxo, i) {
@@ -2141,12 +2172,14 @@ static void be_msix_disable(struct be_adapter *adapter)
2141 2172
2142static uint be_num_rss_want(struct be_adapter *adapter) 2173static uint be_num_rss_want(struct be_adapter *adapter)
2143{ 2174{
2175 u32 num = 0;
2144 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) && 2176 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
2145 !sriov_want(adapter) && be_physfn(adapter) && 2177 !sriov_want(adapter) && be_physfn(adapter) &&
2146 !be_is_mc(adapter)) 2178 !be_is_mc(adapter)) {
2147 return (adapter->be3_native) ? BE3_MAX_RSS_QS : BE2_MAX_RSS_QS; 2179 num = (adapter->be3_native) ? BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
2148 else 2180 num = min_t(u32, num, (u32)netif_get_num_default_rss_queues());
2149 return 0; 2181 }
2182 return num;
2150} 2183}
2151 2184
2152static void be_msix_enable(struct be_adapter *adapter) 2185static void be_msix_enable(struct be_adapter *adapter)
@@ -2544,7 +2577,6 @@ static int be_clear(struct be_adapter *adapter)
2544 be_cmd_fw_clean(adapter); 2577 be_cmd_fw_clean(adapter);
2545 2578
2546 be_msix_disable(adapter); 2579 be_msix_disable(adapter);
2547 pci_write_config_dword(adapter->pdev, PCICFG_CUST_SCRATCHPAD_CSR, 0);
2548 return 0; 2580 return 0;
2549} 2581}
2550 2582
@@ -2602,8 +2634,8 @@ static int be_vf_setup(struct be_adapter *adapter)
2602 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST | 2634 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2603 BE_IF_FLAGS_MULTICAST; 2635 BE_IF_FLAGS_MULTICAST;
2604 for_all_vfs(adapter, vf_cfg, vf) { 2636 for_all_vfs(adapter, vf_cfg, vf) {
2605 status = be_cmd_if_create(adapter, cap_flags, en_flags, NULL, 2637 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2606 &vf_cfg->if_handle, NULL, vf + 1); 2638 &vf_cfg->if_handle, vf + 1);
2607 if (status) 2639 if (status)
2608 goto err; 2640 goto err;
2609 } 2641 }
@@ -2643,29 +2675,43 @@ static void be_setup_init(struct be_adapter *adapter)
2643 adapter->phy.forced_port_speed = -1; 2675 adapter->phy.forced_port_speed = -1;
2644} 2676}
2645 2677
2646static int be_add_mac_from_list(struct be_adapter *adapter, u8 *mac) 2678static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle,
2679 bool *active_mac, u32 *pmac_id)
2647{ 2680{
2648 u32 pmac_id; 2681 int status = 0;
2649 int status;
2650 bool pmac_id_active;
2651 2682
2652 status = be_cmd_get_mac_from_list(adapter, 0, &pmac_id_active, 2683 if (!is_zero_ether_addr(adapter->netdev->perm_addr)) {
2653 &pmac_id, mac); 2684 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
2654 if (status != 0) 2685 if (!lancer_chip(adapter) && !be_physfn(adapter))
2655 goto do_none; 2686 *active_mac = true;
2687 else
2688 *active_mac = false;
2656 2689
2657 if (pmac_id_active) { 2690 return status;
2658 status = be_cmd_mac_addr_query(adapter, mac, 2691 }
2659 MAC_ADDRESS_TYPE_NETWORK,
2660 false, adapter->if_handle, pmac_id);
2661 2692
2662 if (!status) 2693 if (lancer_chip(adapter)) {
2663 adapter->pmac_id[0] = pmac_id; 2694 status = be_cmd_get_mac_from_list(adapter, mac,
2695 active_mac, pmac_id, 0);
2696 if (*active_mac) {
2697 status = be_cmd_mac_addr_query(adapter, mac,
2698 MAC_ADDRESS_TYPE_NETWORK,
2699 false, if_handle,
2700 *pmac_id);
2701 }
2702 } else if (be_physfn(adapter)) {
2703 /* For BE3, for PF get permanent MAC */
2704 status = be_cmd_mac_addr_query(adapter, mac,
2705 MAC_ADDRESS_TYPE_NETWORK, true,
2706 0, 0);
2707 *active_mac = false;
2664 } else { 2708 } else {
2665 status = be_cmd_pmac_add(adapter, mac, 2709 /* For BE3, for VF get soft MAC assigned by PF*/
2666 adapter->if_handle, &adapter->pmac_id[0], 0); 2710 status = be_cmd_mac_addr_query(adapter, mac,
2711 MAC_ADDRESS_TYPE_NETWORK, false,
2712 if_handle, 0);
2713 *active_mac = true;
2667 } 2714 }
2668do_none:
2669 return status; 2715 return status;
2670} 2716}
2671 2717
@@ -2686,12 +2732,12 @@ static int be_get_config(struct be_adapter *adapter)
2686 2732
2687static int be_setup(struct be_adapter *adapter) 2733static int be_setup(struct be_adapter *adapter)
2688{ 2734{
2689 struct net_device *netdev = adapter->netdev;
2690 struct device *dev = &adapter->pdev->dev; 2735 struct device *dev = &adapter->pdev->dev;
2691 u32 cap_flags, en_flags; 2736 u32 cap_flags, en_flags;
2692 u32 tx_fc, rx_fc; 2737 u32 tx_fc, rx_fc;
2693 int status; 2738 int status;
2694 u8 mac[ETH_ALEN]; 2739 u8 mac[ETH_ALEN];
2740 bool active_mac;
2695 2741
2696 be_setup_init(adapter); 2742 be_setup_init(adapter);
2697 2743
@@ -2717,14 +2763,6 @@ static int be_setup(struct be_adapter *adapter)
2717 if (status) 2763 if (status)
2718 goto err; 2764 goto err;
2719 2765
2720 memset(mac, 0, ETH_ALEN);
2721 status = be_cmd_mac_addr_query(adapter, mac, MAC_ADDRESS_TYPE_NETWORK,
2722 true /*permanent */, 0, 0);
2723 if (status)
2724 return status;
2725 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2726 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2727
2728 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST | 2766 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2729 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS; 2767 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
2730 cap_flags = en_flags | BE_IF_FLAGS_MCAST_PROMISCUOUS | 2768 cap_flags = en_flags | BE_IF_FLAGS_MCAST_PROMISCUOUS |
@@ -2734,27 +2772,29 @@ static int be_setup(struct be_adapter *adapter)
2734 cap_flags |= BE_IF_FLAGS_RSS; 2772 cap_flags |= BE_IF_FLAGS_RSS;
2735 en_flags |= BE_IF_FLAGS_RSS; 2773 en_flags |= BE_IF_FLAGS_RSS;
2736 } 2774 }
2775
2737 status = be_cmd_if_create(adapter, cap_flags, en_flags, 2776 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2738 netdev->dev_addr, &adapter->if_handle, 2777 &adapter->if_handle, 0);
2739 &adapter->pmac_id[0], 0);
2740 if (status != 0) 2778 if (status != 0)
2741 goto err; 2779 goto err;
2742 2780
2743 /* The VF's permanent mac queried from card is incorrect. 2781 memset(mac, 0, ETH_ALEN);
2744 * For BEx: Query the mac configued by the PF using if_handle 2782 active_mac = false;
2745 * For Lancer: Get and use mac_list to obtain mac address. 2783 status = be_get_mac_addr(adapter, mac, adapter->if_handle,
2746 */ 2784 &active_mac, &adapter->pmac_id[0]);
2747 if (!be_physfn(adapter)) { 2785 if (status != 0)
2748 if (lancer_chip(adapter)) 2786 goto err;
2749 status = be_add_mac_from_list(adapter, mac); 2787
2750 else 2788 if (!active_mac) {
2751 status = be_cmd_mac_addr_query(adapter, mac, 2789 status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
2752 MAC_ADDRESS_TYPE_NETWORK, false, 2790 &adapter->pmac_id[0], 0);
2753 adapter->if_handle, 0); 2791 if (status != 0)
2754 if (!status) { 2792 goto err;
2755 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN); 2793 }
2756 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN); 2794
2757 } 2795 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
2796 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2797 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2758 } 2798 }
2759 2799
2760 status = be_tx_qs_create(adapter); 2800 status = be_tx_qs_create(adapter);
@@ -2763,7 +2803,8 @@ static int be_setup(struct be_adapter *adapter)
2763 2803
2764 be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL); 2804 be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
2765 2805
2766 be_vid_config(adapter, false, 0); 2806 if (adapter->vlans_added)
2807 be_vid_config(adapter);
2767 2808
2768 be_set_rx_mode(adapter->netdev); 2809 be_set_rx_mode(adapter->netdev);
2769 2810
@@ -2773,8 +2814,6 @@ static int be_setup(struct be_adapter *adapter)
2773 be_cmd_set_flow_control(adapter, adapter->tx_fc, 2814 be_cmd_set_flow_control(adapter, adapter->tx_fc,
2774 adapter->rx_fc); 2815 adapter->rx_fc);
2775 2816
2776 pcie_set_readrq(adapter->pdev, 4096);
2777
2778 if (be_physfn(adapter) && num_vfs) { 2817 if (be_physfn(adapter) && num_vfs) {
2779 if (adapter->dev_num_vfs) 2818 if (adapter->dev_num_vfs)
2780 be_vf_setup(adapter); 2819 be_vf_setup(adapter);
@@ -2788,8 +2827,6 @@ static int be_setup(struct be_adapter *adapter)
2788 2827
2789 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000)); 2828 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
2790 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED; 2829 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
2791
2792 pci_write_config_dword(adapter->pdev, PCICFG_CUST_SCRATCHPAD_CSR, 1);
2793 return 0; 2830 return 0;
2794err: 2831err:
2795 be_clear(adapter); 2832 be_clear(adapter);
@@ -3727,10 +3764,7 @@ reschedule:
3727 3764
3728static bool be_reset_required(struct be_adapter *adapter) 3765static bool be_reset_required(struct be_adapter *adapter)
3729{ 3766{
3730 u32 reg; 3767 return be_find_vfs(adapter, ENABLED) > 0 ? false : true;
3731
3732 pci_read_config_dword(adapter->pdev, PCICFG_CUST_SCRATCHPAD_CSR, &reg);
3733 return reg;
3734} 3768}
3735 3769
3736static int __devinit be_probe(struct pci_dev *pdev, 3770static int __devinit be_probe(struct pci_dev *pdev,
@@ -3749,7 +3783,7 @@ static int __devinit be_probe(struct pci_dev *pdev,
3749 goto disable_dev; 3783 goto disable_dev;
3750 pci_set_master(pdev); 3784 pci_set_master(pdev);
3751 3785
3752 netdev = alloc_etherdev_mq(sizeof(struct be_adapter), MAX_TX_QS); 3786 netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
3753 if (netdev == NULL) { 3787 if (netdev == NULL) {
3754 status = -ENOMEM; 3788 status = -ENOMEM;
3755 goto rel_reg; 3789 goto rel_reg;
diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c
index a3816781054..20297881f8e 100644
--- a/drivers/net/ethernet/ethoc.c
+++ b/drivers/net/ethernet/ethoc.c
@@ -902,7 +902,7 @@ static const struct net_device_ops ethoc_netdev_ops = {
902}; 902};
903 903
904/** 904/**
905 * ethoc_probe() - initialize OpenCores ethernet MAC 905 * ethoc_probe - initialize OpenCores ethernet MAC
906 * pdev: platform device 906 * pdev: platform device
907 */ 907 */
908static int __devinit ethoc_probe(struct platform_device *pdev) 908static int __devinit ethoc_probe(struct platform_device *pdev)
@@ -1140,7 +1140,7 @@ out:
1140} 1140}
1141 1141
1142/** 1142/**
1143 * ethoc_remove() - shutdown OpenCores ethernet MAC 1143 * ethoc_remove - shutdown OpenCores ethernet MAC
1144 * @pdev: platform device 1144 * @pdev: platform device
1145 */ 1145 */
1146static int __devexit ethoc_remove(struct platform_device *pdev) 1146static int __devexit ethoc_remove(struct platform_device *pdev)
diff --git a/drivers/net/ethernet/freescale/fec.c b/drivers/net/ethernet/freescale/fec.c
index ff7f4c5115a..fffd20528b5 100644
--- a/drivers/net/ethernet/freescale/fec.c
+++ b/drivers/net/ethernet/freescale/fec.c
@@ -49,6 +49,7 @@
49#include <linux/of_gpio.h> 49#include <linux/of_gpio.h>
50#include <linux/of_net.h> 50#include <linux/of_net.h>
51#include <linux/pinctrl/consumer.h> 51#include <linux/pinctrl/consumer.h>
52#include <linux/regulator/consumer.h>
52 53
53#include <asm/cacheflush.h> 54#include <asm/cacheflush.h>
54 55
@@ -1388,8 +1389,8 @@ fec_set_mac_address(struct net_device *ndev, void *p)
1388} 1389}
1389 1390
1390#ifdef CONFIG_NET_POLL_CONTROLLER 1391#ifdef CONFIG_NET_POLL_CONTROLLER
1391/* 1392/**
1392 * fec_poll_controller: FEC Poll controller function 1393 * fec_poll_controller - FEC Poll controller function
1393 * @dev: The FEC network adapter 1394 * @dev: The FEC network adapter
1394 * 1395 *
1395 * Polled functionality used by netconsole and others in non interrupt mode 1396 * Polled functionality used by netconsole and others in non interrupt mode
@@ -1506,18 +1507,25 @@ static int __devinit fec_get_phy_mode_dt(struct platform_device *pdev)
1506static void __devinit fec_reset_phy(struct platform_device *pdev) 1507static void __devinit fec_reset_phy(struct platform_device *pdev)
1507{ 1508{
1508 int err, phy_reset; 1509 int err, phy_reset;
1510 int msec = 1;
1509 struct device_node *np = pdev->dev.of_node; 1511 struct device_node *np = pdev->dev.of_node;
1510 1512
1511 if (!np) 1513 if (!np)
1512 return; 1514 return;
1513 1515
1516 of_property_read_u32(np, "phy-reset-duration", &msec);
1517 /* A sane reset duration should not be longer than 1s */
1518 if (msec > 1000)
1519 msec = 1;
1520
1514 phy_reset = of_get_named_gpio(np, "phy-reset-gpios", 0); 1521 phy_reset = of_get_named_gpio(np, "phy-reset-gpios", 0);
1515 err = gpio_request_one(phy_reset, GPIOF_OUT_INIT_LOW, "phy-reset"); 1522 err = devm_gpio_request_one(&pdev->dev, phy_reset,
1523 GPIOF_OUT_INIT_LOW, "phy-reset");
1516 if (err) { 1524 if (err) {
1517 pr_debug("FEC: failed to get gpio phy-reset: %d\n", err); 1525 pr_debug("FEC: failed to get gpio phy-reset: %d\n", err);
1518 return; 1526 return;
1519 } 1527 }
1520 msleep(1); 1528 msleep(msec);
1521 gpio_set_value(phy_reset, 1); 1529 gpio_set_value(phy_reset, 1);
1522} 1530}
1523#else /* CONFIG_OF */ 1531#else /* CONFIG_OF */
@@ -1546,6 +1554,7 @@ fec_probe(struct platform_device *pdev)
1546 const struct of_device_id *of_id; 1554 const struct of_device_id *of_id;
1547 static int dev_id; 1555 static int dev_id;
1548 struct pinctrl *pinctrl; 1556 struct pinctrl *pinctrl;
1557 struct regulator *reg_phy;
1549 1558
1550 of_id = of_match_device(fec_dt_ids, &pdev->dev); 1559 of_id = of_match_device(fec_dt_ids, &pdev->dev);
1551 if (of_id) 1560 if (of_id)
@@ -1593,8 +1602,6 @@ fec_probe(struct platform_device *pdev)
1593 fep->phy_interface = ret; 1602 fep->phy_interface = ret;
1594 } 1603 }
1595 1604
1596 fec_reset_phy(pdev);
1597
1598 for (i = 0; i < FEC_IRQ_NUM; i++) { 1605 for (i = 0; i < FEC_IRQ_NUM; i++) {
1599 irq = platform_get_irq(pdev, i); 1606 irq = platform_get_irq(pdev, i);
1600 if (irq < 0) { 1607 if (irq < 0) {
@@ -1634,6 +1641,18 @@ fec_probe(struct platform_device *pdev)
1634 clk_prepare_enable(fep->clk_ahb); 1641 clk_prepare_enable(fep->clk_ahb);
1635 clk_prepare_enable(fep->clk_ipg); 1642 clk_prepare_enable(fep->clk_ipg);
1636 1643
1644 reg_phy = devm_regulator_get(&pdev->dev, "phy");
1645 if (!IS_ERR(reg_phy)) {
1646 ret = regulator_enable(reg_phy);
1647 if (ret) {
1648 dev_err(&pdev->dev,
1649 "Failed to enable phy regulator: %d\n", ret);
1650 goto failed_regulator;
1651 }
1652 }
1653
1654 fec_reset_phy(pdev);
1655
1637 ret = fec_enet_init(ndev); 1656 ret = fec_enet_init(ndev);
1638 if (ret) 1657 if (ret)
1639 goto failed_init; 1658 goto failed_init;
@@ -1655,6 +1674,7 @@ failed_register:
1655 fec_enet_mii_remove(fep); 1674 fec_enet_mii_remove(fep);
1656failed_mii_init: 1675failed_mii_init:
1657failed_init: 1676failed_init:
1677failed_regulator:
1658 clk_disable_unprepare(fep->clk_ahb); 1678 clk_disable_unprepare(fep->clk_ahb);
1659 clk_disable_unprepare(fep->clk_ipg); 1679 clk_disable_unprepare(fep->clk_ipg);
1660failed_pin: 1680failed_pin:
diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
index f7f0bf5d037..9527b28d70d 100644
--- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c
+++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
@@ -47,6 +47,9 @@
47#include "gianfar.h" 47#include "gianfar.h"
48#include "fsl_pq_mdio.h" 48#include "fsl_pq_mdio.h"
49 49
50/* Number of microseconds to wait for an MII register to respond */
51#define MII_TIMEOUT 1000
52
50struct fsl_pq_mdio_priv { 53struct fsl_pq_mdio_priv {
51 void __iomem *map; 54 void __iomem *map;
52 struct fsl_pq_mdio __iomem *regs; 55 struct fsl_pq_mdio __iomem *regs;
@@ -64,6 +67,8 @@ struct fsl_pq_mdio_priv {
64int fsl_pq_local_mdio_write(struct fsl_pq_mdio __iomem *regs, int mii_id, 67int fsl_pq_local_mdio_write(struct fsl_pq_mdio __iomem *regs, int mii_id,
65 int regnum, u16 value) 68 int regnum, u16 value)
66{ 69{
70 u32 status;
71
67 /* Set the PHY address and the register address we want to write */ 72 /* Set the PHY address and the register address we want to write */
68 out_be32(&regs->miimadd, (mii_id << 8) | regnum); 73 out_be32(&regs->miimadd, (mii_id << 8) | regnum);
69 74
@@ -71,10 +76,10 @@ int fsl_pq_local_mdio_write(struct fsl_pq_mdio __iomem *regs, int mii_id,
71 out_be32(&regs->miimcon, value); 76 out_be32(&regs->miimcon, value);
72 77
73 /* Wait for the transaction to finish */ 78 /* Wait for the transaction to finish */
74 while (in_be32(&regs->miimind) & MIIMIND_BUSY) 79 status = spin_event_timeout(!(in_be32(&regs->miimind) & MIIMIND_BUSY),
75 cpu_relax(); 80 MII_TIMEOUT, 0);
76 81
77 return 0; 82 return status ? 0 : -ETIMEDOUT;
78} 83}
79 84
80/* 85/*
@@ -91,6 +96,7 @@ int fsl_pq_local_mdio_read(struct fsl_pq_mdio __iomem *regs,
91 int mii_id, int regnum) 96 int mii_id, int regnum)
92{ 97{
93 u16 value; 98 u16 value;
99 u32 status;
94 100
95 /* Set the PHY address and the register address we want to read */ 101 /* Set the PHY address and the register address we want to read */
96 out_be32(&regs->miimadd, (mii_id << 8) | regnum); 102 out_be32(&regs->miimadd, (mii_id << 8) | regnum);
@@ -99,9 +105,12 @@ int fsl_pq_local_mdio_read(struct fsl_pq_mdio __iomem *regs,
99 out_be32(&regs->miimcom, 0); 105 out_be32(&regs->miimcom, 0);
100 out_be32(&regs->miimcom, MII_READ_COMMAND); 106 out_be32(&regs->miimcom, MII_READ_COMMAND);
101 107
102 /* Wait for the transaction to finish */ 108 /* Wait for the transaction to finish, normally less than 100us */
103 while (in_be32(&regs->miimind) & (MIIMIND_NOTVALID | MIIMIND_BUSY)) 109 status = spin_event_timeout(!(in_be32(&regs->miimind) &
104 cpu_relax(); 110 (MIIMIND_NOTVALID | MIIMIND_BUSY)),
111 MII_TIMEOUT, 0);
112 if (!status)
113 return -ETIMEDOUT;
105 114
106 /* Grab the value of the register from miimstat */ 115 /* Grab the value of the register from miimstat */
107 value = in_be32(&regs->miimstat); 116 value = in_be32(&regs->miimstat);
@@ -144,7 +153,7 @@ int fsl_pq_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
144static int fsl_pq_mdio_reset(struct mii_bus *bus) 153static int fsl_pq_mdio_reset(struct mii_bus *bus)
145{ 154{
146 struct fsl_pq_mdio __iomem *regs = fsl_pq_mdio_get_regs(bus); 155 struct fsl_pq_mdio __iomem *regs = fsl_pq_mdio_get_regs(bus);
147 int timeout = PHY_INIT_TIMEOUT; 156 u32 status;
148 157
149 mutex_lock(&bus->mdio_lock); 158 mutex_lock(&bus->mdio_lock);
150 159
@@ -155,12 +164,12 @@ static int fsl_pq_mdio_reset(struct mii_bus *bus)
155 out_be32(&regs->miimcfg, MIIMCFG_INIT_VALUE); 164 out_be32(&regs->miimcfg, MIIMCFG_INIT_VALUE);
156 165
157 /* Wait until the bus is free */ 166 /* Wait until the bus is free */
158 while ((in_be32(&regs->miimind) & MIIMIND_BUSY) && timeout--) 167 status = spin_event_timeout(!(in_be32(&regs->miimind) & MIIMIND_BUSY),
159 cpu_relax(); 168 MII_TIMEOUT, 0);
160 169
161 mutex_unlock(&bus->mdio_lock); 170 mutex_unlock(&bus->mdio_lock);
162 171
163 if (timeout < 0) { 172 if (!status) {
164 printk(KERN_ERR "%s: The MII Bus is stuck!\n", 173 printk(KERN_ERR "%s: The MII Bus is stuck!\n",
165 bus->name); 174 bus->name);
166 return -EBUSY; 175 return -EBUSY;
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index ab1d80ff079..4605f724668 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -1,5 +1,4 @@
1/* 1/* drivers/net/ethernet/freescale/gianfar.c
2 * drivers/net/ethernet/freescale/gianfar.c
3 * 2 *
4 * Gianfar Ethernet Driver 3 * Gianfar Ethernet Driver
5 * This driver is designed for the non-CPM ethernet controllers 4 * This driver is designed for the non-CPM ethernet controllers
@@ -114,7 +113,7 @@ static void gfar_timeout(struct net_device *dev);
114static int gfar_close(struct net_device *dev); 113static int gfar_close(struct net_device *dev);
115struct sk_buff *gfar_new_skb(struct net_device *dev); 114struct sk_buff *gfar_new_skb(struct net_device *dev);
116static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp, 115static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
117 struct sk_buff *skb); 116 struct sk_buff *skb);
118static int gfar_set_mac_address(struct net_device *dev); 117static int gfar_set_mac_address(struct net_device *dev);
119static int gfar_change_mtu(struct net_device *dev, int new_mtu); 118static int gfar_change_mtu(struct net_device *dev, int new_mtu);
120static irqreturn_t gfar_error(int irq, void *dev_id); 119static irqreturn_t gfar_error(int irq, void *dev_id);
@@ -266,8 +265,8 @@ static int gfar_alloc_skb_resources(struct net_device *ndev)
266 tx_queue->tx_bd_dma_base = addr; 265 tx_queue->tx_bd_dma_base = addr;
267 tx_queue->dev = ndev; 266 tx_queue->dev = ndev;
268 /* enet DMA only understands physical addresses */ 267 /* enet DMA only understands physical addresses */
269 addr += sizeof(struct txbd8) *tx_queue->tx_ring_size; 268 addr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
270 vaddr += sizeof(struct txbd8) *tx_queue->tx_ring_size; 269 vaddr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
271 } 270 }
272 271
273 /* Start the rx descriptor ring where the tx ring leaves off */ 272 /* Start the rx descriptor ring where the tx ring leaves off */
@@ -276,15 +275,16 @@ static int gfar_alloc_skb_resources(struct net_device *ndev)
276 rx_queue->rx_bd_base = vaddr; 275 rx_queue->rx_bd_base = vaddr;
277 rx_queue->rx_bd_dma_base = addr; 276 rx_queue->rx_bd_dma_base = addr;
278 rx_queue->dev = ndev; 277 rx_queue->dev = ndev;
279 addr += sizeof (struct rxbd8) * rx_queue->rx_ring_size; 278 addr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
280 vaddr += sizeof (struct rxbd8) * rx_queue->rx_ring_size; 279 vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
281 } 280 }
282 281
283 /* Setup the skbuff rings */ 282 /* Setup the skbuff rings */
284 for (i = 0; i < priv->num_tx_queues; i++) { 283 for (i = 0; i < priv->num_tx_queues; i++) {
285 tx_queue = priv->tx_queue[i]; 284 tx_queue = priv->tx_queue[i];
286 tx_queue->tx_skbuff = kmalloc(sizeof(*tx_queue->tx_skbuff) * 285 tx_queue->tx_skbuff = kmalloc(sizeof(*tx_queue->tx_skbuff) *
287 tx_queue->tx_ring_size, GFP_KERNEL); 286 tx_queue->tx_ring_size,
287 GFP_KERNEL);
288 if (!tx_queue->tx_skbuff) { 288 if (!tx_queue->tx_skbuff) {
289 netif_err(priv, ifup, ndev, 289 netif_err(priv, ifup, ndev,
290 "Could not allocate tx_skbuff\n"); 290 "Could not allocate tx_skbuff\n");
@@ -298,7 +298,8 @@ static int gfar_alloc_skb_resources(struct net_device *ndev)
298 for (i = 0; i < priv->num_rx_queues; i++) { 298 for (i = 0; i < priv->num_rx_queues; i++) {
299 rx_queue = priv->rx_queue[i]; 299 rx_queue = priv->rx_queue[i];
300 rx_queue->rx_skbuff = kmalloc(sizeof(*rx_queue->rx_skbuff) * 300 rx_queue->rx_skbuff = kmalloc(sizeof(*rx_queue->rx_skbuff) *
301 rx_queue->rx_ring_size, GFP_KERNEL); 301 rx_queue->rx_ring_size,
302 GFP_KERNEL);
302 303
303 if (!rx_queue->rx_skbuff) { 304 if (!rx_queue->rx_skbuff) {
304 netif_err(priv, ifup, ndev, 305 netif_err(priv, ifup, ndev,
@@ -327,15 +328,15 @@ static void gfar_init_tx_rx_base(struct gfar_private *priv)
327 int i; 328 int i;
328 329
329 baddr = &regs->tbase0; 330 baddr = &regs->tbase0;
330 for(i = 0; i < priv->num_tx_queues; i++) { 331 for (i = 0; i < priv->num_tx_queues; i++) {
331 gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base); 332 gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base);
332 baddr += 2; 333 baddr += 2;
333 } 334 }
334 335
335 baddr = &regs->rbase0; 336 baddr = &regs->rbase0;
336 for(i = 0; i < priv->num_rx_queues; i++) { 337 for (i = 0; i < priv->num_rx_queues; i++) {
337 gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base); 338 gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base);
338 baddr += 2; 339 baddr += 2;
339 } 340 }
340} 341}
341 342
@@ -405,7 +406,8 @@ static void gfar_init_mac(struct net_device *ndev)
405 gfar_write(&regs->attreli, attrs); 406 gfar_write(&regs->attreli, attrs);
406 407
407 /* Start with defaults, and add stashing or locking 408 /* Start with defaults, and add stashing or locking
408 * depending on the approprate variables */ 409 * depending on the approprate variables
410 */
409 attrs = ATTR_INIT_SETTINGS; 411 attrs = ATTR_INIT_SETTINGS;
410 412
411 if (priv->bd_stash_en) 413 if (priv->bd_stash_en)
@@ -426,16 +428,16 @@ static struct net_device_stats *gfar_get_stats(struct net_device *dev)
426 struct gfar_private *priv = netdev_priv(dev); 428 struct gfar_private *priv = netdev_priv(dev);
427 unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0; 429 unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0;
428 unsigned long tx_packets = 0, tx_bytes = 0; 430 unsigned long tx_packets = 0, tx_bytes = 0;
429 int i = 0; 431 int i;
430 432
431 for (i = 0; i < priv->num_rx_queues; i++) { 433 for (i = 0; i < priv->num_rx_queues; i++) {
432 rx_packets += priv->rx_queue[i]->stats.rx_packets; 434 rx_packets += priv->rx_queue[i]->stats.rx_packets;
433 rx_bytes += priv->rx_queue[i]->stats.rx_bytes; 435 rx_bytes += priv->rx_queue[i]->stats.rx_bytes;
434 rx_dropped += priv->rx_queue[i]->stats.rx_dropped; 436 rx_dropped += priv->rx_queue[i]->stats.rx_dropped;
435 } 437 }
436 438
437 dev->stats.rx_packets = rx_packets; 439 dev->stats.rx_packets = rx_packets;
438 dev->stats.rx_bytes = rx_bytes; 440 dev->stats.rx_bytes = rx_bytes;
439 dev->stats.rx_dropped = rx_dropped; 441 dev->stats.rx_dropped = rx_dropped;
440 442
441 for (i = 0; i < priv->num_tx_queues; i++) { 443 for (i = 0; i < priv->num_tx_queues; i++) {
@@ -443,7 +445,7 @@ static struct net_device_stats *gfar_get_stats(struct net_device *dev)
443 tx_packets += priv->tx_queue[i]->stats.tx_packets; 445 tx_packets += priv->tx_queue[i]->stats.tx_packets;
444 } 446 }
445 447
446 dev->stats.tx_bytes = tx_bytes; 448 dev->stats.tx_bytes = tx_bytes;
447 dev->stats.tx_packets = tx_packets; 449 dev->stats.tx_packets = tx_packets;
448 450
449 return &dev->stats; 451 return &dev->stats;
@@ -468,7 +470,7 @@ static const struct net_device_ops gfar_netdev_ops = {
468 470
469void lock_rx_qs(struct gfar_private *priv) 471void lock_rx_qs(struct gfar_private *priv)
470{ 472{
471 int i = 0x0; 473 int i;
472 474
473 for (i = 0; i < priv->num_rx_queues; i++) 475 for (i = 0; i < priv->num_rx_queues; i++)
474 spin_lock(&priv->rx_queue[i]->rxlock); 476 spin_lock(&priv->rx_queue[i]->rxlock);
@@ -476,7 +478,7 @@ void lock_rx_qs(struct gfar_private *priv)
476 478
477void lock_tx_qs(struct gfar_private *priv) 479void lock_tx_qs(struct gfar_private *priv)
478{ 480{
479 int i = 0x0; 481 int i;
480 482
481 for (i = 0; i < priv->num_tx_queues; i++) 483 for (i = 0; i < priv->num_tx_queues; i++)
482 spin_lock(&priv->tx_queue[i]->txlock); 484 spin_lock(&priv->tx_queue[i]->txlock);
@@ -484,7 +486,7 @@ void lock_tx_qs(struct gfar_private *priv)
484 486
485void unlock_rx_qs(struct gfar_private *priv) 487void unlock_rx_qs(struct gfar_private *priv)
486{ 488{
487 int i = 0x0; 489 int i;
488 490
489 for (i = 0; i < priv->num_rx_queues; i++) 491 for (i = 0; i < priv->num_rx_queues; i++)
490 spin_unlock(&priv->rx_queue[i]->rxlock); 492 spin_unlock(&priv->rx_queue[i]->rxlock);
@@ -492,7 +494,7 @@ void unlock_rx_qs(struct gfar_private *priv)
492 494
493void unlock_tx_qs(struct gfar_private *priv) 495void unlock_tx_qs(struct gfar_private *priv)
494{ 496{
495 int i = 0x0; 497 int i;
496 498
497 for (i = 0; i < priv->num_tx_queues; i++) 499 for (i = 0; i < priv->num_tx_queues; i++)
498 spin_unlock(&priv->tx_queue[i]->txlock); 500 spin_unlock(&priv->tx_queue[i]->txlock);
@@ -508,13 +510,13 @@ static bool gfar_is_vlan_on(struct gfar_private *priv)
508static inline int gfar_uses_fcb(struct gfar_private *priv) 510static inline int gfar_uses_fcb(struct gfar_private *priv)
509{ 511{
510 return gfar_is_vlan_on(priv) || 512 return gfar_is_vlan_on(priv) ||
511 (priv->ndev->features & NETIF_F_RXCSUM) || 513 (priv->ndev->features & NETIF_F_RXCSUM) ||
512 (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER); 514 (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER);
513} 515}
514 516
515static void free_tx_pointers(struct gfar_private *priv) 517static void free_tx_pointers(struct gfar_private *priv)
516{ 518{
517 int i = 0; 519 int i;
518 520
519 for (i = 0; i < priv->num_tx_queues; i++) 521 for (i = 0; i < priv->num_tx_queues; i++)
520 kfree(priv->tx_queue[i]); 522 kfree(priv->tx_queue[i]);
@@ -522,7 +524,7 @@ static void free_tx_pointers(struct gfar_private *priv)
522 524
523static void free_rx_pointers(struct gfar_private *priv) 525static void free_rx_pointers(struct gfar_private *priv)
524{ 526{
525 int i = 0; 527 int i;
526 528
527 for (i = 0; i < priv->num_rx_queues; i++) 529 for (i = 0; i < priv->num_rx_queues; i++)
528 kfree(priv->rx_queue[i]); 530 kfree(priv->rx_queue[i]);
@@ -530,7 +532,7 @@ static void free_rx_pointers(struct gfar_private *priv)
530 532
531static void unmap_group_regs(struct gfar_private *priv) 533static void unmap_group_regs(struct gfar_private *priv)
532{ 534{
533 int i = 0; 535 int i;
534 536
535 for (i = 0; i < MAXGROUPS; i++) 537 for (i = 0; i < MAXGROUPS; i++)
536 if (priv->gfargrp[i].regs) 538 if (priv->gfargrp[i].regs)
@@ -539,7 +541,7 @@ static void unmap_group_regs(struct gfar_private *priv)
539 541
540static void disable_napi(struct gfar_private *priv) 542static void disable_napi(struct gfar_private *priv)
541{ 543{
542 int i = 0; 544 int i;
543 545
544 for (i = 0; i < priv->num_grps; i++) 546 for (i = 0; i < priv->num_grps; i++)
545 napi_disable(&priv->gfargrp[i].napi); 547 napi_disable(&priv->gfargrp[i].napi);
@@ -547,14 +549,14 @@ static void disable_napi(struct gfar_private *priv)
547 549
548static void enable_napi(struct gfar_private *priv) 550static void enable_napi(struct gfar_private *priv)
549{ 551{
550 int i = 0; 552 int i;
551 553
552 for (i = 0; i < priv->num_grps; i++) 554 for (i = 0; i < priv->num_grps; i++)
553 napi_enable(&priv->gfargrp[i].napi); 555 napi_enable(&priv->gfargrp[i].napi);
554} 556}
555 557
556static int gfar_parse_group(struct device_node *np, 558static int gfar_parse_group(struct device_node *np,
557 struct gfar_private *priv, const char *model) 559 struct gfar_private *priv, const char *model)
558{ 560{
559 u32 *queue_mask; 561 u32 *queue_mask;
560 562
@@ -580,15 +582,13 @@ static int gfar_parse_group(struct device_node *np,
580 priv->gfargrp[priv->num_grps].grp_id = priv->num_grps; 582 priv->gfargrp[priv->num_grps].grp_id = priv->num_grps;
581 priv->gfargrp[priv->num_grps].priv = priv; 583 priv->gfargrp[priv->num_grps].priv = priv;
582 spin_lock_init(&priv->gfargrp[priv->num_grps].grplock); 584 spin_lock_init(&priv->gfargrp[priv->num_grps].grplock);
583 if(priv->mode == MQ_MG_MODE) { 585 if (priv->mode == MQ_MG_MODE) {
584 queue_mask = (u32 *)of_get_property(np, 586 queue_mask = (u32 *)of_get_property(np, "fsl,rx-bit-map", NULL);
585 "fsl,rx-bit-map", NULL); 587 priv->gfargrp[priv->num_grps].rx_bit_map = queue_mask ?
586 priv->gfargrp[priv->num_grps].rx_bit_map = 588 *queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
587 queue_mask ? *queue_mask :(DEFAULT_MAPPING >> priv->num_grps); 589 queue_mask = (u32 *)of_get_property(np, "fsl,tx-bit-map", NULL);
588 queue_mask = (u32 *)of_get_property(np, 590 priv->gfargrp[priv->num_grps].tx_bit_map = queue_mask ?
589 "fsl,tx-bit-map", NULL); 591 *queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
590 priv->gfargrp[priv->num_grps].tx_bit_map =
591 queue_mask ? *queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
592 } else { 592 } else {
593 priv->gfargrp[priv->num_grps].rx_bit_map = 0xFF; 593 priv->gfargrp[priv->num_grps].rx_bit_map = 0xFF;
594 priv->gfargrp[priv->num_grps].tx_bit_map = 0xFF; 594 priv->gfargrp[priv->num_grps].tx_bit_map = 0xFF;
@@ -652,7 +652,7 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
652 priv->num_rx_queues = num_rx_qs; 652 priv->num_rx_queues = num_rx_qs;
653 priv->num_grps = 0x0; 653 priv->num_grps = 0x0;
654 654
655 /* Init Rx queue filer rule set linked list*/ 655 /* Init Rx queue filer rule set linked list */
656 INIT_LIST_HEAD(&priv->rx_list.list); 656 INIT_LIST_HEAD(&priv->rx_list.list);
657 priv->rx_list.count = 0; 657 priv->rx_list.count = 0;
658 mutex_init(&priv->rx_queue_access); 658 mutex_init(&priv->rx_queue_access);
@@ -673,7 +673,7 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
673 } else { 673 } else {
674 priv->mode = SQ_SG_MODE; 674 priv->mode = SQ_SG_MODE;
675 err = gfar_parse_group(np, priv, model); 675 err = gfar_parse_group(np, priv, model);
676 if(err) 676 if (err)
677 goto err_grp_init; 677 goto err_grp_init;
678 } 678 }
679 679
@@ -730,27 +730,27 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
730 priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING; 730 priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING;
731 731
732 mac_addr = of_get_mac_address(np); 732 mac_addr = of_get_mac_address(np);
733
733 if (mac_addr) 734 if (mac_addr)
734 memcpy(dev->dev_addr, mac_addr, ETH_ALEN); 735 memcpy(dev->dev_addr, mac_addr, ETH_ALEN);
735 736
736 if (model && !strcasecmp(model, "TSEC")) 737 if (model && !strcasecmp(model, "TSEC"))
737 priv->device_flags = 738 priv->device_flags = FSL_GIANFAR_DEV_HAS_GIGABIT |
738 FSL_GIANFAR_DEV_HAS_GIGABIT | 739 FSL_GIANFAR_DEV_HAS_COALESCE |
739 FSL_GIANFAR_DEV_HAS_COALESCE | 740 FSL_GIANFAR_DEV_HAS_RMON |
740 FSL_GIANFAR_DEV_HAS_RMON | 741 FSL_GIANFAR_DEV_HAS_MULTI_INTR;
741 FSL_GIANFAR_DEV_HAS_MULTI_INTR; 742
742 if (model && !strcasecmp(model, "eTSEC")) 743 if (model && !strcasecmp(model, "eTSEC"))
743 priv->device_flags = 744 priv->device_flags = FSL_GIANFAR_DEV_HAS_GIGABIT |
744 FSL_GIANFAR_DEV_HAS_GIGABIT | 745 FSL_GIANFAR_DEV_HAS_COALESCE |
745 FSL_GIANFAR_DEV_HAS_COALESCE | 746 FSL_GIANFAR_DEV_HAS_RMON |
746 FSL_GIANFAR_DEV_HAS_RMON | 747 FSL_GIANFAR_DEV_HAS_MULTI_INTR |
747 FSL_GIANFAR_DEV_HAS_MULTI_INTR | 748 FSL_GIANFAR_DEV_HAS_PADDING |
748 FSL_GIANFAR_DEV_HAS_PADDING | 749 FSL_GIANFAR_DEV_HAS_CSUM |
749 FSL_GIANFAR_DEV_HAS_CSUM | 750 FSL_GIANFAR_DEV_HAS_VLAN |
750 FSL_GIANFAR_DEV_HAS_VLAN | 751 FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
751 FSL_GIANFAR_DEV_HAS_MAGIC_PACKET | 752 FSL_GIANFAR_DEV_HAS_EXTENDED_HASH |
752 FSL_GIANFAR_DEV_HAS_EXTENDED_HASH | 753 FSL_GIANFAR_DEV_HAS_TIMER;
753 FSL_GIANFAR_DEV_HAS_TIMER;
754 754
755 ctype = of_get_property(np, "phy-connection-type", NULL); 755 ctype = of_get_property(np, "phy-connection-type", NULL);
756 756
@@ -781,7 +781,7 @@ err_grp_init:
781} 781}
782 782
783static int gfar_hwtstamp_ioctl(struct net_device *netdev, 783static int gfar_hwtstamp_ioctl(struct net_device *netdev,
784 struct ifreq *ifr, int cmd) 784 struct ifreq *ifr, int cmd)
785{ 785{
786 struct hwtstamp_config config; 786 struct hwtstamp_config config;
787 struct gfar_private *priv = netdev_priv(netdev); 787 struct gfar_private *priv = netdev_priv(netdev);
@@ -851,6 +851,7 @@ static unsigned int reverse_bitmap(unsigned int bit_map, unsigned int max_qs)
851{ 851{
852 unsigned int new_bit_map = 0x0; 852 unsigned int new_bit_map = 0x0;
853 int mask = 0x1 << (max_qs - 1), i; 853 int mask = 0x1 << (max_qs - 1), i;
854
854 for (i = 0; i < max_qs; i++) { 855 for (i = 0; i < max_qs; i++) {
855 if (bit_map & mask) 856 if (bit_map & mask)
856 new_bit_map = new_bit_map + (1 << i); 857 new_bit_map = new_bit_map + (1 << i);
@@ -936,22 +937,22 @@ static void gfar_detect_errata(struct gfar_private *priv)
936 937
937 /* MPC8313 Rev 2.0 and higher; All MPC837x */ 938 /* MPC8313 Rev 2.0 and higher; All MPC837x */
938 if ((pvr == 0x80850010 && mod == 0x80b0 && rev >= 0x0020) || 939 if ((pvr == 0x80850010 && mod == 0x80b0 && rev >= 0x0020) ||
939 (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0)) 940 (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
940 priv->errata |= GFAR_ERRATA_74; 941 priv->errata |= GFAR_ERRATA_74;
941 942
942 /* MPC8313 and MPC837x all rev */ 943 /* MPC8313 and MPC837x all rev */
943 if ((pvr == 0x80850010 && mod == 0x80b0) || 944 if ((pvr == 0x80850010 && mod == 0x80b0) ||
944 (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0)) 945 (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
945 priv->errata |= GFAR_ERRATA_76; 946 priv->errata |= GFAR_ERRATA_76;
946 947
947 /* MPC8313 and MPC837x all rev */ 948 /* MPC8313 and MPC837x all rev */
948 if ((pvr == 0x80850010 && mod == 0x80b0) || 949 if ((pvr == 0x80850010 && mod == 0x80b0) ||
949 (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0)) 950 (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
950 priv->errata |= GFAR_ERRATA_A002; 951 priv->errata |= GFAR_ERRATA_A002;
951 952
952 /* MPC8313 Rev < 2.0, MPC8548 rev 2.0 */ 953 /* MPC8313 Rev < 2.0, MPC8548 rev 2.0 */
953 if ((pvr == 0x80850010 && mod == 0x80b0 && rev < 0x0020) || 954 if ((pvr == 0x80850010 && mod == 0x80b0 && rev < 0x0020) ||
954 (pvr == 0x80210020 && mod == 0x8030 && rev == 0x0020)) 955 (pvr == 0x80210020 && mod == 0x8030 && rev == 0x0020))
955 priv->errata |= GFAR_ERRATA_12; 956 priv->errata |= GFAR_ERRATA_12;
956 957
957 if (priv->errata) 958 if (priv->errata)
@@ -960,7 +961,8 @@ static void gfar_detect_errata(struct gfar_private *priv)
960} 961}
961 962
962/* Set up the ethernet device structure, private data, 963/* Set up the ethernet device structure, private data,
963 * and anything else we need before we start */ 964 * and anything else we need before we start
965 */
964static int gfar_probe(struct platform_device *ofdev) 966static int gfar_probe(struct platform_device *ofdev)
965{ 967{
966 u32 tempval; 968 u32 tempval;
@@ -991,8 +993,9 @@ static int gfar_probe(struct platform_device *ofdev)
991 993
992 gfar_detect_errata(priv); 994 gfar_detect_errata(priv);
993 995
994 /* Stop the DMA engine now, in case it was running before */ 996 /* Stop the DMA engine now, in case it was running before
995 /* (The firmware could have used it, and left it running). */ 997 * (The firmware could have used it, and left it running).
998 */
996 gfar_halt(dev); 999 gfar_halt(dev);
997 1000
998 /* Reset MAC layer */ 1001 /* Reset MAC layer */
@@ -1026,13 +1029,14 @@ static int gfar_probe(struct platform_device *ofdev)
1026 1029
1027 /* Register for napi ...We are registering NAPI for each grp */ 1030 /* Register for napi ...We are registering NAPI for each grp */
1028 for (i = 0; i < priv->num_grps; i++) 1031 for (i = 0; i < priv->num_grps; i++)
1029 netif_napi_add(dev, &priv->gfargrp[i].napi, gfar_poll, GFAR_DEV_WEIGHT); 1032 netif_napi_add(dev, &priv->gfargrp[i].napi, gfar_poll,
1033 GFAR_DEV_WEIGHT);
1030 1034
1031 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) { 1035 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
1032 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | 1036 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
1033 NETIF_F_RXCSUM; 1037 NETIF_F_RXCSUM;
1034 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | 1038 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG |
1035 NETIF_F_RXCSUM | NETIF_F_HIGHDMA; 1039 NETIF_F_RXCSUM | NETIF_F_HIGHDMA;
1036 } 1040 }
1037 1041
1038 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) { 1042 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
@@ -1081,7 +1085,7 @@ static int gfar_probe(struct platform_device *ofdev)
1081 priv->padding = 0; 1085 priv->padding = 0;
1082 1086
1083 if (dev->features & NETIF_F_IP_CSUM || 1087 if (dev->features & NETIF_F_IP_CSUM ||
1084 priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) 1088 priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
1085 dev->needed_headroom = GMAC_FCB_LEN; 1089 dev->needed_headroom = GMAC_FCB_LEN;
1086 1090
1087 /* Program the isrg regs only if number of grps > 1 */ 1091 /* Program the isrg regs only if number of grps > 1 */
@@ -1098,28 +1102,32 @@ static int gfar_probe(struct platform_device *ofdev)
1098 1102
1099 /* Need to reverse the bit maps as bit_map's MSB is q0 1103 /* Need to reverse the bit maps as bit_map's MSB is q0
1100 * but, for_each_set_bit parses from right to left, which 1104 * but, for_each_set_bit parses from right to left, which
1101 * basically reverses the queue numbers */ 1105 * basically reverses the queue numbers
1106 */
1102 for (i = 0; i< priv->num_grps; i++) { 1107 for (i = 0; i< priv->num_grps; i++) {
1103 priv->gfargrp[i].tx_bit_map = reverse_bitmap( 1108 priv->gfargrp[i].tx_bit_map =
1104 priv->gfargrp[i].tx_bit_map, MAX_TX_QS); 1109 reverse_bitmap(priv->gfargrp[i].tx_bit_map, MAX_TX_QS);
1105 priv->gfargrp[i].rx_bit_map = reverse_bitmap( 1110 priv->gfargrp[i].rx_bit_map =
1106 priv->gfargrp[i].rx_bit_map, MAX_RX_QS); 1111 reverse_bitmap(priv->gfargrp[i].rx_bit_map, MAX_RX_QS);
1107 } 1112 }
1108 1113
1109 /* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values, 1114 /* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values,
1110 * also assign queues to groups */ 1115 * also assign queues to groups
1116 */
1111 for (grp_idx = 0; grp_idx < priv->num_grps; grp_idx++) { 1117 for (grp_idx = 0; grp_idx < priv->num_grps; grp_idx++) {
1112 priv->gfargrp[grp_idx].num_rx_queues = 0x0; 1118 priv->gfargrp[grp_idx].num_rx_queues = 0x0;
1119
1113 for_each_set_bit(i, &priv->gfargrp[grp_idx].rx_bit_map, 1120 for_each_set_bit(i, &priv->gfargrp[grp_idx].rx_bit_map,
1114 priv->num_rx_queues) { 1121 priv->num_rx_queues) {
1115 priv->gfargrp[grp_idx].num_rx_queues++; 1122 priv->gfargrp[grp_idx].num_rx_queues++;
1116 priv->rx_queue[i]->grp = &priv->gfargrp[grp_idx]; 1123 priv->rx_queue[i]->grp = &priv->gfargrp[grp_idx];
1117 rstat = rstat | (RSTAT_CLEAR_RHALT >> i); 1124 rstat = rstat | (RSTAT_CLEAR_RHALT >> i);
1118 rqueue = rqueue | ((RQUEUE_EN0 | RQUEUE_EX0) >> i); 1125 rqueue = rqueue | ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
1119 } 1126 }
1120 priv->gfargrp[grp_idx].num_tx_queues = 0x0; 1127 priv->gfargrp[grp_idx].num_tx_queues = 0x0;
1128
1121 for_each_set_bit(i, &priv->gfargrp[grp_idx].tx_bit_map, 1129 for_each_set_bit(i, &priv->gfargrp[grp_idx].tx_bit_map,
1122 priv->num_tx_queues) { 1130 priv->num_tx_queues) {
1123 priv->gfargrp[grp_idx].num_tx_queues++; 1131 priv->gfargrp[grp_idx].num_tx_queues++;
1124 priv->tx_queue[i]->grp = &priv->gfargrp[grp_idx]; 1132 priv->tx_queue[i]->grp = &priv->gfargrp[grp_idx];
1125 tstat = tstat | (TSTAT_CLEAR_THALT >> i); 1133 tstat = tstat | (TSTAT_CLEAR_THALT >> i);
@@ -1149,7 +1157,7 @@ static int gfar_probe(struct platform_device *ofdev)
1149 priv->rx_queue[i]->rxic = DEFAULT_RXIC; 1157 priv->rx_queue[i]->rxic = DEFAULT_RXIC;
1150 } 1158 }
1151 1159
1152 /* always enable rx filer*/ 1160 /* always enable rx filer */
1153 priv->rx_filer_enable = 1; 1161 priv->rx_filer_enable = 1;
1154 /* Enable most messages by default */ 1162 /* Enable most messages by default */
1155 priv->msg_enable = (NETIF_MSG_IFUP << 1 ) - 1; 1163 priv->msg_enable = (NETIF_MSG_IFUP << 1 ) - 1;
@@ -1165,7 +1173,8 @@ static int gfar_probe(struct platform_device *ofdev)
1165 } 1173 }
1166 1174
1167 device_init_wakeup(&dev->dev, 1175 device_init_wakeup(&dev->dev,
1168 priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET); 1176 priv->device_flags &
1177 FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
1169 1178
1170 /* fill out IRQ number and name fields */ 1179 /* fill out IRQ number and name fields */
1171 for (i = 0; i < priv->num_grps; i++) { 1180 for (i = 0; i < priv->num_grps; i++) {
@@ -1189,13 +1198,14 @@ static int gfar_probe(struct platform_device *ofdev)
1189 /* Print out the device info */ 1198 /* Print out the device info */
1190 netdev_info(dev, "mac: %pM\n", dev->dev_addr); 1199 netdev_info(dev, "mac: %pM\n", dev->dev_addr);
1191 1200
1192 /* Even more device info helps when determining which kernel */ 1201 /* Even more device info helps when determining which kernel
1193 /* provided which set of benchmarks. */ 1202 * provided which set of benchmarks.
1203 */
1194 netdev_info(dev, "Running with NAPI enabled\n"); 1204 netdev_info(dev, "Running with NAPI enabled\n");
1195 for (i = 0; i < priv->num_rx_queues; i++) 1205 for (i = 0; i < priv->num_rx_queues; i++)
1196 netdev_info(dev, "RX BD ring size for Q[%d]: %d\n", 1206 netdev_info(dev, "RX BD ring size for Q[%d]: %d\n",
1197 i, priv->rx_queue[i]->rx_ring_size); 1207 i, priv->rx_queue[i]->rx_ring_size);
1198 for(i = 0; i < priv->num_tx_queues; i++) 1208 for (i = 0; i < priv->num_tx_queues; i++)
1199 netdev_info(dev, "TX BD ring size for Q[%d]: %d\n", 1209 netdev_info(dev, "TX BD ring size for Q[%d]: %d\n",
1200 i, priv->tx_queue[i]->tx_ring_size); 1210 i, priv->tx_queue[i]->tx_ring_size);
1201 1211
@@ -1242,7 +1252,8 @@ static int gfar_suspend(struct device *dev)
1242 u32 tempval; 1252 u32 tempval;
1243 1253
1244 int magic_packet = priv->wol_en && 1254 int magic_packet = priv->wol_en &&
1245 (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET); 1255 (priv->device_flags &
1256 FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
1246 1257
1247 netif_device_detach(ndev); 1258 netif_device_detach(ndev);
1248 1259
@@ -1294,7 +1305,8 @@ static int gfar_resume(struct device *dev)
1294 unsigned long flags; 1305 unsigned long flags;
1295 u32 tempval; 1306 u32 tempval;
1296 int magic_packet = priv->wol_en && 1307 int magic_packet = priv->wol_en &&
1297 (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET); 1308 (priv->device_flags &
1309 FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
1298 1310
1299 if (!netif_running(ndev)) { 1311 if (!netif_running(ndev)) {
1300 netif_device_attach(ndev); 1312 netif_device_attach(ndev);
@@ -1393,13 +1405,13 @@ static phy_interface_t gfar_get_interface(struct net_device *dev)
1393 } 1405 }
1394 1406
1395 if (ecntrl & ECNTRL_REDUCED_MODE) { 1407 if (ecntrl & ECNTRL_REDUCED_MODE) {
1396 if (ecntrl & ECNTRL_REDUCED_MII_MODE) 1408 if (ecntrl & ECNTRL_REDUCED_MII_MODE) {
1397 return PHY_INTERFACE_MODE_RMII; 1409 return PHY_INTERFACE_MODE_RMII;
1410 }
1398 else { 1411 else {
1399 phy_interface_t interface = priv->interface; 1412 phy_interface_t interface = priv->interface;
1400 1413
1401 /* 1414 /* This isn't autodetected right now, so it must
1402 * This isn't autodetected right now, so it must
1403 * be set by the device tree or platform code. 1415 * be set by the device tree or platform code.
1404 */ 1416 */
1405 if (interface == PHY_INTERFACE_MODE_RGMII_ID) 1417 if (interface == PHY_INTERFACE_MODE_RGMII_ID)
@@ -1453,8 +1465,7 @@ static int init_phy(struct net_device *dev)
1453 return 0; 1465 return 0;
1454} 1466}
1455 1467
1456/* 1468/* Initialize TBI PHY interface for communicating with the
1457 * Initialize TBI PHY interface for communicating with the
1458 * SERDES lynx PHY on the chip. We communicate with this PHY 1469 * SERDES lynx PHY on the chip. We communicate with this PHY
1459 * through the MDIO bus on each controller, treating it as a 1470 * through the MDIO bus on each controller, treating it as a
1460 * "normal" PHY at the address found in the TBIPA register. We assume 1471 * "normal" PHY at the address found in the TBIPA register. We assume
@@ -1479,8 +1490,7 @@ static void gfar_configure_serdes(struct net_device *dev)
1479 return; 1490 return;
1480 } 1491 }
1481 1492
1482 /* 1493 /* If the link is already up, we must already be ok, and don't need to
1483 * If the link is already up, we must already be ok, and don't need to
1484 * configure and reset the TBI<->SerDes link. Maybe U-Boot configured 1494 * configure and reset the TBI<->SerDes link. Maybe U-Boot configured
1485 * everything for us? Resetting it takes the link down and requires 1495 * everything for us? Resetting it takes the link down and requires
1486 * several seconds for it to come back. 1496 * several seconds for it to come back.
@@ -1492,18 +1502,19 @@ static void gfar_configure_serdes(struct net_device *dev)
1492 phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT); 1502 phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT);
1493 1503
1494 phy_write(tbiphy, MII_ADVERTISE, 1504 phy_write(tbiphy, MII_ADVERTISE,
1495 ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE | 1505 ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
1496 ADVERTISE_1000XPSE_ASYM); 1506 ADVERTISE_1000XPSE_ASYM);
1497 1507
1498 phy_write(tbiphy, MII_BMCR, BMCR_ANENABLE | 1508 phy_write(tbiphy, MII_BMCR,
1499 BMCR_ANRESTART | BMCR_FULLDPLX | BMCR_SPEED1000); 1509 BMCR_ANENABLE | BMCR_ANRESTART | BMCR_FULLDPLX |
1510 BMCR_SPEED1000);
1500} 1511}
1501 1512
1502static void init_registers(struct net_device *dev) 1513static void init_registers(struct net_device *dev)
1503{ 1514{
1504 struct gfar_private *priv = netdev_priv(dev); 1515 struct gfar_private *priv = netdev_priv(dev);
1505 struct gfar __iomem *regs = NULL; 1516 struct gfar __iomem *regs = NULL;
1506 int i = 0; 1517 int i;
1507 1518
1508 for (i = 0; i < priv->num_grps; i++) { 1519 for (i = 0; i < priv->num_grps; i++) {
1509 regs = priv->gfargrp[i].regs; 1520 regs = priv->gfargrp[i].regs;
@@ -1554,15 +1565,13 @@ static int __gfar_is_rx_idle(struct gfar_private *priv)
1554{ 1565{
1555 u32 res; 1566 u32 res;
1556 1567
1557 /* 1568 /* Normaly TSEC should not hang on GRS commands, so we should
1558 * Normaly TSEC should not hang on GRS commands, so we should
1559 * actually wait for IEVENT_GRSC flag. 1569 * actually wait for IEVENT_GRSC flag.
1560 */ 1570 */
1561 if (likely(!gfar_has_errata(priv, GFAR_ERRATA_A002))) 1571 if (likely(!gfar_has_errata(priv, GFAR_ERRATA_A002)))
1562 return 0; 1572 return 0;
1563 1573
1564 /* 1574 /* Read the eTSEC register at offset 0xD1C. If bits 7-14 are
1565 * Read the eTSEC register at offset 0xD1C. If bits 7-14 are
1566 * the same as bits 23-30, the eTSEC Rx is assumed to be idle 1575 * the same as bits 23-30, the eTSEC Rx is assumed to be idle
1567 * and the Rx can be safely reset. 1576 * and the Rx can be safely reset.
1568 */ 1577 */
@@ -1580,7 +1589,7 @@ static void gfar_halt_nodisable(struct net_device *dev)
1580 struct gfar_private *priv = netdev_priv(dev); 1589 struct gfar_private *priv = netdev_priv(dev);
1581 struct gfar __iomem *regs = NULL; 1590 struct gfar __iomem *regs = NULL;
1582 u32 tempval; 1591 u32 tempval;
1583 int i = 0; 1592 int i;
1584 1593
1585 for (i = 0; i < priv->num_grps; i++) { 1594 for (i = 0; i < priv->num_grps; i++) {
1586 regs = priv->gfargrp[i].regs; 1595 regs = priv->gfargrp[i].regs;
@@ -1594,8 +1603,8 @@ static void gfar_halt_nodisable(struct net_device *dev)
1594 regs = priv->gfargrp[0].regs; 1603 regs = priv->gfargrp[0].regs;
1595 /* Stop the DMA, and wait for it to stop */ 1604 /* Stop the DMA, and wait for it to stop */
1596 tempval = gfar_read(&regs->dmactrl); 1605 tempval = gfar_read(&regs->dmactrl);
1597 if ((tempval & (DMACTRL_GRS | DMACTRL_GTS)) 1606 if ((tempval & (DMACTRL_GRS | DMACTRL_GTS)) !=
1598 != (DMACTRL_GRS | DMACTRL_GTS)) { 1607 (DMACTRL_GRS | DMACTRL_GTS)) {
1599 int ret; 1608 int ret;
1600 1609
1601 tempval |= (DMACTRL_GRS | DMACTRL_GTS); 1610 tempval |= (DMACTRL_GRS | DMACTRL_GTS);
@@ -1660,7 +1669,7 @@ void stop_gfar(struct net_device *dev)
1660 } else { 1669 } else {
1661 for (i = 0; i < priv->num_grps; i++) 1670 for (i = 0; i < priv->num_grps; i++)
1662 free_irq(priv->gfargrp[i].interruptTransmit, 1671 free_irq(priv->gfargrp[i].interruptTransmit,
1663 &priv->gfargrp[i]); 1672 &priv->gfargrp[i]);
1664 } 1673 }
1665 1674
1666 free_skb_resources(priv); 1675 free_skb_resources(priv);
@@ -1679,13 +1688,13 @@ static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue)
1679 continue; 1688 continue;
1680 1689
1681 dma_unmap_single(&priv->ofdev->dev, txbdp->bufPtr, 1690 dma_unmap_single(&priv->ofdev->dev, txbdp->bufPtr,
1682 txbdp->length, DMA_TO_DEVICE); 1691 txbdp->length, DMA_TO_DEVICE);
1683 txbdp->lstatus = 0; 1692 txbdp->lstatus = 0;
1684 for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags; 1693 for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags;
1685 j++) { 1694 j++) {
1686 txbdp++; 1695 txbdp++;
1687 dma_unmap_page(&priv->ofdev->dev, txbdp->bufPtr, 1696 dma_unmap_page(&priv->ofdev->dev, txbdp->bufPtr,
1688 txbdp->length, DMA_TO_DEVICE); 1697 txbdp->length, DMA_TO_DEVICE);
1689 } 1698 }
1690 txbdp++; 1699 txbdp++;
1691 dev_kfree_skb_any(tx_queue->tx_skbuff[i]); 1700 dev_kfree_skb_any(tx_queue->tx_skbuff[i]);
@@ -1705,8 +1714,8 @@ static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
1705 for (i = 0; i < rx_queue->rx_ring_size; i++) { 1714 for (i = 0; i < rx_queue->rx_ring_size; i++) {
1706 if (rx_queue->rx_skbuff[i]) { 1715 if (rx_queue->rx_skbuff[i]) {
1707 dma_unmap_single(&priv->ofdev->dev, 1716 dma_unmap_single(&priv->ofdev->dev,
1708 rxbdp->bufPtr, priv->rx_buffer_size, 1717 rxbdp->bufPtr, priv->rx_buffer_size,
1709 DMA_FROM_DEVICE); 1718 DMA_FROM_DEVICE);
1710 dev_kfree_skb_any(rx_queue->rx_skbuff[i]); 1719 dev_kfree_skb_any(rx_queue->rx_skbuff[i]);
1711 rx_queue->rx_skbuff[i] = NULL; 1720 rx_queue->rx_skbuff[i] = NULL;
1712 } 1721 }
@@ -1718,7 +1727,8 @@ static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
1718} 1727}
1719 1728
1720/* If there are any tx skbs or rx skbs still around, free them. 1729/* If there are any tx skbs or rx skbs still around, free them.
1721 * Then free tx_skbuff and rx_skbuff */ 1730 * Then free tx_skbuff and rx_skbuff
1731 */
1722static void free_skb_resources(struct gfar_private *priv) 1732static void free_skb_resources(struct gfar_private *priv)
1723{ 1733{
1724 struct gfar_priv_tx_q *tx_queue = NULL; 1734 struct gfar_priv_tx_q *tx_queue = NULL;
@@ -1728,24 +1738,25 @@ static void free_skb_resources(struct gfar_private *priv)
1728 /* Go through all the buffer descriptors and free their data buffers */ 1738 /* Go through all the buffer descriptors and free their data buffers */
1729 for (i = 0; i < priv->num_tx_queues; i++) { 1739 for (i = 0; i < priv->num_tx_queues; i++) {
1730 struct netdev_queue *txq; 1740 struct netdev_queue *txq;
1741
1731 tx_queue = priv->tx_queue[i]; 1742 tx_queue = priv->tx_queue[i];
1732 txq = netdev_get_tx_queue(tx_queue->dev, tx_queue->qindex); 1743 txq = netdev_get_tx_queue(tx_queue->dev, tx_queue->qindex);
1733 if(tx_queue->tx_skbuff) 1744 if (tx_queue->tx_skbuff)
1734 free_skb_tx_queue(tx_queue); 1745 free_skb_tx_queue(tx_queue);
1735 netdev_tx_reset_queue(txq); 1746 netdev_tx_reset_queue(txq);
1736 } 1747 }
1737 1748
1738 for (i = 0; i < priv->num_rx_queues; i++) { 1749 for (i = 0; i < priv->num_rx_queues; i++) {
1739 rx_queue = priv->rx_queue[i]; 1750 rx_queue = priv->rx_queue[i];
1740 if(rx_queue->rx_skbuff) 1751 if (rx_queue->rx_skbuff)
1741 free_skb_rx_queue(rx_queue); 1752 free_skb_rx_queue(rx_queue);
1742 } 1753 }
1743 1754
1744 dma_free_coherent(&priv->ofdev->dev, 1755 dma_free_coherent(&priv->ofdev->dev,
1745 sizeof(struct txbd8) * priv->total_tx_ring_size + 1756 sizeof(struct txbd8) * priv->total_tx_ring_size +
1746 sizeof(struct rxbd8) * priv->total_rx_ring_size, 1757 sizeof(struct rxbd8) * priv->total_rx_ring_size,
1747 priv->tx_queue[0]->tx_bd_base, 1758 priv->tx_queue[0]->tx_bd_base,
1748 priv->tx_queue[0]->tx_bd_dma_base); 1759 priv->tx_queue[0]->tx_bd_dma_base);
1749 skb_queue_purge(&priv->rx_recycle); 1760 skb_queue_purge(&priv->rx_recycle);
1750} 1761}
1751 1762
@@ -1784,7 +1795,7 @@ void gfar_start(struct net_device *dev)
1784} 1795}
1785 1796
1786void gfar_configure_coalescing(struct gfar_private *priv, 1797void gfar_configure_coalescing(struct gfar_private *priv,
1787 unsigned long tx_mask, unsigned long rx_mask) 1798 unsigned long tx_mask, unsigned long rx_mask)
1788{ 1799{
1789 struct gfar __iomem *regs = priv->gfargrp[0].regs; 1800 struct gfar __iomem *regs = priv->gfargrp[0].regs;
1790 u32 __iomem *baddr; 1801 u32 __iomem *baddr;
@@ -1794,11 +1805,11 @@ void gfar_configure_coalescing(struct gfar_private *priv,
1794 * multiple queues, there's only single reg to program 1805 * multiple queues, there's only single reg to program
1795 */ 1806 */
1796 gfar_write(&regs->txic, 0); 1807 gfar_write(&regs->txic, 0);
1797 if(likely(priv->tx_queue[0]->txcoalescing)) 1808 if (likely(priv->tx_queue[0]->txcoalescing))
1798 gfar_write(&regs->txic, priv->tx_queue[0]->txic); 1809 gfar_write(&regs->txic, priv->tx_queue[0]->txic);
1799 1810
1800 gfar_write(&regs->rxic, 0); 1811 gfar_write(&regs->rxic, 0);
1801 if(unlikely(priv->rx_queue[0]->rxcoalescing)) 1812 if (unlikely(priv->rx_queue[0]->rxcoalescing))
1802 gfar_write(&regs->rxic, priv->rx_queue[0]->rxic); 1813 gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);
1803 1814
1804 if (priv->mode == MQ_MG_MODE) { 1815 if (priv->mode == MQ_MG_MODE) {
@@ -1825,12 +1836,14 @@ static int register_grp_irqs(struct gfar_priv_grp *grp)
1825 int err; 1836 int err;
1826 1837
1827 /* If the device has multiple interrupts, register for 1838 /* If the device has multiple interrupts, register for
1828 * them. Otherwise, only register for the one */ 1839 * them. Otherwise, only register for the one
1840 */
1829 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { 1841 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
1830 /* Install our interrupt handlers for Error, 1842 /* Install our interrupt handlers for Error,
1831 * Transmit, and Receive */ 1843 * Transmit, and Receive
1832 if ((err = request_irq(grp->interruptError, gfar_error, 0, 1844 */
1833 grp->int_name_er,grp)) < 0) { 1845 if ((err = request_irq(grp->interruptError, gfar_error,
1846 0, grp->int_name_er, grp)) < 0) {
1834 netif_err(priv, intr, dev, "Can't get IRQ %d\n", 1847 netif_err(priv, intr, dev, "Can't get IRQ %d\n",
1835 grp->interruptError); 1848 grp->interruptError);
1836 1849
@@ -1838,21 +1851,21 @@ static int register_grp_irqs(struct gfar_priv_grp *grp)
1838 } 1851 }
1839 1852
1840 if ((err = request_irq(grp->interruptTransmit, gfar_transmit, 1853 if ((err = request_irq(grp->interruptTransmit, gfar_transmit,
1841 0, grp->int_name_tx, grp)) < 0) { 1854 0, grp->int_name_tx, grp)) < 0) {
1842 netif_err(priv, intr, dev, "Can't get IRQ %d\n", 1855 netif_err(priv, intr, dev, "Can't get IRQ %d\n",
1843 grp->interruptTransmit); 1856 grp->interruptTransmit);
1844 goto tx_irq_fail; 1857 goto tx_irq_fail;
1845 } 1858 }
1846 1859
1847 if ((err = request_irq(grp->interruptReceive, gfar_receive, 0, 1860 if ((err = request_irq(grp->interruptReceive, gfar_receive,
1848 grp->int_name_rx, grp)) < 0) { 1861 0, grp->int_name_rx, grp)) < 0) {
1849 netif_err(priv, intr, dev, "Can't get IRQ %d\n", 1862 netif_err(priv, intr, dev, "Can't get IRQ %d\n",
1850 grp->interruptReceive); 1863 grp->interruptReceive);
1851 goto rx_irq_fail; 1864 goto rx_irq_fail;
1852 } 1865 }
1853 } else { 1866 } else {
1854 if ((err = request_irq(grp->interruptTransmit, gfar_interrupt, 0, 1867 if ((err = request_irq(grp->interruptTransmit, gfar_interrupt,
1855 grp->int_name_tx, grp)) < 0) { 1868 0, grp->int_name_tx, grp)) < 0) {
1856 netif_err(priv, intr, dev, "Can't get IRQ %d\n", 1869 netif_err(priv, intr, dev, "Can't get IRQ %d\n",
1857 grp->interruptTransmit); 1870 grp->interruptTransmit);
1858 goto err_irq_fail; 1871 goto err_irq_fail;
@@ -1912,8 +1925,9 @@ irq_fail:
1912 return err; 1925 return err;
1913} 1926}
1914 1927
1915/* Called when something needs to use the ethernet device */ 1928/* Called when something needs to use the ethernet device
1916/* Returns 0 for success. */ 1929 * Returns 0 for success.
1930 */
1917static int gfar_enet_open(struct net_device *dev) 1931static int gfar_enet_open(struct net_device *dev)
1918{ 1932{
1919 struct gfar_private *priv = netdev_priv(dev); 1933 struct gfar_private *priv = netdev_priv(dev);
@@ -1958,18 +1972,17 @@ static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb)
1958} 1972}
1959 1973
1960static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb, 1974static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb,
1961 int fcb_length) 1975 int fcb_length)
1962{ 1976{
1963 u8 flags = 0;
1964
1965 /* If we're here, it's a IP packet with a TCP or UDP 1977 /* If we're here, it's a IP packet with a TCP or UDP
1966 * payload. We set it to checksum, using a pseudo-header 1978 * payload. We set it to checksum, using a pseudo-header
1967 * we provide 1979 * we provide
1968 */ 1980 */
1969 flags = TXFCB_DEFAULT; 1981 u8 flags = TXFCB_DEFAULT;
1970 1982
1971 /* Tell the controller what the protocol is */ 1983 /* Tell the controller what the protocol is
1972 /* And provide the already calculated phcs */ 1984 * And provide the already calculated phcs
1985 */
1973 if (ip_hdr(skb)->protocol == IPPROTO_UDP) { 1986 if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
1974 flags |= TXFCB_UDP; 1987 flags |= TXFCB_UDP;
1975 fcb->phcs = udp_hdr(skb)->check; 1988 fcb->phcs = udp_hdr(skb)->check;
@@ -1979,7 +1992,8 @@ static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb,
1979 /* l3os is the distance between the start of the 1992 /* l3os is the distance between the start of the
1980 * frame (skb->data) and the start of the IP hdr. 1993 * frame (skb->data) and the start of the IP hdr.
1981 * l4os is the distance between the start of the 1994 * l4os is the distance between the start of the
1982 * l3 hdr and the l4 hdr */ 1995 * l3 hdr and the l4 hdr
1996 */
1983 fcb->l3os = (u16)(skb_network_offset(skb) - fcb_length); 1997 fcb->l3os = (u16)(skb_network_offset(skb) - fcb_length);
1984 fcb->l4os = skb_network_header_len(skb); 1998 fcb->l4os = skb_network_header_len(skb);
1985 1999
@@ -1993,7 +2007,7 @@ void inline gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
1993} 2007}
1994 2008
1995static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride, 2009static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride,
1996 struct txbd8 *base, int ring_size) 2010 struct txbd8 *base, int ring_size)
1997{ 2011{
1998 struct txbd8 *new_bd = bdp + stride; 2012 struct txbd8 *new_bd = bdp + stride;
1999 2013
@@ -2001,13 +2015,14 @@ static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride,
2001} 2015}
2002 2016
2003static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base, 2017static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base,
2004 int ring_size) 2018 int ring_size)
2005{ 2019{
2006 return skip_txbd(bdp, 1, base, ring_size); 2020 return skip_txbd(bdp, 1, base, ring_size);
2007} 2021}
2008 2022
2009/* This is called by the kernel when a frame is ready for transmission. */ 2023/* This is called by the kernel when a frame is ready for transmission.
2010/* It is pointed to by the dev->hard_start_xmit function pointer */ 2024 * It is pointed to by the dev->hard_start_xmit function pointer
2025 */
2011static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) 2026static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
2012{ 2027{
2013 struct gfar_private *priv = netdev_priv(dev); 2028 struct gfar_private *priv = netdev_priv(dev);
@@ -2022,13 +2037,12 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
2022 unsigned long flags; 2037 unsigned long flags;
2023 unsigned int nr_frags, nr_txbds, length, fcb_length = GMAC_FCB_LEN; 2038 unsigned int nr_frags, nr_txbds, length, fcb_length = GMAC_FCB_LEN;
2024 2039
2025 /* 2040 /* TOE=1 frames larger than 2500 bytes may see excess delays
2026 * TOE=1 frames larger than 2500 bytes may see excess delays
2027 * before start of transmission. 2041 * before start of transmission.
2028 */ 2042 */
2029 if (unlikely(gfar_has_errata(priv, GFAR_ERRATA_76) && 2043 if (unlikely(gfar_has_errata(priv, GFAR_ERRATA_76) &&
2030 skb->ip_summed == CHECKSUM_PARTIAL && 2044 skb->ip_summed == CHECKSUM_PARTIAL &&
2031 skb->len > 2500)) { 2045 skb->len > 2500)) {
2032 int ret; 2046 int ret;
2033 2047
2034 ret = skb_checksum_help(skb); 2048 ret = skb_checksum_help(skb);
@@ -2044,16 +2058,16 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
2044 2058
2045 /* check if time stamp should be generated */ 2059 /* check if time stamp should be generated */
2046 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP && 2060 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
2047 priv->hwts_tx_en)) { 2061 priv->hwts_tx_en)) {
2048 do_tstamp = 1; 2062 do_tstamp = 1;
2049 fcb_length = GMAC_FCB_LEN + GMAC_TXPAL_LEN; 2063 fcb_length = GMAC_FCB_LEN + GMAC_TXPAL_LEN;
2050 } 2064 }
2051 2065
2052 /* make space for additional header when fcb is needed */ 2066 /* make space for additional header when fcb is needed */
2053 if (((skb->ip_summed == CHECKSUM_PARTIAL) || 2067 if (((skb->ip_summed == CHECKSUM_PARTIAL) ||
2054 vlan_tx_tag_present(skb) || 2068 vlan_tx_tag_present(skb) ||
2055 unlikely(do_tstamp)) && 2069 unlikely(do_tstamp)) &&
2056 (skb_headroom(skb) < fcb_length)) { 2070 (skb_headroom(skb) < fcb_length)) {
2057 struct sk_buff *skb_new; 2071 struct sk_buff *skb_new;
2058 2072
2059 skb_new = skb_realloc_headroom(skb, fcb_length); 2073 skb_new = skb_realloc_headroom(skb, fcb_length);
@@ -2096,12 +2110,12 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
2096 /* Time stamp insertion requires one additional TxBD */ 2110 /* Time stamp insertion requires one additional TxBD */
2097 if (unlikely(do_tstamp)) 2111 if (unlikely(do_tstamp))
2098 txbdp_tstamp = txbdp = next_txbd(txbdp, base, 2112 txbdp_tstamp = txbdp = next_txbd(txbdp, base,
2099 tx_queue->tx_ring_size); 2113 tx_queue->tx_ring_size);
2100 2114
2101 if (nr_frags == 0) { 2115 if (nr_frags == 0) {
2102 if (unlikely(do_tstamp)) 2116 if (unlikely(do_tstamp))
2103 txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_LAST | 2117 txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_LAST |
2104 TXBD_INTERRUPT); 2118 TXBD_INTERRUPT);
2105 else 2119 else
2106 lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT); 2120 lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
2107 } else { 2121 } else {
@@ -2113,7 +2127,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
2113 length = skb_shinfo(skb)->frags[i].size; 2127 length = skb_shinfo(skb)->frags[i].size;
2114 2128
2115 lstatus = txbdp->lstatus | length | 2129 lstatus = txbdp->lstatus | length |
2116 BD_LFLAG(TXBD_READY); 2130 BD_LFLAG(TXBD_READY);
2117 2131
2118 /* Handle the last BD specially */ 2132 /* Handle the last BD specially */
2119 if (i == nr_frags - 1) 2133 if (i == nr_frags - 1)
@@ -2143,8 +2157,8 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
2143 if (CHECKSUM_PARTIAL == skb->ip_summed) { 2157 if (CHECKSUM_PARTIAL == skb->ip_summed) {
2144 fcb = gfar_add_fcb(skb); 2158 fcb = gfar_add_fcb(skb);
2145 /* as specified by errata */ 2159 /* as specified by errata */
2146 if (unlikely(gfar_has_errata(priv, GFAR_ERRATA_12) 2160 if (unlikely(gfar_has_errata(priv, GFAR_ERRATA_12) &&
2147 && ((unsigned long)fcb % 0x20) > 0x18)) { 2161 ((unsigned long)fcb % 0x20) > 0x18)) {
2148 __skb_pull(skb, GMAC_FCB_LEN); 2162 __skb_pull(skb, GMAC_FCB_LEN);
2149 skb_checksum_help(skb); 2163 skb_checksum_help(skb);
2150 } else { 2164 } else {
@@ -2172,10 +2186,9 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
2172 } 2186 }
2173 2187
2174 txbdp_start->bufPtr = dma_map_single(&priv->ofdev->dev, skb->data, 2188 txbdp_start->bufPtr = dma_map_single(&priv->ofdev->dev, skb->data,
2175 skb_headlen(skb), DMA_TO_DEVICE); 2189 skb_headlen(skb), DMA_TO_DEVICE);
2176 2190
2177 /* 2191 /* If time stamping is requested one additional TxBD must be set up. The
2178 * If time stamping is requested one additional TxBD must be set up. The
2179 * first TxBD points to the FCB and must have a data length of 2192 * first TxBD points to the FCB and must have a data length of
2180 * GMAC_FCB_LEN. The second TxBD points to the actual frame data with 2193 * GMAC_FCB_LEN. The second TxBD points to the actual frame data with
2181 * the full frame length. 2194 * the full frame length.
@@ -2183,7 +2196,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
2183 if (unlikely(do_tstamp)) { 2196 if (unlikely(do_tstamp)) {
2184 txbdp_tstamp->bufPtr = txbdp_start->bufPtr + fcb_length; 2197 txbdp_tstamp->bufPtr = txbdp_start->bufPtr + fcb_length;
2185 txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_READY) | 2198 txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_READY) |
2186 (skb_headlen(skb) - fcb_length); 2199 (skb_headlen(skb) - fcb_length);
2187 lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN; 2200 lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN;
2188 } else { 2201 } else {
2189 lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb); 2202 lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
@@ -2191,8 +2204,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
2191 2204
2192 netdev_tx_sent_queue(txq, skb->len); 2205 netdev_tx_sent_queue(txq, skb->len);
2193 2206
2194 /* 2207 /* We can work in parallel with gfar_clean_tx_ring(), except
2195 * We can work in parallel with gfar_clean_tx_ring(), except
2196 * when modifying num_txbdfree. Note that we didn't grab the lock 2208 * when modifying num_txbdfree. Note that we didn't grab the lock
2197 * when we were reading the num_txbdfree and checking for available 2209 * when we were reading the num_txbdfree and checking for available
2198 * space, that's because outside of this function it can only grow, 2210 * space, that's because outside of this function it can only grow,
@@ -2205,8 +2217,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
2205 */ 2217 */
2206 spin_lock_irqsave(&tx_queue->txlock, flags); 2218 spin_lock_irqsave(&tx_queue->txlock, flags);
2207 2219
2208 /* 2220 /* The powerpc-specific eieio() is used, as wmb() has too strong
2209 * The powerpc-specific eieio() is used, as wmb() has too strong
2210 * semantics (it requires synchronization between cacheable and 2221 * semantics (it requires synchronization between cacheable and
2211 * uncacheable mappings, which eieio doesn't provide and which we 2222 * uncacheable mappings, which eieio doesn't provide and which we
2212 * don't need), thus requiring a more expensive sync instruction. At 2223 * don't need), thus requiring a more expensive sync instruction. At
@@ -2222,9 +2233,10 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
2222 tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb; 2233 tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb;
2223 2234
2224 /* Update the current skb pointer to the next entry we will use 2235 /* Update the current skb pointer to the next entry we will use
2225 * (wrapping if necessary) */ 2236 * (wrapping if necessary)
2237 */
2226 tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) & 2238 tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) &
2227 TX_RING_MOD_MASK(tx_queue->tx_ring_size); 2239 TX_RING_MOD_MASK(tx_queue->tx_ring_size);
2228 2240
2229 tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size); 2241 tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size);
2230 2242
@@ -2232,7 +2244,8 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
2232 tx_queue->num_txbdfree -= (nr_txbds); 2244 tx_queue->num_txbdfree -= (nr_txbds);
2233 2245
2234 /* If the next BD still needs to be cleaned up, then the bds 2246 /* If the next BD still needs to be cleaned up, then the bds
2235 are full. We need to tell the kernel to stop sending us stuff. */ 2247 * are full. We need to tell the kernel to stop sending us stuff.
2248 */
2236 if (!tx_queue->num_txbdfree) { 2249 if (!tx_queue->num_txbdfree) {
2237 netif_tx_stop_queue(txq); 2250 netif_tx_stop_queue(txq);
2238 2251
@@ -2357,12 +2370,12 @@ static int gfar_change_mtu(struct net_device *dev, int new_mtu)
2357 2370
2358 frame_size += priv->padding; 2371 frame_size += priv->padding;
2359 2372
2360 tempsize = 2373 tempsize = (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
2361 (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) + 2374 INCREMENTAL_BUFFER_SIZE;
2362 INCREMENTAL_BUFFER_SIZE;
2363 2375
2364 /* Only stop and start the controller if it isn't already 2376 /* Only stop and start the controller if it isn't already
2365 * stopped, and we changed something */ 2377 * stopped, and we changed something
2378 */
2366 if ((oldsize != tempsize) && (dev->flags & IFF_UP)) 2379 if ((oldsize != tempsize) && (dev->flags & IFF_UP))
2367 stop_gfar(dev); 2380 stop_gfar(dev);
2368 2381
@@ -2375,11 +2388,12 @@ static int gfar_change_mtu(struct net_device *dev, int new_mtu)
2375 2388
2376 /* If the mtu is larger than the max size for standard 2389 /* If the mtu is larger than the max size for standard
2377 * ethernet frames (ie, a jumbo frame), then set maccfg2 2390 * ethernet frames (ie, a jumbo frame), then set maccfg2
2378 * to allow huge frames, and to check the length */ 2391 * to allow huge frames, and to check the length
2392 */
2379 tempval = gfar_read(&regs->maccfg2); 2393 tempval = gfar_read(&regs->maccfg2);
2380 2394
2381 if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE || 2395 if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE ||
2382 gfar_has_errata(priv, GFAR_ERRATA_74)) 2396 gfar_has_errata(priv, GFAR_ERRATA_74))
2383 tempval |= (MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK); 2397 tempval |= (MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
2384 else 2398 else
2385 tempval &= ~(MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK); 2399 tempval &= ~(MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
@@ -2400,7 +2414,7 @@ static int gfar_change_mtu(struct net_device *dev, int new_mtu)
2400static void gfar_reset_task(struct work_struct *work) 2414static void gfar_reset_task(struct work_struct *work)
2401{ 2415{
2402 struct gfar_private *priv = container_of(work, struct gfar_private, 2416 struct gfar_private *priv = container_of(work, struct gfar_private,
2403 reset_task); 2417 reset_task);
2404 struct net_device *dev = priv->ndev; 2418 struct net_device *dev = priv->ndev;
2405 2419
2406 if (dev->flags & IFF_UP) { 2420 if (dev->flags & IFF_UP) {
@@ -2427,7 +2441,7 @@ static void gfar_align_skb(struct sk_buff *skb)
2427 * as many bytes as needed to align the data properly 2441 * as many bytes as needed to align the data properly
2428 */ 2442 */
2429 skb_reserve(skb, RXBUF_ALIGNMENT - 2443 skb_reserve(skb, RXBUF_ALIGNMENT -
2430 (((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1))); 2444 (((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1)));
2431} 2445}
2432 2446
2433/* Interrupt Handler for Transmit complete */ 2447/* Interrupt Handler for Transmit complete */
@@ -2461,8 +2475,7 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
2461 2475
2462 frags = skb_shinfo(skb)->nr_frags; 2476 frags = skb_shinfo(skb)->nr_frags;
2463 2477
2464 /* 2478 /* When time stamping, one additional TxBD must be freed.
2465 * When time stamping, one additional TxBD must be freed.
2466 * Also, we need to dma_unmap_single() the TxPAL. 2479 * Also, we need to dma_unmap_single() the TxPAL.
2467 */ 2480 */
2468 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) 2481 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
@@ -2476,7 +2489,7 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
2476 2489
2477 /* Only clean completed frames */ 2490 /* Only clean completed frames */
2478 if ((lstatus & BD_LFLAG(TXBD_READY)) && 2491 if ((lstatus & BD_LFLAG(TXBD_READY)) &&
2479 (lstatus & BD_LENGTH_MASK)) 2492 (lstatus & BD_LENGTH_MASK))
2480 break; 2493 break;
2481 2494
2482 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) { 2495 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
@@ -2486,11 +2499,12 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
2486 buflen = bdp->length; 2499 buflen = bdp->length;
2487 2500
2488 dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr, 2501 dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr,
2489 buflen, DMA_TO_DEVICE); 2502 buflen, DMA_TO_DEVICE);
2490 2503
2491 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) { 2504 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
2492 struct skb_shared_hwtstamps shhwtstamps; 2505 struct skb_shared_hwtstamps shhwtstamps;
2493 u64 *ns = (u64*) (((u32)skb->data + 0x10) & ~0x7); 2506 u64 *ns = (u64*) (((u32)skb->data + 0x10) & ~0x7);
2507
2494 memset(&shhwtstamps, 0, sizeof(shhwtstamps)); 2508 memset(&shhwtstamps, 0, sizeof(shhwtstamps));
2495 shhwtstamps.hwtstamp = ns_to_ktime(*ns); 2509 shhwtstamps.hwtstamp = ns_to_ktime(*ns);
2496 skb_pull(skb, GMAC_FCB_LEN + GMAC_TXPAL_LEN); 2510 skb_pull(skb, GMAC_FCB_LEN + GMAC_TXPAL_LEN);
@@ -2503,23 +2517,20 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
2503 bdp = next_txbd(bdp, base, tx_ring_size); 2517 bdp = next_txbd(bdp, base, tx_ring_size);
2504 2518
2505 for (i = 0; i < frags; i++) { 2519 for (i = 0; i < frags; i++) {
2506 dma_unmap_page(&priv->ofdev->dev, 2520 dma_unmap_page(&priv->ofdev->dev, bdp->bufPtr,
2507 bdp->bufPtr, 2521 bdp->length, DMA_TO_DEVICE);
2508 bdp->length,
2509 DMA_TO_DEVICE);
2510 bdp->lstatus &= BD_LFLAG(TXBD_WRAP); 2522 bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
2511 bdp = next_txbd(bdp, base, tx_ring_size); 2523 bdp = next_txbd(bdp, base, tx_ring_size);
2512 } 2524 }
2513 2525
2514 bytes_sent += skb->len; 2526 bytes_sent += skb->len;
2515 2527
2516 /* 2528 /* If there's room in the queue (limit it to rx_buffer_size)
2517 * If there's room in the queue (limit it to rx_buffer_size)
2518 * we add this skb back into the pool, if it's the right size 2529 * we add this skb back into the pool, if it's the right size
2519 */ 2530 */
2520 if (skb_queue_len(&priv->rx_recycle) < rx_queue->rx_ring_size && 2531 if (skb_queue_len(&priv->rx_recycle) < rx_queue->rx_ring_size &&
2521 skb_recycle_check(skb, priv->rx_buffer_size + 2532 skb_recycle_check(skb, priv->rx_buffer_size +
2522 RXBUF_ALIGNMENT)) { 2533 RXBUF_ALIGNMENT)) {
2523 gfar_align_skb(skb); 2534 gfar_align_skb(skb);
2524 skb_queue_head(&priv->rx_recycle, skb); 2535 skb_queue_head(&priv->rx_recycle, skb);
2525 } else 2536 } else
@@ -2528,7 +2539,7 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
2528 tx_queue->tx_skbuff[skb_dirtytx] = NULL; 2539 tx_queue->tx_skbuff[skb_dirtytx] = NULL;
2529 2540
2530 skb_dirtytx = (skb_dirtytx + 1) & 2541 skb_dirtytx = (skb_dirtytx + 1) &
2531 TX_RING_MOD_MASK(tx_ring_size); 2542 TX_RING_MOD_MASK(tx_ring_size);
2532 2543
2533 howmany++; 2544 howmany++;
2534 spin_lock_irqsave(&tx_queue->txlock, flags); 2545 spin_lock_irqsave(&tx_queue->txlock, flags);
@@ -2558,8 +2569,7 @@ static void gfar_schedule_cleanup(struct gfar_priv_grp *gfargrp)
2558 gfar_write(&gfargrp->regs->imask, IMASK_RTX_DISABLED); 2569 gfar_write(&gfargrp->regs->imask, IMASK_RTX_DISABLED);
2559 __napi_schedule(&gfargrp->napi); 2570 __napi_schedule(&gfargrp->napi);
2560 } else { 2571 } else {
2561 /* 2572 /* Clear IEVENT, so interrupts aren't called again
2562 * Clear IEVENT, so interrupts aren't called again
2563 * because of the packets that have already arrived. 2573 * because of the packets that have already arrived.
2564 */ 2574 */
2565 gfar_write(&gfargrp->regs->ievent, IEVENT_RTX_MASK); 2575 gfar_write(&gfargrp->regs->ievent, IEVENT_RTX_MASK);
@@ -2576,7 +2586,7 @@ static irqreturn_t gfar_transmit(int irq, void *grp_id)
2576} 2586}
2577 2587
2578static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp, 2588static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
2579 struct sk_buff *skb) 2589 struct sk_buff *skb)
2580{ 2590{
2581 struct net_device *dev = rx_queue->dev; 2591 struct net_device *dev = rx_queue->dev;
2582 struct gfar_private *priv = netdev_priv(dev); 2592 struct gfar_private *priv = netdev_priv(dev);
@@ -2587,7 +2597,7 @@ static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
2587 gfar_init_rxbdp(rx_queue, bdp, buf); 2597 gfar_init_rxbdp(rx_queue, bdp, buf);
2588} 2598}
2589 2599
2590static struct sk_buff * gfar_alloc_skb(struct net_device *dev) 2600static struct sk_buff *gfar_alloc_skb(struct net_device *dev)
2591{ 2601{
2592 struct gfar_private *priv = netdev_priv(dev); 2602 struct gfar_private *priv = netdev_priv(dev);
2593 struct sk_buff *skb = NULL; 2603 struct sk_buff *skb = NULL;
@@ -2601,7 +2611,7 @@ static struct sk_buff * gfar_alloc_skb(struct net_device *dev)
2601 return skb; 2611 return skb;
2602} 2612}
2603 2613
2604struct sk_buff * gfar_new_skb(struct net_device *dev) 2614struct sk_buff *gfar_new_skb(struct net_device *dev)
2605{ 2615{
2606 struct gfar_private *priv = netdev_priv(dev); 2616 struct gfar_private *priv = netdev_priv(dev);
2607 struct sk_buff *skb = NULL; 2617 struct sk_buff *skb = NULL;
@@ -2619,8 +2629,7 @@ static inline void count_errors(unsigned short status, struct net_device *dev)
2619 struct net_device_stats *stats = &dev->stats; 2629 struct net_device_stats *stats = &dev->stats;
2620 struct gfar_extra_stats *estats = &priv->extra_stats; 2630 struct gfar_extra_stats *estats = &priv->extra_stats;
2621 2631
2622 /* If the packet was truncated, none of the other errors 2632 /* If the packet was truncated, none of the other errors matter */
2623 * matter */
2624 if (status & RXBD_TRUNCATED) { 2633 if (status & RXBD_TRUNCATED) {
2625 stats->rx_length_errors++; 2634 stats->rx_length_errors++;
2626 2635
@@ -2661,7 +2670,8 @@ static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
2661{ 2670{
2662 /* If valid headers were found, and valid sums 2671 /* If valid headers were found, and valid sums
2663 * were verified, then we tell the kernel that no 2672 * were verified, then we tell the kernel that no
2664 * checksumming is necessary. Otherwise, it is */ 2673 * checksumming is necessary. Otherwise, it is [FIXME]
2674 */
2665 if ((fcb->flags & RXFCB_CSUM_MASK) == (RXFCB_CIP | RXFCB_CTU)) 2675 if ((fcb->flags & RXFCB_CSUM_MASK) == (RXFCB_CIP | RXFCB_CTU))
2666 skb->ip_summed = CHECKSUM_UNNECESSARY; 2676 skb->ip_summed = CHECKSUM_UNNECESSARY;
2667 else 2677 else
@@ -2669,8 +2679,7 @@ static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
2669} 2679}
2670 2680
2671 2681
2672/* gfar_process_frame() -- handle one incoming packet if skb 2682/* gfar_process_frame() -- handle one incoming packet if skb isn't NULL. */
2673 * isn't NULL. */
2674static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb, 2683static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
2675 int amount_pull, struct napi_struct *napi) 2684 int amount_pull, struct napi_struct *napi)
2676{ 2685{
@@ -2682,8 +2691,9 @@ static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
2682 /* fcb is at the beginning if exists */ 2691 /* fcb is at the beginning if exists */
2683 fcb = (struct rxfcb *)skb->data; 2692 fcb = (struct rxfcb *)skb->data;
2684 2693
2685 /* Remove the FCB from the skb */ 2694 /* Remove the FCB from the skb
2686 /* Remove the padded bytes, if there are any */ 2695 * Remove the padded bytes, if there are any
2696 */
2687 if (amount_pull) { 2697 if (amount_pull) {
2688 skb_record_rx_queue(skb, fcb->rq); 2698 skb_record_rx_queue(skb, fcb->rq);
2689 skb_pull(skb, amount_pull); 2699 skb_pull(skb, amount_pull);
@@ -2693,6 +2703,7 @@ static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
2693 if (priv->hwts_rx_en) { 2703 if (priv->hwts_rx_en) {
2694 struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb); 2704 struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
2695 u64 *ns = (u64 *) skb->data; 2705 u64 *ns = (u64 *) skb->data;
2706
2696 memset(shhwtstamps, 0, sizeof(*shhwtstamps)); 2707 memset(shhwtstamps, 0, sizeof(*shhwtstamps));
2697 shhwtstamps->hwtstamp = ns_to_ktime(*ns); 2708 shhwtstamps->hwtstamp = ns_to_ktime(*ns);
2698 } 2709 }
@@ -2706,8 +2717,7 @@ static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
2706 /* Tell the skb what kind of packet this is */ 2717 /* Tell the skb what kind of packet this is */
2707 skb->protocol = eth_type_trans(skb, dev); 2718 skb->protocol = eth_type_trans(skb, dev);
2708 2719
2709 /* 2720 /* There's need to check for NETIF_F_HW_VLAN_RX here.
2710 * There's need to check for NETIF_F_HW_VLAN_RX here.
2711 * Even if vlan rx accel is disabled, on some chips 2721 * Even if vlan rx accel is disabled, on some chips
2712 * RXFCB_VLN is pseudo randomly set. 2722 * RXFCB_VLN is pseudo randomly set.
2713 */ 2723 */
@@ -2725,8 +2735,8 @@ static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
2725} 2735}
2726 2736
2727/* gfar_clean_rx_ring() -- Processes each frame in the rx ring 2737/* gfar_clean_rx_ring() -- Processes each frame in the rx ring
2728 * until the budget/quota has been reached. Returns the number 2738 * until the budget/quota has been reached. Returns the number
2729 * of frames handled 2739 * of frames handled
2730 */ 2740 */
2731int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit) 2741int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
2732{ 2742{
@@ -2746,6 +2756,7 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
2746 2756
2747 while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) { 2757 while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) {
2748 struct sk_buff *newskb; 2758 struct sk_buff *newskb;
2759
2749 rmb(); 2760 rmb();
2750 2761
2751 /* Add another skb for the future */ 2762 /* Add another skb for the future */
@@ -2754,15 +2765,15 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
2754 skb = rx_queue->rx_skbuff[rx_queue->skb_currx]; 2765 skb = rx_queue->rx_skbuff[rx_queue->skb_currx];
2755 2766
2756 dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr, 2767 dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr,
2757 priv->rx_buffer_size, DMA_FROM_DEVICE); 2768 priv->rx_buffer_size, DMA_FROM_DEVICE);
2758 2769
2759 if (unlikely(!(bdp->status & RXBD_ERR) && 2770 if (unlikely(!(bdp->status & RXBD_ERR) &&
2760 bdp->length > priv->rx_buffer_size)) 2771 bdp->length > priv->rx_buffer_size))
2761 bdp->status = RXBD_LARGE; 2772 bdp->status = RXBD_LARGE;
2762 2773
2763 /* We drop the frame if we failed to allocate a new buffer */ 2774 /* We drop the frame if we failed to allocate a new buffer */
2764 if (unlikely(!newskb || !(bdp->status & RXBD_LAST) || 2775 if (unlikely(!newskb || !(bdp->status & RXBD_LAST) ||
2765 bdp->status & RXBD_ERR)) { 2776 bdp->status & RXBD_ERR)) {
2766 count_errors(bdp->status, dev); 2777 count_errors(bdp->status, dev);
2767 2778
2768 if (unlikely(!newskb)) 2779 if (unlikely(!newskb))
@@ -2781,7 +2792,7 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
2781 rx_queue->stats.rx_bytes += pkt_len; 2792 rx_queue->stats.rx_bytes += pkt_len;
2782 skb_record_rx_queue(skb, rx_queue->qindex); 2793 skb_record_rx_queue(skb, rx_queue->qindex);
2783 gfar_process_frame(dev, skb, amount_pull, 2794 gfar_process_frame(dev, skb, amount_pull,
2784 &rx_queue->grp->napi); 2795 &rx_queue->grp->napi);
2785 2796
2786 } else { 2797 } else {
2787 netif_warn(priv, rx_err, dev, "Missing skb!\n"); 2798 netif_warn(priv, rx_err, dev, "Missing skb!\n");
@@ -2800,9 +2811,8 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
2800 bdp = next_bd(bdp, base, rx_queue->rx_ring_size); 2811 bdp = next_bd(bdp, base, rx_queue->rx_ring_size);
2801 2812
2802 /* update to point at the next skb */ 2813 /* update to point at the next skb */
2803 rx_queue->skb_currx = 2814 rx_queue->skb_currx = (rx_queue->skb_currx + 1) &
2804 (rx_queue->skb_currx + 1) & 2815 RX_RING_MOD_MASK(rx_queue->rx_ring_size);
2805 RX_RING_MOD_MASK(rx_queue->rx_ring_size);
2806 } 2816 }
2807 2817
2808 /* Update the current rxbd pointer to be the next one */ 2818 /* Update the current rxbd pointer to be the next one */
@@ -2813,8 +2823,8 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
2813 2823
2814static int gfar_poll(struct napi_struct *napi, int budget) 2824static int gfar_poll(struct napi_struct *napi, int budget)
2815{ 2825{
2816 struct gfar_priv_grp *gfargrp = container_of(napi, 2826 struct gfar_priv_grp *gfargrp =
2817 struct gfar_priv_grp, napi); 2827 container_of(napi, struct gfar_priv_grp, napi);
2818 struct gfar_private *priv = gfargrp->priv; 2828 struct gfar_private *priv = gfargrp->priv;
2819 struct gfar __iomem *regs = gfargrp->regs; 2829 struct gfar __iomem *regs = gfargrp->regs;
2820 struct gfar_priv_tx_q *tx_queue = NULL; 2830 struct gfar_priv_tx_q *tx_queue = NULL;
@@ -2828,11 +2838,11 @@ static int gfar_poll(struct napi_struct *napi, int budget)
2828 budget_per_queue = budget/num_queues; 2838 budget_per_queue = budget/num_queues;
2829 2839
2830 /* Clear IEVENT, so interrupts aren't called again 2840 /* Clear IEVENT, so interrupts aren't called again
2831 * because of the packets that have already arrived */ 2841 * because of the packets that have already arrived
2842 */
2832 gfar_write(&regs->ievent, IEVENT_RTX_MASK); 2843 gfar_write(&regs->ievent, IEVENT_RTX_MASK);
2833 2844
2834 while (num_queues && left_over_budget) { 2845 while (num_queues && left_over_budget) {
2835
2836 budget_per_queue = left_over_budget/num_queues; 2846 budget_per_queue = left_over_budget/num_queues;
2837 left_over_budget = 0; 2847 left_over_budget = 0;
2838 2848
@@ -2843,12 +2853,13 @@ static int gfar_poll(struct napi_struct *napi, int budget)
2843 tx_queue = priv->tx_queue[rx_queue->qindex]; 2853 tx_queue = priv->tx_queue[rx_queue->qindex];
2844 2854
2845 tx_cleaned += gfar_clean_tx_ring(tx_queue); 2855 tx_cleaned += gfar_clean_tx_ring(tx_queue);
2846 rx_cleaned_per_queue = gfar_clean_rx_ring(rx_queue, 2856 rx_cleaned_per_queue =
2847 budget_per_queue); 2857 gfar_clean_rx_ring(rx_queue, budget_per_queue);
2848 rx_cleaned += rx_cleaned_per_queue; 2858 rx_cleaned += rx_cleaned_per_queue;
2849 if(rx_cleaned_per_queue < budget_per_queue) { 2859 if (rx_cleaned_per_queue < budget_per_queue) {
2850 left_over_budget = left_over_budget + 2860 left_over_budget = left_over_budget +
2851 (budget_per_queue - rx_cleaned_per_queue); 2861 (budget_per_queue -
2862 rx_cleaned_per_queue);
2852 set_bit(i, &serviced_queues); 2863 set_bit(i, &serviced_queues);
2853 num_queues--; 2864 num_queues--;
2854 } 2865 }
@@ -2866,25 +2877,25 @@ static int gfar_poll(struct napi_struct *napi, int budget)
2866 2877
2867 gfar_write(&regs->imask, IMASK_DEFAULT); 2878 gfar_write(&regs->imask, IMASK_DEFAULT);
2868 2879
2869 /* If we are coalescing interrupts, update the timer */ 2880 /* If we are coalescing interrupts, update the timer
2870 /* Otherwise, clear it */ 2881 * Otherwise, clear it
2871 gfar_configure_coalescing(priv, 2882 */
2872 gfargrp->rx_bit_map, gfargrp->tx_bit_map); 2883 gfar_configure_coalescing(priv, gfargrp->rx_bit_map,
2884 gfargrp->tx_bit_map);
2873 } 2885 }
2874 2886
2875 return rx_cleaned; 2887 return rx_cleaned;
2876} 2888}
2877 2889
2878#ifdef CONFIG_NET_POLL_CONTROLLER 2890#ifdef CONFIG_NET_POLL_CONTROLLER
2879/* 2891/* Polling 'interrupt' - used by things like netconsole to send skbs
2880 * Polling 'interrupt' - used by things like netconsole to send skbs
2881 * without having to re-enable interrupts. It's not called while 2892 * without having to re-enable interrupts. It's not called while
2882 * the interrupt routine is executing. 2893 * the interrupt routine is executing.
2883 */ 2894 */
2884static void gfar_netpoll(struct net_device *dev) 2895static void gfar_netpoll(struct net_device *dev)
2885{ 2896{
2886 struct gfar_private *priv = netdev_priv(dev); 2897 struct gfar_private *priv = netdev_priv(dev);
2887 int i = 0; 2898 int i;
2888 2899
2889 /* If the device has multiple interrupts, run tx/rx */ 2900 /* If the device has multiple interrupts, run tx/rx */
2890 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { 2901 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
@@ -2893,7 +2904,7 @@ static void gfar_netpoll(struct net_device *dev)
2893 disable_irq(priv->gfargrp[i].interruptReceive); 2904 disable_irq(priv->gfargrp[i].interruptReceive);
2894 disable_irq(priv->gfargrp[i].interruptError); 2905 disable_irq(priv->gfargrp[i].interruptError);
2895 gfar_interrupt(priv->gfargrp[i].interruptTransmit, 2906 gfar_interrupt(priv->gfargrp[i].interruptTransmit,
2896 &priv->gfargrp[i]); 2907 &priv->gfargrp[i]);
2897 enable_irq(priv->gfargrp[i].interruptError); 2908 enable_irq(priv->gfargrp[i].interruptError);
2898 enable_irq(priv->gfargrp[i].interruptReceive); 2909 enable_irq(priv->gfargrp[i].interruptReceive);
2899 enable_irq(priv->gfargrp[i].interruptTransmit); 2910 enable_irq(priv->gfargrp[i].interruptTransmit);
@@ -2902,7 +2913,7 @@ static void gfar_netpoll(struct net_device *dev)
2902 for (i = 0; i < priv->num_grps; i++) { 2913 for (i = 0; i < priv->num_grps; i++) {
2903 disable_irq(priv->gfargrp[i].interruptTransmit); 2914 disable_irq(priv->gfargrp[i].interruptTransmit);
2904 gfar_interrupt(priv->gfargrp[i].interruptTransmit, 2915 gfar_interrupt(priv->gfargrp[i].interruptTransmit,
2905 &priv->gfargrp[i]); 2916 &priv->gfargrp[i]);
2906 enable_irq(priv->gfargrp[i].interruptTransmit); 2917 enable_irq(priv->gfargrp[i].interruptTransmit);
2907 } 2918 }
2908 } 2919 }
@@ -2954,7 +2965,8 @@ static void adjust_link(struct net_device *dev)
2954 u32 ecntrl = gfar_read(&regs->ecntrl); 2965 u32 ecntrl = gfar_read(&regs->ecntrl);
2955 2966
2956 /* Now we make sure that we can be in full duplex mode. 2967 /* Now we make sure that we can be in full duplex mode.
2957 * If not, we operate in half-duplex mode. */ 2968 * If not, we operate in half-duplex mode.
2969 */
2958 if (phydev->duplex != priv->oldduplex) { 2970 if (phydev->duplex != priv->oldduplex) {
2959 new_state = 1; 2971 new_state = 1;
2960 if (!(phydev->duplex)) 2972 if (!(phydev->duplex))
@@ -2980,7 +2992,8 @@ static void adjust_link(struct net_device *dev)
2980 ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII); 2992 ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);
2981 2993
2982 /* Reduced mode distinguishes 2994 /* Reduced mode distinguishes
2983 * between 10 and 100 */ 2995 * between 10 and 100
2996 */
2984 if (phydev->speed == SPEED_100) 2997 if (phydev->speed == SPEED_100)
2985 ecntrl |= ECNTRL_R100; 2998 ecntrl |= ECNTRL_R100;
2986 else 2999 else
@@ -3019,7 +3032,8 @@ static void adjust_link(struct net_device *dev)
3019/* Update the hash table based on the current list of multicast 3032/* Update the hash table based on the current list of multicast
3020 * addresses we subscribe to. Also, change the promiscuity of 3033 * addresses we subscribe to. Also, change the promiscuity of
3021 * the device based on the flags (this function is called 3034 * the device based on the flags (this function is called
3022 * whenever dev->flags is changed */ 3035 * whenever dev->flags is changed
3036 */
3023static void gfar_set_multi(struct net_device *dev) 3037static void gfar_set_multi(struct net_device *dev)
3024{ 3038{
3025 struct netdev_hw_addr *ha; 3039 struct netdev_hw_addr *ha;
@@ -3081,7 +3095,8 @@ static void gfar_set_multi(struct net_device *dev)
3081 3095
3082 /* If we have extended hash tables, we need to 3096 /* If we have extended hash tables, we need to
3083 * clear the exact match registers to prepare for 3097 * clear the exact match registers to prepare for
3084 * setting them */ 3098 * setting them
3099 */
3085 if (priv->extended_hash) { 3100 if (priv->extended_hash) {
3086 em_num = GFAR_EM_NUM + 1; 3101 em_num = GFAR_EM_NUM + 1;
3087 gfar_clear_exact_match(dev); 3102 gfar_clear_exact_match(dev);
@@ -3107,13 +3122,14 @@ static void gfar_set_multi(struct net_device *dev)
3107 3122
3108 3123
3109/* Clears each of the exact match registers to zero, so they 3124/* Clears each of the exact match registers to zero, so they
3110 * don't interfere with normal reception */ 3125 * don't interfere with normal reception
3126 */
3111static void gfar_clear_exact_match(struct net_device *dev) 3127static void gfar_clear_exact_match(struct net_device *dev)
3112{ 3128{
3113 int idx; 3129 int idx;
3114 static const u8 zero_arr[ETH_ALEN] = {0, 0, 0, 0, 0, 0}; 3130 static const u8 zero_arr[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
3115 3131
3116 for(idx = 1;idx < GFAR_EM_NUM + 1;idx++) 3132 for (idx = 1; idx < GFAR_EM_NUM + 1; idx++)
3117 gfar_set_mac_for_addr(dev, idx, zero_arr); 3133 gfar_set_mac_for_addr(dev, idx, zero_arr);
3118} 3134}
3119 3135
@@ -3129,7 +3145,8 @@ static void gfar_clear_exact_match(struct net_device *dev)
3129 * hash index which gaddr register to use, and the 5 other bits 3145 * hash index which gaddr register to use, and the 5 other bits
3130 * indicate which bit (assuming an IBM numbering scheme, which 3146 * indicate which bit (assuming an IBM numbering scheme, which
3131 * for PowerPC (tm) is usually the case) in the register holds 3147 * for PowerPC (tm) is usually the case) in the register holds
3132 * the entry. */ 3148 * the entry.
3149 */
3133static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr) 3150static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
3134{ 3151{
3135 u32 tempval; 3152 u32 tempval;
@@ -3161,8 +3178,9 @@ static void gfar_set_mac_for_addr(struct net_device *dev, int num,
3161 3178
3162 macptr += num*2; 3179 macptr += num*2;
3163 3180
3164 /* Now copy it into the mac registers backwards, cuz */ 3181 /* Now copy it into the mac registers backwards, cuz
3165 /* little endian is silly */ 3182 * little endian is silly
3183 */
3166 for (idx = 0; idx < ETH_ALEN; idx++) 3184 for (idx = 0; idx < ETH_ALEN; idx++)
3167 tmpbuf[ETH_ALEN - 1 - idx] = addr[idx]; 3185 tmpbuf[ETH_ALEN - 1 - idx] = addr[idx];
3168 3186
@@ -3194,7 +3212,8 @@ static irqreturn_t gfar_error(int irq, void *grp_id)
3194 3212
3195 /* Hmm... */ 3213 /* Hmm... */
3196 if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv)) 3214 if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
3197 netdev_dbg(dev, "error interrupt (ievent=0x%08x imask=0x%08x)\n", 3215 netdev_dbg(dev,
3216 "error interrupt (ievent=0x%08x imask=0x%08x)\n",
3198 events, gfar_read(&regs->imask)); 3217 events, gfar_read(&regs->imask));
3199 3218
3200 /* Update the error counters */ 3219 /* Update the error counters */
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index 8a025570d97..8971921cc1c 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -46,18 +46,24 @@
46#include "gianfar.h" 46#include "gianfar.h"
47 47
48extern void gfar_start(struct net_device *dev); 48extern void gfar_start(struct net_device *dev);
49extern int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit); 49extern int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue,
50 int rx_work_limit);
50 51
51#define GFAR_MAX_COAL_USECS 0xffff 52#define GFAR_MAX_COAL_USECS 0xffff
52#define GFAR_MAX_COAL_FRAMES 0xff 53#define GFAR_MAX_COAL_FRAMES 0xff
53static void gfar_fill_stats(struct net_device *dev, struct ethtool_stats *dummy, 54static void gfar_fill_stats(struct net_device *dev, struct ethtool_stats *dummy,
54 u64 * buf); 55 u64 *buf);
55static void gfar_gstrings(struct net_device *dev, u32 stringset, u8 * buf); 56static void gfar_gstrings(struct net_device *dev, u32 stringset, u8 * buf);
56static int gfar_gcoalesce(struct net_device *dev, struct ethtool_coalesce *cvals); 57static int gfar_gcoalesce(struct net_device *dev,
57static int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals); 58 struct ethtool_coalesce *cvals);
58static void gfar_gringparam(struct net_device *dev, struct ethtool_ringparam *rvals); 59static int gfar_scoalesce(struct net_device *dev,
59static int gfar_sringparam(struct net_device *dev, struct ethtool_ringparam *rvals); 60 struct ethtool_coalesce *cvals);
60static void gfar_gdrvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo); 61static void gfar_gringparam(struct net_device *dev,
62 struct ethtool_ringparam *rvals);
63static int gfar_sringparam(struct net_device *dev,
64 struct ethtool_ringparam *rvals);
65static void gfar_gdrvinfo(struct net_device *dev,
66 struct ethtool_drvinfo *drvinfo);
61 67
62static const char stat_gstrings[][ETH_GSTRING_LEN] = { 68static const char stat_gstrings[][ETH_GSTRING_LEN] = {
63 "rx-dropped-by-kernel", 69 "rx-dropped-by-kernel",
@@ -130,14 +136,15 @@ static void gfar_gstrings(struct net_device *dev, u32 stringset, u8 * buf)
130 memcpy(buf, stat_gstrings, GFAR_STATS_LEN * ETH_GSTRING_LEN); 136 memcpy(buf, stat_gstrings, GFAR_STATS_LEN * ETH_GSTRING_LEN);
131 else 137 else
132 memcpy(buf, stat_gstrings, 138 memcpy(buf, stat_gstrings,
133 GFAR_EXTRA_STATS_LEN * ETH_GSTRING_LEN); 139 GFAR_EXTRA_STATS_LEN * ETH_GSTRING_LEN);
134} 140}
135 141
136/* Fill in an array of 64-bit statistics from various sources. 142/* Fill in an array of 64-bit statistics from various sources.
137 * This array will be appended to the end of the ethtool_stats 143 * This array will be appended to the end of the ethtool_stats
138 * structure, and returned to user space 144 * structure, and returned to user space
139 */ 145 */
140static void gfar_fill_stats(struct net_device *dev, struct ethtool_stats *dummy, u64 * buf) 146static void gfar_fill_stats(struct net_device *dev, struct ethtool_stats *dummy,
147 u64 *buf)
141{ 148{
142 int i; 149 int i;
143 struct gfar_private *priv = netdev_priv(dev); 150 struct gfar_private *priv = netdev_priv(dev);
@@ -174,8 +181,8 @@ static int gfar_sset_count(struct net_device *dev, int sset)
174} 181}
175 182
176/* Fills in the drvinfo structure with some basic info */ 183/* Fills in the drvinfo structure with some basic info */
177static void gfar_gdrvinfo(struct net_device *dev, struct 184static void gfar_gdrvinfo(struct net_device *dev,
178 ethtool_drvinfo *drvinfo) 185 struct ethtool_drvinfo *drvinfo)
179{ 186{
180 strncpy(drvinfo->driver, DRV_NAME, GFAR_INFOSTR_LEN); 187 strncpy(drvinfo->driver, DRV_NAME, GFAR_INFOSTR_LEN);
181 strncpy(drvinfo->version, gfar_driver_version, GFAR_INFOSTR_LEN); 188 strncpy(drvinfo->version, gfar_driver_version, GFAR_INFOSTR_LEN);
@@ -226,7 +233,8 @@ static int gfar_reglen(struct net_device *dev)
226} 233}
227 234
228/* Return a dump of the GFAR register space */ 235/* Return a dump of the GFAR register space */
229static void gfar_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *regbuf) 236static void gfar_get_regs(struct net_device *dev, struct ethtool_regs *regs,
237 void *regbuf)
230{ 238{
231 int i; 239 int i;
232 struct gfar_private *priv = netdev_priv(dev); 240 struct gfar_private *priv = netdev_priv(dev);
@@ -239,7 +247,8 @@ static void gfar_get_regs(struct net_device *dev, struct ethtool_regs *regs, voi
239 247
240/* Convert microseconds to ethernet clock ticks, which changes 248/* Convert microseconds to ethernet clock ticks, which changes
241 * depending on what speed the controller is running at */ 249 * depending on what speed the controller is running at */
242static unsigned int gfar_usecs2ticks(struct gfar_private *priv, unsigned int usecs) 250static unsigned int gfar_usecs2ticks(struct gfar_private *priv,
251 unsigned int usecs)
243{ 252{
244 unsigned int count; 253 unsigned int count;
245 254
@@ -263,7 +272,8 @@ static unsigned int gfar_usecs2ticks(struct gfar_private *priv, unsigned int use
263} 272}
264 273
265/* Convert ethernet clock ticks to microseconds */ 274/* Convert ethernet clock ticks to microseconds */
266static unsigned int gfar_ticks2usecs(struct gfar_private *priv, unsigned int ticks) 275static unsigned int gfar_ticks2usecs(struct gfar_private *priv,
276 unsigned int ticks)
267{ 277{
268 unsigned int count; 278 unsigned int count;
269 279
@@ -288,7 +298,8 @@ static unsigned int gfar_ticks2usecs(struct gfar_private *priv, unsigned int tic
288 298
289/* Get the coalescing parameters, and put them in the cvals 299/* Get the coalescing parameters, and put them in the cvals
290 * structure. */ 300 * structure. */
291static int gfar_gcoalesce(struct net_device *dev, struct ethtool_coalesce *cvals) 301static int gfar_gcoalesce(struct net_device *dev,
302 struct ethtool_coalesce *cvals)
292{ 303{
293 struct gfar_private *priv = netdev_priv(dev); 304 struct gfar_private *priv = netdev_priv(dev);
294 struct gfar_priv_rx_q *rx_queue = NULL; 305 struct gfar_priv_rx_q *rx_queue = NULL;
@@ -353,7 +364,8 @@ static int gfar_gcoalesce(struct net_device *dev, struct ethtool_coalesce *cvals
353 * Both cvals->*_usecs and cvals->*_frames have to be > 0 364 * Both cvals->*_usecs and cvals->*_frames have to be > 0
354 * in order for coalescing to be active 365 * in order for coalescing to be active
355 */ 366 */
356static int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals) 367static int gfar_scoalesce(struct net_device *dev,
368 struct ethtool_coalesce *cvals)
357{ 369{
358 struct gfar_private *priv = netdev_priv(dev); 370 struct gfar_private *priv = netdev_priv(dev);
359 int i = 0; 371 int i = 0;
@@ -364,7 +376,8 @@ static int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals
364 /* Set up rx coalescing */ 376 /* Set up rx coalescing */
365 /* As of now, we will enable/disable coalescing for all 377 /* As of now, we will enable/disable coalescing for all
366 * queues together in case of eTSEC2, this will be modified 378 * queues together in case of eTSEC2, this will be modified
367 * along with the ethtool interface */ 379 * along with the ethtool interface
380 */
368 if ((cvals->rx_coalesce_usecs == 0) || 381 if ((cvals->rx_coalesce_usecs == 0) ||
369 (cvals->rx_max_coalesced_frames == 0)) { 382 (cvals->rx_max_coalesced_frames == 0)) {
370 for (i = 0; i < priv->num_rx_queues; i++) 383 for (i = 0; i < priv->num_rx_queues; i++)
@@ -433,7 +446,8 @@ static int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals
433/* Fills in rvals with the current ring parameters. Currently, 446/* Fills in rvals with the current ring parameters. Currently,
434 * rx, rx_mini, and rx_jumbo rings are the same size, as mini and 447 * rx, rx_mini, and rx_jumbo rings are the same size, as mini and
435 * jumbo are ignored by the driver */ 448 * jumbo are ignored by the driver */
436static void gfar_gringparam(struct net_device *dev, struct ethtool_ringparam *rvals) 449static void gfar_gringparam(struct net_device *dev,
450 struct ethtool_ringparam *rvals)
437{ 451{
438 struct gfar_private *priv = netdev_priv(dev); 452 struct gfar_private *priv = netdev_priv(dev);
439 struct gfar_priv_tx_q *tx_queue = NULL; 453 struct gfar_priv_tx_q *tx_queue = NULL;
@@ -459,8 +473,10 @@ static void gfar_gringparam(struct net_device *dev, struct ethtool_ringparam *rv
459/* Change the current ring parameters, stopping the controller if 473/* Change the current ring parameters, stopping the controller if
460 * necessary so that we don't mess things up while we're in 474 * necessary so that we don't mess things up while we're in
461 * motion. We wait for the ring to be clean before reallocating 475 * motion. We wait for the ring to be clean before reallocating
462 * the rings. */ 476 * the rings.
463static int gfar_sringparam(struct net_device *dev, struct ethtool_ringparam *rvals) 477 */
478static int gfar_sringparam(struct net_device *dev,
479 struct ethtool_ringparam *rvals)
464{ 480{
465 struct gfar_private *priv = netdev_priv(dev); 481 struct gfar_private *priv = netdev_priv(dev);
466 int err = 0, i = 0; 482 int err = 0, i = 0;
@@ -486,7 +502,8 @@ static int gfar_sringparam(struct net_device *dev, struct ethtool_ringparam *rva
486 unsigned long flags; 502 unsigned long flags;
487 503
488 /* Halt TX and RX, and process the frames which 504 /* Halt TX and RX, and process the frames which
489 * have already been received */ 505 * have already been received
506 */
490 local_irq_save(flags); 507 local_irq_save(flags);
491 lock_tx_qs(priv); 508 lock_tx_qs(priv);
492 lock_rx_qs(priv); 509 lock_rx_qs(priv);
@@ -499,7 +516,7 @@ static int gfar_sringparam(struct net_device *dev, struct ethtool_ringparam *rva
499 516
500 for (i = 0; i < priv->num_rx_queues; i++) 517 for (i = 0; i < priv->num_rx_queues; i++)
501 gfar_clean_rx_ring(priv->rx_queue[i], 518 gfar_clean_rx_ring(priv->rx_queue[i],
502 priv->rx_queue[i]->rx_ring_size); 519 priv->rx_queue[i]->rx_ring_size);
503 520
504 /* Now we take down the rings to rebuild them */ 521 /* Now we take down the rings to rebuild them */
505 stop_gfar(dev); 522 stop_gfar(dev);
@@ -509,7 +526,8 @@ static int gfar_sringparam(struct net_device *dev, struct ethtool_ringparam *rva
509 for (i = 0; i < priv->num_rx_queues; i++) { 526 for (i = 0; i < priv->num_rx_queues; i++) {
510 priv->rx_queue[i]->rx_ring_size = rvals->rx_pending; 527 priv->rx_queue[i]->rx_ring_size = rvals->rx_pending;
511 priv->tx_queue[i]->tx_ring_size = rvals->tx_pending; 528 priv->tx_queue[i]->tx_ring_size = rvals->tx_pending;
512 priv->tx_queue[i]->num_txbdfree = priv->tx_queue[i]->tx_ring_size; 529 priv->tx_queue[i]->num_txbdfree =
530 priv->tx_queue[i]->tx_ring_size;
513 } 531 }
514 532
515 /* Rebuild the rings with the new size */ 533 /* Rebuild the rings with the new size */
@@ -535,7 +553,8 @@ int gfar_set_features(struct net_device *dev, netdev_features_t features)
535 553
536 if (dev->flags & IFF_UP) { 554 if (dev->flags & IFF_UP) {
537 /* Halt TX and RX, and process the frames which 555 /* Halt TX and RX, and process the frames which
538 * have already been received */ 556 * have already been received
557 */
539 local_irq_save(flags); 558 local_irq_save(flags);
540 lock_tx_qs(priv); 559 lock_tx_qs(priv);
541 lock_rx_qs(priv); 560 lock_rx_qs(priv);
@@ -548,7 +567,7 @@ int gfar_set_features(struct net_device *dev, netdev_features_t features)
548 567
549 for (i = 0; i < priv->num_rx_queues; i++) 568 for (i = 0; i < priv->num_rx_queues; i++)
550 gfar_clean_rx_ring(priv->rx_queue[i], 569 gfar_clean_rx_ring(priv->rx_queue[i],
551 priv->rx_queue[i]->rx_ring_size); 570 priv->rx_queue[i]->rx_ring_size);
552 571
553 /* Now we take down the rings to rebuild them */ 572 /* Now we take down the rings to rebuild them */
554 stop_gfar(dev); 573 stop_gfar(dev);
@@ -564,12 +583,14 @@ int gfar_set_features(struct net_device *dev, netdev_features_t features)
564static uint32_t gfar_get_msglevel(struct net_device *dev) 583static uint32_t gfar_get_msglevel(struct net_device *dev)
565{ 584{
566 struct gfar_private *priv = netdev_priv(dev); 585 struct gfar_private *priv = netdev_priv(dev);
586
567 return priv->msg_enable; 587 return priv->msg_enable;
568} 588}
569 589
570static void gfar_set_msglevel(struct net_device *dev, uint32_t data) 590static void gfar_set_msglevel(struct net_device *dev, uint32_t data)
571{ 591{
572 struct gfar_private *priv = netdev_priv(dev); 592 struct gfar_private *priv = netdev_priv(dev);
593
573 priv->msg_enable = data; 594 priv->msg_enable = data;
574} 595}
575 596
@@ -614,14 +635,14 @@ static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow)
614 635
615 if (ethflow & RXH_L2DA) { 636 if (ethflow & RXH_L2DA) {
616 fcr = RQFCR_PID_DAH |RQFCR_CMP_NOMATCH | 637 fcr = RQFCR_PID_DAH |RQFCR_CMP_NOMATCH |
617 RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0; 638 RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0;
618 priv->ftp_rqfpr[priv->cur_filer_idx] = fpr; 639 priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
619 priv->ftp_rqfcr[priv->cur_filer_idx] = fcr; 640 priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
620 gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr); 641 gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
621 priv->cur_filer_idx = priv->cur_filer_idx - 1; 642 priv->cur_filer_idx = priv->cur_filer_idx - 1;
622 643
623 fcr = RQFCR_PID_DAL | RQFCR_AND | RQFCR_CMP_NOMATCH | 644 fcr = RQFCR_PID_DAL | RQFCR_AND | RQFCR_CMP_NOMATCH |
624 RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0; 645 RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0;
625 priv->ftp_rqfpr[priv->cur_filer_idx] = fpr; 646 priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
626 priv->ftp_rqfcr[priv->cur_filer_idx] = fcr; 647 priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
627 gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr); 648 gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
@@ -630,7 +651,7 @@ static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow)
630 651
631 if (ethflow & RXH_VLAN) { 652 if (ethflow & RXH_VLAN) {
632 fcr = RQFCR_PID_VID | RQFCR_CMP_NOMATCH | RQFCR_HASH | 653 fcr = RQFCR_PID_VID | RQFCR_CMP_NOMATCH | RQFCR_HASH |
633 RQFCR_AND | RQFCR_HASHTBL_0; 654 RQFCR_AND | RQFCR_HASHTBL_0;
634 gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr); 655 gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
635 priv->ftp_rqfpr[priv->cur_filer_idx] = fpr; 656 priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
636 priv->ftp_rqfcr[priv->cur_filer_idx] = fcr; 657 priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
@@ -639,7 +660,7 @@ static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow)
639 660
640 if (ethflow & RXH_IP_SRC) { 661 if (ethflow & RXH_IP_SRC) {
641 fcr = RQFCR_PID_SIA | RQFCR_CMP_NOMATCH | RQFCR_HASH | 662 fcr = RQFCR_PID_SIA | RQFCR_CMP_NOMATCH | RQFCR_HASH |
642 RQFCR_AND | RQFCR_HASHTBL_0; 663 RQFCR_AND | RQFCR_HASHTBL_0;
643 priv->ftp_rqfpr[priv->cur_filer_idx] = fpr; 664 priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
644 priv->ftp_rqfcr[priv->cur_filer_idx] = fcr; 665 priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
645 gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr); 666 gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
@@ -648,7 +669,7 @@ static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow)
648 669
649 if (ethflow & (RXH_IP_DST)) { 670 if (ethflow & (RXH_IP_DST)) {
650 fcr = RQFCR_PID_DIA | RQFCR_CMP_NOMATCH | RQFCR_HASH | 671 fcr = RQFCR_PID_DIA | RQFCR_CMP_NOMATCH | RQFCR_HASH |
651 RQFCR_AND | RQFCR_HASHTBL_0; 672 RQFCR_AND | RQFCR_HASHTBL_0;
652 priv->ftp_rqfpr[priv->cur_filer_idx] = fpr; 673 priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
653 priv->ftp_rqfcr[priv->cur_filer_idx] = fcr; 674 priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
654 gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr); 675 gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
@@ -657,7 +678,7 @@ static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow)
657 678
658 if (ethflow & RXH_L3_PROTO) { 679 if (ethflow & RXH_L3_PROTO) {
659 fcr = RQFCR_PID_L4P | RQFCR_CMP_NOMATCH | RQFCR_HASH | 680 fcr = RQFCR_PID_L4P | RQFCR_CMP_NOMATCH | RQFCR_HASH |
660 RQFCR_AND | RQFCR_HASHTBL_0; 681 RQFCR_AND | RQFCR_HASHTBL_0;
661 priv->ftp_rqfpr[priv->cur_filer_idx] = fpr; 682 priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
662 priv->ftp_rqfcr[priv->cur_filer_idx] = fcr; 683 priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
663 gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr); 684 gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
@@ -666,7 +687,7 @@ static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow)
666 687
667 if (ethflow & RXH_L4_B_0_1) { 688 if (ethflow & RXH_L4_B_0_1) {
668 fcr = RQFCR_PID_SPT | RQFCR_CMP_NOMATCH | RQFCR_HASH | 689 fcr = RQFCR_PID_SPT | RQFCR_CMP_NOMATCH | RQFCR_HASH |
669 RQFCR_AND | RQFCR_HASHTBL_0; 690 RQFCR_AND | RQFCR_HASHTBL_0;
670 priv->ftp_rqfpr[priv->cur_filer_idx] = fpr; 691 priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
671 priv->ftp_rqfcr[priv->cur_filer_idx] = fcr; 692 priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
672 gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr); 693 gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
@@ -675,7 +696,7 @@ static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow)
675 696
676 if (ethflow & RXH_L4_B_2_3) { 697 if (ethflow & RXH_L4_B_2_3) {
677 fcr = RQFCR_PID_DPT | RQFCR_CMP_NOMATCH | RQFCR_HASH | 698 fcr = RQFCR_PID_DPT | RQFCR_CMP_NOMATCH | RQFCR_HASH |
678 RQFCR_AND | RQFCR_HASHTBL_0; 699 RQFCR_AND | RQFCR_HASHTBL_0;
679 priv->ftp_rqfpr[priv->cur_filer_idx] = fpr; 700 priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
680 priv->ftp_rqfcr[priv->cur_filer_idx] = fcr; 701 priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
681 gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr); 702 gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
@@ -683,7 +704,8 @@ static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow)
683 } 704 }
684} 705}
685 706
686static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow, u64 class) 707static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow,
708 u64 class)
687{ 709{
688 unsigned int last_rule_idx = priv->cur_filer_idx; 710 unsigned int last_rule_idx = priv->cur_filer_idx;
689 unsigned int cmp_rqfpr; 711 unsigned int cmp_rqfpr;
@@ -694,9 +716,9 @@ static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow, u
694 int ret = 1; 716 int ret = 1;
695 717
696 local_rqfpr = kmalloc(sizeof(unsigned int) * (MAX_FILER_IDX + 1), 718 local_rqfpr = kmalloc(sizeof(unsigned int) * (MAX_FILER_IDX + 1),
697 GFP_KERNEL); 719 GFP_KERNEL);
698 local_rqfcr = kmalloc(sizeof(unsigned int) * (MAX_FILER_IDX + 1), 720 local_rqfcr = kmalloc(sizeof(unsigned int) * (MAX_FILER_IDX + 1),
699 GFP_KERNEL); 721 GFP_KERNEL);
700 if (!local_rqfpr || !local_rqfcr) { 722 if (!local_rqfpr || !local_rqfcr) {
701 pr_err("Out of memory\n"); 723 pr_err("Out of memory\n");
702 ret = 0; 724 ret = 0;
@@ -726,9 +748,9 @@ static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow, u
726 local_rqfpr[j] = priv->ftp_rqfpr[i]; 748 local_rqfpr[j] = priv->ftp_rqfpr[i];
727 local_rqfcr[j] = priv->ftp_rqfcr[i]; 749 local_rqfcr[j] = priv->ftp_rqfcr[i];
728 j--; 750 j--;
729 if ((priv->ftp_rqfcr[i] == (RQFCR_PID_PARSE | 751 if ((priv->ftp_rqfcr[i] ==
730 RQFCR_CLE |RQFCR_AND)) && 752 (RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND)) &&
731 (priv->ftp_rqfpr[i] == cmp_rqfpr)) 753 (priv->ftp_rqfpr[i] == cmp_rqfpr))
732 break; 754 break;
733 } 755 }
734 756
@@ -743,12 +765,12 @@ static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow, u
743 */ 765 */
744 for (l = i+1; l < MAX_FILER_IDX; l++) { 766 for (l = i+1; l < MAX_FILER_IDX; l++) {
745 if ((priv->ftp_rqfcr[l] & RQFCR_CLE) && 767 if ((priv->ftp_rqfcr[l] & RQFCR_CLE) &&
746 !(priv->ftp_rqfcr[l] & RQFCR_AND)) { 768 !(priv->ftp_rqfcr[l] & RQFCR_AND)) {
747 priv->ftp_rqfcr[l] = RQFCR_CLE | RQFCR_CMP_EXACT | 769 priv->ftp_rqfcr[l] = RQFCR_CLE | RQFCR_CMP_EXACT |
748 RQFCR_HASHTBL_0 | RQFCR_PID_MASK; 770 RQFCR_HASHTBL_0 | RQFCR_PID_MASK;
749 priv->ftp_rqfpr[l] = FPR_FILER_MASK; 771 priv->ftp_rqfpr[l] = FPR_FILER_MASK;
750 gfar_write_filer(priv, l, priv->ftp_rqfcr[l], 772 gfar_write_filer(priv, l, priv->ftp_rqfcr[l],
751 priv->ftp_rqfpr[l]); 773 priv->ftp_rqfpr[l]);
752 break; 774 break;
753 } 775 }
754 776
@@ -773,7 +795,7 @@ static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow, u
773 priv->ftp_rqfpr[priv->cur_filer_idx] = local_rqfpr[k]; 795 priv->ftp_rqfpr[priv->cur_filer_idx] = local_rqfpr[k];
774 priv->ftp_rqfcr[priv->cur_filer_idx] = local_rqfcr[k]; 796 priv->ftp_rqfcr[priv->cur_filer_idx] = local_rqfcr[k];
775 gfar_write_filer(priv, priv->cur_filer_idx, 797 gfar_write_filer(priv, priv->cur_filer_idx,
776 local_rqfcr[k], local_rqfpr[k]); 798 local_rqfcr[k], local_rqfpr[k]);
777 if (!priv->cur_filer_idx) 799 if (!priv->cur_filer_idx)
778 break; 800 break;
779 priv->cur_filer_idx = priv->cur_filer_idx - 1; 801 priv->cur_filer_idx = priv->cur_filer_idx - 1;
@@ -785,7 +807,8 @@ err:
785 return ret; 807 return ret;
786} 808}
787 809
788static int gfar_set_hash_opts(struct gfar_private *priv, struct ethtool_rxnfc *cmd) 810static int gfar_set_hash_opts(struct gfar_private *priv,
811 struct ethtool_rxnfc *cmd)
789{ 812{
790 /* write the filer rules here */ 813 /* write the filer rules here */
791 if (!gfar_ethflow_to_filer_table(priv, cmd->data, cmd->flow_type)) 814 if (!gfar_ethflow_to_filer_table(priv, cmd->data, cmd->flow_type))
@@ -810,10 +833,10 @@ static int gfar_check_filer_hardware(struct gfar_private *priv)
810 i &= RCTRL_PRSDEP_MASK | RCTRL_PRSFM; 833 i &= RCTRL_PRSDEP_MASK | RCTRL_PRSFM;
811 if (i == (RCTRL_PRSDEP_MASK | RCTRL_PRSFM)) { 834 if (i == (RCTRL_PRSDEP_MASK | RCTRL_PRSFM)) {
812 netdev_info(priv->ndev, 835 netdev_info(priv->ndev,
813 "Receive Queue Filtering enabled\n"); 836 "Receive Queue Filtering enabled\n");
814 } else { 837 } else {
815 netdev_warn(priv->ndev, 838 netdev_warn(priv->ndev,
816 "Receive Queue Filtering disabled\n"); 839 "Receive Queue Filtering disabled\n");
817 return -EOPNOTSUPP; 840 return -EOPNOTSUPP;
818 } 841 }
819 } 842 }
@@ -823,16 +846,17 @@ static int gfar_check_filer_hardware(struct gfar_private *priv)
823 i &= RCTRL_PRSDEP_MASK; 846 i &= RCTRL_PRSDEP_MASK;
824 if (i == RCTRL_PRSDEP_MASK) { 847 if (i == RCTRL_PRSDEP_MASK) {
825 netdev_info(priv->ndev, 848 netdev_info(priv->ndev,
826 "Receive Queue Filtering enabled\n"); 849 "Receive Queue Filtering enabled\n");
827 } else { 850 } else {
828 netdev_warn(priv->ndev, 851 netdev_warn(priv->ndev,
829 "Receive Queue Filtering disabled\n"); 852 "Receive Queue Filtering disabled\n");
830 return -EOPNOTSUPP; 853 return -EOPNOTSUPP;
831 } 854 }
832 } 855 }
833 856
834 /* Sets the properties for arbitrary filer rule 857 /* Sets the properties for arbitrary filer rule
835 * to the first 4 Layer 4 Bytes */ 858 * to the first 4 Layer 4 Bytes
859 */
836 regs->rbifx = 0xC0C1C2C3; 860 regs->rbifx = 0xC0C1C2C3;
837 return 0; 861 return 0;
838} 862}
@@ -870,14 +894,14 @@ static void gfar_set_mask(u32 mask, struct filer_table *tab)
870static void gfar_set_parse_bits(u32 value, u32 mask, struct filer_table *tab) 894static void gfar_set_parse_bits(u32 value, u32 mask, struct filer_table *tab)
871{ 895{
872 gfar_set_mask(mask, tab); 896 gfar_set_mask(mask, tab);
873 tab->fe[tab->index].ctrl = RQFCR_CMP_EXACT | RQFCR_PID_PARSE 897 tab->fe[tab->index].ctrl = RQFCR_CMP_EXACT | RQFCR_PID_PARSE |
874 | RQFCR_AND; 898 RQFCR_AND;
875 tab->fe[tab->index].prop = value; 899 tab->fe[tab->index].prop = value;
876 tab->index++; 900 tab->index++;
877} 901}
878 902
879static void gfar_set_general_attribute(u32 value, u32 mask, u32 flag, 903static void gfar_set_general_attribute(u32 value, u32 mask, u32 flag,
880 struct filer_table *tab) 904 struct filer_table *tab)
881{ 905{
882 gfar_set_mask(mask, tab); 906 gfar_set_mask(mask, tab);
883 tab->fe[tab->index].ctrl = RQFCR_CMP_EXACT | RQFCR_AND | flag; 907 tab->fe[tab->index].ctrl = RQFCR_CMP_EXACT | RQFCR_AND | flag;
@@ -885,8 +909,7 @@ static void gfar_set_general_attribute(u32 value, u32 mask, u32 flag,
885 tab->index++; 909 tab->index++;
886} 910}
887 911
888/* 912/* For setting a tuple of value and mask of type flag
889 * For setting a tuple of value and mask of type flag
890 * Example: 913 * Example:
891 * IP-Src = 10.0.0.0/255.0.0.0 914 * IP-Src = 10.0.0.0/255.0.0.0
892 * value: 0x0A000000 mask: FF000000 flag: RQFPR_IPV4 915 * value: 0x0A000000 mask: FF000000 flag: RQFPR_IPV4
@@ -901,7 +924,7 @@ static void gfar_set_general_attribute(u32 value, u32 mask, u32 flag,
901 * Further the all masks are one-padded for better hardware efficiency. 924 * Further the all masks are one-padded for better hardware efficiency.
902 */ 925 */
903static void gfar_set_attribute(u32 value, u32 mask, u32 flag, 926static void gfar_set_attribute(u32 value, u32 mask, u32 flag,
904 struct filer_table *tab) 927 struct filer_table *tab)
905{ 928{
906 switch (flag) { 929 switch (flag) {
907 /* 3bit */ 930 /* 3bit */
@@ -959,7 +982,8 @@ static void gfar_set_attribute(u32 value, u32 mask, u32 flag,
959 982
960/* Translates value and mask for UDP, TCP or SCTP */ 983/* Translates value and mask for UDP, TCP or SCTP */
961static void gfar_set_basic_ip(struct ethtool_tcpip4_spec *value, 984static void gfar_set_basic_ip(struct ethtool_tcpip4_spec *value,
962 struct ethtool_tcpip4_spec *mask, struct filer_table *tab) 985 struct ethtool_tcpip4_spec *mask,
986 struct filer_table *tab)
963{ 987{
964 gfar_set_attribute(value->ip4src, mask->ip4src, RQFCR_PID_SIA, tab); 988 gfar_set_attribute(value->ip4src, mask->ip4src, RQFCR_PID_SIA, tab);
965 gfar_set_attribute(value->ip4dst, mask->ip4dst, RQFCR_PID_DIA, tab); 989 gfar_set_attribute(value->ip4dst, mask->ip4dst, RQFCR_PID_DIA, tab);
@@ -970,97 +994,92 @@ static void gfar_set_basic_ip(struct ethtool_tcpip4_spec *value,
970 994
971/* Translates value and mask for RAW-IP4 */ 995/* Translates value and mask for RAW-IP4 */
972static void gfar_set_user_ip(struct ethtool_usrip4_spec *value, 996static void gfar_set_user_ip(struct ethtool_usrip4_spec *value,
973 struct ethtool_usrip4_spec *mask, struct filer_table *tab) 997 struct ethtool_usrip4_spec *mask,
998 struct filer_table *tab)
974{ 999{
975 gfar_set_attribute(value->ip4src, mask->ip4src, RQFCR_PID_SIA, tab); 1000 gfar_set_attribute(value->ip4src, mask->ip4src, RQFCR_PID_SIA, tab);
976 gfar_set_attribute(value->ip4dst, mask->ip4dst, RQFCR_PID_DIA, tab); 1001 gfar_set_attribute(value->ip4dst, mask->ip4dst, RQFCR_PID_DIA, tab);
977 gfar_set_attribute(value->tos, mask->tos, RQFCR_PID_TOS, tab); 1002 gfar_set_attribute(value->tos, mask->tos, RQFCR_PID_TOS, tab);
978 gfar_set_attribute(value->proto, mask->proto, RQFCR_PID_L4P, tab); 1003 gfar_set_attribute(value->proto, mask->proto, RQFCR_PID_L4P, tab);
979 gfar_set_attribute(value->l4_4_bytes, mask->l4_4_bytes, RQFCR_PID_ARB, 1004 gfar_set_attribute(value->l4_4_bytes, mask->l4_4_bytes, RQFCR_PID_ARB,
980 tab); 1005 tab);
981 1006
982} 1007}
983 1008
984/* Translates value and mask for ETHER spec */ 1009/* Translates value and mask for ETHER spec */
985static void gfar_set_ether(struct ethhdr *value, struct ethhdr *mask, 1010static void gfar_set_ether(struct ethhdr *value, struct ethhdr *mask,
986 struct filer_table *tab) 1011 struct filer_table *tab)
987{ 1012{
988 u32 upper_temp_mask = 0; 1013 u32 upper_temp_mask = 0;
989 u32 lower_temp_mask = 0; 1014 u32 lower_temp_mask = 0;
1015
990 /* Source address */ 1016 /* Source address */
991 if (!is_broadcast_ether_addr(mask->h_source)) { 1017 if (!is_broadcast_ether_addr(mask->h_source)) {
992
993 if (is_zero_ether_addr(mask->h_source)) { 1018 if (is_zero_ether_addr(mask->h_source)) {
994 upper_temp_mask = 0xFFFFFFFF; 1019 upper_temp_mask = 0xFFFFFFFF;
995 lower_temp_mask = 0xFFFFFFFF; 1020 lower_temp_mask = 0xFFFFFFFF;
996 } else { 1021 } else {
997 upper_temp_mask = mask->h_source[0] << 16 1022 upper_temp_mask = mask->h_source[0] << 16 |
998 | mask->h_source[1] << 8 1023 mask->h_source[1] << 8 |
999 | mask->h_source[2]; 1024 mask->h_source[2];
1000 lower_temp_mask = mask->h_source[3] << 16 1025 lower_temp_mask = mask->h_source[3] << 16 |
1001 | mask->h_source[4] << 8 1026 mask->h_source[4] << 8 |
1002 | mask->h_source[5]; 1027 mask->h_source[5];
1003 } 1028 }
1004 /* Upper 24bit */ 1029 /* Upper 24bit */
1005 gfar_set_attribute( 1030 gfar_set_attribute(value->h_source[0] << 16 |
1006 value->h_source[0] << 16 | value->h_source[1] 1031 value->h_source[1] << 8 |
1007 << 8 | value->h_source[2], 1032 value->h_source[2],
1008 upper_temp_mask, RQFCR_PID_SAH, tab); 1033 upper_temp_mask, RQFCR_PID_SAH, tab);
1009 /* And the same for the lower part */ 1034 /* And the same for the lower part */
1010 gfar_set_attribute( 1035 gfar_set_attribute(value->h_source[3] << 16 |
1011 value->h_source[3] << 16 | value->h_source[4] 1036 value->h_source[4] << 8 |
1012 << 8 | value->h_source[5], 1037 value->h_source[5],
1013 lower_temp_mask, RQFCR_PID_SAL, tab); 1038 lower_temp_mask, RQFCR_PID_SAL, tab);
1014 } 1039 }
1015 /* Destination address */ 1040 /* Destination address */
1016 if (!is_broadcast_ether_addr(mask->h_dest)) { 1041 if (!is_broadcast_ether_addr(mask->h_dest)) {
1017
1018 /* Special for destination is limited broadcast */ 1042 /* Special for destination is limited broadcast */
1019 if ((is_broadcast_ether_addr(value->h_dest) 1043 if ((is_broadcast_ether_addr(value->h_dest) &&
1020 && is_zero_ether_addr(mask->h_dest))) { 1044 is_zero_ether_addr(mask->h_dest))) {
1021 gfar_set_parse_bits(RQFPR_EBC, RQFPR_EBC, tab); 1045 gfar_set_parse_bits(RQFPR_EBC, RQFPR_EBC, tab);
1022 } else { 1046 } else {
1023
1024 if (is_zero_ether_addr(mask->h_dest)) { 1047 if (is_zero_ether_addr(mask->h_dest)) {
1025 upper_temp_mask = 0xFFFFFFFF; 1048 upper_temp_mask = 0xFFFFFFFF;
1026 lower_temp_mask = 0xFFFFFFFF; 1049 lower_temp_mask = 0xFFFFFFFF;
1027 } else { 1050 } else {
1028 upper_temp_mask = mask->h_dest[0] << 16 1051 upper_temp_mask = mask->h_dest[0] << 16 |
1029 | mask->h_dest[1] << 8 1052 mask->h_dest[1] << 8 |
1030 | mask->h_dest[2]; 1053 mask->h_dest[2];
1031 lower_temp_mask = mask->h_dest[3] << 16 1054 lower_temp_mask = mask->h_dest[3] << 16 |
1032 | mask->h_dest[4] << 8 1055 mask->h_dest[4] << 8 |
1033 | mask->h_dest[5]; 1056 mask->h_dest[5];
1034 } 1057 }
1035 1058
1036 /* Upper 24bit */ 1059 /* Upper 24bit */
1037 gfar_set_attribute( 1060 gfar_set_attribute(value->h_dest[0] << 16 |
1038 value->h_dest[0] << 16 1061 value->h_dest[1] << 8 |
1039 | value->h_dest[1] << 8 1062 value->h_dest[2],
1040 | value->h_dest[2], 1063 upper_temp_mask, RQFCR_PID_DAH, tab);
1041 upper_temp_mask, RQFCR_PID_DAH, tab);
1042 /* And the same for the lower part */ 1064 /* And the same for the lower part */
1043 gfar_set_attribute( 1065 gfar_set_attribute(value->h_dest[3] << 16 |
1044 value->h_dest[3] << 16 1066 value->h_dest[4] << 8 |
1045 | value->h_dest[4] << 8 1067 value->h_dest[5],
1046 | value->h_dest[5], 1068 lower_temp_mask, RQFCR_PID_DAL, tab);
1047 lower_temp_mask, RQFCR_PID_DAL, tab);
1048 } 1069 }
1049 } 1070 }
1050 1071
1051 gfar_set_attribute(value->h_proto, mask->h_proto, RQFCR_PID_ETY, tab); 1072 gfar_set_attribute(value->h_proto, mask->h_proto, RQFCR_PID_ETY, tab);
1052
1053} 1073}
1054 1074
1055/* Convert a rule to binary filter format of gianfar */ 1075/* Convert a rule to binary filter format of gianfar */
1056static int gfar_convert_to_filer(struct ethtool_rx_flow_spec *rule, 1076static int gfar_convert_to_filer(struct ethtool_rx_flow_spec *rule,
1057 struct filer_table *tab) 1077 struct filer_table *tab)
1058{ 1078{
1059 u32 vlan = 0, vlan_mask = 0; 1079 u32 vlan = 0, vlan_mask = 0;
1060 u32 id = 0, id_mask = 0; 1080 u32 id = 0, id_mask = 0;
1061 u32 cfi = 0, cfi_mask = 0; 1081 u32 cfi = 0, cfi_mask = 0;
1062 u32 prio = 0, prio_mask = 0; 1082 u32 prio = 0, prio_mask = 0;
1063
1064 u32 old_index = tab->index; 1083 u32 old_index = tab->index;
1065 1084
1066 /* Check if vlan is wanted */ 1085 /* Check if vlan is wanted */
@@ -1076,13 +1095,16 @@ static int gfar_convert_to_filer(struct ethtool_rx_flow_spec *rule,
1076 id_mask = rule->m_ext.vlan_tci & VLAN_VID_MASK; 1095 id_mask = rule->m_ext.vlan_tci & VLAN_VID_MASK;
1077 cfi = rule->h_ext.vlan_tci & VLAN_CFI_MASK; 1096 cfi = rule->h_ext.vlan_tci & VLAN_CFI_MASK;
1078 cfi_mask = rule->m_ext.vlan_tci & VLAN_CFI_MASK; 1097 cfi_mask = rule->m_ext.vlan_tci & VLAN_CFI_MASK;
1079 prio = (rule->h_ext.vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT; 1098 prio = (rule->h_ext.vlan_tci & VLAN_PRIO_MASK) >>
1080 prio_mask = (rule->m_ext.vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT; 1099 VLAN_PRIO_SHIFT;
1100 prio_mask = (rule->m_ext.vlan_tci & VLAN_PRIO_MASK) >>
1101 VLAN_PRIO_SHIFT;
1081 1102
1082 if (cfi == VLAN_TAG_PRESENT && cfi_mask == VLAN_TAG_PRESENT) { 1103 if (cfi == VLAN_TAG_PRESENT && cfi_mask == VLAN_TAG_PRESENT) {
1083 vlan |= RQFPR_CFI; 1104 vlan |= RQFPR_CFI;
1084 vlan_mask |= RQFPR_CFI; 1105 vlan_mask |= RQFPR_CFI;
1085 } else if (cfi != VLAN_TAG_PRESENT && cfi_mask == VLAN_TAG_PRESENT) { 1106 } else if (cfi != VLAN_TAG_PRESENT &&
1107 cfi_mask == VLAN_TAG_PRESENT) {
1086 vlan_mask |= RQFPR_CFI; 1108 vlan_mask |= RQFPR_CFI;
1087 } 1109 }
1088 } 1110 }
@@ -1090,34 +1112,36 @@ static int gfar_convert_to_filer(struct ethtool_rx_flow_spec *rule,
1090 switch (rule->flow_type & ~FLOW_EXT) { 1112 switch (rule->flow_type & ~FLOW_EXT) {
1091 case TCP_V4_FLOW: 1113 case TCP_V4_FLOW:
1092 gfar_set_parse_bits(RQFPR_IPV4 | RQFPR_TCP | vlan, 1114 gfar_set_parse_bits(RQFPR_IPV4 | RQFPR_TCP | vlan,
1093 RQFPR_IPV4 | RQFPR_TCP | vlan_mask, tab); 1115 RQFPR_IPV4 | RQFPR_TCP | vlan_mask, tab);
1094 gfar_set_basic_ip(&rule->h_u.tcp_ip4_spec, 1116 gfar_set_basic_ip(&rule->h_u.tcp_ip4_spec,
1095 &rule->m_u.tcp_ip4_spec, tab); 1117 &rule->m_u.tcp_ip4_spec, tab);
1096 break; 1118 break;
1097 case UDP_V4_FLOW: 1119 case UDP_V4_FLOW:
1098 gfar_set_parse_bits(RQFPR_IPV4 | RQFPR_UDP | vlan, 1120 gfar_set_parse_bits(RQFPR_IPV4 | RQFPR_UDP | vlan,
1099 RQFPR_IPV4 | RQFPR_UDP | vlan_mask, tab); 1121 RQFPR_IPV4 | RQFPR_UDP | vlan_mask, tab);
1100 gfar_set_basic_ip(&rule->h_u.udp_ip4_spec, 1122 gfar_set_basic_ip(&rule->h_u.udp_ip4_spec,
1101 &rule->m_u.udp_ip4_spec, tab); 1123 &rule->m_u.udp_ip4_spec, tab);
1102 break; 1124 break;
1103 case SCTP_V4_FLOW: 1125 case SCTP_V4_FLOW:
1104 gfar_set_parse_bits(RQFPR_IPV4 | vlan, RQFPR_IPV4 | vlan_mask, 1126 gfar_set_parse_bits(RQFPR_IPV4 | vlan, RQFPR_IPV4 | vlan_mask,
1105 tab); 1127 tab);
1106 gfar_set_attribute(132, 0, RQFCR_PID_L4P, tab); 1128 gfar_set_attribute(132, 0, RQFCR_PID_L4P, tab);
1107 gfar_set_basic_ip((struct ethtool_tcpip4_spec *) &rule->h_u, 1129 gfar_set_basic_ip((struct ethtool_tcpip4_spec *)&rule->h_u,
1108 (struct ethtool_tcpip4_spec *) &rule->m_u, tab); 1130 (struct ethtool_tcpip4_spec *)&rule->m_u,
1131 tab);
1109 break; 1132 break;
1110 case IP_USER_FLOW: 1133 case IP_USER_FLOW:
1111 gfar_set_parse_bits(RQFPR_IPV4 | vlan, RQFPR_IPV4 | vlan_mask, 1134 gfar_set_parse_bits(RQFPR_IPV4 | vlan, RQFPR_IPV4 | vlan_mask,
1112 tab); 1135 tab);
1113 gfar_set_user_ip((struct ethtool_usrip4_spec *) &rule->h_u, 1136 gfar_set_user_ip((struct ethtool_usrip4_spec *) &rule->h_u,
1114 (struct ethtool_usrip4_spec *) &rule->m_u, tab); 1137 (struct ethtool_usrip4_spec *) &rule->m_u,
1138 tab);
1115 break; 1139 break;
1116 case ETHER_FLOW: 1140 case ETHER_FLOW:
1117 if (vlan) 1141 if (vlan)
1118 gfar_set_parse_bits(vlan, vlan_mask, tab); 1142 gfar_set_parse_bits(vlan, vlan_mask, tab);
1119 gfar_set_ether((struct ethhdr *) &rule->h_u, 1143 gfar_set_ether((struct ethhdr *) &rule->h_u,
1120 (struct ethhdr *) &rule->m_u, tab); 1144 (struct ethhdr *) &rule->m_u, tab);
1121 break; 1145 break;
1122 default: 1146 default:
1123 return -1; 1147 return -1;
@@ -1152,7 +1176,9 @@ static int gfar_convert_to_filer(struct ethtool_rx_flow_spec *rule,
1152 tab->fe[tab->index - 1].ctrl |= RQFCR_CLE; 1176 tab->fe[tab->index - 1].ctrl |= RQFCR_CLE;
1153 } 1177 }
1154 1178
1155 /* In rare cases the cache can be full while there is free space in hw */ 1179 /* In rare cases the cache can be full while there is
1180 * free space in hw
1181 */
1156 if (tab->index > MAX_FILER_CACHE_IDX - 1) 1182 if (tab->index > MAX_FILER_CACHE_IDX - 1)
1157 return -EBUSY; 1183 return -EBUSY;
1158 1184
@@ -1161,7 +1187,7 @@ static int gfar_convert_to_filer(struct ethtool_rx_flow_spec *rule,
1161 1187
1162/* Copy size filer entries */ 1188/* Copy size filer entries */
1163static void gfar_copy_filer_entries(struct gfar_filer_entry dst[0], 1189static void gfar_copy_filer_entries(struct gfar_filer_entry dst[0],
1164 struct gfar_filer_entry src[0], s32 size) 1190 struct gfar_filer_entry src[0], s32 size)
1165{ 1191{
1166 while (size > 0) { 1192 while (size > 0) {
1167 size--; 1193 size--;
@@ -1171,10 +1197,12 @@ static void gfar_copy_filer_entries(struct gfar_filer_entry dst[0],
1171} 1197}
1172 1198
1173/* Delete the contents of the filer-table between start and end 1199/* Delete the contents of the filer-table between start and end
1174 * and collapse them */ 1200 * and collapse them
1201 */
1175static int gfar_trim_filer_entries(u32 begin, u32 end, struct filer_table *tab) 1202static int gfar_trim_filer_entries(u32 begin, u32 end, struct filer_table *tab)
1176{ 1203{
1177 int length; 1204 int length;
1205
1178 if (end > MAX_FILER_CACHE_IDX || end < begin) 1206 if (end > MAX_FILER_CACHE_IDX || end < begin)
1179 return -EINVAL; 1207 return -EINVAL;
1180 1208
@@ -1200,14 +1228,14 @@ static int gfar_trim_filer_entries(u32 begin, u32 end, struct filer_table *tab)
1200 1228
1201/* Make space on the wanted location */ 1229/* Make space on the wanted location */
1202static int gfar_expand_filer_entries(u32 begin, u32 length, 1230static int gfar_expand_filer_entries(u32 begin, u32 length,
1203 struct filer_table *tab) 1231 struct filer_table *tab)
1204{ 1232{
1205 if (length == 0 || length + tab->index > MAX_FILER_CACHE_IDX || begin 1233 if (length == 0 || length + tab->index > MAX_FILER_CACHE_IDX ||
1206 > MAX_FILER_CACHE_IDX) 1234 begin > MAX_FILER_CACHE_IDX)
1207 return -EINVAL; 1235 return -EINVAL;
1208 1236
1209 gfar_copy_filer_entries(&(tab->fe[begin + length]), &(tab->fe[begin]), 1237 gfar_copy_filer_entries(&(tab->fe[begin + length]), &(tab->fe[begin]),
1210 tab->index - length + 1); 1238 tab->index - length + 1);
1211 1239
1212 tab->index += length; 1240 tab->index += length;
1213 return 0; 1241 return 0;
@@ -1215,9 +1243,10 @@ static int gfar_expand_filer_entries(u32 begin, u32 length,
1215 1243
1216static int gfar_get_next_cluster_start(int start, struct filer_table *tab) 1244static int gfar_get_next_cluster_start(int start, struct filer_table *tab)
1217{ 1245{
1218 for (; (start < tab->index) && (start < MAX_FILER_CACHE_IDX - 1); start++) { 1246 for (; (start < tab->index) && (start < MAX_FILER_CACHE_IDX - 1);
1219 if ((tab->fe[start].ctrl & (RQFCR_AND | RQFCR_CLE)) 1247 start++) {
1220 == (RQFCR_AND | RQFCR_CLE)) 1248 if ((tab->fe[start].ctrl & (RQFCR_AND | RQFCR_CLE)) ==
1249 (RQFCR_AND | RQFCR_CLE))
1221 return start; 1250 return start;
1222 } 1251 }
1223 return -1; 1252 return -1;
@@ -1225,16 +1254,16 @@ static int gfar_get_next_cluster_start(int start, struct filer_table *tab)
1225 1254
1226static int gfar_get_next_cluster_end(int start, struct filer_table *tab) 1255static int gfar_get_next_cluster_end(int start, struct filer_table *tab)
1227{ 1256{
1228 for (; (start < tab->index) && (start < MAX_FILER_CACHE_IDX - 1); start++) { 1257 for (; (start < tab->index) && (start < MAX_FILER_CACHE_IDX - 1);
1229 if ((tab->fe[start].ctrl & (RQFCR_AND | RQFCR_CLE)) 1258 start++) {
1230 == (RQFCR_CLE)) 1259 if ((tab->fe[start].ctrl & (RQFCR_AND | RQFCR_CLE)) ==
1260 (RQFCR_CLE))
1231 return start; 1261 return start;
1232 } 1262 }
1233 return -1; 1263 return -1;
1234} 1264}
1235 1265
1236/* 1266/* Uses hardwares clustering option to reduce
1237 * Uses hardwares clustering option to reduce
1238 * the number of filer table entries 1267 * the number of filer table entries
1239 */ 1268 */
1240static void gfar_cluster_filer(struct filer_table *tab) 1269static void gfar_cluster_filer(struct filer_table *tab)
@@ -1244,8 +1273,7 @@ static void gfar_cluster_filer(struct filer_table *tab)
1244 while ((i = gfar_get_next_cluster_start(++i, tab)) != -1) { 1273 while ((i = gfar_get_next_cluster_start(++i, tab)) != -1) {
1245 j = i; 1274 j = i;
1246 while ((j = gfar_get_next_cluster_start(++j, tab)) != -1) { 1275 while ((j = gfar_get_next_cluster_start(++j, tab)) != -1) {
1247 /* 1276 /* The cluster entries self and the previous one
1248 * The cluster entries self and the previous one
1249 * (a mask) must be identical! 1277 * (a mask) must be identical!
1250 */ 1278 */
1251 if (tab->fe[i].ctrl != tab->fe[j].ctrl) 1279 if (tab->fe[i].ctrl != tab->fe[j].ctrl)
@@ -1260,21 +1288,21 @@ static void gfar_cluster_filer(struct filer_table *tab)
1260 jend = gfar_get_next_cluster_end(j, tab); 1288 jend = gfar_get_next_cluster_end(j, tab);
1261 if (jend == -1 || iend == -1) 1289 if (jend == -1 || iend == -1)
1262 break; 1290 break;
1263 /* 1291
1264 * First we make some free space, where our cluster 1292 /* First we make some free space, where our cluster
1265 * element should be. Then we copy it there and finally 1293 * element should be. Then we copy it there and finally
1266 * delete in from its old location. 1294 * delete in from its old location.
1267 */ 1295 */
1268 1296 if (gfar_expand_filer_entries(iend, (jend - j), tab) ==
1269 if (gfar_expand_filer_entries(iend, (jend - j), tab) 1297 -EINVAL)
1270 == -EINVAL)
1271 break; 1298 break;
1272 1299
1273 gfar_copy_filer_entries(&(tab->fe[iend + 1]), 1300 gfar_copy_filer_entries(&(tab->fe[iend + 1]),
1274 &(tab->fe[jend + 1]), jend - j); 1301 &(tab->fe[jend + 1]), jend - j);
1275 1302
1276 if (gfar_trim_filer_entries(jend - 1, 1303 if (gfar_trim_filer_entries(jend - 1,
1277 jend + (jend - j), tab) == -EINVAL) 1304 jend + (jend - j),
1305 tab) == -EINVAL)
1278 return; 1306 return;
1279 1307
1280 /* Mask out cluster bit */ 1308 /* Mask out cluster bit */
@@ -1285,8 +1313,9 @@ static void gfar_cluster_filer(struct filer_table *tab)
1285 1313
1286/* Swaps the masked bits of a1<>a2 and b1<>b2 */ 1314/* Swaps the masked bits of a1<>a2 and b1<>b2 */
1287static void gfar_swap_bits(struct gfar_filer_entry *a1, 1315static void gfar_swap_bits(struct gfar_filer_entry *a1,
1288 struct gfar_filer_entry *a2, struct gfar_filer_entry *b1, 1316 struct gfar_filer_entry *a2,
1289 struct gfar_filer_entry *b2, u32 mask) 1317 struct gfar_filer_entry *b1,
1318 struct gfar_filer_entry *b2, u32 mask)
1290{ 1319{
1291 u32 temp[4]; 1320 u32 temp[4];
1292 temp[0] = a1->ctrl & mask; 1321 temp[0] = a1->ctrl & mask;
@@ -1305,13 +1334,12 @@ static void gfar_swap_bits(struct gfar_filer_entry *a1,
1305 b2->ctrl |= temp[2]; 1334 b2->ctrl |= temp[2];
1306} 1335}
1307 1336
1308/* 1337/* Generate a list consisting of masks values with their start and
1309 * Generate a list consisting of masks values with their start and
1310 * end of validity and block as indicator for parts belonging 1338 * end of validity and block as indicator for parts belonging
1311 * together (glued by ANDs) in mask_table 1339 * together (glued by ANDs) in mask_table
1312 */ 1340 */
1313static u32 gfar_generate_mask_table(struct gfar_mask_entry *mask_table, 1341static u32 gfar_generate_mask_table(struct gfar_mask_entry *mask_table,
1314 struct filer_table *tab) 1342 struct filer_table *tab)
1315{ 1343{
1316 u32 i, and_index = 0, block_index = 1; 1344 u32 i, and_index = 0, block_index = 1;
1317 1345
@@ -1327,13 +1355,13 @@ static u32 gfar_generate_mask_table(struct gfar_mask_entry *mask_table,
1327 and_index++; 1355 and_index++;
1328 } 1356 }
1329 /* cluster starts and ends will be separated because they should 1357 /* cluster starts and ends will be separated because they should
1330 * hold their position */ 1358 * hold their position
1359 */
1331 if (tab->fe[i].ctrl & RQFCR_CLE) 1360 if (tab->fe[i].ctrl & RQFCR_CLE)
1332 block_index++; 1361 block_index++;
1333 /* A not set AND indicates the end of a depended block */ 1362 /* A not set AND indicates the end of a depended block */
1334 if (!(tab->fe[i].ctrl & RQFCR_AND)) 1363 if (!(tab->fe[i].ctrl & RQFCR_AND))
1335 block_index++; 1364 block_index++;
1336
1337 } 1365 }
1338 1366
1339 mask_table[and_index - 1].end = i - 1; 1367 mask_table[and_index - 1].end = i - 1;
@@ -1341,14 +1369,13 @@ static u32 gfar_generate_mask_table(struct gfar_mask_entry *mask_table,
1341 return and_index; 1369 return and_index;
1342} 1370}
1343 1371
1344/* 1372/* Sorts the entries of mask_table by the values of the masks.
1345 * Sorts the entries of mask_table by the values of the masks.
1346 * Important: The 0xFF80 flags of the first and last entry of a 1373 * Important: The 0xFF80 flags of the first and last entry of a
1347 * block must hold their position (which queue, CLusterEnable, ReJEct, 1374 * block must hold their position (which queue, CLusterEnable, ReJEct,
1348 * AND) 1375 * AND)
1349 */ 1376 */
1350static void gfar_sort_mask_table(struct gfar_mask_entry *mask_table, 1377static void gfar_sort_mask_table(struct gfar_mask_entry *mask_table,
1351 struct filer_table *temp_table, u32 and_index) 1378 struct filer_table *temp_table, u32 and_index)
1352{ 1379{
1353 /* Pointer to compare function (_asc or _desc) */ 1380 /* Pointer to compare function (_asc or _desc) */
1354 int (*gfar_comp)(const void *, const void *); 1381 int (*gfar_comp)(const void *, const void *);
@@ -1359,16 +1386,16 @@ static void gfar_sort_mask_table(struct gfar_mask_entry *mask_table,
1359 gfar_comp = &gfar_comp_desc; 1386 gfar_comp = &gfar_comp_desc;
1360 1387
1361 for (i = 0; i < and_index; i++) { 1388 for (i = 0; i < and_index; i++) {
1362
1363 if (prev != mask_table[i].block) { 1389 if (prev != mask_table[i].block) {
1364 old_first = mask_table[start].start + 1; 1390 old_first = mask_table[start].start + 1;
1365 old_last = mask_table[i - 1].end; 1391 old_last = mask_table[i - 1].end;
1366 sort(mask_table + start, size, 1392 sort(mask_table + start, size,
1367 sizeof(struct gfar_mask_entry), 1393 sizeof(struct gfar_mask_entry),
1368 gfar_comp, &gfar_swap); 1394 gfar_comp, &gfar_swap);
1369 1395
1370 /* Toggle order for every block. This makes the 1396 /* Toggle order for every block. This makes the
1371 * thing more efficient! */ 1397 * thing more efficient!
1398 */
1372 if (gfar_comp == gfar_comp_desc) 1399 if (gfar_comp == gfar_comp_desc)
1373 gfar_comp = &gfar_comp_asc; 1400 gfar_comp = &gfar_comp_asc;
1374 else 1401 else
@@ -1378,12 +1405,11 @@ static void gfar_sort_mask_table(struct gfar_mask_entry *mask_table,
1378 new_last = mask_table[i - 1].end; 1405 new_last = mask_table[i - 1].end;
1379 1406
1380 gfar_swap_bits(&temp_table->fe[new_first], 1407 gfar_swap_bits(&temp_table->fe[new_first],
1381 &temp_table->fe[old_first], 1408 &temp_table->fe[old_first],
1382 &temp_table->fe[new_last], 1409 &temp_table->fe[new_last],
1383 &temp_table->fe[old_last], 1410 &temp_table->fe[old_last],
1384 RQFCR_QUEUE | RQFCR_CLE | 1411 RQFCR_QUEUE | RQFCR_CLE |
1385 RQFCR_RJE | RQFCR_AND 1412 RQFCR_RJE | RQFCR_AND);
1386 );
1387 1413
1388 start = i; 1414 start = i;
1389 size = 0; 1415 size = 0;
@@ -1391,11 +1417,9 @@ static void gfar_sort_mask_table(struct gfar_mask_entry *mask_table,
1391 size++; 1417 size++;
1392 prev = mask_table[i].block; 1418 prev = mask_table[i].block;
1393 } 1419 }
1394
1395} 1420}
1396 1421
1397/* 1422/* Reduces the number of masks needed in the filer table to save entries
1398 * Reduces the number of masks needed in the filer table to save entries
1399 * This is done by sorting the masks of a depended block. A depended block is 1423 * This is done by sorting the masks of a depended block. A depended block is
1400 * identified by gluing ANDs or CLE. The sorting order toggles after every 1424 * identified by gluing ANDs or CLE. The sorting order toggles after every
1401 * block. Of course entries in scope of a mask must change their location with 1425 * block. Of course entries in scope of a mask must change their location with
@@ -1410,13 +1434,14 @@ static int gfar_optimize_filer_masks(struct filer_table *tab)
1410 s32 ret = 0; 1434 s32 ret = 0;
1411 1435
1412 /* We need a copy of the filer table because 1436 /* We need a copy of the filer table because
1413 * we want to change its order */ 1437 * we want to change its order
1438 */
1414 temp_table = kmemdup(tab, sizeof(*temp_table), GFP_KERNEL); 1439 temp_table = kmemdup(tab, sizeof(*temp_table), GFP_KERNEL);
1415 if (temp_table == NULL) 1440 if (temp_table == NULL)
1416 return -ENOMEM; 1441 return -ENOMEM;
1417 1442
1418 mask_table = kcalloc(MAX_FILER_CACHE_IDX / 2 + 1, 1443 mask_table = kcalloc(MAX_FILER_CACHE_IDX / 2 + 1,
1419 sizeof(struct gfar_mask_entry), GFP_KERNEL); 1444 sizeof(struct gfar_mask_entry), GFP_KERNEL);
1420 1445
1421 if (mask_table == NULL) { 1446 if (mask_table == NULL) {
1422 ret = -ENOMEM; 1447 ret = -ENOMEM;
@@ -1428,7 +1453,8 @@ static int gfar_optimize_filer_masks(struct filer_table *tab)
1428 gfar_sort_mask_table(mask_table, temp_table, and_index); 1453 gfar_sort_mask_table(mask_table, temp_table, and_index);
1429 1454
1430 /* Now we can copy the data from our duplicated filer table to 1455 /* Now we can copy the data from our duplicated filer table to
1431 * the real one in the order the mask table says */ 1456 * the real one in the order the mask table says
1457 */
1432 for (i = 0; i < and_index; i++) { 1458 for (i = 0; i < and_index; i++) {
1433 size = mask_table[i].end - mask_table[i].start + 1; 1459 size = mask_table[i].end - mask_table[i].start + 1;
1434 gfar_copy_filer_entries(&(tab->fe[j]), 1460 gfar_copy_filer_entries(&(tab->fe[j]),
@@ -1437,7 +1463,8 @@ static int gfar_optimize_filer_masks(struct filer_table *tab)
1437 } 1463 }
1438 1464
1439 /* And finally we just have to check for duplicated masks and drop the 1465 /* And finally we just have to check for duplicated masks and drop the
1440 * second ones */ 1466 * second ones
1467 */
1441 for (i = 0; i < tab->index && i < MAX_FILER_CACHE_IDX; i++) { 1468 for (i = 0; i < tab->index && i < MAX_FILER_CACHE_IDX; i++) {
1442 if (tab->fe[i].ctrl == 0x80) { 1469 if (tab->fe[i].ctrl == 0x80) {
1443 previous_mask = i++; 1470 previous_mask = i++;
@@ -1448,7 +1475,8 @@ static int gfar_optimize_filer_masks(struct filer_table *tab)
1448 if (tab->fe[i].ctrl == 0x80) { 1475 if (tab->fe[i].ctrl == 0x80) {
1449 if (tab->fe[i].prop == tab->fe[previous_mask].prop) { 1476 if (tab->fe[i].prop == tab->fe[previous_mask].prop) {
1450 /* Two identical ones found! 1477 /* Two identical ones found!
1451 * So drop the second one! */ 1478 * So drop the second one!
1479 */
1452 gfar_trim_filer_entries(i, i, tab); 1480 gfar_trim_filer_entries(i, i, tab);
1453 } else 1481 } else
1454 /* Not identical! */ 1482 /* Not identical! */
@@ -1463,7 +1491,7 @@ end: kfree(temp_table);
1463 1491
1464/* Write the bit-pattern from software's buffer to hardware registers */ 1492/* Write the bit-pattern from software's buffer to hardware registers */
1465static int gfar_write_filer_table(struct gfar_private *priv, 1493static int gfar_write_filer_table(struct gfar_private *priv,
1466 struct filer_table *tab) 1494 struct filer_table *tab)
1467{ 1495{
1468 u32 i = 0; 1496 u32 i = 0;
1469 if (tab->index > MAX_FILER_IDX - 1) 1497 if (tab->index > MAX_FILER_IDX - 1)
@@ -1473,13 +1501,15 @@ static int gfar_write_filer_table(struct gfar_private *priv,
1473 lock_rx_qs(priv); 1501 lock_rx_qs(priv);
1474 1502
1475 /* Fill regular entries */ 1503 /* Fill regular entries */
1476 for (; i < MAX_FILER_IDX - 1 && (tab->fe[i].ctrl | tab->fe[i].ctrl); i++) 1504 for (; i < MAX_FILER_IDX - 1 && (tab->fe[i].ctrl | tab->fe[i].ctrl);
1505 i++)
1477 gfar_write_filer(priv, i, tab->fe[i].ctrl, tab->fe[i].prop); 1506 gfar_write_filer(priv, i, tab->fe[i].ctrl, tab->fe[i].prop);
1478 /* Fill the rest with fall-troughs */ 1507 /* Fill the rest with fall-troughs */
1479 for (; i < MAX_FILER_IDX - 1; i++) 1508 for (; i < MAX_FILER_IDX - 1; i++)
1480 gfar_write_filer(priv, i, 0x60, 0xFFFFFFFF); 1509 gfar_write_filer(priv, i, 0x60, 0xFFFFFFFF);
1481 /* Last entry must be default accept 1510 /* Last entry must be default accept
1482 * because that's what people expect */ 1511 * because that's what people expect
1512 */
1483 gfar_write_filer(priv, i, 0x20, 0x0); 1513 gfar_write_filer(priv, i, 0x20, 0x0);
1484 1514
1485 unlock_rx_qs(priv); 1515 unlock_rx_qs(priv);
@@ -1488,21 +1518,21 @@ static int gfar_write_filer_table(struct gfar_private *priv,
1488} 1518}
1489 1519
1490static int gfar_check_capability(struct ethtool_rx_flow_spec *flow, 1520static int gfar_check_capability(struct ethtool_rx_flow_spec *flow,
1491 struct gfar_private *priv) 1521 struct gfar_private *priv)
1492{ 1522{
1493 1523
1494 if (flow->flow_type & FLOW_EXT) { 1524 if (flow->flow_type & FLOW_EXT) {
1495 if (~flow->m_ext.data[0] || ~flow->m_ext.data[1]) 1525 if (~flow->m_ext.data[0] || ~flow->m_ext.data[1])
1496 netdev_warn(priv->ndev, 1526 netdev_warn(priv->ndev,
1497 "User-specific data not supported!\n"); 1527 "User-specific data not supported!\n");
1498 if (~flow->m_ext.vlan_etype) 1528 if (~flow->m_ext.vlan_etype)
1499 netdev_warn(priv->ndev, 1529 netdev_warn(priv->ndev,
1500 "VLAN-etype not supported!\n"); 1530 "VLAN-etype not supported!\n");
1501 } 1531 }
1502 if (flow->flow_type == IP_USER_FLOW) 1532 if (flow->flow_type == IP_USER_FLOW)
1503 if (flow->h_u.usr_ip4_spec.ip_ver != ETH_RX_NFC_IP4) 1533 if (flow->h_u.usr_ip4_spec.ip_ver != ETH_RX_NFC_IP4)
1504 netdev_warn(priv->ndev, 1534 netdev_warn(priv->ndev,
1505 "IP-Version differing from IPv4 not supported!\n"); 1535 "IP-Version differing from IPv4 not supported!\n");
1506 1536
1507 return 0; 1537 return 0;
1508} 1538}
@@ -1520,15 +1550,18 @@ static int gfar_process_filer_changes(struct gfar_private *priv)
1520 return -ENOMEM; 1550 return -ENOMEM;
1521 1551
1522 /* Now convert the existing filer data from flow_spec into 1552 /* Now convert the existing filer data from flow_spec into
1523 * filer tables binary format */ 1553 * filer tables binary format
1554 */
1524 list_for_each_entry(j, &priv->rx_list.list, list) { 1555 list_for_each_entry(j, &priv->rx_list.list, list) {
1525 ret = gfar_convert_to_filer(&j->fs, tab); 1556 ret = gfar_convert_to_filer(&j->fs, tab);
1526 if (ret == -EBUSY) { 1557 if (ret == -EBUSY) {
1527 netdev_err(priv->ndev, "Rule not added: No free space!\n"); 1558 netdev_err(priv->ndev,
1559 "Rule not added: No free space!\n");
1528 goto end; 1560 goto end;
1529 } 1561 }
1530 if (ret == -1) { 1562 if (ret == -1) {
1531 netdev_err(priv->ndev, "Rule not added: Unsupported Flow-type!\n"); 1563 netdev_err(priv->ndev,
1564 "Rule not added: Unsupported Flow-type!\n");
1532 goto end; 1565 goto end;
1533 } 1566 }
1534 } 1567 }
@@ -1540,9 +1573,9 @@ static int gfar_process_filer_changes(struct gfar_private *priv)
1540 gfar_optimize_filer_masks(tab); 1573 gfar_optimize_filer_masks(tab);
1541 1574
1542 pr_debug("\n\tSummary:\n" 1575 pr_debug("\n\tSummary:\n"
1543 "\tData on hardware: %d\n" 1576 "\tData on hardware: %d\n"
1544 "\tCompression rate: %d%%\n", 1577 "\tCompression rate: %d%%\n",
1545 tab->index, 100 - (100 * tab->index) / i); 1578 tab->index, 100 - (100 * tab->index) / i);
1546 1579
1547 /* Write everything to hardware */ 1580 /* Write everything to hardware */
1548 ret = gfar_write_filer_table(priv, tab); 1581 ret = gfar_write_filer_table(priv, tab);
@@ -1551,7 +1584,8 @@ static int gfar_process_filer_changes(struct gfar_private *priv)
1551 goto end; 1584 goto end;
1552 } 1585 }
1553 1586
1554end: kfree(tab); 1587end:
1588 kfree(tab);
1555 return ret; 1589 return ret;
1556} 1590}
1557 1591
@@ -1569,7 +1603,7 @@ static void gfar_invert_masks(struct ethtool_rx_flow_spec *flow)
1569} 1603}
1570 1604
1571static int gfar_add_cls(struct gfar_private *priv, 1605static int gfar_add_cls(struct gfar_private *priv,
1572 struct ethtool_rx_flow_spec *flow) 1606 struct ethtool_rx_flow_spec *flow)
1573{ 1607{
1574 struct ethtool_flow_spec_container *temp, *comp; 1608 struct ethtool_flow_spec_container *temp, *comp;
1575 int ret = 0; 1609 int ret = 0;
@@ -1591,7 +1625,6 @@ static int gfar_add_cls(struct gfar_private *priv,
1591 list_add(&temp->list, &priv->rx_list.list); 1625 list_add(&temp->list, &priv->rx_list.list);
1592 goto process; 1626 goto process;
1593 } else { 1627 } else {
1594
1595 list_for_each_entry(comp, &priv->rx_list.list, list) { 1628 list_for_each_entry(comp, &priv->rx_list.list, list) {
1596 if (comp->fs.location > flow->location) { 1629 if (comp->fs.location > flow->location) {
1597 list_add_tail(&temp->list, &comp->list); 1630 list_add_tail(&temp->list, &comp->list);
@@ -1599,8 +1632,8 @@ static int gfar_add_cls(struct gfar_private *priv,
1599 } 1632 }
1600 if (comp->fs.location == flow->location) { 1633 if (comp->fs.location == flow->location) {
1601 netdev_err(priv->ndev, 1634 netdev_err(priv->ndev,
1602 "Rule not added: ID %d not free!\n", 1635 "Rule not added: ID %d not free!\n",
1603 flow->location); 1636 flow->location);
1604 ret = -EBUSY; 1637 ret = -EBUSY;
1605 goto clean_mem; 1638 goto clean_mem;
1606 } 1639 }
@@ -1642,7 +1675,6 @@ static int gfar_del_cls(struct gfar_private *priv, u32 loc)
1642 } 1675 }
1643 1676
1644 return ret; 1677 return ret;
1645
1646} 1678}
1647 1679
1648static int gfar_get_cls(struct gfar_private *priv, struct ethtool_rxnfc *cmd) 1680static int gfar_get_cls(struct gfar_private *priv, struct ethtool_rxnfc *cmd)
@@ -1663,7 +1695,7 @@ static int gfar_get_cls(struct gfar_private *priv, struct ethtool_rxnfc *cmd)
1663} 1695}
1664 1696
1665static int gfar_get_cls_all(struct gfar_private *priv, 1697static int gfar_get_cls_all(struct gfar_private *priv,
1666 struct ethtool_rxnfc *cmd, u32 *rule_locs) 1698 struct ethtool_rxnfc *cmd, u32 *rule_locs)
1667{ 1699{
1668 struct ethtool_flow_spec_container *comp; 1700 struct ethtool_flow_spec_container *comp;
1669 u32 i = 0; 1701 u32 i = 0;
@@ -1714,7 +1746,7 @@ static int gfar_set_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
1714} 1746}
1715 1747
1716static int gfar_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd, 1748static int gfar_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
1717 u32 *rule_locs) 1749 u32 *rule_locs)
1718{ 1750{
1719 struct gfar_private *priv = netdev_priv(dev); 1751 struct gfar_private *priv = netdev_priv(dev);
1720 int ret = 0; 1752 int ret = 0;
@@ -1748,23 +1780,19 @@ static int gfar_get_ts_info(struct net_device *dev,
1748 struct gfar_private *priv = netdev_priv(dev); 1780 struct gfar_private *priv = netdev_priv(dev);
1749 1781
1750 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)) { 1782 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)) {
1751 info->so_timestamping = 1783 info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE |
1752 SOF_TIMESTAMPING_RX_SOFTWARE | 1784 SOF_TIMESTAMPING_SOFTWARE;
1753 SOF_TIMESTAMPING_SOFTWARE;
1754 info->phc_index = -1; 1785 info->phc_index = -1;
1755 return 0; 1786 return 0;
1756 } 1787 }
1757 info->so_timestamping = 1788 info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
1758 SOF_TIMESTAMPING_TX_HARDWARE | 1789 SOF_TIMESTAMPING_RX_HARDWARE |
1759 SOF_TIMESTAMPING_RX_HARDWARE | 1790 SOF_TIMESTAMPING_RAW_HARDWARE;
1760 SOF_TIMESTAMPING_RAW_HARDWARE;
1761 info->phc_index = gfar_phc_index; 1791 info->phc_index = gfar_phc_index;
1762 info->tx_types = 1792 info->tx_types = (1 << HWTSTAMP_TX_OFF) |
1763 (1 << HWTSTAMP_TX_OFF) | 1793 (1 << HWTSTAMP_TX_ON);
1764 (1 << HWTSTAMP_TX_ON); 1794 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
1765 info->rx_filters = 1795 (1 << HWTSTAMP_FILTER_ALL);
1766 (1 << HWTSTAMP_FILTER_NONE) |
1767 (1 << HWTSTAMP_FILTER_ALL);
1768 return 0; 1796 return 0;
1769} 1797}
1770 1798
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index 9ac14f80485..21c6574c5f1 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -185,7 +185,7 @@ static void mem_disp(u8 *addr, int size)
185 for (; (u32) i < (u32) addr + size4Aling; i += 4) 185 for (; (u32) i < (u32) addr + size4Aling; i += 4)
186 printk("%08x ", *((u32 *) (i))); 186 printk("%08x ", *((u32 *) (i)));
187 for (; (u32) i < (u32) addr + size; i++) 187 for (; (u32) i < (u32) addr + size; i++)
188 printk("%02x", *((u8 *) (i))); 188 printk("%02x", *((i)));
189 if (notAlign == 1) 189 if (notAlign == 1)
190 printk("\r\n"); 190 printk("\r\n");
191} 191}
diff --git a/drivers/net/ethernet/hp/hp100.c b/drivers/net/ethernet/hp/hp100.c
index d496673f090..3f4391bede8 100644
--- a/drivers/net/ethernet/hp/hp100.c
+++ b/drivers/net/ethernet/hp/hp100.c
@@ -1217,7 +1217,7 @@ static int hp100_init_rxpdl(struct net_device *dev,
1217 1217
1218 ringptr->pdl = pdlptr + 1; 1218 ringptr->pdl = pdlptr + 1;
1219 ringptr->pdl_paddr = virt_to_whatever(dev, pdlptr + 1); 1219 ringptr->pdl_paddr = virt_to_whatever(dev, pdlptr + 1);
1220 ringptr->skb = (void *) NULL; 1220 ringptr->skb = NULL;
1221 1221
1222 /* 1222 /*
1223 * Write address and length of first PDL Fragment (which is used for 1223 * Write address and length of first PDL Fragment (which is used for
@@ -1243,7 +1243,7 @@ static int hp100_init_txpdl(struct net_device *dev,
1243 1243
1244 ringptr->pdl = pdlptr; /* +1; */ 1244 ringptr->pdl = pdlptr; /* +1; */
1245 ringptr->pdl_paddr = virt_to_whatever(dev, pdlptr); /* +1 */ 1245 ringptr->pdl_paddr = virt_to_whatever(dev, pdlptr); /* +1 */
1246 ringptr->skb = (void *) NULL; 1246 ringptr->skb = NULL;
1247 1247
1248 return roundup(MAX_TX_FRAG * 2 + 2, 4); 1248 return roundup(MAX_TX_FRAG * 2 + 2, 4);
1249} 1249}
@@ -1628,7 +1628,7 @@ static void hp100_clean_txring(struct net_device *dev)
1628 /* Conversion to new PCI API : NOP */ 1628 /* Conversion to new PCI API : NOP */
1629 pci_unmap_single(lp->pci_dev, (dma_addr_t) lp->txrhead->pdl[1], lp->txrhead->pdl[2], PCI_DMA_TODEVICE); 1629 pci_unmap_single(lp->pci_dev, (dma_addr_t) lp->txrhead->pdl[1], lp->txrhead->pdl[2], PCI_DMA_TODEVICE);
1630 dev_kfree_skb_any(lp->txrhead->skb); 1630 dev_kfree_skb_any(lp->txrhead->skb);
1631 lp->txrhead->skb = (void *) NULL; 1631 lp->txrhead->skb = NULL;
1632 lp->txrhead = lp->txrhead->next; 1632 lp->txrhead = lp->txrhead->next;
1633 lp->txrcommit--; 1633 lp->txrcommit--;
1634 } 1634 }
diff --git a/drivers/net/ethernet/i825xx/lp486e.c b/drivers/net/ethernet/i825xx/lp486e.c
index 6c2952c8ea1..3735bfa5360 100644
--- a/drivers/net/ethernet/i825xx/lp486e.c
+++ b/drivers/net/ethernet/i825xx/lp486e.c
@@ -629,10 +629,10 @@ init_i596(struct net_device *dev) {
629 629
630 memcpy ((void *)lp->eth_addr, dev->dev_addr, 6); 630 memcpy ((void *)lp->eth_addr, dev->dev_addr, 6);
631 lp->set_add.command = CmdIASetup; 631 lp->set_add.command = CmdIASetup;
632 i596_add_cmd(dev, (struct i596_cmd *)&lp->set_add); 632 i596_add_cmd(dev, &lp->set_add);
633 633
634 lp->tdr.command = CmdTDR; 634 lp->tdr.command = CmdTDR;
635 i596_add_cmd(dev, (struct i596_cmd *)&lp->tdr); 635 i596_add_cmd(dev, &lp->tdr);
636 636
637 if (lp->scb.command && i596_timeout(dev, "i82596 init", 200)) 637 if (lp->scb.command && i596_timeout(dev, "i82596 init", 200))
638 return 1; 638 return 1;
@@ -737,7 +737,7 @@ i596_cleanup_cmd(struct net_device *dev) {
737 737
738 lp = netdev_priv(dev); 738 lp = netdev_priv(dev);
739 while (lp->cmd_head) { 739 while (lp->cmd_head) {
740 cmd = (struct i596_cmd *)lp->cmd_head; 740 cmd = lp->cmd_head;
741 741
742 lp->cmd_head = pa_to_va(lp->cmd_head->pa_next); 742 lp->cmd_head = pa_to_va(lp->cmd_head->pa_next);
743 lp->cmd_backlog--; 743 lp->cmd_backlog--;
@@ -1281,7 +1281,7 @@ static void set_multicast_list(struct net_device *dev) {
1281 lp->i596_config[8] |= 0x01; 1281 lp->i596_config[8] |= 0x01;
1282 } 1282 }
1283 1283
1284 i596_add_cmd(dev, (struct i596_cmd *) &lp->set_conf); 1284 i596_add_cmd(dev, &lp->set_conf);
1285 } 1285 }
1286} 1286}
1287 1287
diff --git a/drivers/net/ethernet/i825xx/sun3_82586.c b/drivers/net/ethernet/i825xx/sun3_82586.c
index cae17f4bc93..353f57f675d 100644
--- a/drivers/net/ethernet/i825xx/sun3_82586.c
+++ b/drivers/net/ethernet/i825xx/sun3_82586.c
@@ -571,7 +571,7 @@ static int init586(struct net_device *dev)
571 } 571 }
572#endif 572#endif
573 573
574 ptr = alloc_rfa(dev,(void *)ptr); /* init receive-frame-area */ 574 ptr = alloc_rfa(dev,ptr); /* init receive-frame-area */
575 575
576 /* 576 /*
577 * alloc xmit-buffs / init xmit_cmds 577 * alloc xmit-buffs / init xmit_cmds
@@ -584,7 +584,7 @@ static int init586(struct net_device *dev)
584 ptr = (char *) ptr + XMIT_BUFF_SIZE; 584 ptr = (char *) ptr + XMIT_BUFF_SIZE;
585 p->xmit_buffs[i] = (struct tbd_struct *)ptr; /* TBD */ 585 p->xmit_buffs[i] = (struct tbd_struct *)ptr; /* TBD */
586 ptr = (char *) ptr + sizeof(struct tbd_struct); 586 ptr = (char *) ptr + sizeof(struct tbd_struct);
587 if((void *)ptr > (void *)dev->mem_end) 587 if(ptr > (void *)dev->mem_end)
588 { 588 {
589 printk("%s: not enough shared-mem for your configuration!\n",dev->name); 589 printk("%s: not enough shared-mem for your configuration!\n",dev->name);
590 return 1; 590 return 1;
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_qmr.c b/drivers/net/ethernet/ibm/ehea/ehea_qmr.c
index 4fb47f14dbf..cb66f574dc9 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_qmr.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_qmr.c
@@ -376,9 +376,7 @@ int ehea_destroy_eq(struct ehea_eq *eq)
376 return 0; 376 return 0;
377} 377}
378 378
379/** 379/* allocates memory for a queue and registers pages in phyp */
380 * allocates memory for a queue and registers pages in phyp
381 */
382static int ehea_qp_alloc_register(struct ehea_qp *qp, struct hw_queue *hw_queue, 380static int ehea_qp_alloc_register(struct ehea_qp *qp, struct hw_queue *hw_queue,
383 int nr_pages, int wqe_size, int act_nr_sges, 381 int nr_pages, int wqe_size, int act_nr_sges,
384 struct ehea_adapter *adapter, int h_call_q_selector) 382 struct ehea_adapter *adapter, int h_call_q_selector)
diff --git a/drivers/net/ethernet/intel/e1000/e1000_hw.c b/drivers/net/ethernet/intel/e1000/e1000_hw.c
index c526279e492..3d683952876 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_hw.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_hw.c
@@ -399,7 +399,7 @@ void e1000_set_media_type(struct e1000_hw *hw)
399} 399}
400 400
401/** 401/**
402 * e1000_reset_hw: reset the hardware completely 402 * e1000_reset_hw - reset the hardware completely
403 * @hw: Struct containing variables accessed by shared code 403 * @hw: Struct containing variables accessed by shared code
404 * 404 *
405 * Reset the transmit and receive units; mask and clear all interrupts. 405 * Reset the transmit and receive units; mask and clear all interrupts.
@@ -546,7 +546,7 @@ s32 e1000_reset_hw(struct e1000_hw *hw)
546} 546}
547 547
548/** 548/**
549 * e1000_init_hw: Performs basic configuration of the adapter. 549 * e1000_init_hw - Performs basic configuration of the adapter.
550 * @hw: Struct containing variables accessed by shared code 550 * @hw: Struct containing variables accessed by shared code
551 * 551 *
552 * Assumes that the controller has previously been reset and is in a 552 * Assumes that the controller has previously been reset and is in a
@@ -2591,7 +2591,7 @@ s32 e1000_check_for_link(struct e1000_hw *hw)
2591 * @hw: Struct containing variables accessed by shared code 2591 * @hw: Struct containing variables accessed by shared code
2592 * @speed: Speed of the connection 2592 * @speed: Speed of the connection
2593 * @duplex: Duplex setting of the connection 2593 * @duplex: Duplex setting of the connection
2594 2594 *
2595 * Detects the current speed and duplex settings of the hardware. 2595 * Detects the current speed and duplex settings of the hardware.
2596 */ 2596 */
2597s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, u16 *duplex) 2597s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, u16 *duplex)
@@ -2959,7 +2959,7 @@ static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
2959 * @hw: Struct containing variables accessed by shared code 2959 * @hw: Struct containing variables accessed by shared code
2960 * @reg_addr: address of the PHY register to write 2960 * @reg_addr: address of the PHY register to write
2961 * @data: data to write to the PHY 2961 * @data: data to write to the PHY
2962 2962 *
2963 * Writes a value to a PHY register 2963 * Writes a value to a PHY register
2964 */ 2964 */
2965s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 phy_data) 2965s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 phy_data)
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 7483ca0a628..3bfbb8df898 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -721,9 +721,7 @@ void e1000_reset(struct e1000_adapter *adapter)
721 e1000_release_manageability(adapter); 721 e1000_release_manageability(adapter);
722} 722}
723 723
724/** 724/* Dump the eeprom for users having checksum issues */
725 * Dump the eeprom for users having checksum issues
726 **/
727static void e1000_dump_eeprom(struct e1000_adapter *adapter) 725static void e1000_dump_eeprom(struct e1000_adapter *adapter)
728{ 726{
729 struct net_device *netdev = adapter->netdev; 727 struct net_device *netdev = adapter->netdev;
@@ -1078,18 +1076,18 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
1078 netdev->priv_flags |= IFF_SUPP_NOFCS; 1076 netdev->priv_flags |= IFF_SUPP_NOFCS;
1079 1077
1080 netdev->features |= netdev->hw_features; 1078 netdev->features |= netdev->hw_features;
1081 netdev->hw_features |= NETIF_F_RXCSUM; 1079 netdev->hw_features |= (NETIF_F_RXCSUM |
1082 netdev->hw_features |= NETIF_F_RXALL; 1080 NETIF_F_RXALL |
1083 netdev->hw_features |= NETIF_F_RXFCS; 1081 NETIF_F_RXFCS);
1084 1082
1085 if (pci_using_dac) { 1083 if (pci_using_dac) {
1086 netdev->features |= NETIF_F_HIGHDMA; 1084 netdev->features |= NETIF_F_HIGHDMA;
1087 netdev->vlan_features |= NETIF_F_HIGHDMA; 1085 netdev->vlan_features |= NETIF_F_HIGHDMA;
1088 } 1086 }
1089 1087
1090 netdev->vlan_features |= NETIF_F_TSO; 1088 netdev->vlan_features |= (NETIF_F_TSO |
1091 netdev->vlan_features |= NETIF_F_HW_CSUM; 1089 NETIF_F_HW_CSUM |
1092 netdev->vlan_features |= NETIF_F_SG; 1090 NETIF_F_SG);
1093 1091
1094 netdev->priv_flags |= IFF_UNICAST_FLT; 1092 netdev->priv_flags |= IFF_UNICAST_FLT;
1095 1093
@@ -3056,14 +3054,13 @@ static void e1000_tx_queue(struct e1000_adapter *adapter,
3056 mmiowb(); 3054 mmiowb();
3057} 3055}
3058 3056
3059/** 3057/* 82547 workaround to avoid controller hang in half-duplex environment.
3060 * 82547 workaround to avoid controller hang in half-duplex environment.
3061 * The workaround is to avoid queuing a large packet that would span 3058 * The workaround is to avoid queuing a large packet that would span
3062 * the internal Tx FIFO ring boundary by notifying the stack to resend 3059 * the internal Tx FIFO ring boundary by notifying the stack to resend
3063 * the packet at a later time. This gives the Tx FIFO an opportunity to 3060 * the packet at a later time. This gives the Tx FIFO an opportunity to
3064 * flush all packets. When that occurs, we reset the Tx FIFO pointers 3061 * flush all packets. When that occurs, we reset the Tx FIFO pointers
3065 * to the beginning of the Tx FIFO. 3062 * to the beginning of the Tx FIFO.
3066 **/ 3063 */
3067 3064
3068#define E1000_FIFO_HDR 0x10 3065#define E1000_FIFO_HDR 0x10
3069#define E1000_82547_PAD_LEN 0x3E0 3066#define E1000_82547_PAD_LEN 0x3E0
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 623e30b9964..ca477e87eb8 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -2159,7 +2159,7 @@ void e1000e_release_hw_control(struct e1000_adapter *adapter)
2159} 2159}
2160 2160
2161/** 2161/**
2162 * @e1000_alloc_ring - allocate memory for a ring structure 2162 * e1000_alloc_ring_dma - allocate memory for a ring structure
2163 **/ 2163 **/
2164static int e1000_alloc_ring_dma(struct e1000_adapter *adapter, 2164static int e1000_alloc_ring_dma(struct e1000_adapter *adapter,
2165 struct e1000_ring *ring) 2165 struct e1000_ring *ring)
@@ -6191,7 +6191,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
6191 } 6191 }
6192 6192
6193 if (hw->phy.ops.check_reset_block && hw->phy.ops.check_reset_block(hw)) 6193 if (hw->phy.ops.check_reset_block && hw->phy.ops.check_reset_block(hw))
6194 e_info("PHY reset is blocked due to SOL/IDER session.\n"); 6194 dev_info(&pdev->dev,
6195 "PHY reset is blocked due to SOL/IDER session.\n");
6195 6196
6196 /* Set initial default active device features */ 6197 /* Set initial default active device features */
6197 netdev->features = (NETIF_F_SG | 6198 netdev->features = (NETIF_F_SG |
@@ -6241,7 +6242,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
6241 if (e1000_validate_nvm_checksum(&adapter->hw) >= 0) 6242 if (e1000_validate_nvm_checksum(&adapter->hw) >= 0)
6242 break; 6243 break;
6243 if (i == 2) { 6244 if (i == 2) {
6244 e_err("The NVM Checksum Is Not Valid\n"); 6245 dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
6245 err = -EIO; 6246 err = -EIO;
6246 goto err_eeprom; 6247 goto err_eeprom;
6247 } 6248 }
@@ -6251,13 +6252,15 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
6251 6252
6252 /* copy the MAC address */ 6253 /* copy the MAC address */
6253 if (e1000e_read_mac_addr(&adapter->hw)) 6254 if (e1000e_read_mac_addr(&adapter->hw))
6254 e_err("NVM Read Error while reading MAC address\n"); 6255 dev_err(&pdev->dev,
6256 "NVM Read Error while reading MAC address\n");
6255 6257
6256 memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len); 6258 memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
6257 memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len); 6259 memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);
6258 6260
6259 if (!is_valid_ether_addr(netdev->perm_addr)) { 6261 if (!is_valid_ether_addr(netdev->perm_addr)) {
6260 e_err("Invalid MAC Address: %pM\n", netdev->perm_addr); 6262 dev_err(&pdev->dev, "Invalid MAC Address: %pM\n",
6263 netdev->perm_addr);
6261 err = -EIO; 6264 err = -EIO;
6262 goto err_eeprom; 6265 goto err_eeprom;
6263 } 6266 }
diff --git a/drivers/net/ethernet/intel/e1000e/param.c b/drivers/net/ethernet/intel/e1000e/param.c
index 55cc1565bc2..dfbfa7fd98c 100644
--- a/drivers/net/ethernet/intel/e1000e/param.c
+++ b/drivers/net/ethernet/intel/e1000e/param.c
@@ -199,16 +199,19 @@ static int __devinit e1000_validate_option(unsigned int *value,
199 case enable_option: 199 case enable_option:
200 switch (*value) { 200 switch (*value) {
201 case OPTION_ENABLED: 201 case OPTION_ENABLED:
202 e_info("%s Enabled\n", opt->name); 202 dev_info(&adapter->pdev->dev, "%s Enabled\n",
203 opt->name);
203 return 0; 204 return 0;
204 case OPTION_DISABLED: 205 case OPTION_DISABLED:
205 e_info("%s Disabled\n", opt->name); 206 dev_info(&adapter->pdev->dev, "%s Disabled\n",
207 opt->name);
206 return 0; 208 return 0;
207 } 209 }
208 break; 210 break;
209 case range_option: 211 case range_option:
210 if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) { 212 if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
211 e_info("%s set to %i\n", opt->name, *value); 213 dev_info(&adapter->pdev->dev, "%s set to %i\n",
214 opt->name, *value);
212 return 0; 215 return 0;
213 } 216 }
214 break; 217 break;
@@ -220,7 +223,8 @@ static int __devinit e1000_validate_option(unsigned int *value,
220 ent = &opt->arg.l.p[i]; 223 ent = &opt->arg.l.p[i];
221 if (*value == ent->i) { 224 if (*value == ent->i) {
222 if (ent->str[0] != '\0') 225 if (ent->str[0] != '\0')
223 e_info("%s\n", ent->str); 226 dev_info(&adapter->pdev->dev, "%s\n",
227 ent->str);
224 return 0; 228 return 0;
225 } 229 }
226 } 230 }
@@ -230,8 +234,8 @@ static int __devinit e1000_validate_option(unsigned int *value,
230 BUG(); 234 BUG();
231 } 235 }
232 236
233 e_info("Invalid %s value specified (%i) %s\n", opt->name, *value, 237 dev_info(&adapter->pdev->dev, "Invalid %s value specified (%i) %s\n",
234 opt->err); 238 opt->name, *value, opt->err);
235 *value = opt->def; 239 *value = opt->def;
236 return -1; 240 return -1;
237} 241}
@@ -251,8 +255,10 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
251 int bd = adapter->bd_number; 255 int bd = adapter->bd_number;
252 256
253 if (bd >= E1000_MAX_NIC) { 257 if (bd >= E1000_MAX_NIC) {
254 e_notice("Warning: no configuration for board #%i\n", bd); 258 dev_notice(&adapter->pdev->dev,
255 e_notice("Using defaults for all values\n"); 259 "Warning: no configuration for board #%i\n", bd);
260 dev_notice(&adapter->pdev->dev,
261 "Using defaults for all values\n");
256 } 262 }
257 263
258 { /* Transmit Interrupt Delay */ 264 { /* Transmit Interrupt Delay */
@@ -366,27 +372,32 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
366 * default values 372 * default values
367 */ 373 */
368 if (adapter->itr > 4) 374 if (adapter->itr > 4)
369 e_info("%s set to default %d\n", opt.name, 375 dev_info(&adapter->pdev->dev,
370 adapter->itr); 376 "%s set to default %d\n", opt.name,
377 adapter->itr);
371 } 378 }
372 379
373 adapter->itr_setting = adapter->itr; 380 adapter->itr_setting = adapter->itr;
374 switch (adapter->itr) { 381 switch (adapter->itr) {
375 case 0: 382 case 0:
376 e_info("%s turned off\n", opt.name); 383 dev_info(&adapter->pdev->dev, "%s turned off\n",
384 opt.name);
377 break; 385 break;
378 case 1: 386 case 1:
379 e_info("%s set to dynamic mode\n", opt.name); 387 dev_info(&adapter->pdev->dev,
388 "%s set to dynamic mode\n", opt.name);
380 adapter->itr = 20000; 389 adapter->itr = 20000;
381 break; 390 break;
382 case 3: 391 case 3:
383 e_info("%s set to dynamic conservative mode\n", 392 dev_info(&adapter->pdev->dev,
384 opt.name); 393 "%s set to dynamic conservative mode\n",
394 opt.name);
385 adapter->itr = 20000; 395 adapter->itr = 20000;
386 break; 396 break;
387 case 4: 397 case 4:
388 e_info("%s set to simplified (2000-8000 ints) mode\n", 398 dev_info(&adapter->pdev->dev,
389 opt.name); 399 "%s set to simplified (2000-8000 ints) mode\n",
400 opt.name);
390 break; 401 break;
391 default: 402 default:
392 /* 403 /*
diff --git a/drivers/net/ethernet/intel/igb/e1000_regs.h b/drivers/net/ethernet/intel/igb/e1000_regs.h
index 35d1e4f2c92..10efcd88dca 100644
--- a/drivers/net/ethernet/intel/igb/e1000_regs.h
+++ b/drivers/net/ethernet/intel/igb/e1000_regs.h
@@ -117,6 +117,7 @@
117 117
118/* TX Rate Limit Registers */ 118/* TX Rate Limit Registers */
119#define E1000_RTTDQSEL 0x3604 /* Tx Desc Plane Queue Select - WO */ 119#define E1000_RTTDQSEL 0x3604 /* Tx Desc Plane Queue Select - WO */
120#define E1000_RTTBCNRM 0x3690 /* Tx BCN Rate-scheduler MMW */
120#define E1000_RTTBCNRC 0x36B0 /* Tx BCN Rate-Scheduler Config - WO */ 121#define E1000_RTTBCNRC 0x36B0 /* Tx BCN Rate-Scheduler Config - WO */
121 122
122/* Split and Replication RX Control - RW */ 123/* Split and Replication RX Control - RW */
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index ae6d3f393a5..9e572dd29ab 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -65,19 +65,30 @@ struct igb_adapter;
65#define MAX_Q_VECTORS 8 65#define MAX_Q_VECTORS 8
66 66
67/* Transmit and receive queues */ 67/* Transmit and receive queues */
68#define IGB_MAX_RX_QUEUES ((adapter->vfs_allocated_count ? 2 : \ 68#define IGB_MAX_RX_QUEUES 8
69 (hw->mac.type > e1000_82575 ? 8 : 4))) 69#define IGB_MAX_RX_QUEUES_82575 4
70#define IGB_MAX_RX_QUEUES_I210 4
71#define IGB_MAX_RX_QUEUES_I211 2 70#define IGB_MAX_RX_QUEUES_I211 2
72#define IGB_MAX_TX_QUEUES 16 71#define IGB_MAX_TX_QUEUES 8
73#define IGB_MAX_TX_QUEUES_I210 4
74#define IGB_MAX_TX_QUEUES_I211 2
75#define IGB_MAX_VF_MC_ENTRIES 30 72#define IGB_MAX_VF_MC_ENTRIES 30
76#define IGB_MAX_VF_FUNCTIONS 8 73#define IGB_MAX_VF_FUNCTIONS 8
77#define IGB_MAX_VFTA_ENTRIES 128 74#define IGB_MAX_VFTA_ENTRIES 128
78#define IGB_82576_VF_DEV_ID 0x10CA 75#define IGB_82576_VF_DEV_ID 0x10CA
79#define IGB_I350_VF_DEV_ID 0x1520 76#define IGB_I350_VF_DEV_ID 0x1520
80 77
78/* NVM version defines */
79#define IGB_MAJOR_MASK 0xF000
80#define IGB_MINOR_MASK 0x0FF0
81#define IGB_BUILD_MASK 0x000F
82#define IGB_COMB_VER_MASK 0x00FF
83#define IGB_MAJOR_SHIFT 12
84#define IGB_MINOR_SHIFT 4
85#define IGB_COMB_VER_SHFT 8
86#define IGB_NVM_VER_INVALID 0xFFFF
87#define IGB_ETRACK_SHIFT 16
88#define NVM_ETRACK_WORD 0x0042
89#define NVM_COMB_VER_OFF 0x0083
90#define NVM_COMB_VER_PTR 0x003d
91
81struct vf_data_storage { 92struct vf_data_storage {
82 unsigned char vf_mac_addresses[ETH_ALEN]; 93 unsigned char vf_mac_addresses[ETH_ALEN];
83 u16 vf_mc_hashes[IGB_MAX_VF_MC_ENTRIES]; 94 u16 vf_mc_hashes[IGB_MAX_VF_MC_ENTRIES];
@@ -371,6 +382,7 @@ struct igb_adapter {
371 spinlock_t tmreg_lock; 382 spinlock_t tmreg_lock;
372 struct cyclecounter cc; 383 struct cyclecounter cc;
373 struct timecounter tc; 384 struct timecounter tc;
385 char fw_version[32];
374}; 386};
375 387
376#define IGB_FLAG_HAS_MSI (1 << 0) 388#define IGB_FLAG_HAS_MSI (1 << 0)
@@ -420,6 +432,7 @@ extern void igb_update_stats(struct igb_adapter *, struct rtnl_link_stats64 *);
420extern bool igb_has_link(struct igb_adapter *adapter); 432extern bool igb_has_link(struct igb_adapter *adapter);
421extern void igb_set_ethtool_ops(struct net_device *); 433extern void igb_set_ethtool_ops(struct net_device *);
422extern void igb_power_up_link(struct igb_adapter *); 434extern void igb_power_up_link(struct igb_adapter *);
435extern void igb_set_fw_version(struct igb_adapter *);
423#ifdef CONFIG_IGB_PTP 436#ifdef CONFIG_IGB_PTP
424extern void igb_ptp_init(struct igb_adapter *adapter); 437extern void igb_ptp_init(struct igb_adapter *adapter);
425extern void igb_ptp_remove(struct igb_adapter *adapter); 438extern void igb_ptp_remove(struct igb_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 812d4f963bd..a19c84cad0e 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -710,6 +710,7 @@ static int igb_set_eeprom(struct net_device *netdev,
710 if ((ret_val == 0) && ((first_word <= NVM_CHECKSUM_REG))) 710 if ((ret_val == 0) && ((first_word <= NVM_CHECKSUM_REG)))
711 hw->nvm.ops.update(hw); 711 hw->nvm.ops.update(hw);
712 712
713 igb_set_fw_version(adapter);
713 kfree(eeprom_buff); 714 kfree(eeprom_buff);
714 return ret_val; 715 return ret_val;
715} 716}
@@ -718,20 +719,16 @@ static void igb_get_drvinfo(struct net_device *netdev,
718 struct ethtool_drvinfo *drvinfo) 719 struct ethtool_drvinfo *drvinfo)
719{ 720{
720 struct igb_adapter *adapter = netdev_priv(netdev); 721 struct igb_adapter *adapter = netdev_priv(netdev);
721 u16 eeprom_data;
722 722
723 strlcpy(drvinfo->driver, igb_driver_name, sizeof(drvinfo->driver)); 723 strlcpy(drvinfo->driver, igb_driver_name, sizeof(drvinfo->driver));
724 strlcpy(drvinfo->version, igb_driver_version, sizeof(drvinfo->version)); 724 strlcpy(drvinfo->version, igb_driver_version, sizeof(drvinfo->version));
725 725
726 /* EEPROM image version # is reported as firmware version # for 726 /*
727 * 82575 controllers */ 727 * EEPROM image version # is reported as firmware version # for
728 adapter->hw.nvm.ops.read(&adapter->hw, 5, 1, &eeprom_data); 728 * 82575 controllers
729 snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), 729 */
730 "%d.%d-%d", 730 strlcpy(drvinfo->fw_version, adapter->fw_version,
731 (eeprom_data & 0xF000) >> 12, 731 sizeof(drvinfo->fw_version));
732 (eeprom_data & 0x0FF0) >> 4,
733 eeprom_data & 0x000F);
734
735 strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), 732 strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
736 sizeof(drvinfo->bus_info)); 733 sizeof(drvinfo->bus_info));
737 drvinfo->n_stats = IGB_STATS_LEN; 734 drvinfo->n_stats = IGB_STATS_LEN;
@@ -2271,6 +2268,38 @@ static void igb_ethtool_complete(struct net_device *netdev)
2271 pm_runtime_put(&adapter->pdev->dev); 2268 pm_runtime_put(&adapter->pdev->dev);
2272} 2269}
2273 2270
2271#ifdef CONFIG_IGB_PTP
2272static int igb_ethtool_get_ts_info(struct net_device *dev,
2273 struct ethtool_ts_info *info)
2274{
2275 struct igb_adapter *adapter = netdev_priv(dev);
2276
2277 info->so_timestamping =
2278 SOF_TIMESTAMPING_TX_HARDWARE |
2279 SOF_TIMESTAMPING_RX_HARDWARE |
2280 SOF_TIMESTAMPING_RAW_HARDWARE;
2281
2282 if (adapter->ptp_clock)
2283 info->phc_index = ptp_clock_index(adapter->ptp_clock);
2284 else
2285 info->phc_index = -1;
2286
2287 info->tx_types =
2288 (1 << HWTSTAMP_TX_OFF) |
2289 (1 << HWTSTAMP_TX_ON);
2290
2291 info->rx_filters =
2292 (1 << HWTSTAMP_FILTER_NONE) |
2293 (1 << HWTSTAMP_FILTER_ALL) |
2294 (1 << HWTSTAMP_FILTER_SOME) |
2295 (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
2296 (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
2297 (1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
2298
2299 return 0;
2300}
2301
2302#endif
2274static const struct ethtool_ops igb_ethtool_ops = { 2303static const struct ethtool_ops igb_ethtool_ops = {
2275 .get_settings = igb_get_settings, 2304 .get_settings = igb_get_settings,
2276 .set_settings = igb_set_settings, 2305 .set_settings = igb_set_settings,
@@ -2299,6 +2328,9 @@ static const struct ethtool_ops igb_ethtool_ops = {
2299 .set_coalesce = igb_set_coalesce, 2328 .set_coalesce = igb_set_coalesce,
2300 .begin = igb_ethtool_begin, 2329 .begin = igb_ethtool_begin,
2301 .complete = igb_ethtool_complete, 2330 .complete = igb_ethtool_complete,
2331#ifdef CONFIG_IGB_PTP
2332 .get_ts_info = igb_ethtool_get_ts_info,
2333#endif
2302}; 2334};
2303 2335
2304void igb_set_ethtool_ops(struct net_device *netdev) 2336void igb_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index dd3bfe8cd36..60e307548f4 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -59,9 +59,9 @@
59#endif 59#endif
60#include "igb.h" 60#include "igb.h"
61 61
62#define MAJ 3 62#define MAJ 4
63#define MIN 4 63#define MIN 0
64#define BUILD 7 64#define BUILD 1
65#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \ 65#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
66__stringify(BUILD) "-k" 66__stringify(BUILD) "-k"
67char igb_driver_name[] = "igb"; 67char igb_driver_name[] = "igb";
@@ -1048,11 +1048,6 @@ static int igb_set_interrupt_capability(struct igb_adapter *adapter)
1048 if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS)) 1048 if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS))
1049 numvecs += adapter->num_tx_queues; 1049 numvecs += adapter->num_tx_queues;
1050 1050
1051 /* i210 and i211 can only have 4 MSIX vectors for rx/tx queues. */
1052 if ((adapter->hw.mac.type == e1000_i210)
1053 || (adapter->hw.mac.type == e1000_i211))
1054 numvecs = 4;
1055
1056 /* store the number of vectors reserved for queues */ 1051 /* store the number of vectors reserved for queues */
1057 adapter->num_q_vectors = numvecs; 1052 adapter->num_q_vectors = numvecs;
1058 1053
@@ -1821,6 +1816,69 @@ static const struct net_device_ops igb_netdev_ops = {
1821}; 1816};
1822 1817
1823/** 1818/**
1819 * igb_set_fw_version - Configure version string for ethtool
1820 * @adapter: adapter struct
1821 *
1822 **/
1823void igb_set_fw_version(struct igb_adapter *adapter)
1824{
1825 struct e1000_hw *hw = &adapter->hw;
1826 u16 eeprom_verh, eeprom_verl, comb_verh, comb_verl, comb_offset;
1827 u16 major, build, patch, fw_version;
1828 u32 etrack_id;
1829
1830 hw->nvm.ops.read(hw, 5, 1, &fw_version);
1831 if (adapter->hw.mac.type != e1000_i211) {
1832 hw->nvm.ops.read(hw, NVM_ETRACK_WORD, 1, &eeprom_verh);
1833 hw->nvm.ops.read(hw, (NVM_ETRACK_WORD + 1), 1, &eeprom_verl);
1834 etrack_id = (eeprom_verh << IGB_ETRACK_SHIFT) | eeprom_verl;
1835
1836 /* combo image version needs to be found */
1837 hw->nvm.ops.read(hw, NVM_COMB_VER_PTR, 1, &comb_offset);
1838 if ((comb_offset != 0x0) &&
1839 (comb_offset != IGB_NVM_VER_INVALID)) {
1840 hw->nvm.ops.read(hw, (NVM_COMB_VER_OFF + comb_offset
1841 + 1), 1, &comb_verh);
1842 hw->nvm.ops.read(hw, (NVM_COMB_VER_OFF + comb_offset),
1843 1, &comb_verl);
1844
1845 /* Only display Option Rom if it exists and is valid */
1846 if ((comb_verh && comb_verl) &&
1847 ((comb_verh != IGB_NVM_VER_INVALID) &&
1848 (comb_verl != IGB_NVM_VER_INVALID))) {
1849 major = comb_verl >> IGB_COMB_VER_SHFT;
1850 build = (comb_verl << IGB_COMB_VER_SHFT) |
1851 (comb_verh >> IGB_COMB_VER_SHFT);
1852 patch = comb_verh & IGB_COMB_VER_MASK;
1853 snprintf(adapter->fw_version,
1854 sizeof(adapter->fw_version),
1855 "%d.%d%d, 0x%08x, %d.%d.%d",
1856 (fw_version & IGB_MAJOR_MASK) >>
1857 IGB_MAJOR_SHIFT,
1858 (fw_version & IGB_MINOR_MASK) >>
1859 IGB_MINOR_SHIFT,
1860 (fw_version & IGB_BUILD_MASK),
1861 etrack_id, major, build, patch);
1862 goto out;
1863 }
1864 }
1865 snprintf(adapter->fw_version, sizeof(adapter->fw_version),
1866 "%d.%d%d, 0x%08x",
1867 (fw_version & IGB_MAJOR_MASK) >> IGB_MAJOR_SHIFT,
1868 (fw_version & IGB_MINOR_MASK) >> IGB_MINOR_SHIFT,
1869 (fw_version & IGB_BUILD_MASK), etrack_id);
1870 } else {
1871 snprintf(adapter->fw_version, sizeof(adapter->fw_version),
1872 "%d.%d%d",
1873 (fw_version & IGB_MAJOR_MASK) >> IGB_MAJOR_SHIFT,
1874 (fw_version & IGB_MINOR_MASK) >> IGB_MINOR_SHIFT,
1875 (fw_version & IGB_BUILD_MASK));
1876 }
1877out:
1878 return;
1879}
1880
1881/**
1824 * igb_probe - Device Initialization Routine 1882 * igb_probe - Device Initialization Routine
1825 * @pdev: PCI device information struct 1883 * @pdev: PCI device information struct
1826 * @ent: entry in igb_pci_tbl 1884 * @ent: entry in igb_pci_tbl
@@ -2030,6 +2088,9 @@ static int __devinit igb_probe(struct pci_dev *pdev,
2030 goto err_eeprom; 2088 goto err_eeprom;
2031 } 2089 }
2032 2090
2091 /* get firmware version for ethtool -i */
2092 igb_set_fw_version(adapter);
2093
2033 setup_timer(&adapter->watchdog_timer, igb_watchdog, 2094 setup_timer(&adapter->watchdog_timer, igb_watchdog,
2034 (unsigned long) adapter); 2095 (unsigned long) adapter);
2035 setup_timer(&adapter->phy_info_timer, igb_update_phy_info, 2096 setup_timer(&adapter->phy_info_timer, igb_update_phy_info,
@@ -2338,6 +2399,7 @@ static int __devinit igb_sw_init(struct igb_adapter *adapter)
2338 struct e1000_hw *hw = &adapter->hw; 2399 struct e1000_hw *hw = &adapter->hw;
2339 struct net_device *netdev = adapter->netdev; 2400 struct net_device *netdev = adapter->netdev;
2340 struct pci_dev *pdev = adapter->pdev; 2401 struct pci_dev *pdev = adapter->pdev;
2402 u32 max_rss_queues;
2341 2403
2342 pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word); 2404 pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);
2343 2405
@@ -2370,40 +2432,69 @@ static int __devinit igb_sw_init(struct igb_adapter *adapter)
2370 } else 2432 } else
2371 adapter->vfs_allocated_count = max_vfs; 2433 adapter->vfs_allocated_count = max_vfs;
2372 break; 2434 break;
2373 case e1000_i210:
2374 case e1000_i211:
2375 adapter->vfs_allocated_count = 0;
2376 break;
2377 default: 2435 default:
2378 break; 2436 break;
2379 } 2437 }
2380#endif /* CONFIG_PCI_IOV */ 2438#endif /* CONFIG_PCI_IOV */
2439
2440 /* Determine the maximum number of RSS queues supported. */
2381 switch (hw->mac.type) { 2441 switch (hw->mac.type) {
2442 case e1000_i211:
2443 max_rss_queues = IGB_MAX_RX_QUEUES_I211;
2444 break;
2445 case e1000_82575:
2382 case e1000_i210: 2446 case e1000_i210:
2383 adapter->rss_queues = min_t(u32, IGB_MAX_RX_QUEUES_I210, 2447 max_rss_queues = IGB_MAX_RX_QUEUES_82575;
2384 num_online_cpus());
2385 break; 2448 break;
2449 case e1000_i350:
2450 /* I350 cannot do RSS and SR-IOV at the same time */
2451 if (!!adapter->vfs_allocated_count) {
2452 max_rss_queues = 1;
2453 break;
2454 }
2455 /* fall through */
2456 case e1000_82576:
2457 if (!!adapter->vfs_allocated_count) {
2458 max_rss_queues = 2;
2459 break;
2460 }
2461 /* fall through */
2462 case e1000_82580:
2463 default:
2464 max_rss_queues = IGB_MAX_RX_QUEUES;
2465 break;
2466 }
2467
2468 adapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus());
2469
2470 /* Determine if we need to pair queues. */
2471 switch (hw->mac.type) {
2472 case e1000_82575:
2386 case e1000_i211: 2473 case e1000_i211:
2387 adapter->rss_queues = min_t(u32, IGB_MAX_RX_QUEUES_I211, 2474 /* Device supports enough interrupts without queue pairing. */
2388 num_online_cpus());
2389 break; 2475 break;
2476 case e1000_82576:
2477 /*
2478 * If VFs are going to be allocated with RSS queues then we
2479 * should pair the queues in order to conserve interrupts due
2480 * to limited supply.
2481 */
2482 if ((adapter->rss_queues > 1) &&
2483 (adapter->vfs_allocated_count > 6))
2484 adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
2485 /* fall through */
2486 case e1000_82580:
2487 case e1000_i350:
2488 case e1000_i210:
2390 default: 2489 default:
2391 adapter->rss_queues = min_t(u32, IGB_MAX_RX_QUEUES, 2490 /*
2392 num_online_cpus()); 2491 * If rss_queues > half of max_rss_queues, pair the queues in
2492 * order to conserve interrupts due to limited supply.
2493 */
2494 if (adapter->rss_queues > (max_rss_queues / 2))
2495 adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
2393 break; 2496 break;
2394 } 2497 }
2395 /* i350 cannot do RSS and SR-IOV at the same time */
2396 if (hw->mac.type == e1000_i350 && adapter->vfs_allocated_count)
2397 adapter->rss_queues = 1;
2398
2399 /*
2400 * if rss_queues > 4 or vfs are going to be allocated with rss_queues
2401 * then we should combine the queues into a queue pair in order to
2402 * conserve interrupts due to limited supply
2403 */
2404 if ((adapter->rss_queues > 4) ||
2405 ((adapter->rss_queues > 1) && (adapter->vfs_allocated_count > 6)))
2406 adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
2407 2498
2408 /* Setup and initialize a copy of the hw vlan table array */ 2499 /* Setup and initialize a copy of the hw vlan table array */
2409 adapter->shadow_vfta = kzalloc(sizeof(u32) * 2500 adapter->shadow_vfta = kzalloc(sizeof(u32) *
@@ -5686,6 +5777,7 @@ static void igb_tx_hwtstamp(struct igb_q_vector *q_vector,
5686/** 5777/**
5687 * igb_clean_tx_irq - Reclaim resources after transmit completes 5778 * igb_clean_tx_irq - Reclaim resources after transmit completes
5688 * @q_vector: pointer to q_vector containing needed info 5779 * @q_vector: pointer to q_vector containing needed info
5780 *
5689 * returns true if ring is completely cleaned 5781 * returns true if ring is completely cleaned
5690 **/ 5782 **/
5691static bool igb_clean_tx_irq(struct igb_q_vector *q_vector) 5783static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
@@ -6997,6 +7089,11 @@ static void igb_set_vf_rate_limit(struct e1000_hw *hw, int vf, int tx_rate,
6997 } 7089 }
6998 7090
6999 wr32(E1000_RTTDQSEL, vf); /* vf X uses queue X */ 7091 wr32(E1000_RTTDQSEL, vf); /* vf X uses queue X */
7092 /*
7093 * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM
7094 * register. MMW_SIZE=0x014 if 9728-byte jumbo is supported.
7095 */
7096 wr32(E1000_RTTBCNRM, 0x14);
7000 wr32(E1000_RTTBCNRC, bcnrc_val); 7097 wr32(E1000_RTTBCNRC, bcnrc_val);
7001} 7098}
7002 7099
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index d5ee7fa5072..c846ea9131a 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -330,7 +330,17 @@ void igb_ptp_init(struct igb_adapter *adapter)
330 330
331void igb_ptp_remove(struct igb_adapter *adapter) 331void igb_ptp_remove(struct igb_adapter *adapter)
332{ 332{
333 cancel_delayed_work_sync(&adapter->overflow_work); 333 switch (adapter->hw.mac.type) {
334 case e1000_i211:
335 case e1000_i210:
336 case e1000_i350:
337 case e1000_82580:
338 case e1000_82576:
339 cancel_delayed_work_sync(&adapter->overflow_work);
340 break;
341 default:
342 return;
343 }
334 344
335 if (adapter->ptp_clock) { 345 if (adapter->ptp_clock) {
336 ptp_clock_unregister(adapter->ptp_clock); 346 ptp_clock_unregister(adapter->ptp_clock);
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index 8ec74b07f94..0696abfe994 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -766,6 +766,7 @@ static void igbvf_set_itr(struct igbvf_adapter *adapter)
766/** 766/**
767 * igbvf_clean_tx_irq - Reclaim resources after transmit completes 767 * igbvf_clean_tx_irq - Reclaim resources after transmit completes
768 * @adapter: board private structure 768 * @adapter: board private structure
769 *
769 * returns true if ring is completely cleaned 770 * returns true if ring is completely cleaned
770 **/ 771 **/
771static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring) 772static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring)
diff --git a/drivers/net/ethernet/intel/igbvf/vf.c b/drivers/net/ethernet/intel/igbvf/vf.c
index 30a6cc42603..eea0e10ce12 100644
--- a/drivers/net/ethernet/intel/igbvf/vf.c
+++ b/drivers/net/ethernet/intel/igbvf/vf.c
@@ -283,7 +283,8 @@ static s32 e1000_set_vfta_vf(struct e1000_hw *hw, u16 vid, bool set)
283 return err; 283 return err;
284} 284}
285 285
286/** e1000_rlpml_set_vf - Set the maximum receive packet length 286/**
287 * e1000_rlpml_set_vf - Set the maximum receive packet length
287 * @hw: pointer to the HW structure 288 * @hw: pointer to the HW structure
288 * @max_size: value to assign to max frame size 289 * @max_size: value to assign to max frame size
289 **/ 290 **/
@@ -302,7 +303,7 @@ void e1000_rlpml_set_vf(struct e1000_hw *hw, u16 max_size)
302 * e1000_rar_set_vf - set device MAC address 303 * e1000_rar_set_vf - set device MAC address
303 * @hw: pointer to the HW structure 304 * @hw: pointer to the HW structure
304 * @addr: pointer to the receive address 305 * @addr: pointer to the receive address
305 * @index receive address array register 306 * @index: receive address array register
306 **/ 307 **/
307static void e1000_rar_set_vf(struct e1000_hw *hw, u8 * addr, u32 index) 308static void e1000_rar_set_vf(struct e1000_hw *hw, u8 * addr, u32 index)
308{ 309{
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
index 5fce363d810..aab649f8c5f 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
@@ -2276,9 +2276,9 @@ static void ixgb_netpoll(struct net_device *dev)
2276#endif 2276#endif
2277 2277
2278/** 2278/**
2279 * ixgb_io_error_detected() - called when PCI error is detected 2279 * ixgb_io_error_detected - called when PCI error is detected
2280 * @pdev pointer to pci device with error 2280 * @pdev: pointer to pci device with error
2281 * @state pci channel state after error 2281 * @state: pci channel state after error
2282 * 2282 *
2283 * This callback is called by the PCI subsystem whenever 2283 * This callback is called by the PCI subsystem whenever
2284 * a PCI bus error is detected. 2284 * a PCI bus error is detected.
diff --git a/drivers/net/ethernet/intel/ixgbe/Makefile b/drivers/net/ethernet/intel/ixgbe/Makefile
index 0bdf06bc5c4..5fd5d04c26c 100644
--- a/drivers/net/ethernet/intel/ixgbe/Makefile
+++ b/drivers/net/ethernet/intel/ixgbe/Makefile
@@ -34,11 +34,11 @@ obj-$(CONFIG_IXGBE) += ixgbe.o
34 34
35ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \ 35ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \
36 ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \ 36 ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \
37 ixgbe_mbx.o ixgbe_x540.o ixgbe_sysfs.o ixgbe_lib.o 37 ixgbe_mbx.o ixgbe_x540.o ixgbe_lib.o
38 38
39ixgbe-$(CONFIG_IXGBE_DCB) += ixgbe_dcb.o ixgbe_dcb_82598.o \ 39ixgbe-$(CONFIG_IXGBE_DCB) += ixgbe_dcb.o ixgbe_dcb_82598.o \
40 ixgbe_dcb_82599.o ixgbe_dcb_nl.o 40 ixgbe_dcb_82599.o ixgbe_dcb_nl.o
41 41
42ixgbe-$(CONFIG_IXGBE_PTP) += ixgbe_ptp.o 42ixgbe-$(CONFIG_IXGBE_PTP) += ixgbe_ptp.o
43 43ixgbe-$(CONFIG_IXGBE_HWMON) += ixgbe_sysfs.o
44ixgbe-$(CONFIG_FCOE:m=y) += ixgbe_fcoe.o 44ixgbe-$(CONFIG_FCOE:m=y) += ixgbe_fcoe.o
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 7af291e236b..d1acf2451d5 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -561,6 +561,7 @@ struct ixgbe_adapter {
561 spinlock_t tmreg_lock; 561 spinlock_t tmreg_lock;
562 struct cyclecounter cc; 562 struct cyclecounter cc;
563 struct timecounter tc; 563 struct timecounter tc;
564 int rx_hwtstamp_filter;
564 u32 base_incval; 565 u32 base_incval;
565 u32 cycle_speed; 566 u32 cycle_speed;
566#endif /* CONFIG_IXGBE_PTP */ 567#endif /* CONFIG_IXGBE_PTP */
@@ -718,6 +719,7 @@ extern void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter);
718extern void ixgbe_ptp_tx_hwtstamp(struct ixgbe_q_vector *q_vector, 719extern void ixgbe_ptp_tx_hwtstamp(struct ixgbe_q_vector *q_vector,
719 struct sk_buff *skb); 720 struct sk_buff *skb);
720extern void ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector, 721extern void ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector,
722 union ixgbe_adv_rx_desc *rx_desc,
721 struct sk_buff *skb); 723 struct sk_buff *skb);
722extern int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter, 724extern int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter,
723 struct ifreq *ifr, int cmd); 725 struct ifreq *ifr, int cmd);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
index dee64d2703f..e7dddfd97cb 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
@@ -241,7 +241,9 @@ static s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
241 241
242 /* Determine 1G link capabilities off of SFP+ type */ 242 /* Determine 1G link capabilities off of SFP+ type */
243 if (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 || 243 if (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
244 hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1) { 244 hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
245 hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
246 hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1) {
245 *speed = IXGBE_LINK_SPEED_1GB_FULL; 247 *speed = IXGBE_LINK_SPEED_1GB_FULL;
246 *negotiation = true; 248 *negotiation = true;
247 goto out; 249 goto out;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index 77ac41feb0f..bb7fde45c05 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -3132,7 +3132,7 @@ s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
3132} 3132}
3133 3133
3134/** 3134/**
3135 * ixgbe_get_wwn_prefix_generic Get alternative WWNN/WWPN prefix from 3135 * ixgbe_get_wwn_prefix_generic - Get alternative WWNN/WWPN prefix from
3136 * the EEPROM 3136 * the EEPROM
3137 * @hw: pointer to hardware structure 3137 * @hw: pointer to hardware structure
3138 * @wwnn_prefix: the alternative WWNN prefix 3138 * @wwnn_prefix: the alternative WWNN prefix
@@ -3325,6 +3325,7 @@ void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw,
3325 * ixgbe_calculate_checksum - Calculate checksum for buffer 3325 * ixgbe_calculate_checksum - Calculate checksum for buffer
3326 * @buffer: pointer to EEPROM 3326 * @buffer: pointer to EEPROM
3327 * @length: size of EEPROM to calculate a checksum for 3327 * @length: size of EEPROM to calculate a checksum for
3328 *
3328 * Calculates the checksum for some buffer on a specified length. The 3329 * Calculates the checksum for some buffer on a specified length. The
3329 * checksum calculated is returned. 3330 * checksum calculated is returned.
3330 **/ 3331 **/
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 3178f1ec371..bbc7da5cdb4 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -154,100 +154,60 @@ static int ixgbe_get_settings(struct net_device *netdev,
154{ 154{
155 struct ixgbe_adapter *adapter = netdev_priv(netdev); 155 struct ixgbe_adapter *adapter = netdev_priv(netdev);
156 struct ixgbe_hw *hw = &adapter->hw; 156 struct ixgbe_hw *hw = &adapter->hw;
157 ixgbe_link_speed supported_link;
157 u32 link_speed = 0; 158 u32 link_speed = 0;
159 bool autoneg;
158 bool link_up; 160 bool link_up;
159 161
160 ecmd->supported = SUPPORTED_10000baseT_Full; 162 hw->mac.ops.get_link_capabilities(hw, &supported_link, &autoneg);
161 ecmd->autoneg = AUTONEG_ENABLE; 163
162 ecmd->transceiver = XCVR_EXTERNAL; 164 /* set the supported link speeds */
163 if ((hw->phy.media_type == ixgbe_media_type_copper) || 165 if (supported_link & IXGBE_LINK_SPEED_10GB_FULL)
164 (hw->phy.multispeed_fiber)) { 166 ecmd->supported |= SUPPORTED_10000baseT_Full;
165 ecmd->supported |= (SUPPORTED_1000baseT_Full | 167 if (supported_link & IXGBE_LINK_SPEED_1GB_FULL)
166 SUPPORTED_Autoneg); 168 ecmd->supported |= SUPPORTED_1000baseT_Full;
167 169 if (supported_link & IXGBE_LINK_SPEED_100_FULL)
168 switch (hw->mac.type) { 170 ecmd->supported |= SUPPORTED_100baseT_Full;
169 case ixgbe_mac_X540: 171
170 ecmd->supported |= SUPPORTED_100baseT_Full; 172 /* set the advertised speeds */
171 break; 173 if (hw->phy.autoneg_advertised) {
172 default: 174 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL)
173 break; 175 ecmd->advertising |= ADVERTISED_100baseT_Full;
174 } 176 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
175 177 ecmd->advertising |= ADVERTISED_10000baseT_Full;
176 ecmd->advertising = ADVERTISED_Autoneg; 178 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL)
177 if (hw->phy.autoneg_advertised) { 179 ecmd->advertising |= ADVERTISED_1000baseT_Full;
178 if (hw->phy.autoneg_advertised &
179 IXGBE_LINK_SPEED_100_FULL)
180 ecmd->advertising |= ADVERTISED_100baseT_Full;
181 if (hw->phy.autoneg_advertised &
182 IXGBE_LINK_SPEED_10GB_FULL)
183 ecmd->advertising |= ADVERTISED_10000baseT_Full;
184 if (hw->phy.autoneg_advertised &
185 IXGBE_LINK_SPEED_1GB_FULL)
186 ecmd->advertising |= ADVERTISED_1000baseT_Full;
187 } else {
188 /*
189 * Default advertised modes in case
190 * phy.autoneg_advertised isn't set.
191 */
192 ecmd->advertising |= (ADVERTISED_10000baseT_Full |
193 ADVERTISED_1000baseT_Full);
194 if (hw->mac.type == ixgbe_mac_X540)
195 ecmd->advertising |= ADVERTISED_100baseT_Full;
196 }
197
198 if (hw->phy.media_type == ixgbe_media_type_copper) {
199 ecmd->supported |= SUPPORTED_TP;
200 ecmd->advertising |= ADVERTISED_TP;
201 ecmd->port = PORT_TP;
202 } else {
203 ecmd->supported |= SUPPORTED_FIBRE;
204 ecmd->advertising |= ADVERTISED_FIBRE;
205 ecmd->port = PORT_FIBRE;
206 }
207 } else if (hw->phy.media_type == ixgbe_media_type_backplane) {
208 /* Set as FIBRE until SERDES defined in kernel */
209 if (hw->device_id == IXGBE_DEV_ID_82598_BX) {
210 ecmd->supported = (SUPPORTED_1000baseT_Full |
211 SUPPORTED_FIBRE);
212 ecmd->advertising = (ADVERTISED_1000baseT_Full |
213 ADVERTISED_FIBRE);
214 ecmd->port = PORT_FIBRE;
215 ecmd->autoneg = AUTONEG_DISABLE;
216 } else if ((hw->device_id == IXGBE_DEV_ID_82599_COMBO_BACKPLANE) ||
217 (hw->device_id == IXGBE_DEV_ID_82599_KX4_MEZZ)) {
218 ecmd->supported |= (SUPPORTED_1000baseT_Full |
219 SUPPORTED_Autoneg |
220 SUPPORTED_FIBRE);
221 ecmd->advertising = (ADVERTISED_10000baseT_Full |
222 ADVERTISED_1000baseT_Full |
223 ADVERTISED_Autoneg |
224 ADVERTISED_FIBRE);
225 ecmd->port = PORT_FIBRE;
226 } else {
227 ecmd->supported |= (SUPPORTED_1000baseT_Full |
228 SUPPORTED_FIBRE);
229 ecmd->advertising = (ADVERTISED_10000baseT_Full |
230 ADVERTISED_1000baseT_Full |
231 ADVERTISED_FIBRE);
232 ecmd->port = PORT_FIBRE;
233 }
234 } else { 180 } else {
235 ecmd->supported |= SUPPORTED_FIBRE; 181 /* default modes in case phy.autoneg_advertised isn't set */
236 ecmd->advertising = (ADVERTISED_10000baseT_Full | 182 if (supported_link & IXGBE_LINK_SPEED_10GB_FULL)
237 ADVERTISED_FIBRE); 183 ecmd->advertising |= ADVERTISED_10000baseT_Full;
238 ecmd->port = PORT_FIBRE; 184 if (supported_link & IXGBE_LINK_SPEED_1GB_FULL)
239 ecmd->autoneg = AUTONEG_DISABLE; 185 ecmd->advertising |= ADVERTISED_1000baseT_Full;
186 if (supported_link & IXGBE_LINK_SPEED_100_FULL)
187 ecmd->advertising |= ADVERTISED_100baseT_Full;
240 } 188 }
241 189
242 /* Get PHY type */ 190 if (autoneg) {
191 ecmd->supported |= SUPPORTED_Autoneg;
192 ecmd->advertising |= ADVERTISED_Autoneg;
193 ecmd->autoneg = AUTONEG_ENABLE;
194 } else
195 ecmd->autoneg = AUTONEG_DISABLE;
196
197 ecmd->transceiver = XCVR_EXTERNAL;
198
199 /* Determine the remaining settings based on the PHY type. */
243 switch (adapter->hw.phy.type) { 200 switch (adapter->hw.phy.type) {
244 case ixgbe_phy_tn: 201 case ixgbe_phy_tn:
245 case ixgbe_phy_aq: 202 case ixgbe_phy_aq:
246 case ixgbe_phy_cu_unknown: 203 case ixgbe_phy_cu_unknown:
247 /* Copper 10G-BASET */ 204 ecmd->supported |= SUPPORTED_TP;
205 ecmd->advertising |= ADVERTISED_TP;
248 ecmd->port = PORT_TP; 206 ecmd->port = PORT_TP;
249 break; 207 break;
250 case ixgbe_phy_qt: 208 case ixgbe_phy_qt:
209 ecmd->supported |= SUPPORTED_FIBRE;
210 ecmd->advertising |= ADVERTISED_FIBRE;
251 ecmd->port = PORT_FIBRE; 211 ecmd->port = PORT_FIBRE;
252 break; 212 break;
253 case ixgbe_phy_nl: 213 case ixgbe_phy_nl:
@@ -257,42 +217,59 @@ static int ixgbe_get_settings(struct net_device *netdev,
257 case ixgbe_phy_sfp_avago: 217 case ixgbe_phy_sfp_avago:
258 case ixgbe_phy_sfp_intel: 218 case ixgbe_phy_sfp_intel:
259 case ixgbe_phy_sfp_unknown: 219 case ixgbe_phy_sfp_unknown:
260 switch (adapter->hw.phy.sfp_type) {
261 /* SFP+ devices, further checking needed */ 220 /* SFP+ devices, further checking needed */
221 switch (adapter->hw.phy.sfp_type) {
262 case ixgbe_sfp_type_da_cu: 222 case ixgbe_sfp_type_da_cu:
263 case ixgbe_sfp_type_da_cu_core0: 223 case ixgbe_sfp_type_da_cu_core0:
264 case ixgbe_sfp_type_da_cu_core1: 224 case ixgbe_sfp_type_da_cu_core1:
225 ecmd->supported |= SUPPORTED_FIBRE;
226 ecmd->advertising |= ADVERTISED_FIBRE;
265 ecmd->port = PORT_DA; 227 ecmd->port = PORT_DA;
266 break; 228 break;
267 case ixgbe_sfp_type_sr: 229 case ixgbe_sfp_type_sr:
268 case ixgbe_sfp_type_lr: 230 case ixgbe_sfp_type_lr:
269 case ixgbe_sfp_type_srlr_core0: 231 case ixgbe_sfp_type_srlr_core0:
270 case ixgbe_sfp_type_srlr_core1: 232 case ixgbe_sfp_type_srlr_core1:
233 ecmd->supported |= SUPPORTED_FIBRE;
234 ecmd->advertising |= ADVERTISED_FIBRE;
271 ecmd->port = PORT_FIBRE; 235 ecmd->port = PORT_FIBRE;
272 break; 236 break;
273 case ixgbe_sfp_type_not_present: 237 case ixgbe_sfp_type_not_present:
238 ecmd->supported |= SUPPORTED_FIBRE;
239 ecmd->advertising |= ADVERTISED_FIBRE;
274 ecmd->port = PORT_NONE; 240 ecmd->port = PORT_NONE;
275 break; 241 break;
276 case ixgbe_sfp_type_1g_cu_core0: 242 case ixgbe_sfp_type_1g_cu_core0:
277 case ixgbe_sfp_type_1g_cu_core1: 243 case ixgbe_sfp_type_1g_cu_core1:
244 ecmd->supported |= SUPPORTED_TP;
245 ecmd->advertising |= ADVERTISED_TP;
278 ecmd->port = PORT_TP; 246 ecmd->port = PORT_TP;
279 ecmd->supported = SUPPORTED_TP; 247 break;
280 ecmd->advertising = (ADVERTISED_1000baseT_Full | 248 case ixgbe_sfp_type_1g_sx_core0:
281 ADVERTISED_TP); 249 case ixgbe_sfp_type_1g_sx_core1:
250 ecmd->supported |= SUPPORTED_FIBRE;
251 ecmd->advertising |= ADVERTISED_FIBRE;
252 ecmd->port = PORT_FIBRE;
282 break; 253 break;
283 case ixgbe_sfp_type_unknown: 254 case ixgbe_sfp_type_unknown:
284 default: 255 default:
256 ecmd->supported |= SUPPORTED_FIBRE;
257 ecmd->advertising |= ADVERTISED_FIBRE;
285 ecmd->port = PORT_OTHER; 258 ecmd->port = PORT_OTHER;
286 break; 259 break;
287 } 260 }
288 break; 261 break;
289 case ixgbe_phy_xaui: 262 case ixgbe_phy_xaui:
263 ecmd->supported |= SUPPORTED_FIBRE;
264 ecmd->advertising |= ADVERTISED_FIBRE;
290 ecmd->port = PORT_NONE; 265 ecmd->port = PORT_NONE;
291 break; 266 break;
292 case ixgbe_phy_unknown: 267 case ixgbe_phy_unknown:
293 case ixgbe_phy_generic: 268 case ixgbe_phy_generic:
294 case ixgbe_phy_sfp_unsupported: 269 case ixgbe_phy_sfp_unsupported:
295 default: 270 default:
271 ecmd->supported |= SUPPORTED_FIBRE;
272 ecmd->advertising |= ADVERTISED_FIBRE;
296 ecmd->port = PORT_OTHER; 273 ecmd->port = PORT_OTHER;
297 break; 274 break;
298 } 275 }
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
index bc07933d67d..0ee4dbf4a75 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
@@ -38,7 +38,7 @@
38 38
39/** 39/**
40 * ixgbe_fcoe_clear_ddp - clear the given ddp context 40 * ixgbe_fcoe_clear_ddp - clear the given ddp context
41 * @ddp - ptr to the ixgbe_fcoe_ddp 41 * @ddp: ptr to the ixgbe_fcoe_ddp
42 * 42 *
43 * Returns : none 43 * Returns : none
44 * 44 *
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index c377706e81a..f36c3c38dbc 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -252,7 +252,7 @@ static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
252} 252}
253 253
254/** 254/**
255 * ixgbe_set_sriov_queues: Allocate queues for IOV use 255 * ixgbe_set_sriov_queues - Allocate queues for IOV use
256 * @adapter: board private structure to initialize 256 * @adapter: board private structure to initialize
257 * 257 *
258 * IOV doesn't actually use anything, so just NAK the 258 * IOV doesn't actually use anything, so just NAK the
@@ -265,7 +265,7 @@ static inline bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
265} 265}
266 266
267/** 267/**
268 * ixgbe_set_rss_queues: Allocate queues for RSS 268 * ixgbe_set_rss_queues - Allocate queues for RSS
269 * @adapter: board private structure to initialize 269 * @adapter: board private structure to initialize
270 * 270 *
271 * This is our "base" multiqueue mode. RSS (Receive Side Scaling) will try 271 * This is our "base" multiqueue mode. RSS (Receive Side Scaling) will try
@@ -288,7 +288,7 @@ static inline bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
288} 288}
289 289
290/** 290/**
291 * ixgbe_set_fdir_queues: Allocate queues for Flow Director 291 * ixgbe_set_fdir_queues - Allocate queues for Flow Director
292 * @adapter: board private structure to initialize 292 * @adapter: board private structure to initialize
293 * 293 *
294 * Flow Director is an advanced Rx filter, attempting to get Rx flows back 294 * Flow Director is an advanced Rx filter, attempting to get Rx flows back
@@ -323,7 +323,7 @@ static inline bool ixgbe_set_fdir_queues(struct ixgbe_adapter *adapter)
323 323
324#ifdef IXGBE_FCOE 324#ifdef IXGBE_FCOE
325/** 325/**
326 * ixgbe_set_fcoe_queues: Allocate queues for Fiber Channel over Ethernet (FCoE) 326 * ixgbe_set_fcoe_queues - Allocate queues for Fiber Channel over Ethernet (FCoE)
327 * @adapter: board private structure to initialize 327 * @adapter: board private structure to initialize
328 * 328 *
329 * FCoE RX FCRETA can use up to 8 rx queues for up to 8 different exchanges. 329 * FCoE RX FCRETA can use up to 8 rx queues for up to 8 different exchanges.
@@ -410,7 +410,7 @@ static inline bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
410#endif 410#endif
411 411
412/** 412/**
413 * ixgbe_set_num_queues: Allocate queues for device, feature dependent 413 * ixgbe_set_num_queues - Allocate queues for device, feature dependent
414 * @adapter: board private structure to initialize 414 * @adapter: board private structure to initialize
415 * 415 *
416 * This is the top level queue allocation routine. The order here is very 416 * This is the top level queue allocation routine. The order here is very
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index e242104ab47..59a3f141feb 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -516,7 +516,7 @@ static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter)
516 ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD); 516 ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD);
517} 517}
518 518
519/* 519/**
520 * ixgbe_set_ivar - set the IVAR registers, mapping interrupt causes to vectors 520 * ixgbe_set_ivar - set the IVAR registers, mapping interrupt causes to vectors
521 * @adapter: pointer to adapter struct 521 * @adapter: pointer to adapter struct
522 * @direction: 0 for Rx, 1 for Tx, -1 for other causes 522 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
@@ -790,12 +790,10 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
790 total_packets += tx_buffer->gso_segs; 790 total_packets += tx_buffer->gso_segs;
791 791
792#ifdef CONFIG_IXGBE_PTP 792#ifdef CONFIG_IXGBE_PTP
793 if (unlikely(tx_buffer->tx_flags & 793 if (unlikely(tx_buffer->tx_flags & IXGBE_TX_FLAGS_TSTAMP))
794 IXGBE_TX_FLAGS_TSTAMP)) 794 ixgbe_ptp_tx_hwtstamp(q_vector, tx_buffer->skb);
795 ixgbe_ptp_tx_hwtstamp(q_vector,
796 tx_buffer->skb);
797
798#endif 795#endif
796
799 /* free the skb */ 797 /* free the skb */
800 dev_kfree_skb_any(tx_buffer->skb); 798 dev_kfree_skb_any(tx_buffer->skb);
801 799
@@ -1399,8 +1397,7 @@ static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
1399 ixgbe_rx_checksum(rx_ring, rx_desc, skb); 1397 ixgbe_rx_checksum(rx_ring, rx_desc, skb);
1400 1398
1401#ifdef CONFIG_IXGBE_PTP 1399#ifdef CONFIG_IXGBE_PTP
1402 if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_TS)) 1400 ixgbe_ptp_rx_hwtstamp(rx_ring->q_vector, rx_desc, skb);
1403 ixgbe_ptp_rx_hwtstamp(rx_ring->q_vector, skb);
1404#endif 1401#endif
1405 1402
1406 if ((dev->features & NETIF_F_HW_VLAN_RX) && 1403 if ((dev->features & NETIF_F_HW_VLAN_RX) &&
@@ -3594,7 +3591,7 @@ static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
3594} 3591}
3595 3592
3596#ifdef CONFIG_IXGBE_DCB 3593#ifdef CONFIG_IXGBE_DCB
3597/* 3594/**
3598 * ixgbe_configure_dcb - Configure DCB hardware 3595 * ixgbe_configure_dcb - Configure DCB hardware
3599 * @adapter: ixgbe adapter struct 3596 * @adapter: ixgbe adapter struct
3600 * 3597 *
@@ -3661,11 +3658,11 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
3661/* Additional bittime to account for IXGBE framing */ 3658/* Additional bittime to account for IXGBE framing */
3662#define IXGBE_ETH_FRAMING 20 3659#define IXGBE_ETH_FRAMING 20
3663 3660
3664/* 3661/**
3665 * ixgbe_hpbthresh - calculate high water mark for flow control 3662 * ixgbe_hpbthresh - calculate high water mark for flow control
3666 * 3663 *
3667 * @adapter: board private structure to calculate for 3664 * @adapter: board private structure to calculate for
3668 * @pb - packet buffer to calculate 3665 * @pb: packet buffer to calculate
3669 */ 3666 */
3670static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb) 3667static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb)
3671{ 3668{
@@ -3725,11 +3722,11 @@ static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb)
3725 return marker; 3722 return marker;
3726} 3723}
3727 3724
3728/* 3725/**
3729 * ixgbe_lpbthresh - calculate low water mark for for flow control 3726 * ixgbe_lpbthresh - calculate low water mark for for flow control
3730 * 3727 *
3731 * @adapter: board private structure to calculate for 3728 * @adapter: board private structure to calculate for
3732 * @pb - packet buffer to calculate 3729 * @pb: packet buffer to calculate
3733 */ 3730 */
3734static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter) 3731static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter)
3735{ 3732{
@@ -5246,7 +5243,7 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
5246 5243
5247/** 5244/**
5248 * ixgbe_fdir_reinit_subtask - worker thread to reinit FDIR filter table 5245 * ixgbe_fdir_reinit_subtask - worker thread to reinit FDIR filter table
5249 * @adapter - pointer to the device adapter structure 5246 * @adapter: pointer to the device adapter structure
5250 **/ 5247 **/
5251static void ixgbe_fdir_reinit_subtask(struct ixgbe_adapter *adapter) 5248static void ixgbe_fdir_reinit_subtask(struct ixgbe_adapter *adapter)
5252{ 5249{
@@ -5282,7 +5279,7 @@ static void ixgbe_fdir_reinit_subtask(struct ixgbe_adapter *adapter)
5282 5279
5283/** 5280/**
5284 * ixgbe_check_hang_subtask - check for hung queues and dropped interrupts 5281 * ixgbe_check_hang_subtask - check for hung queues and dropped interrupts
5285 * @adapter - pointer to the device adapter structure 5282 * @adapter: pointer to the device adapter structure
5286 * 5283 *
5287 * This function serves two purposes. First it strobes the interrupt lines 5284 * This function serves two purposes. First it strobes the interrupt lines
5288 * in order to make certain interrupts are occurring. Secondly it sets the 5285 * in order to make certain interrupts are occurring. Secondly it sets the
@@ -5330,8 +5327,8 @@ static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter)
5330 5327
5331/** 5328/**
5332 * ixgbe_watchdog_update_link - update the link status 5329 * ixgbe_watchdog_update_link - update the link status
5333 * @adapter - pointer to the device adapter structure 5330 * @adapter: pointer to the device adapter structure
5334 * @link_speed - pointer to a u32 to store the link_speed 5331 * @link_speed: pointer to a u32 to store the link_speed
5335 **/ 5332 **/
5336static void ixgbe_watchdog_update_link(struct ixgbe_adapter *adapter) 5333static void ixgbe_watchdog_update_link(struct ixgbe_adapter *adapter)
5337{ 5334{
@@ -5374,7 +5371,7 @@ static void ixgbe_watchdog_update_link(struct ixgbe_adapter *adapter)
5374/** 5371/**
5375 * ixgbe_watchdog_link_is_up - update netif_carrier status and 5372 * ixgbe_watchdog_link_is_up - update netif_carrier status and
5376 * print link up message 5373 * print link up message
5377 * @adapter - pointer to the device adapter structure 5374 * @adapter: pointer to the device adapter structure
5378 **/ 5375 **/
5379static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter) 5376static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
5380{ 5377{
@@ -5434,7 +5431,7 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
5434/** 5431/**
5435 * ixgbe_watchdog_link_is_down - update netif_carrier status and 5432 * ixgbe_watchdog_link_is_down - update netif_carrier status and
5436 * print link down message 5433 * print link down message
5437 * @adapter - pointer to the adapter structure 5434 * @adapter: pointer to the adapter structure
5438 **/ 5435 **/
5439static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *adapter) 5436static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *adapter)
5440{ 5437{
@@ -5462,7 +5459,7 @@ static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *adapter)
5462 5459
5463/** 5460/**
5464 * ixgbe_watchdog_flush_tx - flush queues on link down 5461 * ixgbe_watchdog_flush_tx - flush queues on link down
5465 * @adapter - pointer to the device adapter structure 5462 * @adapter: pointer to the device adapter structure
5466 **/ 5463 **/
5467static void ixgbe_watchdog_flush_tx(struct ixgbe_adapter *adapter) 5464static void ixgbe_watchdog_flush_tx(struct ixgbe_adapter *adapter)
5468{ 5465{
@@ -5511,7 +5508,7 @@ static void ixgbe_spoof_check(struct ixgbe_adapter *adapter)
5511 5508
5512/** 5509/**
5513 * ixgbe_watchdog_subtask - check and bring link up 5510 * ixgbe_watchdog_subtask - check and bring link up
5514 * @adapter - pointer to the device adapter structure 5511 * @adapter: pointer to the device adapter structure
5515 **/ 5512 **/
5516static void ixgbe_watchdog_subtask(struct ixgbe_adapter *adapter) 5513static void ixgbe_watchdog_subtask(struct ixgbe_adapter *adapter)
5517{ 5514{
@@ -5535,7 +5532,7 @@ static void ixgbe_watchdog_subtask(struct ixgbe_adapter *adapter)
5535 5532
5536/** 5533/**
5537 * ixgbe_sfp_detection_subtask - poll for SFP+ cable 5534 * ixgbe_sfp_detection_subtask - poll for SFP+ cable
5538 * @adapter - the ixgbe adapter structure 5535 * @adapter: the ixgbe adapter structure
5539 **/ 5536 **/
5540static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter) 5537static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter)
5541{ 5538{
@@ -5602,7 +5599,7 @@ sfp_out:
5602 5599
5603/** 5600/**
5604 * ixgbe_sfp_link_config_subtask - set up link SFP after module install 5601 * ixgbe_sfp_link_config_subtask - set up link SFP after module install
5605 * @adapter - the ixgbe adapter structure 5602 * @adapter: the ixgbe adapter structure
5606 **/ 5603 **/
5607static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter) 5604static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter)
5608{ 5605{
@@ -6389,17 +6386,12 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
6389 struct ixgbe_adapter *adapter = netdev_priv(netdev); 6386 struct ixgbe_adapter *adapter = netdev_priv(netdev);
6390 struct ixgbe_ring *tx_ring; 6387 struct ixgbe_ring *tx_ring;
6391 6388
6392 if (skb->len <= 0) {
6393 dev_kfree_skb_any(skb);
6394 return NETDEV_TX_OK;
6395 }
6396
6397 /* 6389 /*
6398 * The minimum packet size for olinfo paylen is 17 so pad the skb 6390 * The minimum packet size for olinfo paylen is 17 so pad the skb
6399 * in order to meet this minimum size requirement. 6391 * in order to meet this minimum size requirement.
6400 */ 6392 */
6401 if (skb->len < 17) { 6393 if (unlikely(skb->len < 17)) {
6402 if (skb_padto(skb, 17)) 6394 if (skb_pad(skb, 17 - skb->len))
6403 return NETDEV_TX_OK; 6395 return NETDEV_TX_OK;
6404 skb->len = 17; 6396 skb->len = 17;
6405 } 6397 }
@@ -6594,8 +6586,9 @@ static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev,
6594} 6586}
6595 6587
6596#ifdef CONFIG_IXGBE_DCB 6588#ifdef CONFIG_IXGBE_DCB
6597/* ixgbe_validate_rtr - verify 802.1Qp to Rx packet buffer mapping is valid. 6589/**
6598 * #adapter: pointer to ixgbe_adapter 6590 * ixgbe_validate_rtr - verify 802.1Qp to Rx packet buffer mapping is valid.
6591 * @adapter: pointer to ixgbe_adapter
6599 * @tc: number of traffic classes currently enabled 6592 * @tc: number of traffic classes currently enabled
6600 * 6593 *
6601 * Configure a valid 802.1Qp to Rx packet buffer mapping ie confirm 6594 * Configure a valid 802.1Qp to Rx packet buffer mapping ie confirm
@@ -6630,8 +6623,8 @@ static void ixgbe_validate_rtr(struct ixgbe_adapter *adapter, u8 tc)
6630 return; 6623 return;
6631} 6624}
6632 6625
6633/* ixgbe_setup_tc - routine to configure net_device for multiple traffic 6626/**
6634 * classes. 6627 * ixgbe_setup_tc - configure net_device for multiple traffic classes
6635 * 6628 *
6636 * @netdev: net device to configure 6629 * @netdev: net device to configure
6637 * @tc: number of traffic classes to enable 6630 * @tc: number of traffic classes to enable
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index 24117709d6a..71659edf81a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -907,6 +907,8 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
907 * 8 SFP_act_lmt_DA_CORE1 - 82599-specific 907 * 8 SFP_act_lmt_DA_CORE1 - 82599-specific
908 * 9 SFP_1g_cu_CORE0 - 82599-specific 908 * 9 SFP_1g_cu_CORE0 - 82599-specific
909 * 10 SFP_1g_cu_CORE1 - 82599-specific 909 * 10 SFP_1g_cu_CORE1 - 82599-specific
910 * 11 SFP_1g_sx_CORE0 - 82599-specific
911 * 12 SFP_1g_sx_CORE1 - 82599-specific
910 */ 912 */
911 if (hw->mac.type == ixgbe_mac_82598EB) { 913 if (hw->mac.type == ixgbe_mac_82598EB) {
912 if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) 914 if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
@@ -957,6 +959,13 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
957 else 959 else
958 hw->phy.sfp_type = 960 hw->phy.sfp_type =
959 ixgbe_sfp_type_1g_cu_core1; 961 ixgbe_sfp_type_1g_cu_core1;
962 } else if (comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) {
963 if (hw->bus.lan_id == 0)
964 hw->phy.sfp_type =
965 ixgbe_sfp_type_1g_sx_core0;
966 else
967 hw->phy.sfp_type =
968 ixgbe_sfp_type_1g_sx_core1;
960 } else { 969 } else {
961 hw->phy.sfp_type = ixgbe_sfp_type_unknown; 970 hw->phy.sfp_type = ixgbe_sfp_type_unknown;
962 } 971 }
@@ -1049,7 +1058,9 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
1049 /* Verify supported 1G SFP modules */ 1058 /* Verify supported 1G SFP modules */
1050 if (comp_codes_10g == 0 && 1059 if (comp_codes_10g == 0 &&
1051 !(hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 || 1060 !(hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
1052 hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0)) { 1061 hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
1062 hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
1063 hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1)) {
1053 hw->phy.type = ixgbe_phy_sfp_unsupported; 1064 hw->phy.type = ixgbe_phy_sfp_unsupported;
1054 status = IXGBE_ERR_SFP_NOT_SUPPORTED; 1065 status = IXGBE_ERR_SFP_NOT_SUPPORTED;
1055 goto out; 1066 goto out;
@@ -1064,7 +1075,9 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
1064 hw->mac.ops.get_device_caps(hw, &enforce_sfp); 1075 hw->mac.ops.get_device_caps(hw, &enforce_sfp);
1065 if (!(enforce_sfp & IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP) && 1076 if (!(enforce_sfp & IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP) &&
1066 !((hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0) || 1077 !((hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0) ||
1067 (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1))) { 1078 (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1) ||
1079 (hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0) ||
1080 (hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1))) {
1068 /* Make sure we're a supported PHY type */ 1081 /* Make sure we're a supported PHY type */
1069 if (hw->phy.type == ixgbe_phy_sfp_intel) { 1082 if (hw->phy.type == ixgbe_phy_sfp_intel) {
1070 status = 0; 1083 status = 0;
@@ -1128,10 +1141,12 @@ s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
1128 * SR modules 1141 * SR modules
1129 */ 1142 */
1130 if (sfp_type == ixgbe_sfp_type_da_act_lmt_core0 || 1143 if (sfp_type == ixgbe_sfp_type_da_act_lmt_core0 ||
1131 sfp_type == ixgbe_sfp_type_1g_cu_core0) 1144 sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
1145 sfp_type == ixgbe_sfp_type_1g_sx_core0)
1132 sfp_type = ixgbe_sfp_type_srlr_core0; 1146 sfp_type = ixgbe_sfp_type_srlr_core0;
1133 else if (sfp_type == ixgbe_sfp_type_da_act_lmt_core1 || 1147 else if (sfp_type == ixgbe_sfp_type_da_act_lmt_core1 ||
1134 sfp_type == ixgbe_sfp_type_1g_cu_core1) 1148 sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
1149 sfp_type == ixgbe_sfp_type_1g_sx_core1)
1135 sfp_type = ixgbe_sfp_type_srlr_core1; 1150 sfp_type = ixgbe_sfp_type_srlr_core1;
1136 1151
1137 /* Read offset to PHY init contents */ 1152 /* Read offset to PHY init contents */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
index dcebd128bec..3456d561714 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -26,6 +26,7 @@
26*******************************************************************************/ 26*******************************************************************************/
27#include "ixgbe.h" 27#include "ixgbe.h"
28#include <linux/export.h> 28#include <linux/export.h>
29#include <linux/ptp_classify.h>
29 30
30/* 31/*
31 * The 82599 and the X540 do not have true 64bit nanosecond scale 32 * The 82599 and the X540 do not have true 64bit nanosecond scale
@@ -100,9 +101,13 @@
100#define NSECS_PER_SEC 1000000000ULL 101#define NSECS_PER_SEC 1000000000ULL
101#endif 102#endif
102 103
104static struct sock_filter ptp_filter[] = {
105 PTP_FILTER
106};
107
103/** 108/**
104 * ixgbe_ptp_read - read raw cycle counter (to be used by time counter) 109 * ixgbe_ptp_read - read raw cycle counter (to be used by time counter)
105 * @cc - the cyclecounter structure 110 * @cc: the cyclecounter structure
106 * 111 *
107 * this function reads the cyclecounter registers and is called by the 112 * this function reads the cyclecounter registers and is called by the
108 * cyclecounter structure used to construct a ns counter from the 113 * cyclecounter structure used to construct a ns counter from the
@@ -123,8 +128,8 @@ static cycle_t ixgbe_ptp_read(const struct cyclecounter *cc)
123 128
124/** 129/**
125 * ixgbe_ptp_adjfreq 130 * ixgbe_ptp_adjfreq
126 * @ptp - the ptp clock structure 131 * @ptp: the ptp clock structure
127 * @ppb - parts per billion adjustment from base 132 * @ppb: parts per billion adjustment from base
128 * 133 *
129 * adjust the frequency of the ptp cycle counter by the 134 * adjust the frequency of the ptp cycle counter by the
130 * indicated ppb from the base frequency. 135 * indicated ppb from the base frequency.
@@ -170,8 +175,8 @@ static int ixgbe_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
170 175
171/** 176/**
172 * ixgbe_ptp_adjtime 177 * ixgbe_ptp_adjtime
173 * @ptp - the ptp clock structure 178 * @ptp: the ptp clock structure
174 * @delta - offset to adjust the cycle counter by 179 * @delta: offset to adjust the cycle counter by
175 * 180 *
176 * adjust the timer by resetting the timecounter structure. 181 * adjust the timer by resetting the timecounter structure.
177 */ 182 */
@@ -198,8 +203,8 @@ static int ixgbe_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
198 203
199/** 204/**
200 * ixgbe_ptp_gettime 205 * ixgbe_ptp_gettime
201 * @ptp - the ptp clock structure 206 * @ptp: the ptp clock structure
202 * @ts - timespec structure to hold the current time value 207 * @ts: timespec structure to hold the current time value
203 * 208 *
204 * read the timecounter and return the correct value on ns, 209 * read the timecounter and return the correct value on ns,
205 * after converting it into a struct timespec. 210 * after converting it into a struct timespec.
@@ -224,8 +229,8 @@ static int ixgbe_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
224 229
225/** 230/**
226 * ixgbe_ptp_settime 231 * ixgbe_ptp_settime
227 * @ptp - the ptp clock structure 232 * @ptp: the ptp clock structure
228 * @ts - the timespec containing the new time for the cycle counter 233 * @ts: the timespec containing the new time for the cycle counter
229 * 234 *
230 * reset the timecounter to use a new base value instead of the kernel 235 * reset the timecounter to use a new base value instead of the kernel
231 * wall timer value. 236 * wall timer value.
@@ -251,9 +256,9 @@ static int ixgbe_ptp_settime(struct ptp_clock_info *ptp,
251 256
252/** 257/**
253 * ixgbe_ptp_enable 258 * ixgbe_ptp_enable
254 * @ptp - the ptp clock structure 259 * @ptp: the ptp clock structure
255 * @rq - the requested feature to change 260 * @rq: the requested feature to change
256 * @on - whether to enable or disable the feature 261 * @on: whether to enable or disable the feature
257 * 262 *
258 * enable (or disable) ancillary features of the phc subsystem. 263 * enable (or disable) ancillary features of the phc subsystem.
259 * our driver only supports the PPS feature on the X540 264 * our driver only supports the PPS feature on the X540
@@ -289,8 +294,8 @@ static int ixgbe_ptp_enable(struct ptp_clock_info *ptp,
289 294
290/** 295/**
291 * ixgbe_ptp_check_pps_event 296 * ixgbe_ptp_check_pps_event
292 * @adapter - the private adapter structure 297 * @adapter: the private adapter structure
293 * @eicr - the interrupt cause register value 298 * @eicr: the interrupt cause register value
294 * 299 *
295 * This function is called by the interrupt routine when checking for 300 * This function is called by the interrupt routine when checking for
296 * interrupts. It will check and handle a pps event. 301 * interrupts. It will check and handle a pps event.
@@ -307,20 +312,21 @@ void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter, u32 eicr)
307 !(adapter->flags2 & IXGBE_FLAG2_PTP_PPS_ENABLED)) 312 !(adapter->flags2 & IXGBE_FLAG2_PTP_PPS_ENABLED))
308 return; 313 return;
309 314
310 switch (hw->mac.type) { 315 if (unlikely(eicr & IXGBE_EICR_TIMESYNC)) {
311 case ixgbe_mac_X540: 316 switch (hw->mac.type) {
312 if (eicr & IXGBE_EICR_TIMESYNC) 317 case ixgbe_mac_X540:
313 ptp_clock_event(adapter->ptp_clock, &event); 318 ptp_clock_event(adapter->ptp_clock, &event);
314 break; 319 break;
315 default: 320 default:
316 break; 321 break;
322 }
317 } 323 }
318} 324}
319 325
320/** 326/**
321 * ixgbe_ptp_enable_sdp 327 * ixgbe_ptp_enable_sdp
322 * @hw - the hardware private structure 328 * @hw: the hardware private structure
323 * @shift - the clock shift for calculating nanoseconds 329 * @shift: the clock shift for calculating nanoseconds
324 * 330 *
325 * this function enables the clock out feature on the sdp0 for the 331 * this function enables the clock out feature on the sdp0 for the
326 * X540 device. It will create a 1second periodic output that can be 332 * X540 device. It will create a 1second periodic output that can be
@@ -393,7 +399,7 @@ static void ixgbe_ptp_enable_sdp(struct ixgbe_hw *hw, int shift)
393 399
394/** 400/**
395 * ixgbe_ptp_disable_sdp 401 * ixgbe_ptp_disable_sdp
396 * @hw - the private hardware structure 402 * @hw: the private hardware structure
397 * 403 *
398 * this function disables the auxiliary SDP clock out feature 404 * this function disables the auxiliary SDP clock out feature
399 */ 405 */
@@ -425,6 +431,68 @@ void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter)
425} 431}
426 432
427/** 433/**
434 * ixgbe_ptp_match - determine if this skb matches a ptp packet
435 * @skb: pointer to the skb
436 * @hwtstamp: pointer to the hwtstamp_config to check
437 *
438 * Determine whether the skb should have been timestamped, assuming the
439 * hwtstamp was set via the hwtstamp ioctl. Returns non-zero when the packet
440 * should have a timestamp waiting in the registers, and 0 otherwise.
441 *
442 * V1 packets have to check the version type to determine whether they are
443 * correct. However, we can't directly access the data because it might be
444 * fragmented in the SKB, in paged memory. In order to work around this, we
445 * use skb_copy_bits which will properly copy the data whether it is in the
446 * paged memory fragments or not. We have to copy the IP header as well as the
447 * message type.
448 */
449static int ixgbe_ptp_match(struct sk_buff *skb, int rx_filter)
450{
451 struct iphdr iph;
452 u8 msgtype;
453 unsigned int type, offset;
454
455 if (rx_filter == HWTSTAMP_FILTER_NONE)
456 return 0;
457
458 type = sk_run_filter(skb, ptp_filter);
459
460 if (likely(rx_filter == HWTSTAMP_FILTER_PTP_V2_EVENT))
461 return type & PTP_CLASS_V2;
462
463 /* For the remaining cases actually check message type */
464 switch (type) {
465 case PTP_CLASS_V1_IPV4:
466 skb_copy_bits(skb, OFF_IHL, &iph, sizeof(iph));
467 offset = ETH_HLEN + (iph.ihl << 2) + UDP_HLEN + OFF_PTP_CONTROL;
468 break;
469 case PTP_CLASS_V1_IPV6:
470 offset = OFF_PTP6 + OFF_PTP_CONTROL;
471 break;
472 default:
473 /* other cases invalid or handled above */
474 return 0;
475 }
476
477 /* Make sure our buffer is long enough */
478 if (skb->len < offset)
479 return 0;
480
481 skb_copy_bits(skb, offset, &msgtype, sizeof(msgtype));
482
483 switch (rx_filter) {
484 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
485 return (msgtype == IXGBE_RXMTRL_V1_SYNC_MSG);
486 break;
487 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
488 return (msgtype == IXGBE_RXMTRL_V1_DELAY_REQ_MSG);
489 break;
490 default:
491 return 0;
492 }
493}
494
495/**
428 * ixgbe_ptp_tx_hwtstamp - utility function which checks for TX time stamp 496 * ixgbe_ptp_tx_hwtstamp - utility function which checks for TX time stamp
429 * @q_vector: structure containing interrupt and ring information 497 * @q_vector: structure containing interrupt and ring information
430 * @skb: particular skb to send timestamp with 498 * @skb: particular skb to send timestamp with
@@ -473,6 +541,7 @@ void ixgbe_ptp_tx_hwtstamp(struct ixgbe_q_vector *q_vector,
473/** 541/**
474 * ixgbe_ptp_rx_hwtstamp - utility function which checks for RX time stamp 542 * ixgbe_ptp_rx_hwtstamp - utility function which checks for RX time stamp
475 * @q_vector: structure containing interrupt and ring information 543 * @q_vector: structure containing interrupt and ring information
544 * @rx_desc: the rx descriptor
476 * @skb: particular skb to send timestamp with 545 * @skb: particular skb to send timestamp with
477 * 546 *
478 * if the timestamp is valid, we convert it into the timecounter ns 547 * if the timestamp is valid, we convert it into the timecounter ns
@@ -480,6 +549,7 @@ void ixgbe_ptp_tx_hwtstamp(struct ixgbe_q_vector *q_vector,
480 * is passed up the network stack 549 * is passed up the network stack
481 */ 550 */
482void ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector, 551void ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector,
552 union ixgbe_adv_rx_desc *rx_desc,
483 struct sk_buff *skb) 553 struct sk_buff *skb)
484{ 554{
485 struct ixgbe_adapter *adapter; 555 struct ixgbe_adapter *adapter;
@@ -497,21 +567,33 @@ void ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector,
497 hw = &adapter->hw; 567 hw = &adapter->hw;
498 568
499 tsyncrxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL); 569 tsyncrxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
570
571 /* Check if we have a valid timestamp and make sure the skb should
572 * have been timestamped */
573 if (likely(!(tsyncrxctl & IXGBE_TSYNCRXCTL_VALID) ||
574 !ixgbe_ptp_match(skb, adapter->rx_hwtstamp_filter)))
575 return;
576
577 /*
578 * Always read the registers, in order to clear a possible fault
579 * because of stagnant RX timestamp values for a packet that never
580 * reached the queue.
581 */
500 regval |= (u64)IXGBE_READ_REG(hw, IXGBE_RXSTMPL); 582 regval |= (u64)IXGBE_READ_REG(hw, IXGBE_RXSTMPL);
501 regval |= (u64)IXGBE_READ_REG(hw, IXGBE_RXSTMPH) << 32; 583 regval |= (u64)IXGBE_READ_REG(hw, IXGBE_RXSTMPH) << 32;
502 584
503 /* 585 /*
504 * If this bit is set, then the RX registers contain the time stamp. No 586 * If the timestamp bit is set in the packet's descriptor, we know the
505 * other packet will be time stamped until we read these registers, so 587 * timestamp belongs to this packet. No other packet can be
506 * read the registers to make them available again. Because only one 588 * timestamped until the registers for timestamping have been read.
507 * packet can be time stamped at a time, we know that the register 589 * Therefor only one packet with this bit can be in the queue at a
508 * values must belong to this one here and therefore we don't need to 590 * time, and the rx timestamp values that were in the registers belong
509 * compare any of the additional attributes stored for it. 591 * to this packet.
510 * 592 *
511 * If nothing went wrong, then it should have a skb_shared_tx that we 593 * If nothing went wrong, then it should have a skb_shared_tx that we
512 * can turn into a skb_shared_hwtstamps. 594 * can turn into a skb_shared_hwtstamps.
513 */ 595 */
514 if (!(tsyncrxctl & IXGBE_TSYNCRXCTL_VALID)) 596 if (unlikely(!ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_TS)))
515 return; 597 return;
516 598
517 spin_lock_irqsave(&adapter->tmreg_lock, flags); 599 spin_lock_irqsave(&adapter->tmreg_lock, flags);
@@ -539,6 +621,11 @@ void ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector,
539 * type has to be specified. Matching the kind of event packet is 621 * type has to be specified. Matching the kind of event packet is
540 * not supported, with the exception of "all V2 events regardless of 622 * not supported, with the exception of "all V2 events regardless of
541 * level 2 or 4". 623 * level 2 or 4".
624 *
625 * Since hardware always timestamps Path delay packets when timestamping V2
626 * packets, regardless of the type specified in the register, only use V2
627 * Event mode. This more accurately tells the user what the hardware is going
628 * to do anyways.
542 */ 629 */
543int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter, 630int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter,
544 struct ifreq *ifr, int cmd) 631 struct ifreq *ifr, int cmd)
@@ -582,41 +669,30 @@ int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter,
582 tsync_rx_mtrl = IXGBE_RXMTRL_V1_DELAY_REQ_MSG; 669 tsync_rx_mtrl = IXGBE_RXMTRL_V1_DELAY_REQ_MSG;
583 is_l4 = true; 670 is_l4 = true;
584 break; 671 break;
672 case HWTSTAMP_FILTER_PTP_V2_EVENT:
673 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
674 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
585 case HWTSTAMP_FILTER_PTP_V2_SYNC: 675 case HWTSTAMP_FILTER_PTP_V2_SYNC:
586 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: 676 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
587 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: 677 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
588 tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L2_L4_V2;
589 tsync_rx_mtrl = IXGBE_RXMTRL_V2_SYNC_MSG;
590 is_l2 = true;
591 is_l4 = true;
592 config.rx_filter = HWTSTAMP_FILTER_SOME;
593 break;
594 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: 678 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
595 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: 679 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
596 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: 680 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
597 tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L2_L4_V2;
598 tsync_rx_mtrl = IXGBE_RXMTRL_V2_DELAY_REQ_MSG;
599 is_l2 = true;
600 is_l4 = true;
601 config.rx_filter = HWTSTAMP_FILTER_SOME;
602 break;
603 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
604 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
605 case HWTSTAMP_FILTER_PTP_V2_EVENT:
606 tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_EVENT_V2; 681 tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_EVENT_V2;
607 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
608 is_l2 = true; 682 is_l2 = true;
609 is_l4 = true; 683 is_l4 = true;
684 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
610 break; 685 break;
611 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: 686 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
612 case HWTSTAMP_FILTER_ALL: 687 case HWTSTAMP_FILTER_ALL:
613 default: 688 default:
614 /* 689 /*
615 * register RXMTRL must be set, therefore it is not 690 * register RXMTRL must be set in order to do V1 packets,
616 * possible to time stamp both V1 Sync and Delay_Req messages 691 * therefore it is not possible to time stamp both V1 Sync and
617 * and hardware does not support timestamping all packets 692 * Delay_Req messages and hardware does not support
618 * => return error 693 * timestamping all packets => return error
619 */ 694 */
695 config.rx_filter = HWTSTAMP_FILTER_NONE;
620 return -ERANGE; 696 return -ERANGE;
621 } 697 }
622 698
@@ -626,6 +702,9 @@ int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter,
626 return 0; 702 return 0;
627 } 703 }
628 704
705 /* Store filter value for later use */
706 adapter->rx_hwtstamp_filter = config.rx_filter;
707
629 /* define ethertype filter for timestamped packets */ 708 /* define ethertype filter for timestamped packets */
630 if (is_l2) 709 if (is_l2)
631 IXGBE_WRITE_REG(hw, IXGBE_ETQF(3), 710 IXGBE_WRITE_REG(hw, IXGBE_ETQF(3),
@@ -690,7 +769,7 @@ int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter,
690 769
691/** 770/**
692 * ixgbe_ptp_start_cyclecounter - create the cycle counter from hw 771 * ixgbe_ptp_start_cyclecounter - create the cycle counter from hw
693 * @adapter - pointer to the adapter structure 772 * @adapter: pointer to the adapter structure
694 * 773 *
695 * this function initializes the timecounter and cyclecounter 774 * this function initializes the timecounter and cyclecounter
696 * structures for use in generated a ns counter from the arbitrary 775 * structures for use in generated a ns counter from the arbitrary
@@ -826,7 +905,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
826 905
827/** 906/**
828 * ixgbe_ptp_init 907 * ixgbe_ptp_init
829 * @adapter - the ixgbe private adapter structure 908 * @adapter: the ixgbe private adapter structure
830 * 909 *
831 * This function performs the required steps for enabling ptp 910 * This function performs the required steps for enabling ptp
832 * support. If ptp support has already been loaded it simply calls the 911 * support. If ptp support has already been loaded it simply calls the
@@ -870,6 +949,10 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter)
870 return; 949 return;
871 } 950 }
872 951
952 /* initialize the ptp filter */
953 if (ptp_filter_init(ptp_filter, ARRAY_SIZE(ptp_filter)))
954 e_dev_warn("ptp_filter_init failed\n");
955
873 spin_lock_init(&adapter->tmreg_lock); 956 spin_lock_init(&adapter->tmreg_lock);
874 957
875 ixgbe_ptp_start_cyclecounter(adapter); 958 ixgbe_ptp_start_cyclecounter(adapter);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c
index 1d80b1cefa6..16ddf14e8ba 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c
@@ -37,7 +37,6 @@
37#include <linux/netdevice.h> 37#include <linux/netdevice.h>
38#include <linux/hwmon.h> 38#include <linux/hwmon.h>
39 39
40#ifdef CONFIG_IXGBE_HWMON
41/* hwmon callback functions */ 40/* hwmon callback functions */
42static ssize_t ixgbe_hwmon_show_location(struct device *dev, 41static ssize_t ixgbe_hwmon_show_location(struct device *dev,
43 struct device_attribute *attr, 42 struct device_attribute *attr,
@@ -96,11 +95,11 @@ static ssize_t ixgbe_hwmon_show_maxopthresh(struct device *dev,
96 return sprintf(buf, "%u\n", value); 95 return sprintf(buf, "%u\n", value);
97} 96}
98 97
99/* 98/**
100 * ixgbe_add_hwmon_attr - Create hwmon attr table for a hwmon sysfs file. 99 * ixgbe_add_hwmon_attr - Create hwmon attr table for a hwmon sysfs file.
101 * @ adapter: pointer to the adapter structure 100 * @adapter: pointer to the adapter structure
102 * @ offset: offset in the eeprom sensor data table 101 * @offset: offset in the eeprom sensor data table
103 * @ type: type of sensor data to display 102 * @type: type of sensor data to display
104 * 103 *
105 * For each file we want in hwmon's sysfs interface we need a device_attribute 104 * For each file we want in hwmon's sysfs interface we need a device_attribute
106 * This is included in our hwmon_attr struct that contains the references to 105 * This is included in our hwmon_attr struct that contains the references to
@@ -241,5 +240,4 @@ err:
241exit: 240exit:
242 return rc; 241 return rc;
243} 242}
244#endif /* CONFIG_IXGBE_HWMON */
245 243
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index 204848d2448..7416d22ec22 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -2419,7 +2419,7 @@ typedef u32 ixgbe_physical_layer;
2419 */ 2419 */
2420 2420
2421/* BitTimes (BT) conversion */ 2421/* BitTimes (BT) conversion */
2422#define IXGBE_BT2KB(BT) ((BT + 1023) / (8 * 1024)) 2422#define IXGBE_BT2KB(BT) ((BT + (8 * 1024 - 1)) / (8 * 1024))
2423#define IXGBE_B2BT(BT) (BT * 8) 2423#define IXGBE_B2BT(BT) (BT * 8)
2424 2424
2425/* Calculate Delay to respond to PFC */ 2425/* Calculate Delay to respond to PFC */
@@ -2450,24 +2450,31 @@ typedef u32 ixgbe_physical_layer;
2450#define IXGBE_PCI_DELAY 10000 2450#define IXGBE_PCI_DELAY 10000
2451 2451
2452/* Calculate X540 delay value in bit times */ 2452/* Calculate X540 delay value in bit times */
2453#define IXGBE_FILL_RATE (36 / 25) 2453#define IXGBE_DV_X540(_max_frame_link, _max_frame_tc) \
2454 2454 ((36 * \
2455#define IXGBE_DV_X540(LINK, TC) (IXGBE_FILL_RATE * \ 2455 (IXGBE_B2BT(_max_frame_link) + \
2456 (IXGBE_B2BT(LINK) + IXGBE_PFC_D + \ 2456 IXGBE_PFC_D + \
2457 (2 * IXGBE_CABLE_DC) + \ 2457 (2 * IXGBE_CABLE_DC) + \
2458 (2 * IXGBE_ID_X540) + \ 2458 (2 * IXGBE_ID_X540) + \
2459 IXGBE_HD + IXGBE_B2BT(TC))) 2459 IXGBE_HD) / 25 + 1) + \
2460 2 * IXGBE_B2BT(_max_frame_tc))
2460 2461
2461/* Calculate 82599, 82598 delay value in bit times */ 2462/* Calculate 82599, 82598 delay value in bit times */
2462#define IXGBE_DV(LINK, TC) (IXGBE_FILL_RATE * \ 2463#define IXGBE_DV(_max_frame_link, _max_frame_tc) \
2463 (IXGBE_B2BT(LINK) + IXGBE_PFC_D + \ 2464 ((36 * \
2464 (2 * IXGBE_CABLE_DC) + (2 * IXGBE_ID) + \ 2465 (IXGBE_B2BT(_max_frame_link) + \
2465 IXGBE_HD + IXGBE_B2BT(TC))) 2466 IXGBE_PFC_D + \
2467 (2 * IXGBE_CABLE_DC) + \
2468 (2 * IXGBE_ID) + \
2469 IXGBE_HD) / 25 + 1) + \
2470 2 * IXGBE_B2BT(_max_frame_tc))
2466 2471
2467/* Calculate low threshold delay values */ 2472/* Calculate low threshold delay values */
2468#define IXGBE_LOW_DV_X540(TC) (2 * IXGBE_B2BT(TC) + \ 2473#define IXGBE_LOW_DV_X540(_max_frame_tc) \
2469 (IXGBE_FILL_RATE * IXGBE_PCI_DELAY)) 2474 (2 * IXGBE_B2BT(_max_frame_tc) + \
2470#define IXGBE_LOW_DV(TC) (2 * IXGBE_LOW_DV_X540(TC)) 2475 (36 * IXGBE_PCI_DELAY / 25) + 1)
2476#define IXGBE_LOW_DV(_max_frame_tc) \
2477 (2 * IXGBE_LOW_DV_X540(_max_frame_tc))
2471 2478
2472/* Software ATR hash keys */ 2479/* Software ATR hash keys */
2473#define IXGBE_ATR_BUCKET_HASH_KEY 0x3DAD14E2 2480#define IXGBE_ATR_BUCKET_HASH_KEY 0x3DAD14E2
@@ -2597,6 +2604,8 @@ enum ixgbe_sfp_type {
2597 ixgbe_sfp_type_da_act_lmt_core1 = 8, 2604 ixgbe_sfp_type_da_act_lmt_core1 = 8,
2598 ixgbe_sfp_type_1g_cu_core0 = 9, 2605 ixgbe_sfp_type_1g_cu_core0 = 9,
2599 ixgbe_sfp_type_1g_cu_core1 = 10, 2606 ixgbe_sfp_type_1g_cu_core1 = 10,
2607 ixgbe_sfp_type_1g_sx_core0 = 11,
2608 ixgbe_sfp_type_1g_sx_core1 = 12,
2600 ixgbe_sfp_type_not_present = 0xFFFE, 2609 ixgbe_sfp_type_not_present = 0xFFFE,
2601 ixgbe_sfp_type_unknown = 0xFFFF 2610 ixgbe_sfp_type_unknown = 0xFFFF
2602}; 2611};
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index f69ec4288b1..0368160286f 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -115,7 +115,7 @@ static inline void ixgbevf_release_rx_desc(struct ixgbe_hw *hw,
115 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rx_ring->reg_idx), val); 115 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rx_ring->reg_idx), val);
116} 116}
117 117
118/* 118/**
119 * ixgbevf_set_ivar - set IVAR registers - maps interrupt causes to vectors 119 * ixgbevf_set_ivar - set IVAR registers - maps interrupt causes to vectors
120 * @adapter: pointer to adapter struct 120 * @adapter: pointer to adapter struct
121 * @direction: 0 for Rx, 1 for Tx, -1 for other causes 121 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
@@ -1942,8 +1942,8 @@ static void ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
1942 } 1942 }
1943} 1943}
1944 1944
1945/* 1945/**
1946 * ixgbevf_set_num_queues: Allocate queues for device, feature dependent 1946 * ixgbevf_set_num_queues - Allocate queues for device, feature dependent
1947 * @adapter: board private structure to initialize 1947 * @adapter: board private structure to initialize
1948 * 1948 *
1949 * This is the top level queue allocation routine. The order here is very 1949 * This is the top level queue allocation routine. The order here is very
diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c
index 5dc9cbd5151..9fa39ebf545 100644
--- a/drivers/net/ethernet/lantiq_etop.c
+++ b/drivers/net/ethernet/lantiq_etop.c
@@ -149,7 +149,6 @@ ltq_etop_hw_receive(struct ltq_etop_chan *ch)
149 spin_unlock_irqrestore(&priv->lock, flags); 149 spin_unlock_irqrestore(&priv->lock, flags);
150 150
151 skb_put(skb, len); 151 skb_put(skb, len);
152 skb->dev = ch->netdev;
153 skb->protocol = eth_type_trans(skb, ch->netdev); 152 skb->protocol = eth_type_trans(skb, ch->netdev);
154 netif_receive_skb(skb); 153 netif_receive_skb(skb);
155} 154}
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index f0f06b2bc28..770ee557924 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -1896,7 +1896,7 @@ static int rxq_init(struct mv643xx_eth_private *mp, int index)
1896 goto out_free; 1896 goto out_free;
1897 } 1897 }
1898 1898
1899 rx_desc = (struct rx_desc *)rxq->rx_desc_area; 1899 rx_desc = rxq->rx_desc_area;
1900 for (i = 0; i < rxq->rx_ring_size; i++) { 1900 for (i = 0; i < rxq->rx_ring_size; i++) {
1901 int nexti; 1901 int nexti;
1902 1902
@@ -2001,7 +2001,7 @@ static int txq_init(struct mv643xx_eth_private *mp, int index)
2001 2001
2002 txq->tx_desc_area_size = size; 2002 txq->tx_desc_area_size = size;
2003 2003
2004 tx_desc = (struct tx_desc *)txq->tx_desc_area; 2004 tx_desc = txq->tx_desc_area;
2005 for (i = 0; i < txq->tx_ring_size; i++) { 2005 for (i = 0; i < txq->tx_ring_size; i++) {
2006 struct tx_desc *txd = tx_desc + i; 2006 struct tx_desc *txd = tx_desc + i;
2007 int nexti; 2007 int nexti;
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index 1db023b075a..59489722e89 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -1032,7 +1032,7 @@ static int rxq_init(struct net_device *dev)
1032 } 1032 }
1033 memset((void *)pep->p_rx_desc_area, 0, size); 1033 memset((void *)pep->p_rx_desc_area, 0, size);
1034 /* initialize the next_desc_ptr links in the Rx descriptors ring */ 1034 /* initialize the next_desc_ptr links in the Rx descriptors ring */
1035 p_rx_desc = (struct rx_desc *)pep->p_rx_desc_area; 1035 p_rx_desc = pep->p_rx_desc_area;
1036 for (i = 0; i < rx_desc_num; i++) { 1036 for (i = 0; i < rx_desc_num; i++) {
1037 p_rx_desc[i].next_desc_ptr = pep->rx_desc_dma + 1037 p_rx_desc[i].next_desc_ptr = pep->rx_desc_dma +
1038 ((i + 1) % rx_desc_num) * sizeof(struct rx_desc); 1038 ((i + 1) % rx_desc_num) * sizeof(struct rx_desc);
@@ -1095,7 +1095,7 @@ static int txq_init(struct net_device *dev)
1095 } 1095 }
1096 memset((void *)pep->p_tx_desc_area, 0, pep->tx_desc_area_size); 1096 memset((void *)pep->p_tx_desc_area, 0, pep->tx_desc_area_size);
1097 /* Initialize the next_desc_ptr links in the Tx descriptors ring */ 1097 /* Initialize the next_desc_ptr links in the Tx descriptors ring */
1098 p_tx_desc = (struct tx_desc *)pep->p_tx_desc_area; 1098 p_tx_desc = pep->p_tx_desc_area;
1099 for (i = 0; i < tx_desc_num; i++) { 1099 for (i = 0; i < tx_desc_num; i++) {
1100 p_tx_desc[i].next_desc_ptr = pep->tx_desc_dma + 1100 p_tx_desc[i].next_desc_ptr = pep->tx_desc_dma +
1101 ((i + 1) % tx_desc_num) * sizeof(struct tx_desc); 1101 ((i + 1) % tx_desc_num) * sizeof(struct tx_desc);
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index 28a54451a3e..2b0748dba8b 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -141,6 +141,7 @@ static DEFINE_PCI_DEVICE_TABLE(sky2_id_table) = {
141 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4370) }, /* 88E8075 */ 141 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4370) }, /* 88E8075 */
142 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4380) }, /* 88E8057 */ 142 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4380) }, /* 88E8057 */
143 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4381) }, /* 88E8059 */ 143 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4381) }, /* 88E8059 */
144 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4382) }, /* 88E8079 */
144 { 0 } 145 { 0 }
145}; 146};
146 147
@@ -3079,8 +3080,10 @@ static irqreturn_t sky2_intr(int irq, void *dev_id)
3079 3080
3080 /* Reading this mask interrupts as side effect */ 3081 /* Reading this mask interrupts as side effect */
3081 status = sky2_read32(hw, B0_Y2_SP_ISRC2); 3082 status = sky2_read32(hw, B0_Y2_SP_ISRC2);
3082 if (status == 0 || status == ~0) 3083 if (status == 0 || status == ~0) {
3084 sky2_write32(hw, B0_Y2_SP_ICR, 2);
3083 return IRQ_NONE; 3085 return IRQ_NONE;
3086 }
3084 3087
3085 prefetch(&hw->st_le[hw->st_idx]); 3088 prefetch(&hw->st_le[hw->st_idx]);
3086 3089
@@ -3349,6 +3352,17 @@ static void sky2_reset(struct sky2_hw *hw)
3349 sky2_pci_write16(hw, pdev->pcie_cap + PCI_EXP_LNKCTL, 3352 sky2_pci_write16(hw, pdev->pcie_cap + PCI_EXP_LNKCTL,
3350 reg); 3353 reg);
3351 3354
3355 if (hw->chip_id == CHIP_ID_YUKON_PRM &&
3356 hw->chip_rev == CHIP_REV_YU_PRM_A0) {
3357 /* change PHY Interrupt polarity to low active */
3358 reg = sky2_read16(hw, GPHY_CTRL);
3359 sky2_write16(hw, GPHY_CTRL, reg | GPC_INTPOL);
3360
3361 /* adapt HW for low active PHY Interrupt */
3362 reg = sky2_read16(hw, Y2_CFG_SPC + PCI_LDO_CTRL);
3363 sky2_write16(hw, Y2_CFG_SPC + PCI_LDO_CTRL, reg | PHY_M_UNDOC1);
3364 }
3365
3352 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); 3366 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
3353 3367
3354 /* re-enable PEX PM in PEX PHY debug reg. 8 (clear bit 12) */ 3368 /* re-enable PEX PM in PEX PHY debug reg. 8 (clear bit 12) */
@@ -4871,7 +4885,7 @@ static const char *sky2_name(u8 chipid, char *buf, int sz)
4871 "UL 2", /* 0xba */ 4885 "UL 2", /* 0xba */
4872 "Unknown", /* 0xbb */ 4886 "Unknown", /* 0xbb */
4873 "Optima", /* 0xbc */ 4887 "Optima", /* 0xbc */
4874 "Optima Prime", /* 0xbd */ 4888 "OptimaEEE", /* 0xbd */
4875 "Optima 2", /* 0xbe */ 4889 "Optima 2", /* 0xbe */
4876 }; 4890 };
4877 4891
diff --git a/drivers/net/ethernet/marvell/sky2.h b/drivers/net/ethernet/marvell/sky2.h
index 3c896ce80b7..615ac63ea86 100644
--- a/drivers/net/ethernet/marvell/sky2.h
+++ b/drivers/net/ethernet/marvell/sky2.h
@@ -23,6 +23,7 @@ enum {
23 PSM_CONFIG_REG3 = 0x164, 23 PSM_CONFIG_REG3 = 0x164,
24 PSM_CONFIG_REG4 = 0x168, 24 PSM_CONFIG_REG4 = 0x168,
25 25
26 PCI_LDO_CTRL = 0xbc,
26}; 27};
27 28
28/* Yukon-2 */ 29/* Yukon-2 */
@@ -586,6 +587,10 @@ enum yukon_supr_rev {
586 CHIP_REV_YU_SU_B1 = 3, 587 CHIP_REV_YU_SU_B1 = 3,
587}; 588};
588 589
590enum yukon_prm_rev {
591 CHIP_REV_YU_PRM_Z1 = 1,
592 CHIP_REV_YU_PRM_A0 = 2,
593};
589 594
590/* B2_Y2_CLK_GATE 8 bit Clock Gating (Yukon-2 only) */ 595/* B2_Y2_CLK_GATE 8 bit Clock Gating (Yukon-2 only) */
591enum { 596enum {
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index 842c8ce9494..7e94987d030 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -1080,6 +1080,25 @@ static struct mlx4_cmd_info cmd_info[] = {
1080 .verify = NULL, 1080 .verify = NULL,
1081 .wrapper = NULL 1081 .wrapper = NULL
1082 }, 1082 },
1083 /* flow steering commands */
1084 {
1085 .opcode = MLX4_QP_FLOW_STEERING_ATTACH,
1086 .has_inbox = true,
1087 .has_outbox = false,
1088 .out_is_imm = true,
1089 .encode_slave_id = false,
1090 .verify = NULL,
1091 .wrapper = mlx4_QP_FLOW_STEERING_ATTACH_wrapper
1092 },
1093 {
1094 .opcode = MLX4_QP_FLOW_STEERING_DETACH,
1095 .has_inbox = false,
1096 .has_outbox = false,
1097 .out_is_imm = false,
1098 .encode_slave_id = false,
1099 .verify = NULL,
1100 .wrapper = mlx4_QP_FLOW_STEERING_DETACH_wrapper
1101 },
1083}; 1102};
1084 1103
1085static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave, 1104static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index 72901ce2b08..dd6a77b2114 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -38,6 +38,10 @@
38#include "mlx4_en.h" 38#include "mlx4_en.h"
39#include "en_port.h" 39#include "en_port.h"
40 40
41#define EN_ETHTOOL_QP_ATTACH (1ull << 63)
42#define EN_ETHTOOL_MAC_MASK 0xffffffffffffULL
43#define EN_ETHTOOL_SHORT_MASK cpu_to_be16(0xffff)
44#define EN_ETHTOOL_WORD_MASK cpu_to_be32(0xffffffff)
41 45
42static void 46static void
43mlx4_en_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo) 47mlx4_en_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
@@ -599,16 +603,369 @@ static int mlx4_en_set_rxfh_indir(struct net_device *dev,
599 return err; 603 return err;
600} 604}
601 605
606#define all_zeros_or_all_ones(field) \
607 ((field) == 0 || (field) == (__force typeof(field))-1)
608
609static int mlx4_en_validate_flow(struct net_device *dev,
610 struct ethtool_rxnfc *cmd)
611{
612 struct ethtool_usrip4_spec *l3_mask;
613 struct ethtool_tcpip4_spec *l4_mask;
614 struct ethhdr *eth_mask;
615 u64 full_mac = ~0ull;
616 u64 zero_mac = 0;
617
618 if (cmd->fs.location >= MAX_NUM_OF_FS_RULES)
619 return -EINVAL;
620
621 switch (cmd->fs.flow_type & ~FLOW_EXT) {
622 case TCP_V4_FLOW:
623 case UDP_V4_FLOW:
624 if (cmd->fs.m_u.tcp_ip4_spec.tos)
625 return -EINVAL;
626 l4_mask = &cmd->fs.m_u.tcp_ip4_spec;
627 /* don't allow mask which isn't all 0 or 1 */
628 if (!all_zeros_or_all_ones(l4_mask->ip4src) ||
629 !all_zeros_or_all_ones(l4_mask->ip4dst) ||
630 !all_zeros_or_all_ones(l4_mask->psrc) ||
631 !all_zeros_or_all_ones(l4_mask->pdst))
632 return -EINVAL;
633 break;
634 case IP_USER_FLOW:
635 l3_mask = &cmd->fs.m_u.usr_ip4_spec;
636 if (l3_mask->l4_4_bytes || l3_mask->tos || l3_mask->proto ||
637 cmd->fs.h_u.usr_ip4_spec.ip_ver != ETH_RX_NFC_IP4 ||
638 (!l3_mask->ip4src && !l3_mask->ip4dst) ||
639 !all_zeros_or_all_ones(l3_mask->ip4src) ||
640 !all_zeros_or_all_ones(l3_mask->ip4dst))
641 return -EINVAL;
642 break;
643 case ETHER_FLOW:
644 eth_mask = &cmd->fs.m_u.ether_spec;
645 /* source mac mask must not be set */
646 if (memcmp(eth_mask->h_source, &zero_mac, ETH_ALEN))
647 return -EINVAL;
648
649 /* dest mac mask must be ff:ff:ff:ff:ff:ff */
650 if (memcmp(eth_mask->h_dest, &full_mac, ETH_ALEN))
651 return -EINVAL;
652
653 if (!all_zeros_or_all_ones(eth_mask->h_proto))
654 return -EINVAL;
655 break;
656 default:
657 return -EINVAL;
658 }
659
660 if ((cmd->fs.flow_type & FLOW_EXT)) {
661 if (cmd->fs.m_ext.vlan_etype ||
662 !(cmd->fs.m_ext.vlan_tci == 0 ||
663 cmd->fs.m_ext.vlan_tci == cpu_to_be16(0xfff)))
664 return -EINVAL;
665 }
666
667 return 0;
668}
669
670static int add_ip_rule(struct mlx4_en_priv *priv,
671 struct ethtool_rxnfc *cmd,
672 struct list_head *list_h)
673{
674 struct mlx4_spec_list *spec_l3;
675 struct ethtool_usrip4_spec *l3_mask = &cmd->fs.m_u.usr_ip4_spec;
676
677 spec_l3 = kzalloc(sizeof *spec_l3, GFP_KERNEL);
678 if (!spec_l3) {
679 en_err(priv, "Fail to alloc ethtool rule.\n");
680 return -ENOMEM;
681 }
682
683 spec_l3->id = MLX4_NET_TRANS_RULE_ID_IPV4;
684 spec_l3->ipv4.src_ip = cmd->fs.h_u.usr_ip4_spec.ip4src;
685 if (l3_mask->ip4src)
686 spec_l3->ipv4.src_ip_msk = EN_ETHTOOL_WORD_MASK;
687 spec_l3->ipv4.dst_ip = cmd->fs.h_u.usr_ip4_spec.ip4dst;
688 if (l3_mask->ip4dst)
689 spec_l3->ipv4.dst_ip_msk = EN_ETHTOOL_WORD_MASK;
690 list_add_tail(&spec_l3->list, list_h);
691
692 return 0;
693}
694
695static int add_tcp_udp_rule(struct mlx4_en_priv *priv,
696 struct ethtool_rxnfc *cmd,
697 struct list_head *list_h, int proto)
698{
699 struct mlx4_spec_list *spec_l3;
700 struct mlx4_spec_list *spec_l4;
701 struct ethtool_tcpip4_spec *l4_mask = &cmd->fs.m_u.tcp_ip4_spec;
702
703 spec_l3 = kzalloc(sizeof *spec_l3, GFP_KERNEL);
704 spec_l4 = kzalloc(sizeof *spec_l4, GFP_KERNEL);
705 if (!spec_l4 || !spec_l3) {
706 en_err(priv, "Fail to alloc ethtool rule.\n");
707 kfree(spec_l3);
708 kfree(spec_l4);
709 return -ENOMEM;
710 }
711
712 spec_l3->id = MLX4_NET_TRANS_RULE_ID_IPV4;
713
714 if (proto == TCP_V4_FLOW) {
715 spec_l4->id = MLX4_NET_TRANS_RULE_ID_TCP;
716 spec_l3->ipv4.src_ip = cmd->fs.h_u.tcp_ip4_spec.ip4src;
717 spec_l3->ipv4.dst_ip = cmd->fs.h_u.tcp_ip4_spec.ip4dst;
718 spec_l4->tcp_udp.src_port = cmd->fs.h_u.tcp_ip4_spec.psrc;
719 spec_l4->tcp_udp.dst_port = cmd->fs.h_u.tcp_ip4_spec.pdst;
720 } else {
721 spec_l4->id = MLX4_NET_TRANS_RULE_ID_UDP;
722 spec_l3->ipv4.src_ip = cmd->fs.h_u.udp_ip4_spec.ip4src;
723 spec_l3->ipv4.dst_ip = cmd->fs.h_u.udp_ip4_spec.ip4dst;
724 spec_l4->tcp_udp.src_port = cmd->fs.h_u.udp_ip4_spec.psrc;
725 spec_l4->tcp_udp.dst_port = cmd->fs.h_u.udp_ip4_spec.pdst;
726 }
727
728 if (l4_mask->ip4src)
729 spec_l3->ipv4.src_ip_msk = EN_ETHTOOL_WORD_MASK;
730 if (l4_mask->ip4dst)
731 spec_l3->ipv4.dst_ip_msk = EN_ETHTOOL_WORD_MASK;
732
733 if (l4_mask->psrc)
734 spec_l4->tcp_udp.src_port_msk = EN_ETHTOOL_SHORT_MASK;
735 if (l4_mask->pdst)
736 spec_l4->tcp_udp.dst_port_msk = EN_ETHTOOL_SHORT_MASK;
737
738 list_add_tail(&spec_l3->list, list_h);
739 list_add_tail(&spec_l4->list, list_h);
740
741 return 0;
742}
743
744static int mlx4_en_ethtool_to_net_trans_rule(struct net_device *dev,
745 struct ethtool_rxnfc *cmd,
746 struct list_head *rule_list_h)
747{
748 int err;
749 u64 mac;
750 __be64 be_mac;
751 struct ethhdr *eth_spec;
752 struct mlx4_en_priv *priv = netdev_priv(dev);
753 struct mlx4_spec_list *spec_l2;
754 __be64 mac_msk = cpu_to_be64(EN_ETHTOOL_MAC_MASK << 16);
755
756 err = mlx4_en_validate_flow(dev, cmd);
757 if (err)
758 return err;
759
760 spec_l2 = kzalloc(sizeof *spec_l2, GFP_KERNEL);
761 if (!spec_l2)
762 return -ENOMEM;
763
764 mac = priv->mac & EN_ETHTOOL_MAC_MASK;
765 be_mac = cpu_to_be64(mac << 16);
766
767 spec_l2->id = MLX4_NET_TRANS_RULE_ID_ETH;
768 memcpy(spec_l2->eth.dst_mac_msk, &mac_msk, ETH_ALEN);
769 if ((cmd->fs.flow_type & ~FLOW_EXT) != ETHER_FLOW)
770 memcpy(spec_l2->eth.dst_mac, &be_mac, ETH_ALEN);
771
772 if ((cmd->fs.flow_type & FLOW_EXT) && cmd->fs.m_ext.vlan_tci) {
773 spec_l2->eth.vlan_id = cmd->fs.h_ext.vlan_tci;
774 spec_l2->eth.vlan_id_msk = cpu_to_be16(0xfff);
775 }
776
777 list_add_tail(&spec_l2->list, rule_list_h);
778
779 switch (cmd->fs.flow_type & ~FLOW_EXT) {
780 case ETHER_FLOW:
781 eth_spec = &cmd->fs.h_u.ether_spec;
782 memcpy(&spec_l2->eth.dst_mac, eth_spec->h_dest, ETH_ALEN);
783 spec_l2->eth.ether_type = eth_spec->h_proto;
784 if (eth_spec->h_proto)
785 spec_l2->eth.ether_type_enable = 1;
786 break;
787 case IP_USER_FLOW:
788 err = add_ip_rule(priv, cmd, rule_list_h);
789 break;
790 case TCP_V4_FLOW:
791 err = add_tcp_udp_rule(priv, cmd, rule_list_h, TCP_V4_FLOW);
792 break;
793 case UDP_V4_FLOW:
794 err = add_tcp_udp_rule(priv, cmd, rule_list_h, UDP_V4_FLOW);
795 break;
796 }
797
798 return err;
799}
800
801static int mlx4_en_flow_replace(struct net_device *dev,
802 struct ethtool_rxnfc *cmd)
803{
804 int err;
805 struct mlx4_en_priv *priv = netdev_priv(dev);
806 struct ethtool_flow_id *loc_rule;
807 struct mlx4_spec_list *spec, *tmp_spec;
808 u32 qpn;
809 u64 reg_id;
810
811 struct mlx4_net_trans_rule rule = {
812 .queue_mode = MLX4_NET_TRANS_Q_FIFO,
813 .exclusive = 0,
814 .allow_loopback = 1,
815 .promisc_mode = MLX4_FS_PROMISC_NONE,
816 };
817
818 rule.port = priv->port;
819 rule.priority = MLX4_DOMAIN_ETHTOOL | cmd->fs.location;
820 INIT_LIST_HEAD(&rule.list);
821
822 /* Allow direct QP attaches if the EN_ETHTOOL_QP_ATTACH flag is set */
823 if (cmd->fs.ring_cookie == RX_CLS_FLOW_DISC)
824 qpn = priv->drop_qp.qpn;
825 else if (cmd->fs.ring_cookie & EN_ETHTOOL_QP_ATTACH) {
826 qpn = cmd->fs.ring_cookie & (EN_ETHTOOL_QP_ATTACH - 1);
827 } else {
828 if (cmd->fs.ring_cookie >= priv->rx_ring_num) {
829 en_warn(priv, "rxnfc: RX ring (%llu) doesn't exist.\n",
830 cmd->fs.ring_cookie);
831 return -EINVAL;
832 }
833 qpn = priv->rss_map.qps[cmd->fs.ring_cookie].qpn;
834 if (!qpn) {
835 en_warn(priv, "rxnfc: RX ring (%llu) is inactive.\n",
836 cmd->fs.ring_cookie);
837 return -EINVAL;
838 }
839 }
840 rule.qpn = qpn;
841 err = mlx4_en_ethtool_to_net_trans_rule(dev, cmd, &rule.list);
842 if (err)
843 goto out_free_list;
844
845 loc_rule = &priv->ethtool_rules[cmd->fs.location];
846 if (loc_rule->id) {
847 err = mlx4_flow_detach(priv->mdev->dev, loc_rule->id);
848 if (err) {
849 en_err(priv, "Fail to detach network rule at location %d. registration id = %llx\n",
850 cmd->fs.location, loc_rule->id);
851 goto out_free_list;
852 }
853 loc_rule->id = 0;
854 memset(&loc_rule->flow_spec, 0,
855 sizeof(struct ethtool_rx_flow_spec));
856 }
857 err = mlx4_flow_attach(priv->mdev->dev, &rule, &reg_id);
858 if (err) {
859 en_err(priv, "Fail to attach network rule at location %d.\n",
860 cmd->fs.location);
861 goto out_free_list;
862 }
863 loc_rule->id = reg_id;
864 memcpy(&loc_rule->flow_spec, &cmd->fs,
865 sizeof(struct ethtool_rx_flow_spec));
866
867out_free_list:
868 list_for_each_entry_safe(spec, tmp_spec, &rule.list, list) {
869 list_del(&spec->list);
870 kfree(spec);
871 }
872 return err;
873}
874
875static int mlx4_en_flow_detach(struct net_device *dev,
876 struct ethtool_rxnfc *cmd)
877{
878 int err = 0;
879 struct ethtool_flow_id *rule;
880 struct mlx4_en_priv *priv = netdev_priv(dev);
881
882 if (cmd->fs.location >= MAX_NUM_OF_FS_RULES)
883 return -EINVAL;
884
885 rule = &priv->ethtool_rules[cmd->fs.location];
886 if (!rule->id) {
887 err = -ENOENT;
888 goto out;
889 }
890
891 err = mlx4_flow_detach(priv->mdev->dev, rule->id);
892 if (err) {
893 en_err(priv, "Fail to detach network rule at location %d. registration id = 0x%llx\n",
894 cmd->fs.location, rule->id);
895 goto out;
896 }
897 rule->id = 0;
898 memset(&rule->flow_spec, 0, sizeof(struct ethtool_rx_flow_spec));
899out:
900 return err;
901
902}
903
904static int mlx4_en_get_flow(struct net_device *dev, struct ethtool_rxnfc *cmd,
905 int loc)
906{
907 int err = 0;
908 struct ethtool_flow_id *rule;
909 struct mlx4_en_priv *priv = netdev_priv(dev);
910
911 if (loc < 0 || loc >= MAX_NUM_OF_FS_RULES)
912 return -EINVAL;
913
914 rule = &priv->ethtool_rules[loc];
915 if (rule->id)
916 memcpy(&cmd->fs, &rule->flow_spec,
917 sizeof(struct ethtool_rx_flow_spec));
918 else
919 err = -ENOENT;
920
921 return err;
922}
923
924static int mlx4_en_get_num_flows(struct mlx4_en_priv *priv)
925{
926
927 int i, res = 0;
928 for (i = 0; i < MAX_NUM_OF_FS_RULES; i++) {
929 if (priv->ethtool_rules[i].id)
930 res++;
931 }
932 return res;
933
934}
935
602static int mlx4_en_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, 936static int mlx4_en_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
603 u32 *rule_locs) 937 u32 *rule_locs)
604{ 938{
605 struct mlx4_en_priv *priv = netdev_priv(dev); 939 struct mlx4_en_priv *priv = netdev_priv(dev);
940 struct mlx4_en_dev *mdev = priv->mdev;
606 int err = 0; 941 int err = 0;
942 int i = 0, priority = 0;
943
944 if ((cmd->cmd == ETHTOOL_GRXCLSRLCNT ||
945 cmd->cmd == ETHTOOL_GRXCLSRULE ||
946 cmd->cmd == ETHTOOL_GRXCLSRLALL) &&
947 mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED)
948 return -EINVAL;
607 949
608 switch (cmd->cmd) { 950 switch (cmd->cmd) {
609 case ETHTOOL_GRXRINGS: 951 case ETHTOOL_GRXRINGS:
610 cmd->data = priv->rx_ring_num; 952 cmd->data = priv->rx_ring_num;
611 break; 953 break;
954 case ETHTOOL_GRXCLSRLCNT:
955 cmd->rule_cnt = mlx4_en_get_num_flows(priv);
956 break;
957 case ETHTOOL_GRXCLSRULE:
958 err = mlx4_en_get_flow(dev, cmd, cmd->fs.location);
959 break;
960 case ETHTOOL_GRXCLSRLALL:
961 while ((!err || err == -ENOENT) && priority < cmd->rule_cnt) {
962 err = mlx4_en_get_flow(dev, cmd, i);
963 if (!err)
964 rule_locs[priority++] = i;
965 i++;
966 }
967 err = 0;
968 break;
612 default: 969 default:
613 err = -EOPNOTSUPP; 970 err = -EOPNOTSUPP;
614 break; 971 break;
@@ -617,6 +974,30 @@ static int mlx4_en_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
617 return err; 974 return err;
618} 975}
619 976
977static int mlx4_en_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
978{
979 int err = 0;
980 struct mlx4_en_priv *priv = netdev_priv(dev);
981 struct mlx4_en_dev *mdev = priv->mdev;
982
983 if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED)
984 return -EINVAL;
985
986 switch (cmd->cmd) {
987 case ETHTOOL_SRXCLSRLINS:
988 err = mlx4_en_flow_replace(dev, cmd);
989 break;
990 case ETHTOOL_SRXCLSRLDEL:
991 err = mlx4_en_flow_detach(dev, cmd);
992 break;
993 default:
994 en_warn(priv, "Unsupported ethtool command. (%d)\n", cmd->cmd);
995 return -EINVAL;
996 }
997
998 return err;
999}
1000
620const struct ethtool_ops mlx4_en_ethtool_ops = { 1001const struct ethtool_ops mlx4_en_ethtool_ops = {
621 .get_drvinfo = mlx4_en_get_drvinfo, 1002 .get_drvinfo = mlx4_en_get_drvinfo,
622 .get_settings = mlx4_en_get_settings, 1003 .get_settings = mlx4_en_get_settings,
@@ -637,6 +1018,7 @@ const struct ethtool_ops mlx4_en_ethtool_ops = {
637 .get_ringparam = mlx4_en_get_ringparam, 1018 .get_ringparam = mlx4_en_get_ringparam,
638 .set_ringparam = mlx4_en_set_ringparam, 1019 .set_ringparam = mlx4_en_set_ringparam,
639 .get_rxnfc = mlx4_en_get_rxnfc, 1020 .get_rxnfc = mlx4_en_get_rxnfc,
1021 .set_rxnfc = mlx4_en_set_rxnfc,
640 .get_rxfh_indir_size = mlx4_en_get_rxfh_indir_size, 1022 .get_rxfh_indir_size = mlx4_en_get_rxfh_indir_size,
641 .get_rxfh_indir = mlx4_en_get_rxfh_indir, 1023 .get_rxfh_indir = mlx4_en_get_rxfh_indir,
642 .set_rxfh_indir = mlx4_en_set_rxfh_indir, 1024 .set_rxfh_indir = mlx4_en_set_rxfh_indir,
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 073b85b45fc..94375a8c6d4 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -170,33 +170,81 @@ static void mlx4_en_do_set_mac(struct work_struct *work)
170static void mlx4_en_clear_list(struct net_device *dev) 170static void mlx4_en_clear_list(struct net_device *dev)
171{ 171{
172 struct mlx4_en_priv *priv = netdev_priv(dev); 172 struct mlx4_en_priv *priv = netdev_priv(dev);
173 struct mlx4_en_mc_list *tmp, *mc_to_del;
173 174
174 kfree(priv->mc_addrs); 175 list_for_each_entry_safe(mc_to_del, tmp, &priv->mc_list, list) {
175 priv->mc_addrs = NULL; 176 list_del(&mc_to_del->list);
176 priv->mc_addrs_cnt = 0; 177 kfree(mc_to_del);
178 }
177} 179}
178 180
179static void mlx4_en_cache_mclist(struct net_device *dev) 181static void mlx4_en_cache_mclist(struct net_device *dev)
180{ 182{
181 struct mlx4_en_priv *priv = netdev_priv(dev); 183 struct mlx4_en_priv *priv = netdev_priv(dev);
182 struct netdev_hw_addr *ha; 184 struct netdev_hw_addr *ha;
183 char *mc_addrs; 185 struct mlx4_en_mc_list *tmp;
184 int mc_addrs_cnt = netdev_mc_count(dev);
185 int i;
186 186
187 mc_addrs = kmalloc(mc_addrs_cnt * ETH_ALEN, GFP_ATOMIC);
188 if (!mc_addrs) {
189 en_err(priv, "failed to allocate multicast list\n");
190 return;
191 }
192 i = 0;
193 netdev_for_each_mc_addr(ha, dev)
194 memcpy(mc_addrs + i++ * ETH_ALEN, ha->addr, ETH_ALEN);
195 mlx4_en_clear_list(dev); 187 mlx4_en_clear_list(dev);
196 priv->mc_addrs = mc_addrs; 188 netdev_for_each_mc_addr(ha, dev) {
197 priv->mc_addrs_cnt = mc_addrs_cnt; 189 tmp = kzalloc(sizeof(struct mlx4_en_mc_list), GFP_ATOMIC);
190 if (!tmp) {
191 en_err(priv, "failed to allocate multicast list\n");
192 mlx4_en_clear_list(dev);
193 return;
194 }
195 memcpy(tmp->addr, ha->addr, ETH_ALEN);
196 list_add_tail(&tmp->list, &priv->mc_list);
197 }
198} 198}
199 199
200static void update_mclist_flags(struct mlx4_en_priv *priv,
201 struct list_head *dst,
202 struct list_head *src)
203{
204 struct mlx4_en_mc_list *dst_tmp, *src_tmp, *new_mc;
205 bool found;
206
207 /* Find all the entries that should be removed from dst,
208 * These are the entries that are not found in src
209 */
210 list_for_each_entry(dst_tmp, dst, list) {
211 found = false;
212 list_for_each_entry(src_tmp, src, list) {
213 if (!memcmp(dst_tmp->addr, src_tmp->addr, ETH_ALEN)) {
214 found = true;
215 break;
216 }
217 }
218 if (!found)
219 dst_tmp->action = MCLIST_REM;
220 }
221
222 /* Add entries that exist in src but not in dst
223 * mark them as need to add
224 */
225 list_for_each_entry(src_tmp, src, list) {
226 found = false;
227 list_for_each_entry(dst_tmp, dst, list) {
228 if (!memcmp(dst_tmp->addr, src_tmp->addr, ETH_ALEN)) {
229 dst_tmp->action = MCLIST_NONE;
230 found = true;
231 break;
232 }
233 }
234 if (!found) {
235 new_mc = kmalloc(sizeof(struct mlx4_en_mc_list),
236 GFP_KERNEL);
237 if (!new_mc) {
238 en_err(priv, "Failed to allocate current multicast list\n");
239 return;
240 }
241 memcpy(new_mc, src_tmp,
242 sizeof(struct mlx4_en_mc_list));
243 new_mc->action = MCLIST_ADD;
244 list_add_tail(&new_mc->list, dst);
245 }
246 }
247}
200 248
201static void mlx4_en_set_multicast(struct net_device *dev) 249static void mlx4_en_set_multicast(struct net_device *dev)
202{ 250{
@@ -214,9 +262,10 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
214 mcast_task); 262 mcast_task);
215 struct mlx4_en_dev *mdev = priv->mdev; 263 struct mlx4_en_dev *mdev = priv->mdev;
216 struct net_device *dev = priv->dev; 264 struct net_device *dev = priv->dev;
265 struct mlx4_en_mc_list *mclist, *tmp;
217 u64 mcast_addr = 0; 266 u64 mcast_addr = 0;
218 u8 mc_list[16] = {0}; 267 u8 mc_list[16] = {0};
219 int err; 268 int err = 0;
220 269
221 mutex_lock(&mdev->state_lock); 270 mutex_lock(&mdev->state_lock);
222 if (!mdev->device_up) { 271 if (!mdev->device_up) {
@@ -251,16 +300,46 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
251 priv->flags |= MLX4_EN_FLAG_PROMISC; 300 priv->flags |= MLX4_EN_FLAG_PROMISC;
252 301
253 /* Enable promiscouos mode */ 302 /* Enable promiscouos mode */
254 if (!(mdev->dev->caps.flags & 303 switch (mdev->dev->caps.steering_mode) {
255 MLX4_DEV_CAP_FLAG_VEP_UC_STEER)) 304 case MLX4_STEERING_MODE_DEVICE_MANAGED:
256 err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, 305 err = mlx4_flow_steer_promisc_add(mdev->dev,
257 priv->base_qpn, 1); 306 priv->port,
258 else 307 priv->base_qpn,
259 err = mlx4_unicast_promisc_add(mdev->dev, priv->base_qpn, 308 MLX4_FS_PROMISC_UPLINK);
309 if (err)
310 en_err(priv, "Failed enabling promiscuous mode\n");
311 priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
312 break;
313
314 case MLX4_STEERING_MODE_B0:
315 err = mlx4_unicast_promisc_add(mdev->dev,
316 priv->base_qpn,
260 priv->port); 317 priv->port);
261 if (err) 318 if (err)
262 en_err(priv, "Failed enabling " 319 en_err(priv, "Failed enabling unicast promiscuous mode\n");
263 "promiscuous mode\n"); 320
321 /* Add the default qp number as multicast
322 * promisc
323 */
324 if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
325 err = mlx4_multicast_promisc_add(mdev->dev,
326 priv->base_qpn,
327 priv->port);
328 if (err)
329 en_err(priv, "Failed enabling multicast promiscuous mode\n");
330 priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
331 }
332 break;
333
334 case MLX4_STEERING_MODE_A0:
335 err = mlx4_SET_PORT_qpn_calc(mdev->dev,
336 priv->port,
337 priv->base_qpn,
338 1);
339 if (err)
340 en_err(priv, "Failed enabling promiscuous mode\n");
341 break;
342 }
264 343
265 /* Disable port multicast filter (unconditionally) */ 344 /* Disable port multicast filter (unconditionally) */
266 err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 345 err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
@@ -269,15 +348,6 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
269 en_err(priv, "Failed disabling " 348 en_err(priv, "Failed disabling "
270 "multicast filter\n"); 349 "multicast filter\n");
271 350
272 /* Add the default qp number as multicast promisc */
273 if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
274 err = mlx4_multicast_promisc_add(mdev->dev, priv->base_qpn,
275 priv->port);
276 if (err)
277 en_err(priv, "Failed entering multicast promisc mode\n");
278 priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
279 }
280
281 /* Disable port VLAN filter */ 351 /* Disable port VLAN filter */
282 err = mlx4_SET_VLAN_FLTR(mdev->dev, priv); 352 err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
283 if (err) 353 if (err)
@@ -296,22 +366,40 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
296 priv->flags &= ~MLX4_EN_FLAG_PROMISC; 366 priv->flags &= ~MLX4_EN_FLAG_PROMISC;
297 367
298 /* Disable promiscouos mode */ 368 /* Disable promiscouos mode */
299 if (!(mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER)) 369 switch (mdev->dev->caps.steering_mode) {
300 err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, 370 case MLX4_STEERING_MODE_DEVICE_MANAGED:
301 priv->base_qpn, 0); 371 err = mlx4_flow_steer_promisc_remove(mdev->dev,
302 else 372 priv->port,
303 err = mlx4_unicast_promisc_remove(mdev->dev, priv->base_qpn, 373 MLX4_FS_PROMISC_UPLINK);
374 if (err)
375 en_err(priv, "Failed disabling promiscuous mode\n");
376 priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
377 break;
378
379 case MLX4_STEERING_MODE_B0:
380 err = mlx4_unicast_promisc_remove(mdev->dev,
381 priv->base_qpn,
304 priv->port); 382 priv->port);
305 if (err) 383 if (err)
306 en_err(priv, "Failed disabling promiscuous mode\n"); 384 en_err(priv, "Failed disabling unicast promiscuous mode\n");
385 /* Disable Multicast promisc */
386 if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
387 err = mlx4_multicast_promisc_remove(mdev->dev,
388 priv->base_qpn,
389 priv->port);
390 if (err)
391 en_err(priv, "Failed disabling multicast promiscuous mode\n");
392 priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
393 }
394 break;
307 395
308 /* Disable Multicast promisc */ 396 case MLX4_STEERING_MODE_A0:
309 if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) { 397 err = mlx4_SET_PORT_qpn_calc(mdev->dev,
310 err = mlx4_multicast_promisc_remove(mdev->dev, priv->base_qpn, 398 priv->port,
311 priv->port); 399 priv->base_qpn, 0);
312 if (err) 400 if (err)
313 en_err(priv, "Failed disabling multicast promiscuous mode\n"); 401 en_err(priv, "Failed disabling promiscuous mode\n");
314 priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC; 402 break;
315 } 403 }
316 404
317 /* Enable port VLAN filter */ 405 /* Enable port VLAN filter */
@@ -329,18 +417,46 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
329 417
330 /* Add the default qp number as multicast promisc */ 418 /* Add the default qp number as multicast promisc */
331 if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) { 419 if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
332 err = mlx4_multicast_promisc_add(mdev->dev, priv->base_qpn, 420 switch (mdev->dev->caps.steering_mode) {
333 priv->port); 421 case MLX4_STEERING_MODE_DEVICE_MANAGED:
422 err = mlx4_flow_steer_promisc_add(mdev->dev,
423 priv->port,
424 priv->base_qpn,
425 MLX4_FS_PROMISC_ALL_MULTI);
426 break;
427
428 case MLX4_STEERING_MODE_B0:
429 err = mlx4_multicast_promisc_add(mdev->dev,
430 priv->base_qpn,
431 priv->port);
432 break;
433
434 case MLX4_STEERING_MODE_A0:
435 break;
436 }
334 if (err) 437 if (err)
335 en_err(priv, "Failed entering multicast promisc mode\n"); 438 en_err(priv, "Failed entering multicast promisc mode\n");
336 priv->flags |= MLX4_EN_FLAG_MC_PROMISC; 439 priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
337 } 440 }
338 } else { 441 } else {
339 int i;
340 /* Disable Multicast promisc */ 442 /* Disable Multicast promisc */
341 if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) { 443 if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
342 err = mlx4_multicast_promisc_remove(mdev->dev, priv->base_qpn, 444 switch (mdev->dev->caps.steering_mode) {
343 priv->port); 445 case MLX4_STEERING_MODE_DEVICE_MANAGED:
446 err = mlx4_flow_steer_promisc_remove(mdev->dev,
447 priv->port,
448 MLX4_FS_PROMISC_ALL_MULTI);
449 break;
450
451 case MLX4_STEERING_MODE_B0:
452 err = mlx4_multicast_promisc_remove(mdev->dev,
453 priv->base_qpn,
454 priv->port);
455 break;
456
457 case MLX4_STEERING_MODE_A0:
458 break;
459 }
344 if (err) 460 if (err)
345 en_err(priv, "Failed disabling multicast promiscuous mode\n"); 461 en_err(priv, "Failed disabling multicast promiscuous mode\n");
346 priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC; 462 priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
@@ -351,13 +467,6 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
351 if (err) 467 if (err)
352 en_err(priv, "Failed disabling multicast filter\n"); 468 en_err(priv, "Failed disabling multicast filter\n");
353 469
354 /* Detach our qp from all the multicast addresses */
355 for (i = 0; i < priv->mc_addrs_cnt; i++) {
356 memcpy(&mc_list[10], priv->mc_addrs + i * ETH_ALEN, ETH_ALEN);
357 mc_list[5] = priv->port;
358 mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp,
359 mc_list, MLX4_PROT_ETH);
360 }
361 /* Flush mcast filter and init it with broadcast address */ 470 /* Flush mcast filter and init it with broadcast address */
362 mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, ETH_BCAST, 471 mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, ETH_BCAST,
363 1, MLX4_MCAST_CONFIG); 472 1, MLX4_MCAST_CONFIG);
@@ -367,13 +476,8 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
367 netif_tx_lock_bh(dev); 476 netif_tx_lock_bh(dev);
368 mlx4_en_cache_mclist(dev); 477 mlx4_en_cache_mclist(dev);
369 netif_tx_unlock_bh(dev); 478 netif_tx_unlock_bh(dev);
370 for (i = 0; i < priv->mc_addrs_cnt; i++) { 479 list_for_each_entry(mclist, &priv->mc_list, list) {
371 mcast_addr = 480 mcast_addr = mlx4_en_mac_to_u64(mclist->addr);
372 mlx4_en_mac_to_u64(priv->mc_addrs + i * ETH_ALEN);
373 memcpy(&mc_list[10], priv->mc_addrs + i * ETH_ALEN, ETH_ALEN);
374 mc_list[5] = priv->port;
375 mlx4_multicast_attach(mdev->dev, &priv->rss_map.indir_qp,
376 mc_list, 0, MLX4_PROT_ETH);
377 mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 481 mlx4_SET_MCAST_FLTR(mdev->dev, priv->port,
378 mcast_addr, 0, MLX4_MCAST_CONFIG); 482 mcast_addr, 0, MLX4_MCAST_CONFIG);
379 } 483 }
@@ -381,6 +485,42 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
381 0, MLX4_MCAST_ENABLE); 485 0, MLX4_MCAST_ENABLE);
382 if (err) 486 if (err)
383 en_err(priv, "Failed enabling multicast filter\n"); 487 en_err(priv, "Failed enabling multicast filter\n");
488
489 update_mclist_flags(priv, &priv->curr_list, &priv->mc_list);
490 list_for_each_entry_safe(mclist, tmp, &priv->curr_list, list) {
491 if (mclist->action == MCLIST_REM) {
492 /* detach this address and delete from list */
493 memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
494 mc_list[5] = priv->port;
495 err = mlx4_multicast_detach(mdev->dev,
496 &priv->rss_map.indir_qp,
497 mc_list,
498 MLX4_PROT_ETH,
499 mclist->reg_id);
500 if (err)
501 en_err(priv, "Fail to detach multicast address\n");
502
503 /* remove from list */
504 list_del(&mclist->list);
505 kfree(mclist);
506 }
507
508 if (mclist->action == MCLIST_ADD) {
509 /* attach the address */
510 memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
511 /* needed for B0 steering support */
512 mc_list[5] = priv->port;
513 err = mlx4_multicast_attach(mdev->dev,
514 &priv->rss_map.indir_qp,
515 mc_list,
516 priv->port, 0,
517 MLX4_PROT_ETH,
518 &mclist->reg_id);
519 if (err)
520 en_err(priv, "Fail to attach multicast address\n");
521
522 }
523 }
384 } 524 }
385out: 525out:
386 mutex_unlock(&mdev->state_lock); 526 mutex_unlock(&mdev->state_lock);
@@ -605,6 +745,9 @@ int mlx4_en_start_port(struct net_device *dev)
605 return 0; 745 return 0;
606 } 746 }
607 747
748 INIT_LIST_HEAD(&priv->mc_list);
749 INIT_LIST_HEAD(&priv->curr_list);
750
608 /* Calculate Rx buf size */ 751 /* Calculate Rx buf size */
609 dev->mtu = min(dev->mtu, priv->max_mtu); 752 dev->mtu = min(dev->mtu, priv->max_mtu);
610 mlx4_en_calc_rx_buf(dev); 753 mlx4_en_calc_rx_buf(dev);
@@ -653,6 +796,10 @@ int mlx4_en_start_port(struct net_device *dev)
653 goto mac_err; 796 goto mac_err;
654 } 797 }
655 798
799 err = mlx4_en_create_drop_qp(priv);
800 if (err)
801 goto rss_err;
802
656 /* Configure tx cq's and rings */ 803 /* Configure tx cq's and rings */
657 for (i = 0; i < priv->tx_ring_num; i++) { 804 for (i = 0; i < priv->tx_ring_num; i++) {
658 /* Configure cq */ 805 /* Configure cq */
@@ -720,13 +867,23 @@ int mlx4_en_start_port(struct net_device *dev)
720 867
721 /* Attach rx QP to bradcast address */ 868 /* Attach rx QP to bradcast address */
722 memset(&mc_list[10], 0xff, ETH_ALEN); 869 memset(&mc_list[10], 0xff, ETH_ALEN);
723 mc_list[5] = priv->port; 870 mc_list[5] = priv->port; /* needed for B0 steering support */
724 if (mlx4_multicast_attach(mdev->dev, &priv->rss_map.indir_qp, mc_list, 871 if (mlx4_multicast_attach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
725 0, MLX4_PROT_ETH)) 872 priv->port, 0, MLX4_PROT_ETH,
873 &priv->broadcast_id))
726 mlx4_warn(mdev, "Failed Attaching Broadcast\n"); 874 mlx4_warn(mdev, "Failed Attaching Broadcast\n");
727 875
728 /* Must redo promiscuous mode setup. */ 876 /* Must redo promiscuous mode setup. */
729 priv->flags &= ~(MLX4_EN_FLAG_PROMISC | MLX4_EN_FLAG_MC_PROMISC); 877 priv->flags &= ~(MLX4_EN_FLAG_PROMISC | MLX4_EN_FLAG_MC_PROMISC);
878 if (mdev->dev->caps.steering_mode ==
879 MLX4_STEERING_MODE_DEVICE_MANAGED) {
880 mlx4_flow_steer_promisc_remove(mdev->dev,
881 priv->port,
882 MLX4_FS_PROMISC_UPLINK);
883 mlx4_flow_steer_promisc_remove(mdev->dev,
884 priv->port,
885 MLX4_FS_PROMISC_ALL_MULTI);
886 }
730 887
731 /* Schedule multicast task to populate multicast list */ 888 /* Schedule multicast task to populate multicast list */
732 queue_work(mdev->workqueue, &priv->mcast_task); 889 queue_work(mdev->workqueue, &priv->mcast_task);
@@ -742,7 +899,8 @@ tx_err:
742 mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[tx_index]); 899 mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[tx_index]);
743 mlx4_en_deactivate_cq(priv, &priv->tx_cq[tx_index]); 900 mlx4_en_deactivate_cq(priv, &priv->tx_cq[tx_index]);
744 } 901 }
745 902 mlx4_en_destroy_drop_qp(priv);
903rss_err:
746 mlx4_en_release_rss_steer(priv); 904 mlx4_en_release_rss_steer(priv);
747mac_err: 905mac_err:
748 mlx4_put_eth_qp(mdev->dev, priv->port, priv->mac, priv->base_qpn); 906 mlx4_put_eth_qp(mdev->dev, priv->port, priv->mac, priv->base_qpn);
@@ -760,6 +918,7 @@ void mlx4_en_stop_port(struct net_device *dev)
760{ 918{
761 struct mlx4_en_priv *priv = netdev_priv(dev); 919 struct mlx4_en_priv *priv = netdev_priv(dev);
762 struct mlx4_en_dev *mdev = priv->mdev; 920 struct mlx4_en_dev *mdev = priv->mdev;
921 struct mlx4_en_mc_list *mclist, *tmp;
763 int i; 922 int i;
764 u8 mc_list[16] = {0}; 923 u8 mc_list[16] = {0};
765 924
@@ -778,19 +937,26 @@ void mlx4_en_stop_port(struct net_device *dev)
778 937
779 /* Detach All multicasts */ 938 /* Detach All multicasts */
780 memset(&mc_list[10], 0xff, ETH_ALEN); 939 memset(&mc_list[10], 0xff, ETH_ALEN);
781 mc_list[5] = priv->port; 940 mc_list[5] = priv->port; /* needed for B0 steering support */
782 mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp, mc_list, 941 mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
783 MLX4_PROT_ETH); 942 MLX4_PROT_ETH, priv->broadcast_id);
784 for (i = 0; i < priv->mc_addrs_cnt; i++) { 943 list_for_each_entry(mclist, &priv->curr_list, list) {
785 memcpy(&mc_list[10], priv->mc_addrs + i * ETH_ALEN, ETH_ALEN); 944 memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
786 mc_list[5] = priv->port; 945 mc_list[5] = priv->port;
787 mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp, 946 mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp,
788 mc_list, MLX4_PROT_ETH); 947 mc_list, MLX4_PROT_ETH, mclist->reg_id);
789 } 948 }
790 mlx4_en_clear_list(dev); 949 mlx4_en_clear_list(dev);
950 list_for_each_entry_safe(mclist, tmp, &priv->curr_list, list) {
951 list_del(&mclist->list);
952 kfree(mclist);
953 }
954
791 /* Flush multicast filter */ 955 /* Flush multicast filter */
792 mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 1, MLX4_MCAST_CONFIG); 956 mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 1, MLX4_MCAST_CONFIG);
793 957
958 mlx4_en_destroy_drop_qp(priv);
959
794 /* Free TX Rings */ 960 /* Free TX Rings */
795 for (i = 0; i < priv->tx_ring_num; i++) { 961 for (i = 0; i < priv->tx_ring_num; i++) {
796 mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[i]); 962 mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[i]);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index d49a7ac3187..a04cbf767eb 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -844,6 +844,36 @@ out:
844 return err; 844 return err;
845} 845}
846 846
847int mlx4_en_create_drop_qp(struct mlx4_en_priv *priv)
848{
849 int err;
850 u32 qpn;
851
852 err = mlx4_qp_reserve_range(priv->mdev->dev, 1, 1, &qpn);
853 if (err) {
854 en_err(priv, "Failed reserving drop qpn\n");
855 return err;
856 }
857 err = mlx4_qp_alloc(priv->mdev->dev, qpn, &priv->drop_qp);
858 if (err) {
859 en_err(priv, "Failed allocating drop qp\n");
860 mlx4_qp_release_range(priv->mdev->dev, qpn, 1);
861 return err;
862 }
863
864 return 0;
865}
866
867void mlx4_en_destroy_drop_qp(struct mlx4_en_priv *priv)
868{
869 u32 qpn;
870
871 qpn = priv->drop_qp.qpn;
872 mlx4_qp_remove(priv->mdev->dev, &priv->drop_qp);
873 mlx4_qp_free(priv->mdev->dev, &priv->drop_qp);
874 mlx4_qp_release_range(priv->mdev->dev, qpn, 1);
875}
876
847/* Allocate rx qp's and configure them according to rss map */ 877/* Allocate rx qp's and configure them according to rss map */
848int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv) 878int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
849{ 879{
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index 9c83bb8151e..1d70657058a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -123,7 +123,8 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags)
123 static const char * const fname[] = { 123 static const char * const fname[] = {
124 [0] = "RSS support", 124 [0] = "RSS support",
125 [1] = "RSS Toeplitz Hash Function support", 125 [1] = "RSS Toeplitz Hash Function support",
126 [2] = "RSS XOR Hash Function support" 126 [2] = "RSS XOR Hash Function support",
127 [3] = "Device manage flow steering support"
127 }; 128 };
128 int i; 129 int i;
129 130
@@ -391,6 +392,8 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
391#define QUERY_DEV_CAP_RSVD_XRC_OFFSET 0x66 392#define QUERY_DEV_CAP_RSVD_XRC_OFFSET 0x66
392#define QUERY_DEV_CAP_MAX_XRC_OFFSET 0x67 393#define QUERY_DEV_CAP_MAX_XRC_OFFSET 0x67
393#define QUERY_DEV_CAP_MAX_COUNTERS_OFFSET 0x68 394#define QUERY_DEV_CAP_MAX_COUNTERS_OFFSET 0x68
395#define QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET 0x76
396#define QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET 0x77
394#define QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET 0x80 397#define QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET 0x80
395#define QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET 0x82 398#define QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET 0x82
396#define QUERY_DEV_CAP_AUX_ENTRY_SZ_OFFSET 0x84 399#define QUERY_DEV_CAP_AUX_ENTRY_SZ_OFFSET 0x84
@@ -474,6 +477,12 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
474 dev_cap->num_ports = field & 0xf; 477 dev_cap->num_ports = field & 0xf;
475 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MSG_SZ_OFFSET); 478 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MSG_SZ_OFFSET);
476 dev_cap->max_msg_sz = 1 << (field & 0x1f); 479 dev_cap->max_msg_sz = 1 << (field & 0x1f);
480 MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET);
481 if (field & 0x80)
482 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_FS_EN;
483 dev_cap->fs_log_max_ucast_qp_range_size = field & 0x1f;
484 MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET);
485 dev_cap->fs_max_num_qp_per_entry = field;
477 MLX4_GET(stat_rate, outbox, QUERY_DEV_CAP_RATE_SUPPORT_OFFSET); 486 MLX4_GET(stat_rate, outbox, QUERY_DEV_CAP_RATE_SUPPORT_OFFSET);
478 dev_cap->stat_rate_support = stat_rate; 487 dev_cap->stat_rate_support = stat_rate;
479 MLX4_GET(ext_flags, outbox, QUERY_DEV_CAP_EXT_FLAGS_OFFSET); 488 MLX4_GET(ext_flags, outbox, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
@@ -1061,6 +1070,15 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
1061#define INIT_HCA_LOG_MC_HASH_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x16) 1070#define INIT_HCA_LOG_MC_HASH_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x16)
1062#define INIT_HCA_UC_STEERING_OFFSET (INIT_HCA_MCAST_OFFSET + 0x18) 1071#define INIT_HCA_UC_STEERING_OFFSET (INIT_HCA_MCAST_OFFSET + 0x18)
1063#define INIT_HCA_LOG_MC_TABLE_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x1b) 1072#define INIT_HCA_LOG_MC_TABLE_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x1b)
1073#define INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN 0x6
1074#define INIT_HCA_FS_PARAM_OFFSET 0x1d0
1075#define INIT_HCA_FS_BASE_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x00)
1076#define INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x12)
1077#define INIT_HCA_FS_LOG_TABLE_SZ_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x1b)
1078#define INIT_HCA_FS_ETH_BITS_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x21)
1079#define INIT_HCA_FS_ETH_NUM_ADDRS_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x22)
1080#define INIT_HCA_FS_IB_BITS_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x25)
1081#define INIT_HCA_FS_IB_NUM_ADDRS_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x26)
1064#define INIT_HCA_TPT_OFFSET 0x0f0 1082#define INIT_HCA_TPT_OFFSET 0x0f0
1065#define INIT_HCA_DMPT_BASE_OFFSET (INIT_HCA_TPT_OFFSET + 0x00) 1083#define INIT_HCA_DMPT_BASE_OFFSET (INIT_HCA_TPT_OFFSET + 0x00)
1066#define INIT_HCA_LOG_MPT_SZ_OFFSET (INIT_HCA_TPT_OFFSET + 0x0b) 1084#define INIT_HCA_LOG_MPT_SZ_OFFSET (INIT_HCA_TPT_OFFSET + 0x0b)
@@ -1119,14 +1137,44 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
1119 MLX4_PUT(inbox, param->rdmarc_base, INIT_HCA_RDMARC_BASE_OFFSET); 1137 MLX4_PUT(inbox, param->rdmarc_base, INIT_HCA_RDMARC_BASE_OFFSET);
1120 MLX4_PUT(inbox, param->log_rd_per_qp, INIT_HCA_LOG_RD_OFFSET); 1138 MLX4_PUT(inbox, param->log_rd_per_qp, INIT_HCA_LOG_RD_OFFSET);
1121 1139
1122 /* multicast attributes */ 1140 /* steering attributes */
1123 1141 if (dev->caps.steering_mode ==
1124 MLX4_PUT(inbox, param->mc_base, INIT_HCA_MC_BASE_OFFSET); 1142 MLX4_STEERING_MODE_DEVICE_MANAGED) {
1125 MLX4_PUT(inbox, param->log_mc_entry_sz, INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET); 1143 *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |=
1126 MLX4_PUT(inbox, param->log_mc_hash_sz, INIT_HCA_LOG_MC_HASH_SZ_OFFSET); 1144 cpu_to_be32(1 <<
1127 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) 1145 INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN);
1128 MLX4_PUT(inbox, (u8) (1 << 3), INIT_HCA_UC_STEERING_OFFSET); 1146
1129 MLX4_PUT(inbox, param->log_mc_table_sz, INIT_HCA_LOG_MC_TABLE_SZ_OFFSET); 1147 MLX4_PUT(inbox, param->mc_base, INIT_HCA_FS_BASE_OFFSET);
1148 MLX4_PUT(inbox, param->log_mc_entry_sz,
1149 INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET);
1150 MLX4_PUT(inbox, param->log_mc_table_sz,
1151 INIT_HCA_FS_LOG_TABLE_SZ_OFFSET);
1152 /* Enable Ethernet flow steering
1153 * with udp unicast and tcp unicast
1154 */
1155 MLX4_PUT(inbox, param->fs_hash_enable_bits,
1156 INIT_HCA_FS_ETH_BITS_OFFSET);
1157 MLX4_PUT(inbox, (u16) MLX4_FS_NUM_OF_L2_ADDR,
1158 INIT_HCA_FS_ETH_NUM_ADDRS_OFFSET);
1159 /* Enable IPoIB flow steering
1160 * with udp unicast and tcp unicast
1161 */
1162 MLX4_PUT(inbox, param->fs_hash_enable_bits,
1163 INIT_HCA_FS_IB_BITS_OFFSET);
1164 MLX4_PUT(inbox, (u16) MLX4_FS_NUM_OF_L2_ADDR,
1165 INIT_HCA_FS_IB_NUM_ADDRS_OFFSET);
1166 } else {
1167 MLX4_PUT(inbox, param->mc_base, INIT_HCA_MC_BASE_OFFSET);
1168 MLX4_PUT(inbox, param->log_mc_entry_sz,
1169 INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
1170 MLX4_PUT(inbox, param->log_mc_hash_sz,
1171 INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
1172 MLX4_PUT(inbox, param->log_mc_table_sz,
1173 INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
1174 if (dev->caps.steering_mode == MLX4_STEERING_MODE_B0)
1175 MLX4_PUT(inbox, (u8) (1 << 3),
1176 INIT_HCA_UC_STEERING_OFFSET);
1177 }
1130 1178
1131 /* TPT attributes */ 1179 /* TPT attributes */
1132 1180
@@ -1188,15 +1236,24 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,
1188 MLX4_GET(param->rdmarc_base, outbox, INIT_HCA_RDMARC_BASE_OFFSET); 1236 MLX4_GET(param->rdmarc_base, outbox, INIT_HCA_RDMARC_BASE_OFFSET);
1189 MLX4_GET(param->log_rd_per_qp, outbox, INIT_HCA_LOG_RD_OFFSET); 1237 MLX4_GET(param->log_rd_per_qp, outbox, INIT_HCA_LOG_RD_OFFSET);
1190 1238
1191 /* multicast attributes */ 1239 /* steering attributes */
1240 if (dev->caps.steering_mode ==
1241 MLX4_STEERING_MODE_DEVICE_MANAGED) {
1192 1242
1193 MLX4_GET(param->mc_base, outbox, INIT_HCA_MC_BASE_OFFSET); 1243 MLX4_GET(param->mc_base, outbox, INIT_HCA_FS_BASE_OFFSET);
1194 MLX4_GET(param->log_mc_entry_sz, outbox, 1244 MLX4_GET(param->log_mc_entry_sz, outbox,
1195 INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET); 1245 INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET);
1196 MLX4_GET(param->log_mc_hash_sz, outbox, 1246 MLX4_GET(param->log_mc_table_sz, outbox,
1197 INIT_HCA_LOG_MC_HASH_SZ_OFFSET); 1247 INIT_HCA_FS_LOG_TABLE_SZ_OFFSET);
1198 MLX4_GET(param->log_mc_table_sz, outbox, 1248 } else {
1199 INIT_HCA_LOG_MC_TABLE_SZ_OFFSET); 1249 MLX4_GET(param->mc_base, outbox, INIT_HCA_MC_BASE_OFFSET);
1250 MLX4_GET(param->log_mc_entry_sz, outbox,
1251 INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
1252 MLX4_GET(param->log_mc_hash_sz, outbox,
1253 INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
1254 MLX4_GET(param->log_mc_table_sz, outbox,
1255 INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
1256 }
1200 1257
1201 /* TPT attributes */ 1258 /* TPT attributes */
1202 1259
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h
index 64c0399e4b7..83fcbbf1b16 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.h
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.h
@@ -78,6 +78,8 @@ struct mlx4_dev_cap {
78 u16 wavelength[MLX4_MAX_PORTS + 1]; 78 u16 wavelength[MLX4_MAX_PORTS + 1];
79 u64 trans_code[MLX4_MAX_PORTS + 1]; 79 u64 trans_code[MLX4_MAX_PORTS + 1];
80 u16 stat_rate_support; 80 u16 stat_rate_support;
81 int fs_log_max_ucast_qp_range_size;
82 int fs_max_num_qp_per_entry;
81 u64 flags; 83 u64 flags;
82 u64 flags2; 84 u64 flags2;
83 int reserved_uars; 85 int reserved_uars;
@@ -165,6 +167,7 @@ struct mlx4_init_hca_param {
165 u8 log_mpt_sz; 167 u8 log_mpt_sz;
166 u8 log_uar_sz; 168 u8 log_uar_sz;
167 u8 uar_page_sz; /* log pg sz in 4k chunks */ 169 u8 uar_page_sz; /* log pg sz in 4k chunks */
170 u8 fs_hash_enable_bits;
168}; 171};
169 172
170struct mlx4_init_ib_param { 173struct mlx4_init_ib_param {
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index a0313de122d..42645166bae 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -41,6 +41,7 @@
41#include <linux/slab.h> 41#include <linux/slab.h>
42#include <linux/io-mapping.h> 42#include <linux/io-mapping.h>
43#include <linux/delay.h> 43#include <linux/delay.h>
44#include <linux/netdevice.h>
44 45
45#include <linux/mlx4/device.h> 46#include <linux/mlx4/device.h>
46#include <linux/mlx4/doorbell.h> 47#include <linux/mlx4/doorbell.h>
@@ -90,7 +91,9 @@ module_param_named(log_num_mgm_entry_size,
90MODULE_PARM_DESC(log_num_mgm_entry_size, "log mgm size, that defines the num" 91MODULE_PARM_DESC(log_num_mgm_entry_size, "log mgm size, that defines the num"
91 " of qp per mcg, for example:" 92 " of qp per mcg, for example:"
92 " 10 gives 248.range: 9<=" 93 " 10 gives 248.range: 9<="
93 " log_num_mgm_entry_size <= 12"); 94 " log_num_mgm_entry_size <= 12."
95 " Not in use with device managed"
96 " flow steering");
94 97
95#define MLX4_VF (1 << 0) 98#define MLX4_VF (1 << 0)
96 99
@@ -243,7 +246,6 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
243 dev->caps.reserved_srqs = dev_cap->reserved_srqs; 246 dev->caps.reserved_srqs = dev_cap->reserved_srqs;
244 dev->caps.max_sq_desc_sz = dev_cap->max_sq_desc_sz; 247 dev->caps.max_sq_desc_sz = dev_cap->max_sq_desc_sz;
245 dev->caps.max_rq_desc_sz = dev_cap->max_rq_desc_sz; 248 dev->caps.max_rq_desc_sz = dev_cap->max_rq_desc_sz;
246 dev->caps.num_qp_per_mgm = mlx4_get_qp_per_mgm(dev);
247 /* 249 /*
248 * Subtract 1 from the limit because we need to allocate a 250 * Subtract 1 from the limit because we need to allocate a
249 * spare CQE so the HCA HW can tell the difference between an 251 * spare CQE so the HCA HW can tell the difference between an
@@ -274,6 +276,28 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
274 dev->caps.max_gso_sz = dev_cap->max_gso_sz; 276 dev->caps.max_gso_sz = dev_cap->max_gso_sz;
275 dev->caps.max_rss_tbl_sz = dev_cap->max_rss_tbl_sz; 277 dev->caps.max_rss_tbl_sz = dev_cap->max_rss_tbl_sz;
276 278
279 if (dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_FS_EN) {
280 dev->caps.steering_mode = MLX4_STEERING_MODE_DEVICE_MANAGED;
281 dev->caps.num_qp_per_mgm = dev_cap->fs_max_num_qp_per_entry;
282 dev->caps.fs_log_max_ucast_qp_range_size =
283 dev_cap->fs_log_max_ucast_qp_range_size;
284 } else {
285 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER &&
286 dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) {
287 dev->caps.steering_mode = MLX4_STEERING_MODE_B0;
288 } else {
289 dev->caps.steering_mode = MLX4_STEERING_MODE_A0;
290
291 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER ||
292 dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER)
293 mlx4_warn(dev, "Must have UC_STEER and MC_STEER flags "
294 "set to use B0 steering. Falling back to A0 steering mode.\n");
295 }
296 dev->caps.num_qp_per_mgm = mlx4_get_qp_per_mgm(dev);
297 }
298 mlx4_dbg(dev, "Steering mode is: %s\n",
299 mlx4_steering_mode_str(dev->caps.steering_mode));
300
277 /* Sense port always allowed on supported devices for ConnectX1 and 2 */ 301 /* Sense port always allowed on supported devices for ConnectX1 and 2 */
278 if (dev->pdev->device != 0x1003) 302 if (dev->pdev->device != 0x1003)
279 dev->caps.flags |= MLX4_DEV_CAP_FLAG_SENSE_SUPPORT; 303 dev->caps.flags |= MLX4_DEV_CAP_FLAG_SENSE_SUPPORT;
@@ -967,9 +991,11 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
967 } 991 }
968 992
969 /* 993 /*
970 * It's not strictly required, but for simplicity just map the 994 * For flow steering device managed mode it is required to use
971 * whole multicast group table now. The table isn't very big 995 * mlx4_init_icm_table. For B0 steering mode it's not strictly
972 * and it's a lot easier than trying to track ref counts. 996 * required, but for simplicity just map the whole multicast
997 * group table now. The table isn't very big and it's a lot
998 * easier than trying to track ref counts.
973 */ 999 */
974 err = mlx4_init_icm_table(dev, &priv->mcg_table.table, 1000 err = mlx4_init_icm_table(dev, &priv->mcg_table.table,
975 init_hca->mc_base, 1001 init_hca->mc_base,
@@ -1205,7 +1231,26 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
1205 goto err_stop_fw; 1231 goto err_stop_fw;
1206 } 1232 }
1207 1233
1234 priv->fs_hash_mode = MLX4_FS_L2_HASH;
1235
1236 switch (priv->fs_hash_mode) {
1237 case MLX4_FS_L2_HASH:
1238 init_hca.fs_hash_enable_bits = 0;
1239 break;
1240
1241 case MLX4_FS_L2_L3_L4_HASH:
1242 /* Enable flow steering with
1243 * udp unicast and tcp unicast
1244 */
1245 init_hca.fs_hash_enable_bits =
1246 MLX4_FS_UDP_UC_EN | MLX4_FS_TCP_UC_EN;
1247 break;
1248 }
1249
1208 profile = default_profile; 1250 profile = default_profile;
1251 if (dev->caps.steering_mode ==
1252 MLX4_STEERING_MODE_DEVICE_MANAGED)
1253 profile.num_mcg = MLX4_FS_NUM_MCG;
1209 1254
1210 icm_size = mlx4_make_profile(dev, &profile, &dev_cap, 1255 icm_size = mlx4_make_profile(dev, &profile, &dev_cap,
1211 &init_hca); 1256 &init_hca);
@@ -1539,8 +1584,8 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
1539 struct mlx4_priv *priv = mlx4_priv(dev); 1584 struct mlx4_priv *priv = mlx4_priv(dev);
1540 struct msix_entry *entries; 1585 struct msix_entry *entries;
1541 int nreq = min_t(int, dev->caps.num_ports * 1586 int nreq = min_t(int, dev->caps.num_ports *
1542 min_t(int, num_online_cpus() + 1, MAX_MSIX_P_PORT) 1587 min_t(int, netif_get_num_default_rss_queues() + 1,
1543 + MSIX_LEGACY_SZ, MAX_MSIX); 1588 MAX_MSIX_P_PORT) + MSIX_LEGACY_SZ, MAX_MSIX);
1544 int err; 1589 int err;
1545 int i; 1590 int i;
1546 1591
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index f4a8f98e402..bc62f536ffa 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -41,6 +41,7 @@
41 41
42#define MGM_QPN_MASK 0x00FFFFFF 42#define MGM_QPN_MASK 0x00FFFFFF
43#define MGM_BLCK_LB_BIT 30 43#define MGM_BLCK_LB_BIT 30
44#define MLX4_MAC_MASK 0xffffffffffffULL
44 45
45static const u8 zero_gid[16]; /* automatically initialized to 0 */ 46static const u8 zero_gid[16]; /* automatically initialized to 0 */
46 47
@@ -54,7 +55,12 @@ struct mlx4_mgm {
54 55
55int mlx4_get_mgm_entry_size(struct mlx4_dev *dev) 56int mlx4_get_mgm_entry_size(struct mlx4_dev *dev)
56{ 57{
57 return min((1 << mlx4_log_num_mgm_entry_size), MLX4_MAX_MGM_ENTRY_SIZE); 58 if (dev->caps.steering_mode ==
59 MLX4_STEERING_MODE_DEVICE_MANAGED)
60 return 1 << MLX4_FS_MGM_LOG_ENTRY_SIZE;
61 else
62 return min((1 << mlx4_log_num_mgm_entry_size),
63 MLX4_MAX_MGM_ENTRY_SIZE);
58} 64}
59 65
60int mlx4_get_qp_per_mgm(struct mlx4_dev *dev) 66int mlx4_get_qp_per_mgm(struct mlx4_dev *dev)
@@ -62,6 +68,35 @@ int mlx4_get_qp_per_mgm(struct mlx4_dev *dev)
62 return 4 * (mlx4_get_mgm_entry_size(dev) / 16 - 2); 68 return 4 * (mlx4_get_mgm_entry_size(dev) / 16 - 2);
63} 69}
64 70
71static int mlx4_QP_FLOW_STEERING_ATTACH(struct mlx4_dev *dev,
72 struct mlx4_cmd_mailbox *mailbox,
73 u32 size,
74 u64 *reg_id)
75{
76 u64 imm;
77 int err = 0;
78
79 err = mlx4_cmd_imm(dev, mailbox->dma, &imm, size, 0,
80 MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
81 MLX4_CMD_NATIVE);
82 if (err)
83 return err;
84 *reg_id = imm;
85
86 return err;
87}
88
89static int mlx4_QP_FLOW_STEERING_DETACH(struct mlx4_dev *dev, u64 regid)
90{
91 int err = 0;
92
93 err = mlx4_cmd(dev, regid, 0, 0,
94 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
95 MLX4_CMD_NATIVE);
96
97 return err;
98}
99
65static int mlx4_READ_ENTRY(struct mlx4_dev *dev, int index, 100static int mlx4_READ_ENTRY(struct mlx4_dev *dev, int index,
66 struct mlx4_cmd_mailbox *mailbox) 101 struct mlx4_cmd_mailbox *mailbox)
67{ 102{
@@ -614,6 +649,311 @@ static int find_entry(struct mlx4_dev *dev, u8 port,
614 return err; 649 return err;
615} 650}
616 651
652struct mlx4_net_trans_rule_hw_ctrl {
653 __be32 ctrl;
654 __be32 vf_vep_port;
655 __be32 qpn;
656 __be32 reserved;
657};
658
659static void trans_rule_ctrl_to_hw(struct mlx4_net_trans_rule *ctrl,
660 struct mlx4_net_trans_rule_hw_ctrl *hw)
661{
662 static const u8 __promisc_mode[] = {
663 [MLX4_FS_PROMISC_NONE] = 0x0,
664 [MLX4_FS_PROMISC_UPLINK] = 0x1,
665 [MLX4_FS_PROMISC_FUNCTION_PORT] = 0x2,
666 [MLX4_FS_PROMISC_ALL_MULTI] = 0x3,
667 };
668
669 u32 dw = 0;
670
671 dw = ctrl->queue_mode == MLX4_NET_TRANS_Q_LIFO ? 1 : 0;
672 dw |= ctrl->exclusive ? (1 << 2) : 0;
673 dw |= ctrl->allow_loopback ? (1 << 3) : 0;
674 dw |= __promisc_mode[ctrl->promisc_mode] << 8;
675 dw |= ctrl->priority << 16;
676
677 hw->ctrl = cpu_to_be32(dw);
678 hw->vf_vep_port = cpu_to_be32(ctrl->port);
679 hw->qpn = cpu_to_be32(ctrl->qpn);
680}
681
682struct mlx4_net_trans_rule_hw_ib {
683 u8 size;
684 u8 rsvd1;
685 __be16 id;
686 u32 rsvd2;
687 __be32 qpn;
688 __be32 qpn_mask;
689 u8 dst_gid[16];
690 u8 dst_gid_msk[16];
691} __packed;
692
693struct mlx4_net_trans_rule_hw_eth {
694 u8 size;
695 u8 rsvd;
696 __be16 id;
697 u8 rsvd1[6];
698 u8 dst_mac[6];
699 u16 rsvd2;
700 u8 dst_mac_msk[6];
701 u16 rsvd3;
702 u8 src_mac[6];
703 u16 rsvd4;
704 u8 src_mac_msk[6];
705 u8 rsvd5;
706 u8 ether_type_enable;
707 __be16 ether_type;
708 __be16 vlan_id_msk;
709 __be16 vlan_id;
710} __packed;
711
712struct mlx4_net_trans_rule_hw_tcp_udp {
713 u8 size;
714 u8 rsvd;
715 __be16 id;
716 __be16 rsvd1[3];
717 __be16 dst_port;
718 __be16 rsvd2;
719 __be16 dst_port_msk;
720 __be16 rsvd3;
721 __be16 src_port;
722 __be16 rsvd4;
723 __be16 src_port_msk;
724} __packed;
725
726struct mlx4_net_trans_rule_hw_ipv4 {
727 u8 size;
728 u8 rsvd;
729 __be16 id;
730 __be32 rsvd1;
731 __be32 dst_ip;
732 __be32 dst_ip_msk;
733 __be32 src_ip;
734 __be32 src_ip_msk;
735} __packed;
736
737struct _rule_hw {
738 union {
739 struct {
740 u8 size;
741 u8 rsvd;
742 __be16 id;
743 };
744 struct mlx4_net_trans_rule_hw_eth eth;
745 struct mlx4_net_trans_rule_hw_ib ib;
746 struct mlx4_net_trans_rule_hw_ipv4 ipv4;
747 struct mlx4_net_trans_rule_hw_tcp_udp tcp_udp;
748 };
749};
750
751static int parse_trans_rule(struct mlx4_dev *dev, struct mlx4_spec_list *spec,
752 struct _rule_hw *rule_hw)
753{
754 static const u16 __sw_id_hw[] = {
755 [MLX4_NET_TRANS_RULE_ID_ETH] = 0xE001,
756 [MLX4_NET_TRANS_RULE_ID_IB] = 0xE005,
757 [MLX4_NET_TRANS_RULE_ID_IPV6] = 0xE003,
758 [MLX4_NET_TRANS_RULE_ID_IPV4] = 0xE002,
759 [MLX4_NET_TRANS_RULE_ID_TCP] = 0xE004,
760 [MLX4_NET_TRANS_RULE_ID_UDP] = 0xE006
761 };
762
763 static const size_t __rule_hw_sz[] = {
764 [MLX4_NET_TRANS_RULE_ID_ETH] =
765 sizeof(struct mlx4_net_trans_rule_hw_eth),
766 [MLX4_NET_TRANS_RULE_ID_IB] =
767 sizeof(struct mlx4_net_trans_rule_hw_ib),
768 [MLX4_NET_TRANS_RULE_ID_IPV6] = 0,
769 [MLX4_NET_TRANS_RULE_ID_IPV4] =
770 sizeof(struct mlx4_net_trans_rule_hw_ipv4),
771 [MLX4_NET_TRANS_RULE_ID_TCP] =
772 sizeof(struct mlx4_net_trans_rule_hw_tcp_udp),
773 [MLX4_NET_TRANS_RULE_ID_UDP] =
774 sizeof(struct mlx4_net_trans_rule_hw_tcp_udp)
775 };
776 if (spec->id > MLX4_NET_TRANS_RULE_NUM) {
777 mlx4_err(dev, "Invalid network rule id. id = %d\n", spec->id);
778 return -EINVAL;
779 }
780 memset(rule_hw, 0, __rule_hw_sz[spec->id]);
781 rule_hw->id = cpu_to_be16(__sw_id_hw[spec->id]);
782 rule_hw->size = __rule_hw_sz[spec->id] >> 2;
783
784 switch (spec->id) {
785 case MLX4_NET_TRANS_RULE_ID_ETH:
786 memcpy(rule_hw->eth.dst_mac, spec->eth.dst_mac, ETH_ALEN);
787 memcpy(rule_hw->eth.dst_mac_msk, spec->eth.dst_mac_msk,
788 ETH_ALEN);
789 memcpy(rule_hw->eth.src_mac, spec->eth.src_mac, ETH_ALEN);
790 memcpy(rule_hw->eth.src_mac_msk, spec->eth.src_mac_msk,
791 ETH_ALEN);
792 if (spec->eth.ether_type_enable) {
793 rule_hw->eth.ether_type_enable = 1;
794 rule_hw->eth.ether_type = spec->eth.ether_type;
795 }
796 rule_hw->eth.vlan_id = spec->eth.vlan_id;
797 rule_hw->eth.vlan_id_msk = spec->eth.vlan_id_msk;
798 break;
799
800 case MLX4_NET_TRANS_RULE_ID_IB:
801 rule_hw->ib.qpn = spec->ib.r_qpn;
802 rule_hw->ib.qpn_mask = spec->ib.qpn_msk;
803 memcpy(&rule_hw->ib.dst_gid, &spec->ib.dst_gid, 16);
804 memcpy(&rule_hw->ib.dst_gid_msk, &spec->ib.dst_gid_msk, 16);
805 break;
806
807 case MLX4_NET_TRANS_RULE_ID_IPV6:
808 return -EOPNOTSUPP;
809
810 case MLX4_NET_TRANS_RULE_ID_IPV4:
811 rule_hw->ipv4.src_ip = spec->ipv4.src_ip;
812 rule_hw->ipv4.src_ip_msk = spec->ipv4.src_ip_msk;
813 rule_hw->ipv4.dst_ip = spec->ipv4.dst_ip;
814 rule_hw->ipv4.dst_ip_msk = spec->ipv4.dst_ip_msk;
815 break;
816
817 case MLX4_NET_TRANS_RULE_ID_TCP:
818 case MLX4_NET_TRANS_RULE_ID_UDP:
819 rule_hw->tcp_udp.dst_port = spec->tcp_udp.dst_port;
820 rule_hw->tcp_udp.dst_port_msk = spec->tcp_udp.dst_port_msk;
821 rule_hw->tcp_udp.src_port = spec->tcp_udp.src_port;
822 rule_hw->tcp_udp.src_port_msk = spec->tcp_udp.src_port_msk;
823 break;
824
825 default:
826 return -EINVAL;
827 }
828
829 return __rule_hw_sz[spec->id];
830}
831
832static void mlx4_err_rule(struct mlx4_dev *dev, char *str,
833 struct mlx4_net_trans_rule *rule)
834{
835#define BUF_SIZE 256
836 struct mlx4_spec_list *cur;
837 char buf[BUF_SIZE];
838 int len = 0;
839
840 mlx4_err(dev, "%s", str);
841 len += snprintf(buf + len, BUF_SIZE - len,
842 "port = %d prio = 0x%x qp = 0x%x ",
843 rule->port, rule->priority, rule->qpn);
844
845 list_for_each_entry(cur, &rule->list, list) {
846 switch (cur->id) {
847 case MLX4_NET_TRANS_RULE_ID_ETH:
848 len += snprintf(buf + len, BUF_SIZE - len,
849 "dmac = %pM ", &cur->eth.dst_mac);
850 if (cur->eth.ether_type)
851 len += snprintf(buf + len, BUF_SIZE - len,
852 "ethertype = 0x%x ",
853 be16_to_cpu(cur->eth.ether_type));
854 if (cur->eth.vlan_id)
855 len += snprintf(buf + len, BUF_SIZE - len,
856 "vlan-id = %d ",
857 be16_to_cpu(cur->eth.vlan_id));
858 break;
859
860 case MLX4_NET_TRANS_RULE_ID_IPV4:
861 if (cur->ipv4.src_ip)
862 len += snprintf(buf + len, BUF_SIZE - len,
863 "src-ip = %pI4 ",
864 &cur->ipv4.src_ip);
865 if (cur->ipv4.dst_ip)
866 len += snprintf(buf + len, BUF_SIZE - len,
867 "dst-ip = %pI4 ",
868 &cur->ipv4.dst_ip);
869 break;
870
871 case MLX4_NET_TRANS_RULE_ID_TCP:
872 case MLX4_NET_TRANS_RULE_ID_UDP:
873 if (cur->tcp_udp.src_port)
874 len += snprintf(buf + len, BUF_SIZE - len,
875 "src-port = %d ",
876 be16_to_cpu(cur->tcp_udp.src_port));
877 if (cur->tcp_udp.dst_port)
878 len += snprintf(buf + len, BUF_SIZE - len,
879 "dst-port = %d ",
880 be16_to_cpu(cur->tcp_udp.dst_port));
881 break;
882
883 case MLX4_NET_TRANS_RULE_ID_IB:
884 len += snprintf(buf + len, BUF_SIZE - len,
885 "dst-gid = %pI6\n", cur->ib.dst_gid);
886 len += snprintf(buf + len, BUF_SIZE - len,
887 "dst-gid-mask = %pI6\n",
888 cur->ib.dst_gid_msk);
889 break;
890
891 case MLX4_NET_TRANS_RULE_ID_IPV6:
892 break;
893
894 default:
895 break;
896 }
897 }
898 len += snprintf(buf + len, BUF_SIZE - len, "\n");
899 mlx4_err(dev, "%s", buf);
900
901 if (len >= BUF_SIZE)
902 mlx4_err(dev, "Network rule error message was truncated, print buffer is too small.\n");
903}
904
905int mlx4_flow_attach(struct mlx4_dev *dev,
906 struct mlx4_net_trans_rule *rule, u64 *reg_id)
907{
908 struct mlx4_cmd_mailbox *mailbox;
909 struct mlx4_spec_list *cur;
910 u32 size = 0;
911 int ret;
912
913 mailbox = mlx4_alloc_cmd_mailbox(dev);
914 if (IS_ERR(mailbox))
915 return PTR_ERR(mailbox);
916
917 memset(mailbox->buf, 0, sizeof(struct mlx4_net_trans_rule_hw_ctrl));
918 trans_rule_ctrl_to_hw(rule, mailbox->buf);
919
920 size += sizeof(struct mlx4_net_trans_rule_hw_ctrl);
921
922 list_for_each_entry(cur, &rule->list, list) {
923 ret = parse_trans_rule(dev, cur, mailbox->buf + size);
924 if (ret < 0) {
925 mlx4_free_cmd_mailbox(dev, mailbox);
926 return -EINVAL;
927 }
928 size += ret;
929 }
930
931 ret = mlx4_QP_FLOW_STEERING_ATTACH(dev, mailbox, size >> 2, reg_id);
932 if (ret == -ENOMEM)
933 mlx4_err_rule(dev,
934 "mcg table is full. Fail to register network rule.\n",
935 rule);
936 else if (ret)
937 mlx4_err_rule(dev, "Fail to register network rule.\n", rule);
938
939 mlx4_free_cmd_mailbox(dev, mailbox);
940
941 return ret;
942}
943EXPORT_SYMBOL_GPL(mlx4_flow_attach);
944
945int mlx4_flow_detach(struct mlx4_dev *dev, u64 reg_id)
946{
947 int err;
948
949 err = mlx4_QP_FLOW_STEERING_DETACH(dev, reg_id);
950 if (err)
951 mlx4_err(dev, "Fail to detach network rule. registration id = 0x%llx\n",
952 reg_id);
953 return err;
954}
955EXPORT_SYMBOL_GPL(mlx4_flow_detach);
956
617int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], 957int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
618 int block_mcast_loopback, enum mlx4_protocol prot, 958 int block_mcast_loopback, enum mlx4_protocol prot,
619 enum mlx4_steer_type steer) 959 enum mlx4_steer_type steer)
@@ -866,49 +1206,159 @@ static int mlx4_QP_ATTACH(struct mlx4_dev *dev, struct mlx4_qp *qp,
866} 1206}
867 1207
868int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], 1208int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
869 int block_mcast_loopback, enum mlx4_protocol prot) 1209 u8 port, int block_mcast_loopback,
1210 enum mlx4_protocol prot, u64 *reg_id)
870{ 1211{
871 if (prot == MLX4_PROT_ETH &&
872 !(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
873 return 0;
874 1212
875 if (prot == MLX4_PROT_ETH) 1213 switch (dev->caps.steering_mode) {
876 gid[7] |= (MLX4_MC_STEER << 1); 1214 case MLX4_STEERING_MODE_A0:
1215 if (prot == MLX4_PROT_ETH)
1216 return 0;
1217
1218 case MLX4_STEERING_MODE_B0:
1219 if (prot == MLX4_PROT_ETH)
1220 gid[7] |= (MLX4_MC_STEER << 1);
1221
1222 if (mlx4_is_mfunc(dev))
1223 return mlx4_QP_ATTACH(dev, qp, gid, 1,
1224 block_mcast_loopback, prot);
1225 return mlx4_qp_attach_common(dev, qp, gid,
1226 block_mcast_loopback, prot,
1227 MLX4_MC_STEER);
1228
1229 case MLX4_STEERING_MODE_DEVICE_MANAGED: {
1230 struct mlx4_spec_list spec = { {NULL} };
1231 __be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);
1232
1233 struct mlx4_net_trans_rule rule = {
1234 .queue_mode = MLX4_NET_TRANS_Q_FIFO,
1235 .exclusive = 0,
1236 .promisc_mode = MLX4_FS_PROMISC_NONE,
1237 .priority = MLX4_DOMAIN_NIC,
1238 };
1239
1240 rule.allow_loopback = ~block_mcast_loopback;
1241 rule.port = port;
1242 rule.qpn = qp->qpn;
1243 INIT_LIST_HEAD(&rule.list);
1244
1245 switch (prot) {
1246 case MLX4_PROT_ETH:
1247 spec.id = MLX4_NET_TRANS_RULE_ID_ETH;
1248 memcpy(spec.eth.dst_mac, &gid[10], ETH_ALEN);
1249 memcpy(spec.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
1250 break;
877 1251
878 if (mlx4_is_mfunc(dev)) 1252 case MLX4_PROT_IB_IPV6:
879 return mlx4_QP_ATTACH(dev, qp, gid, 1, 1253 spec.id = MLX4_NET_TRANS_RULE_ID_IB;
880 block_mcast_loopback, prot); 1254 memcpy(spec.ib.dst_gid, gid, 16);
1255 memset(&spec.ib.dst_gid_msk, 0xff, 16);
1256 break;
1257 default:
1258 return -EINVAL;
1259 }
1260 list_add_tail(&spec.list, &rule.list);
881 1261
882 return mlx4_qp_attach_common(dev, qp, gid, block_mcast_loopback, 1262 return mlx4_flow_attach(dev, &rule, reg_id);
883 prot, MLX4_MC_STEER); 1263 }
1264
1265 default:
1266 return -EINVAL;
1267 }
884} 1268}
885EXPORT_SYMBOL_GPL(mlx4_multicast_attach); 1269EXPORT_SYMBOL_GPL(mlx4_multicast_attach);
886 1270
887int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], 1271int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
888 enum mlx4_protocol prot) 1272 enum mlx4_protocol prot, u64 reg_id)
889{ 1273{
890 if (prot == MLX4_PROT_ETH && 1274 switch (dev->caps.steering_mode) {
891 !(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER)) 1275 case MLX4_STEERING_MODE_A0:
892 return 0; 1276 if (prot == MLX4_PROT_ETH)
1277 return 0;
893 1278
894 if (prot == MLX4_PROT_ETH) 1279 case MLX4_STEERING_MODE_B0:
895 gid[7] |= (MLX4_MC_STEER << 1); 1280 if (prot == MLX4_PROT_ETH)
1281 gid[7] |= (MLX4_MC_STEER << 1);
896 1282
897 if (mlx4_is_mfunc(dev)) 1283 if (mlx4_is_mfunc(dev))
898 return mlx4_QP_ATTACH(dev, qp, gid, 0, 0, prot); 1284 return mlx4_QP_ATTACH(dev, qp, gid, 0, 0, prot);
1285
1286 return mlx4_qp_detach_common(dev, qp, gid, prot,
1287 MLX4_MC_STEER);
1288
1289 case MLX4_STEERING_MODE_DEVICE_MANAGED:
1290 return mlx4_flow_detach(dev, reg_id);
899 1291
900 return mlx4_qp_detach_common(dev, qp, gid, prot, MLX4_MC_STEER); 1292 default:
1293 return -EINVAL;
1294 }
901} 1295}
902EXPORT_SYMBOL_GPL(mlx4_multicast_detach); 1296EXPORT_SYMBOL_GPL(mlx4_multicast_detach);
903 1297
1298int mlx4_flow_steer_promisc_add(struct mlx4_dev *dev, u8 port,
1299 u32 qpn, enum mlx4_net_trans_promisc_mode mode)
1300{
1301 struct mlx4_net_trans_rule rule;
1302 u64 *regid_p;
1303
1304 switch (mode) {
1305 case MLX4_FS_PROMISC_UPLINK:
1306 case MLX4_FS_PROMISC_FUNCTION_PORT:
1307 regid_p = &dev->regid_promisc_array[port];
1308 break;
1309 case MLX4_FS_PROMISC_ALL_MULTI:
1310 regid_p = &dev->regid_allmulti_array[port];
1311 break;
1312 default:
1313 return -1;
1314 }
1315
1316 if (*regid_p != 0)
1317 return -1;
1318
1319 rule.promisc_mode = mode;
1320 rule.port = port;
1321 rule.qpn = qpn;
1322 INIT_LIST_HEAD(&rule.list);
1323 mlx4_err(dev, "going promisc on %x\n", port);
1324
1325 return mlx4_flow_attach(dev, &rule, regid_p);
1326}
1327EXPORT_SYMBOL_GPL(mlx4_flow_steer_promisc_add);
1328
1329int mlx4_flow_steer_promisc_remove(struct mlx4_dev *dev, u8 port,
1330 enum mlx4_net_trans_promisc_mode mode)
1331{
1332 int ret;
1333 u64 *regid_p;
1334
1335 switch (mode) {
1336 case MLX4_FS_PROMISC_UPLINK:
1337 case MLX4_FS_PROMISC_FUNCTION_PORT:
1338 regid_p = &dev->regid_promisc_array[port];
1339 break;
1340 case MLX4_FS_PROMISC_ALL_MULTI:
1341 regid_p = &dev->regid_allmulti_array[port];
1342 break;
1343 default:
1344 return -1;
1345 }
1346
1347 if (*regid_p == 0)
1348 return -1;
1349
1350 ret = mlx4_flow_detach(dev, *regid_p);
1351 if (ret == 0)
1352 *regid_p = 0;
1353
1354 return ret;
1355}
1356EXPORT_SYMBOL_GPL(mlx4_flow_steer_promisc_remove);
1357
904int mlx4_unicast_attach(struct mlx4_dev *dev, 1358int mlx4_unicast_attach(struct mlx4_dev *dev,
905 struct mlx4_qp *qp, u8 gid[16], 1359 struct mlx4_qp *qp, u8 gid[16],
906 int block_mcast_loopback, enum mlx4_protocol prot) 1360 int block_mcast_loopback, enum mlx4_protocol prot)
907{ 1361{
908 if (prot == MLX4_PROT_ETH &&
909 !(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER))
910 return 0;
911
912 if (prot == MLX4_PROT_ETH) 1362 if (prot == MLX4_PROT_ETH)
913 gid[7] |= (MLX4_UC_STEER << 1); 1363 gid[7] |= (MLX4_UC_STEER << 1);
914 1364
@@ -924,10 +1374,6 @@ EXPORT_SYMBOL_GPL(mlx4_unicast_attach);
924int mlx4_unicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, 1374int mlx4_unicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp,
925 u8 gid[16], enum mlx4_protocol prot) 1375 u8 gid[16], enum mlx4_protocol prot)
926{ 1376{
927 if (prot == MLX4_PROT_ETH &&
928 !(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER))
929 return 0;
930
931 if (prot == MLX4_PROT_ETH) 1377 if (prot == MLX4_PROT_ETH)
932 gid[7] |= (MLX4_UC_STEER << 1); 1378 gid[7] |= (MLX4_UC_STEER << 1);
933 1379
@@ -968,9 +1414,6 @@ static int mlx4_PROMISC(struct mlx4_dev *dev, u32 qpn,
968 1414
969int mlx4_multicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port) 1415int mlx4_multicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port)
970{ 1416{
971 if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
972 return 0;
973
974 if (mlx4_is_mfunc(dev)) 1417 if (mlx4_is_mfunc(dev))
975 return mlx4_PROMISC(dev, qpn, MLX4_MC_STEER, 1, port); 1418 return mlx4_PROMISC(dev, qpn, MLX4_MC_STEER, 1, port);
976 1419
@@ -980,9 +1423,6 @@ EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_add);
980 1423
981int mlx4_multicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port) 1424int mlx4_multicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port)
982{ 1425{
983 if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
984 return 0;
985
986 if (mlx4_is_mfunc(dev)) 1426 if (mlx4_is_mfunc(dev))
987 return mlx4_PROMISC(dev, qpn, MLX4_MC_STEER, 0, port); 1427 return mlx4_PROMISC(dev, qpn, MLX4_MC_STEER, 0, port);
988 1428
@@ -992,9 +1432,6 @@ EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_remove);
992 1432
993int mlx4_unicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port) 1433int mlx4_unicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port)
994{ 1434{
995 if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER))
996 return 0;
997
998 if (mlx4_is_mfunc(dev)) 1435 if (mlx4_is_mfunc(dev))
999 return mlx4_PROMISC(dev, qpn, MLX4_UC_STEER, 1, port); 1436 return mlx4_PROMISC(dev, qpn, MLX4_UC_STEER, 1, port);
1000 1437
@@ -1004,9 +1441,6 @@ EXPORT_SYMBOL_GPL(mlx4_unicast_promisc_add);
1004 1441
1005int mlx4_unicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port) 1442int mlx4_unicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port)
1006{ 1443{
1007 if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER))
1008 return 0;
1009
1010 if (mlx4_is_mfunc(dev)) 1444 if (mlx4_is_mfunc(dev))
1011 return mlx4_PROMISC(dev, qpn, MLX4_UC_STEER, 0, port); 1445 return mlx4_PROMISC(dev, qpn, MLX4_UC_STEER, 0, port);
1012 1446
@@ -1019,6 +1453,10 @@ int mlx4_init_mcg_table(struct mlx4_dev *dev)
1019 struct mlx4_priv *priv = mlx4_priv(dev); 1453 struct mlx4_priv *priv = mlx4_priv(dev);
1020 int err; 1454 int err;
1021 1455
1456 /* No need for mcg_table when fw managed the mcg table*/
1457 if (dev->caps.steering_mode ==
1458 MLX4_STEERING_MODE_DEVICE_MANAGED)
1459 return 0;
1022 err = mlx4_bitmap_init(&priv->mcg_table.bitmap, dev->caps.num_amgms, 1460 err = mlx4_bitmap_init(&priv->mcg_table.bitmap, dev->caps.num_amgms,
1023 dev->caps.num_amgms - 1, 0, 0); 1461 dev->caps.num_amgms - 1, 0, 0);
1024 if (err) 1462 if (err)
@@ -1031,5 +1469,7 @@ int mlx4_init_mcg_table(struct mlx4_dev *dev)
1031 1469
1032void mlx4_cleanup_mcg_table(struct mlx4_dev *dev) 1470void mlx4_cleanup_mcg_table(struct mlx4_dev *dev)
1033{ 1471{
1034 mlx4_bitmap_cleanup(&mlx4_priv(dev)->mcg_table.bitmap); 1472 if (dev->caps.steering_mode !=
1473 MLX4_STEERING_MODE_DEVICE_MANAGED)
1474 mlx4_bitmap_cleanup(&mlx4_priv(dev)->mcg_table.bitmap);
1035} 1475}
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index e5d20220762..d2c436b10fb 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -39,6 +39,7 @@
39 39
40#include <linux/mutex.h> 40#include <linux/mutex.h>
41#include <linux/radix-tree.h> 41#include <linux/radix-tree.h>
42#include <linux/rbtree.h>
42#include <linux/timer.h> 43#include <linux/timer.h>
43#include <linux/semaphore.h> 44#include <linux/semaphore.h>
44#include <linux/workqueue.h> 45#include <linux/workqueue.h>
@@ -53,6 +54,17 @@
53#define DRV_VERSION "1.1" 54#define DRV_VERSION "1.1"
54#define DRV_RELDATE "Dec, 2011" 55#define DRV_RELDATE "Dec, 2011"
55 56
57#define MLX4_FS_UDP_UC_EN (1 << 1)
58#define MLX4_FS_TCP_UC_EN (1 << 2)
59#define MLX4_FS_NUM_OF_L2_ADDR 8
60#define MLX4_FS_MGM_LOG_ENTRY_SIZE 7
61#define MLX4_FS_NUM_MCG (1 << 17)
62
63enum {
64 MLX4_FS_L2_HASH = 0,
65 MLX4_FS_L2_L3_L4_HASH,
66};
67
56#define MLX4_NUM_UP 8 68#define MLX4_NUM_UP 8
57#define MLX4_NUM_TC 8 69#define MLX4_NUM_TC 8
58#define MLX4_RATELIMIT_UNITS 3 /* 100 Mbps */ 70#define MLX4_RATELIMIT_UNITS 3 /* 100 Mbps */
@@ -137,6 +149,7 @@ enum mlx4_resource {
137 RES_VLAN, 149 RES_VLAN,
138 RES_EQ, 150 RES_EQ,
139 RES_COUNTER, 151 RES_COUNTER,
152 RES_FS_RULE,
140 MLX4_NUM_OF_RESOURCE_TYPE 153 MLX4_NUM_OF_RESOURCE_TYPE
141}; 154};
142 155
@@ -509,7 +522,7 @@ struct slave_list {
509struct mlx4_resource_tracker { 522struct mlx4_resource_tracker {
510 spinlock_t lock; 523 spinlock_t lock;
511 /* tree for each resources */ 524 /* tree for each resources */
512 struct radix_tree_root res_tree[MLX4_NUM_OF_RESOURCE_TYPE]; 525 struct rb_root res_tree[MLX4_NUM_OF_RESOURCE_TYPE];
513 /* num_of_slave's lists, one per slave */ 526 /* num_of_slave's lists, one per slave */
514 struct slave_list *slave_list; 527 struct slave_list *slave_list;
515}; 528};
@@ -703,6 +716,7 @@ struct mlx4_set_port_rqp_calc_context {
703 716
704struct mlx4_mac_entry { 717struct mlx4_mac_entry {
705 u64 mac; 718 u64 mac;
719 u64 reg_id;
706}; 720};
707 721
708struct mlx4_port_info { 722struct mlx4_port_info {
@@ -776,6 +790,7 @@ struct mlx4_priv {
776 struct mutex bf_mutex; 790 struct mutex bf_mutex;
777 struct io_mapping *bf_mapping; 791 struct io_mapping *bf_mapping;
778 int reserved_mtts; 792 int reserved_mtts;
793 int fs_hash_mode;
779}; 794};
780 795
781static inline struct mlx4_priv *mlx4_priv(struct mlx4_dev *dev) 796static inline struct mlx4_priv *mlx4_priv(struct mlx4_dev *dev)
@@ -1032,7 +1047,7 @@ int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port);
1032/* resource tracker functions*/ 1047/* resource tracker functions*/
1033int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev, 1048int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
1034 enum mlx4_resource resource_type, 1049 enum mlx4_resource resource_type,
1035 int resource_id, int *slave); 1050 u64 resource_id, int *slave);
1036void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave_id); 1051void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave_id);
1037int mlx4_init_resource_tracker(struct mlx4_dev *dev); 1052int mlx4_init_resource_tracker(struct mlx4_dev *dev);
1038 1053
@@ -1117,6 +1132,16 @@ int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
1117 struct mlx4_cmd_mailbox *inbox, 1132 struct mlx4_cmd_mailbox *inbox,
1118 struct mlx4_cmd_mailbox *outbox, 1133 struct mlx4_cmd_mailbox *outbox,
1119 struct mlx4_cmd_info *cmd); 1134 struct mlx4_cmd_info *cmd);
1135int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
1136 struct mlx4_vhcr *vhcr,
1137 struct mlx4_cmd_mailbox *inbox,
1138 struct mlx4_cmd_mailbox *outbox,
1139 struct mlx4_cmd_info *cmd);
1140int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
1141 struct mlx4_vhcr *vhcr,
1142 struct mlx4_cmd_mailbox *inbox,
1143 struct mlx4_cmd_mailbox *outbox,
1144 struct mlx4_cmd_info *cmd);
1120 1145
1121int mlx4_get_mgm_entry_size(struct mlx4_dev *dev); 1146int mlx4_get_mgm_entry_size(struct mlx4_dev *dev);
1122int mlx4_get_qp_per_mgm(struct mlx4_dev *dev); 1147int mlx4_get_qp_per_mgm(struct mlx4_dev *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 225c20d4790..a12632150b3 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -75,6 +75,7 @@
75#define STAMP_SHIFT 31 75#define STAMP_SHIFT 31
76#define STAMP_VAL 0x7fffffff 76#define STAMP_VAL 0x7fffffff
77#define STATS_DELAY (HZ / 4) 77#define STATS_DELAY (HZ / 4)
78#define MAX_NUM_OF_FS_RULES 256
78 79
79/* Typical TSO descriptor with 16 gather entries is 352 bytes... */ 80/* Typical TSO descriptor with 16 gather entries is 352 bytes... */
80#define MAX_DESC_SIZE 512 81#define MAX_DESC_SIZE 512
@@ -404,6 +405,19 @@ struct mlx4_en_perf_stats {
404#define NUM_PERF_COUNTERS 6 405#define NUM_PERF_COUNTERS 6
405}; 406};
406 407
408enum mlx4_en_mclist_act {
409 MCLIST_NONE,
410 MCLIST_REM,
411 MCLIST_ADD,
412};
413
414struct mlx4_en_mc_list {
415 struct list_head list;
416 enum mlx4_en_mclist_act action;
417 u8 addr[ETH_ALEN];
418 u64 reg_id;
419};
420
407struct mlx4_en_frag_info { 421struct mlx4_en_frag_info {
408 u16 frag_size; 422 u16 frag_size;
409 u16 frag_prefix_size; 423 u16 frag_prefix_size;
@@ -422,6 +436,11 @@ struct mlx4_en_frag_info {
422 436
423#endif 437#endif
424 438
439struct ethtool_flow_id {
440 struct ethtool_rx_flow_spec flow_spec;
441 u64 id;
442};
443
425struct mlx4_en_priv { 444struct mlx4_en_priv {
426 struct mlx4_en_dev *mdev; 445 struct mlx4_en_dev *mdev;
427 struct mlx4_en_port_profile *prof; 446 struct mlx4_en_port_profile *prof;
@@ -431,6 +450,7 @@ struct mlx4_en_priv {
431 struct net_device_stats ret_stats; 450 struct net_device_stats ret_stats;
432 struct mlx4_en_port_state port_state; 451 struct mlx4_en_port_state port_state;
433 spinlock_t stats_lock; 452 spinlock_t stats_lock;
453 struct ethtool_flow_id ethtool_rules[MAX_NUM_OF_FS_RULES];
434 454
435 unsigned long last_moder_packets[MAX_RX_RINGS]; 455 unsigned long last_moder_packets[MAX_RX_RINGS];
436 unsigned long last_moder_tx_packets; 456 unsigned long last_moder_tx_packets;
@@ -480,6 +500,7 @@ struct mlx4_en_priv {
480 struct mlx4_en_rx_ring rx_ring[MAX_RX_RINGS]; 500 struct mlx4_en_rx_ring rx_ring[MAX_RX_RINGS];
481 struct mlx4_en_cq *tx_cq; 501 struct mlx4_en_cq *tx_cq;
482 struct mlx4_en_cq rx_cq[MAX_RX_RINGS]; 502 struct mlx4_en_cq rx_cq[MAX_RX_RINGS];
503 struct mlx4_qp drop_qp;
483 struct work_struct mcast_task; 504 struct work_struct mcast_task;
484 struct work_struct mac_task; 505 struct work_struct mac_task;
485 struct work_struct watchdog_task; 506 struct work_struct watchdog_task;
@@ -489,8 +510,9 @@ struct mlx4_en_priv {
489 struct mlx4_en_pkt_stats pkstats; 510 struct mlx4_en_pkt_stats pkstats;
490 struct mlx4_en_port_stats port_stats; 511 struct mlx4_en_port_stats port_stats;
491 u64 stats_bitmap; 512 u64 stats_bitmap;
492 char *mc_addrs; 513 struct list_head mc_list;
493 int mc_addrs_cnt; 514 struct list_head curr_list;
515 u64 broadcast_id;
494 struct mlx4_en_stat_out_mbox hw_stats; 516 struct mlx4_en_stat_out_mbox hw_stats;
495 int vids[128]; 517 int vids[128];
496 bool wol; 518 bool wol;
@@ -565,6 +587,8 @@ void mlx4_en_unmap_buffer(struct mlx4_buf *buf);
565void mlx4_en_calc_rx_buf(struct net_device *dev); 587void mlx4_en_calc_rx_buf(struct net_device *dev);
566int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv); 588int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv);
567void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv); 589void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv);
590int mlx4_en_create_drop_qp(struct mlx4_en_priv *priv);
591void mlx4_en_destroy_drop_qp(struct mlx4_en_priv *priv);
568int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring); 592int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring);
569void mlx4_en_rx_irq(struct mlx4_cq *mcq); 593void mlx4_en_rx_irq(struct mlx4_cq *mcq);
570 594
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index a8fb52992c6..a51d1b9bf1d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -75,21 +75,54 @@ void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table)
75 table->total = 0; 75 table->total = 0;
76} 76}
77 77
78static int mlx4_uc_steer_add(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn) 78static int mlx4_uc_steer_add(struct mlx4_dev *dev, u8 port,
79 u64 mac, int *qpn, u64 *reg_id)
79{ 80{
80 struct mlx4_qp qp;
81 u8 gid[16] = {0};
82 __be64 be_mac; 81 __be64 be_mac;
83 int err; 82 int err;
84 83
85 qp.qpn = *qpn; 84 mac &= MLX4_MAC_MASK;
86
87 mac &= 0xffffffffffffULL;
88 be_mac = cpu_to_be64(mac << 16); 85 be_mac = cpu_to_be64(mac << 16);
89 memcpy(&gid[10], &be_mac, ETH_ALEN);
90 gid[5] = port;
91 86
92 err = mlx4_unicast_attach(dev, &qp, gid, 0, MLX4_PROT_ETH); 87 switch (dev->caps.steering_mode) {
88 case MLX4_STEERING_MODE_B0: {
89 struct mlx4_qp qp;
90 u8 gid[16] = {0};
91
92 qp.qpn = *qpn;
93 memcpy(&gid[10], &be_mac, ETH_ALEN);
94 gid[5] = port;
95
96 err = mlx4_unicast_attach(dev, &qp, gid, 0, MLX4_PROT_ETH);
97 break;
98 }
99 case MLX4_STEERING_MODE_DEVICE_MANAGED: {
100 struct mlx4_spec_list spec_eth = { {NULL} };
101 __be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);
102
103 struct mlx4_net_trans_rule rule = {
104 .queue_mode = MLX4_NET_TRANS_Q_FIFO,
105 .exclusive = 0,
106 .allow_loopback = 1,
107 .promisc_mode = MLX4_FS_PROMISC_NONE,
108 .priority = MLX4_DOMAIN_NIC,
109 };
110
111 rule.port = port;
112 rule.qpn = *qpn;
113 INIT_LIST_HEAD(&rule.list);
114
115 spec_eth.id = MLX4_NET_TRANS_RULE_ID_ETH;
116 memcpy(spec_eth.eth.dst_mac, &be_mac, ETH_ALEN);
117 memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
118 list_add_tail(&spec_eth.list, &rule.list);
119
120 err = mlx4_flow_attach(dev, &rule, reg_id);
121 break;
122 }
123 default:
124 return -EINVAL;
125 }
93 if (err) 126 if (err)
94 mlx4_warn(dev, "Failed Attaching Unicast\n"); 127 mlx4_warn(dev, "Failed Attaching Unicast\n");
95 128
@@ -97,19 +130,30 @@ static int mlx4_uc_steer_add(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn)
97} 130}
98 131
99static void mlx4_uc_steer_release(struct mlx4_dev *dev, u8 port, 132static void mlx4_uc_steer_release(struct mlx4_dev *dev, u8 port,
100 u64 mac, int qpn) 133 u64 mac, int qpn, u64 reg_id)
101{ 134{
102 struct mlx4_qp qp; 135 switch (dev->caps.steering_mode) {
103 u8 gid[16] = {0}; 136 case MLX4_STEERING_MODE_B0: {
104 __be64 be_mac; 137 struct mlx4_qp qp;
138 u8 gid[16] = {0};
139 __be64 be_mac;
105 140
106 qp.qpn = qpn; 141 qp.qpn = qpn;
107 mac &= 0xffffffffffffULL; 142 mac &= MLX4_MAC_MASK;
108 be_mac = cpu_to_be64(mac << 16); 143 be_mac = cpu_to_be64(mac << 16);
109 memcpy(&gid[10], &be_mac, ETH_ALEN); 144 memcpy(&gid[10], &be_mac, ETH_ALEN);
110 gid[5] = port; 145 gid[5] = port;
111 146
112 mlx4_unicast_detach(dev, &qp, gid, MLX4_PROT_ETH); 147 mlx4_unicast_detach(dev, &qp, gid, MLX4_PROT_ETH);
148 break;
149 }
150 case MLX4_STEERING_MODE_DEVICE_MANAGED: {
151 mlx4_flow_detach(dev, reg_id);
152 break;
153 }
154 default:
155 mlx4_err(dev, "Invalid steering mode.\n");
156 }
113} 157}
114 158
115static int validate_index(struct mlx4_dev *dev, 159static int validate_index(struct mlx4_dev *dev,
@@ -144,6 +188,7 @@ int mlx4_get_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn)
144 struct mlx4_mac_entry *entry; 188 struct mlx4_mac_entry *entry;
145 int index = 0; 189 int index = 0;
146 int err = 0; 190 int err = 0;
191 u64 reg_id;
147 192
148 mlx4_dbg(dev, "Registering MAC: 0x%llx for adding\n", 193 mlx4_dbg(dev, "Registering MAC: 0x%llx for adding\n",
149 (unsigned long long) mac); 194 (unsigned long long) mac);
@@ -155,7 +200,7 @@ int mlx4_get_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn)
155 return err; 200 return err;
156 } 201 }
157 202
158 if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER)) { 203 if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) {
159 *qpn = info->base_qpn + index; 204 *qpn = info->base_qpn + index;
160 return 0; 205 return 0;
161 } 206 }
@@ -167,7 +212,7 @@ int mlx4_get_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn)
167 goto qp_err; 212 goto qp_err;
168 } 213 }
169 214
170 err = mlx4_uc_steer_add(dev, port, mac, qpn); 215 err = mlx4_uc_steer_add(dev, port, mac, qpn, &reg_id);
171 if (err) 216 if (err)
172 goto steer_err; 217 goto steer_err;
173 218
@@ -177,6 +222,7 @@ int mlx4_get_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn)
177 goto alloc_err; 222 goto alloc_err;
178 } 223 }
179 entry->mac = mac; 224 entry->mac = mac;
225 entry->reg_id = reg_id;
180 err = radix_tree_insert(&info->mac_tree, *qpn, entry); 226 err = radix_tree_insert(&info->mac_tree, *qpn, entry);
181 if (err) 227 if (err)
182 goto insert_err; 228 goto insert_err;
@@ -186,7 +232,7 @@ insert_err:
186 kfree(entry); 232 kfree(entry);
187 233
188alloc_err: 234alloc_err:
189 mlx4_uc_steer_release(dev, port, mac, *qpn); 235 mlx4_uc_steer_release(dev, port, mac, *qpn, reg_id);
190 236
191steer_err: 237steer_err:
192 mlx4_qp_release_range(dev, *qpn, 1); 238 mlx4_qp_release_range(dev, *qpn, 1);
@@ -206,13 +252,14 @@ void mlx4_put_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int qpn)
206 (unsigned long long) mac); 252 (unsigned long long) mac);
207 mlx4_unregister_mac(dev, port, mac); 253 mlx4_unregister_mac(dev, port, mac);
208 254
209 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER) { 255 if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) {
210 entry = radix_tree_lookup(&info->mac_tree, qpn); 256 entry = radix_tree_lookup(&info->mac_tree, qpn);
211 if (entry) { 257 if (entry) {
212 mlx4_dbg(dev, "Releasing qp: port %d, mac 0x%llx," 258 mlx4_dbg(dev, "Releasing qp: port %d, mac 0x%llx,"
213 " qpn %d\n", port, 259 " qpn %d\n", port,
214 (unsigned long long) mac, qpn); 260 (unsigned long long) mac, qpn);
215 mlx4_uc_steer_release(dev, port, entry->mac, qpn); 261 mlx4_uc_steer_release(dev, port, entry->mac,
262 qpn, entry->reg_id);
216 mlx4_qp_release_range(dev, qpn, 1); 263 mlx4_qp_release_range(dev, qpn, 1);
217 radix_tree_delete(&info->mac_tree, qpn); 264 radix_tree_delete(&info->mac_tree, qpn);
218 kfree(entry); 265 kfree(entry);
@@ -359,15 +406,18 @@ int mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac)
359 int index = qpn - info->base_qpn; 406 int index = qpn - info->base_qpn;
360 int err = 0; 407 int err = 0;
361 408
362 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER) { 409 if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) {
363 entry = radix_tree_lookup(&info->mac_tree, qpn); 410 entry = radix_tree_lookup(&info->mac_tree, qpn);
364 if (!entry) 411 if (!entry)
365 return -EINVAL; 412 return -EINVAL;
366 mlx4_uc_steer_release(dev, port, entry->mac, qpn); 413 mlx4_uc_steer_release(dev, port, entry->mac,
414 qpn, entry->reg_id);
367 mlx4_unregister_mac(dev, port, entry->mac); 415 mlx4_unregister_mac(dev, port, entry->mac);
368 entry->mac = new_mac; 416 entry->mac = new_mac;
417 entry->reg_id = 0;
369 mlx4_register_mac(dev, port, new_mac); 418 mlx4_register_mac(dev, port, new_mac);
370 err = mlx4_uc_steer_add(dev, port, entry->mac, &qpn); 419 err = mlx4_uc_steer_add(dev, port, entry->mac,
420 &qpn, &entry->reg_id);
371 return err; 421 return err;
372 } 422 }
373 423
@@ -803,8 +853,7 @@ int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
803 u32 m_promisc = (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) ? 853 u32 m_promisc = (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) ?
804 MCAST_DIRECT : MCAST_DEFAULT; 854 MCAST_DIRECT : MCAST_DEFAULT;
805 855
806 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER && 856 if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0)
807 dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER)
808 return 0; 857 return 0;
809 858
810 mailbox = mlx4_alloc_cmd_mailbox(dev); 859 mailbox = mlx4_alloc_cmd_mailbox(dev);
diff --git a/drivers/net/ethernet/mellanox/mlx4/profile.c b/drivers/net/ethernet/mellanox/mlx4/profile.c
index b83bc928d52..9ee4725363d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/profile.c
+++ b/drivers/net/ethernet/mellanox/mlx4/profile.c
@@ -237,13 +237,19 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
237 init_hca->mtt_base = profile[i].start; 237 init_hca->mtt_base = profile[i].start;
238 break; 238 break;
239 case MLX4_RES_MCG: 239 case MLX4_RES_MCG:
240 dev->caps.num_mgms = profile[i].num >> 1;
241 dev->caps.num_amgms = profile[i].num >> 1;
242 init_hca->mc_base = profile[i].start; 240 init_hca->mc_base = profile[i].start;
243 init_hca->log_mc_entry_sz = 241 init_hca->log_mc_entry_sz =
244 ilog2(mlx4_get_mgm_entry_size(dev)); 242 ilog2(mlx4_get_mgm_entry_size(dev));
245 init_hca->log_mc_table_sz = profile[i].log_num; 243 init_hca->log_mc_table_sz = profile[i].log_num;
246 init_hca->log_mc_hash_sz = profile[i].log_num - 1; 244 if (dev->caps.steering_mode ==
245 MLX4_STEERING_MODE_DEVICE_MANAGED) {
246 dev->caps.num_mgms = profile[i].num;
247 } else {
248 init_hca->log_mc_hash_sz =
249 profile[i].log_num - 1;
250 dev->caps.num_mgms = profile[i].num >> 1;
251 dev->caps.num_amgms = profile[i].num >> 1;
252 }
247 break; 253 break;
248 default: 254 default:
249 break; 255 break;
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index b45d0e7f6ab..c3fa9198619 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -57,7 +57,8 @@ struct mac_res {
57 57
58struct res_common { 58struct res_common {
59 struct list_head list; 59 struct list_head list;
60 u32 res_id; 60 struct rb_node node;
61 u64 res_id;
61 int owner; 62 int owner;
62 int state; 63 int state;
63 int from_state; 64 int from_state;
@@ -189,6 +190,58 @@ struct res_xrcdn {
189 int port; 190 int port;
190}; 191};
191 192
193enum res_fs_rule_states {
194 RES_FS_RULE_BUSY = RES_ANY_BUSY,
195 RES_FS_RULE_ALLOCATED,
196};
197
198struct res_fs_rule {
199 struct res_common com;
200};
201
202static void *res_tracker_lookup(struct rb_root *root, u64 res_id)
203{
204 struct rb_node *node = root->rb_node;
205
206 while (node) {
207 struct res_common *res = container_of(node, struct res_common,
208 node);
209
210 if (res_id < res->res_id)
211 node = node->rb_left;
212 else if (res_id > res->res_id)
213 node = node->rb_right;
214 else
215 return res;
216 }
217 return NULL;
218}
219
220static int res_tracker_insert(struct rb_root *root, struct res_common *res)
221{
222 struct rb_node **new = &(root->rb_node), *parent = NULL;
223
224 /* Figure out where to put new node */
225 while (*new) {
226 struct res_common *this = container_of(*new, struct res_common,
227 node);
228
229 parent = *new;
230 if (res->res_id < this->res_id)
231 new = &((*new)->rb_left);
232 else if (res->res_id > this->res_id)
233 new = &((*new)->rb_right);
234 else
235 return -EEXIST;
236 }
237
238 /* Add new node and rebalance tree. */
239 rb_link_node(&res->node, parent, new);
240 rb_insert_color(&res->node, root);
241
242 return 0;
243}
244
192/* For Debug uses */ 245/* For Debug uses */
193static const char *ResourceType(enum mlx4_resource rt) 246static const char *ResourceType(enum mlx4_resource rt)
194{ 247{
@@ -201,6 +254,7 @@ static const char *ResourceType(enum mlx4_resource rt)
201 case RES_MAC: return "RES_MAC"; 254 case RES_MAC: return "RES_MAC";
202 case RES_EQ: return "RES_EQ"; 255 case RES_EQ: return "RES_EQ";
203 case RES_COUNTER: return "RES_COUNTER"; 256 case RES_COUNTER: return "RES_COUNTER";
257 case RES_FS_RULE: return "RES_FS_RULE";
204 case RES_XRCD: return "RES_XRCD"; 258 case RES_XRCD: return "RES_XRCD";
205 default: return "Unknown resource type !!!"; 259 default: return "Unknown resource type !!!";
206 }; 260 };
@@ -228,8 +282,7 @@ int mlx4_init_resource_tracker(struct mlx4_dev *dev)
228 mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n", 282 mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n",
229 dev->num_slaves); 283 dev->num_slaves);
230 for (i = 0 ; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) 284 for (i = 0 ; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
231 INIT_RADIX_TREE(&priv->mfunc.master.res_tracker.res_tree[i], 285 priv->mfunc.master.res_tracker.res_tree[i] = RB_ROOT;
232 GFP_ATOMIC|__GFP_NOWARN);
233 286
234 spin_lock_init(&priv->mfunc.master.res_tracker.lock); 287 spin_lock_init(&priv->mfunc.master.res_tracker.lock);
235 return 0 ; 288 return 0 ;
@@ -277,11 +330,11 @@ static void *find_res(struct mlx4_dev *dev, int res_id,
277{ 330{
278 struct mlx4_priv *priv = mlx4_priv(dev); 331 struct mlx4_priv *priv = mlx4_priv(dev);
279 332
280 return radix_tree_lookup(&priv->mfunc.master.res_tracker.res_tree[type], 333 return res_tracker_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
281 res_id); 334 res_id);
282} 335}
283 336
284static int get_res(struct mlx4_dev *dev, int slave, int res_id, 337static int get_res(struct mlx4_dev *dev, int slave, u64 res_id,
285 enum mlx4_resource type, 338 enum mlx4_resource type,
286 void *res) 339 void *res)
287{ 340{
@@ -307,7 +360,7 @@ static int get_res(struct mlx4_dev *dev, int slave, int res_id,
307 360
308 r->from_state = r->state; 361 r->from_state = r->state;
309 r->state = RES_ANY_BUSY; 362 r->state = RES_ANY_BUSY;
310 mlx4_dbg(dev, "res %s id 0x%x to busy\n", 363 mlx4_dbg(dev, "res %s id 0x%llx to busy\n",
311 ResourceType(type), r->res_id); 364 ResourceType(type), r->res_id);
312 365
313 if (res) 366 if (res)
@@ -320,7 +373,7 @@ exit:
320 373
321int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev, 374int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
322 enum mlx4_resource type, 375 enum mlx4_resource type,
323 int res_id, int *slave) 376 u64 res_id, int *slave)
324{ 377{
325 378
326 struct res_common *r; 379 struct res_common *r;
@@ -341,7 +394,7 @@ int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
341 return err; 394 return err;
342} 395}
343 396
344static void put_res(struct mlx4_dev *dev, int slave, int res_id, 397static void put_res(struct mlx4_dev *dev, int slave, u64 res_id,
345 enum mlx4_resource type) 398 enum mlx4_resource type)
346{ 399{
347 struct res_common *r; 400 struct res_common *r;
@@ -473,7 +526,21 @@ static struct res_common *alloc_xrcdn_tr(int id)
473 return &ret->com; 526 return &ret->com;
474} 527}
475 528
476static struct res_common *alloc_tr(int id, enum mlx4_resource type, int slave, 529static struct res_common *alloc_fs_rule_tr(u64 id)
530{
531 struct res_fs_rule *ret;
532
533 ret = kzalloc(sizeof *ret, GFP_KERNEL);
534 if (!ret)
535 return NULL;
536
537 ret->com.res_id = id;
538 ret->com.state = RES_FS_RULE_ALLOCATED;
539
540 return &ret->com;
541}
542
543static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave,
477 int extra) 544 int extra)
478{ 545{
479 struct res_common *ret; 546 struct res_common *ret;
@@ -506,6 +573,9 @@ static struct res_common *alloc_tr(int id, enum mlx4_resource type, int slave,
506 case RES_XRCD: 573 case RES_XRCD:
507 ret = alloc_xrcdn_tr(id); 574 ret = alloc_xrcdn_tr(id);
508 break; 575 break;
576 case RES_FS_RULE:
577 ret = alloc_fs_rule_tr(id);
578 break;
509 default: 579 default:
510 return NULL; 580 return NULL;
511 } 581 }
@@ -515,7 +585,7 @@ static struct res_common *alloc_tr(int id, enum mlx4_resource type, int slave,
515 return ret; 585 return ret;
516} 586}
517 587
518static int add_res_range(struct mlx4_dev *dev, int slave, int base, int count, 588static int add_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
519 enum mlx4_resource type, int extra) 589 enum mlx4_resource type, int extra)
520{ 590{
521 int i; 591 int i;
@@ -523,7 +593,7 @@ static int add_res_range(struct mlx4_dev *dev, int slave, int base, int count,
523 struct mlx4_priv *priv = mlx4_priv(dev); 593 struct mlx4_priv *priv = mlx4_priv(dev);
524 struct res_common **res_arr; 594 struct res_common **res_arr;
525 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; 595 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
526 struct radix_tree_root *root = &tracker->res_tree[type]; 596 struct rb_root *root = &tracker->res_tree[type];
527 597
528 res_arr = kzalloc(count * sizeof *res_arr, GFP_KERNEL); 598 res_arr = kzalloc(count * sizeof *res_arr, GFP_KERNEL);
529 if (!res_arr) 599 if (!res_arr)
@@ -546,7 +616,7 @@ static int add_res_range(struct mlx4_dev *dev, int slave, int base, int count,
546 err = -EEXIST; 616 err = -EEXIST;
547 goto undo; 617 goto undo;
548 } 618 }
549 err = radix_tree_insert(root, base + i, res_arr[i]); 619 err = res_tracker_insert(root, res_arr[i]);
550 if (err) 620 if (err)
551 goto undo; 621 goto undo;
552 list_add_tail(&res_arr[i]->list, 622 list_add_tail(&res_arr[i]->list,
@@ -559,7 +629,7 @@ static int add_res_range(struct mlx4_dev *dev, int slave, int base, int count,
559 629
560undo: 630undo:
561 for (--i; i >= base; --i) 631 for (--i; i >= base; --i)
562 radix_tree_delete(&tracker->res_tree[type], i); 632 rb_erase(&res_arr[i]->node, root);
563 633
564 spin_unlock_irq(mlx4_tlock(dev)); 634 spin_unlock_irq(mlx4_tlock(dev));
565 635
@@ -638,6 +708,16 @@ static int remove_xrcdn_ok(struct res_xrcdn *res)
638 return 0; 708 return 0;
639} 709}
640 710
711static int remove_fs_rule_ok(struct res_fs_rule *res)
712{
713 if (res->com.state == RES_FS_RULE_BUSY)
714 return -EBUSY;
715 else if (res->com.state != RES_FS_RULE_ALLOCATED)
716 return -EPERM;
717
718 return 0;
719}
720
641static int remove_cq_ok(struct res_cq *res) 721static int remove_cq_ok(struct res_cq *res)
642{ 722{
643 if (res->com.state == RES_CQ_BUSY) 723 if (res->com.state == RES_CQ_BUSY)
@@ -679,15 +759,17 @@ static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
679 return remove_counter_ok((struct res_counter *)res); 759 return remove_counter_ok((struct res_counter *)res);
680 case RES_XRCD: 760 case RES_XRCD:
681 return remove_xrcdn_ok((struct res_xrcdn *)res); 761 return remove_xrcdn_ok((struct res_xrcdn *)res);
762 case RES_FS_RULE:
763 return remove_fs_rule_ok((struct res_fs_rule *)res);
682 default: 764 default:
683 return -EINVAL; 765 return -EINVAL;
684 } 766 }
685} 767}
686 768
687static int rem_res_range(struct mlx4_dev *dev, int slave, int base, int count, 769static int rem_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
688 enum mlx4_resource type, int extra) 770 enum mlx4_resource type, int extra)
689{ 771{
690 int i; 772 u64 i;
691 int err; 773 int err;
692 struct mlx4_priv *priv = mlx4_priv(dev); 774 struct mlx4_priv *priv = mlx4_priv(dev);
693 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; 775 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
@@ -695,7 +777,7 @@ static int rem_res_range(struct mlx4_dev *dev, int slave, int base, int count,
695 777
696 spin_lock_irq(mlx4_tlock(dev)); 778 spin_lock_irq(mlx4_tlock(dev));
697 for (i = base; i < base + count; ++i) { 779 for (i = base; i < base + count; ++i) {
698 r = radix_tree_lookup(&tracker->res_tree[type], i); 780 r = res_tracker_lookup(&tracker->res_tree[type], i);
699 if (!r) { 781 if (!r) {
700 err = -ENOENT; 782 err = -ENOENT;
701 goto out; 783 goto out;
@@ -710,8 +792,8 @@ static int rem_res_range(struct mlx4_dev *dev, int slave, int base, int count,
710 } 792 }
711 793
712 for (i = base; i < base + count; ++i) { 794 for (i = base; i < base + count; ++i) {
713 r = radix_tree_lookup(&tracker->res_tree[type], i); 795 r = res_tracker_lookup(&tracker->res_tree[type], i);
714 radix_tree_delete(&tracker->res_tree[type], i); 796 rb_erase(&r->node, &tracker->res_tree[type]);
715 list_del(&r->list); 797 list_del(&r->list);
716 kfree(r); 798 kfree(r);
717 } 799 }
@@ -733,7 +815,7 @@ static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
733 int err = 0; 815 int err = 0;
734 816
735 spin_lock_irq(mlx4_tlock(dev)); 817 spin_lock_irq(mlx4_tlock(dev));
736 r = radix_tree_lookup(&tracker->res_tree[RES_QP], qpn); 818 r = res_tracker_lookup(&tracker->res_tree[RES_QP], qpn);
737 if (!r) 819 if (!r)
738 err = -ENOENT; 820 err = -ENOENT;
739 else if (r->com.owner != slave) 821 else if (r->com.owner != slave)
@@ -741,7 +823,7 @@ static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
741 else { 823 else {
742 switch (state) { 824 switch (state) {
743 case RES_QP_BUSY: 825 case RES_QP_BUSY:
744 mlx4_dbg(dev, "%s: failed RES_QP, 0x%x\n", 826 mlx4_dbg(dev, "%s: failed RES_QP, 0x%llx\n",
745 __func__, r->com.res_id); 827 __func__, r->com.res_id);
746 err = -EBUSY; 828 err = -EBUSY;
747 break; 829 break;
@@ -750,7 +832,7 @@ static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
750 if (r->com.state == RES_QP_MAPPED && !alloc) 832 if (r->com.state == RES_QP_MAPPED && !alloc)
751 break; 833 break;
752 834
753 mlx4_dbg(dev, "failed RES_QP, 0x%x\n", r->com.res_id); 835 mlx4_dbg(dev, "failed RES_QP, 0x%llx\n", r->com.res_id);
754 err = -EINVAL; 836 err = -EINVAL;
755 break; 837 break;
756 838
@@ -759,7 +841,7 @@ static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
759 r->com.state == RES_QP_HW) 841 r->com.state == RES_QP_HW)
760 break; 842 break;
761 else { 843 else {
762 mlx4_dbg(dev, "failed RES_QP, 0x%x\n", 844 mlx4_dbg(dev, "failed RES_QP, 0x%llx\n",
763 r->com.res_id); 845 r->com.res_id);
764 err = -EINVAL; 846 err = -EINVAL;
765 } 847 }
@@ -779,7 +861,7 @@ static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
779 r->com.to_state = state; 861 r->com.to_state = state;
780 r->com.state = RES_QP_BUSY; 862 r->com.state = RES_QP_BUSY;
781 if (qp) 863 if (qp)
782 *qp = (struct res_qp *)r; 864 *qp = r;
783 } 865 }
784 } 866 }
785 867
@@ -797,7 +879,7 @@ static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
797 int err = 0; 879 int err = 0;
798 880
799 spin_lock_irq(mlx4_tlock(dev)); 881 spin_lock_irq(mlx4_tlock(dev));
800 r = radix_tree_lookup(&tracker->res_tree[RES_MPT], index); 882 r = res_tracker_lookup(&tracker->res_tree[RES_MPT], index);
801 if (!r) 883 if (!r)
802 err = -ENOENT; 884 err = -ENOENT;
803 else if (r->com.owner != slave) 885 else if (r->com.owner != slave)
@@ -832,7 +914,7 @@ static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
832 r->com.to_state = state; 914 r->com.to_state = state;
833 r->com.state = RES_MPT_BUSY; 915 r->com.state = RES_MPT_BUSY;
834 if (mpt) 916 if (mpt)
835 *mpt = (struct res_mpt *)r; 917 *mpt = r;
836 } 918 }
837 } 919 }
838 920
@@ -850,7 +932,7 @@ static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
850 int err = 0; 932 int err = 0;
851 933
852 spin_lock_irq(mlx4_tlock(dev)); 934 spin_lock_irq(mlx4_tlock(dev));
853 r = radix_tree_lookup(&tracker->res_tree[RES_EQ], index); 935 r = res_tracker_lookup(&tracker->res_tree[RES_EQ], index);
854 if (!r) 936 if (!r)
855 err = -ENOENT; 937 err = -ENOENT;
856 else if (r->com.owner != slave) 938 else if (r->com.owner != slave)
@@ -898,7 +980,7 @@ static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
898 int err; 980 int err;
899 981
900 spin_lock_irq(mlx4_tlock(dev)); 982 spin_lock_irq(mlx4_tlock(dev));
901 r = radix_tree_lookup(&tracker->res_tree[RES_CQ], cqn); 983 r = res_tracker_lookup(&tracker->res_tree[RES_CQ], cqn);
902 if (!r) 984 if (!r)
903 err = -ENOENT; 985 err = -ENOENT;
904 else if (r->com.owner != slave) 986 else if (r->com.owner != slave)
@@ -952,7 +1034,7 @@ static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
952 int err = 0; 1034 int err = 0;
953 1035
954 spin_lock_irq(mlx4_tlock(dev)); 1036 spin_lock_irq(mlx4_tlock(dev));
955 r = radix_tree_lookup(&tracker->res_tree[RES_SRQ], index); 1037 r = res_tracker_lookup(&tracker->res_tree[RES_SRQ], index);
956 if (!r) 1038 if (!r)
957 err = -ENOENT; 1039 err = -ENOENT;
958 else if (r->com.owner != slave) 1040 else if (r->com.owner != slave)
@@ -1001,7 +1083,7 @@ static void res_abort_move(struct mlx4_dev *dev, int slave,
1001 struct res_common *r; 1083 struct res_common *r;
1002 1084
1003 spin_lock_irq(mlx4_tlock(dev)); 1085 spin_lock_irq(mlx4_tlock(dev));
1004 r = radix_tree_lookup(&tracker->res_tree[type], id); 1086 r = res_tracker_lookup(&tracker->res_tree[type], id);
1005 if (r && (r->owner == slave)) 1087 if (r && (r->owner == slave))
1006 r->state = r->from_state; 1088 r->state = r->from_state;
1007 spin_unlock_irq(mlx4_tlock(dev)); 1089 spin_unlock_irq(mlx4_tlock(dev));
@@ -1015,7 +1097,7 @@ static void res_end_move(struct mlx4_dev *dev, int slave,
1015 struct res_common *r; 1097 struct res_common *r;
1016 1098
1017 spin_lock_irq(mlx4_tlock(dev)); 1099 spin_lock_irq(mlx4_tlock(dev));
1018 r = radix_tree_lookup(&tracker->res_tree[type], id); 1100 r = res_tracker_lookup(&tracker->res_tree[type], id);
1019 if (r && (r->owner == slave)) 1101 if (r && (r->owner == slave))
1020 r->state = r->to_state; 1102 r->state = r->to_state;
1021 spin_unlock_irq(mlx4_tlock(dev)); 1103 spin_unlock_irq(mlx4_tlock(dev));
@@ -2695,6 +2777,60 @@ ex_put:
2695 return err; 2777 return err;
2696} 2778}
2697 2779
2780int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
2781 struct mlx4_vhcr *vhcr,
2782 struct mlx4_cmd_mailbox *inbox,
2783 struct mlx4_cmd_mailbox *outbox,
2784 struct mlx4_cmd_info *cmd)
2785{
2786 int err;
2787
2788 if (dev->caps.steering_mode !=
2789 MLX4_STEERING_MODE_DEVICE_MANAGED)
2790 return -EOPNOTSUPP;
2791
2792 err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param,
2793 vhcr->in_modifier, 0,
2794 MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
2795 MLX4_CMD_NATIVE);
2796 if (err)
2797 return err;
2798
2799 err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, 0);
2800 if (err) {
2801 mlx4_err(dev, "Fail to add flow steering resources.\n ");
2802 /* detach rule*/
2803 mlx4_cmd(dev, vhcr->out_param, 0, 0,
2804 MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
2805 MLX4_CMD_NATIVE);
2806 }
2807 return err;
2808}
2809
2810int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
2811 struct mlx4_vhcr *vhcr,
2812 struct mlx4_cmd_mailbox *inbox,
2813 struct mlx4_cmd_mailbox *outbox,
2814 struct mlx4_cmd_info *cmd)
2815{
2816 int err;
2817
2818 if (dev->caps.steering_mode !=
2819 MLX4_STEERING_MODE_DEVICE_MANAGED)
2820 return -EOPNOTSUPP;
2821
2822 err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0);
2823 if (err) {
2824 mlx4_err(dev, "Fail to remove flow steering resources.\n ");
2825 return err;
2826 }
2827
2828 err = mlx4_cmd(dev, vhcr->in_param, 0, 0,
2829 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
2830 MLX4_CMD_NATIVE);
2831 return err;
2832}
2833
2698enum { 2834enum {
2699 BUSY_MAX_RETRIES = 10 2835 BUSY_MAX_RETRIES = 10
2700}; 2836};
@@ -2751,7 +2887,7 @@ static int _move_all_busy(struct mlx4_dev *dev, int slave,
2751 if (r->state == RES_ANY_BUSY) { 2887 if (r->state == RES_ANY_BUSY) {
2752 if (print) 2888 if (print)
2753 mlx4_dbg(dev, 2889 mlx4_dbg(dev,
2754 "%s id 0x%x is busy\n", 2890 "%s id 0x%llx is busy\n",
2755 ResourceType(type), 2891 ResourceType(type),
2756 r->res_id); 2892 r->res_id);
2757 ++busy; 2893 ++busy;
@@ -2817,8 +2953,8 @@ static void rem_slave_qps(struct mlx4_dev *dev, int slave)
2817 switch (state) { 2953 switch (state) {
2818 case RES_QP_RESERVED: 2954 case RES_QP_RESERVED:
2819 spin_lock_irq(mlx4_tlock(dev)); 2955 spin_lock_irq(mlx4_tlock(dev));
2820 radix_tree_delete(&tracker->res_tree[RES_QP], 2956 rb_erase(&qp->com.node,
2821 qp->com.res_id); 2957 &tracker->res_tree[RES_QP]);
2822 list_del(&qp->com.list); 2958 list_del(&qp->com.list);
2823 spin_unlock_irq(mlx4_tlock(dev)); 2959 spin_unlock_irq(mlx4_tlock(dev));
2824 kfree(qp); 2960 kfree(qp);
@@ -2888,8 +3024,8 @@ static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
2888 case RES_SRQ_ALLOCATED: 3024 case RES_SRQ_ALLOCATED:
2889 __mlx4_srq_free_icm(dev, srqn); 3025 __mlx4_srq_free_icm(dev, srqn);
2890 spin_lock_irq(mlx4_tlock(dev)); 3026 spin_lock_irq(mlx4_tlock(dev));
2891 radix_tree_delete(&tracker->res_tree[RES_SRQ], 3027 rb_erase(&srq->com.node,
2892 srqn); 3028 &tracker->res_tree[RES_SRQ]);
2893 list_del(&srq->com.list); 3029 list_del(&srq->com.list);
2894 spin_unlock_irq(mlx4_tlock(dev)); 3030 spin_unlock_irq(mlx4_tlock(dev));
2895 kfree(srq); 3031 kfree(srq);
@@ -2954,8 +3090,8 @@ static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
2954 case RES_CQ_ALLOCATED: 3090 case RES_CQ_ALLOCATED:
2955 __mlx4_cq_free_icm(dev, cqn); 3091 __mlx4_cq_free_icm(dev, cqn);
2956 spin_lock_irq(mlx4_tlock(dev)); 3092 spin_lock_irq(mlx4_tlock(dev));
2957 radix_tree_delete(&tracker->res_tree[RES_CQ], 3093 rb_erase(&cq->com.node,
2958 cqn); 3094 &tracker->res_tree[RES_CQ]);
2959 list_del(&cq->com.list); 3095 list_del(&cq->com.list);
2960 spin_unlock_irq(mlx4_tlock(dev)); 3096 spin_unlock_irq(mlx4_tlock(dev));
2961 kfree(cq); 3097 kfree(cq);
@@ -3017,8 +3153,8 @@ static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
3017 case RES_MPT_RESERVED: 3153 case RES_MPT_RESERVED:
3018 __mlx4_mr_release(dev, mpt->key); 3154 __mlx4_mr_release(dev, mpt->key);
3019 spin_lock_irq(mlx4_tlock(dev)); 3155 spin_lock_irq(mlx4_tlock(dev));
3020 radix_tree_delete(&tracker->res_tree[RES_MPT], 3156 rb_erase(&mpt->com.node,
3021 mptn); 3157 &tracker->res_tree[RES_MPT]);
3022 list_del(&mpt->com.list); 3158 list_del(&mpt->com.list);
3023 spin_unlock_irq(mlx4_tlock(dev)); 3159 spin_unlock_irq(mlx4_tlock(dev));
3024 kfree(mpt); 3160 kfree(mpt);
@@ -3086,8 +3222,8 @@ static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
3086 __mlx4_free_mtt_range(dev, base, 3222 __mlx4_free_mtt_range(dev, base,
3087 mtt->order); 3223 mtt->order);
3088 spin_lock_irq(mlx4_tlock(dev)); 3224 spin_lock_irq(mlx4_tlock(dev));
3089 radix_tree_delete(&tracker->res_tree[RES_MTT], 3225 rb_erase(&mtt->com.node,
3090 base); 3226 &tracker->res_tree[RES_MTT]);
3091 list_del(&mtt->com.list); 3227 list_del(&mtt->com.list);
3092 spin_unlock_irq(mlx4_tlock(dev)); 3228 spin_unlock_irq(mlx4_tlock(dev));
3093 kfree(mtt); 3229 kfree(mtt);
@@ -3104,6 +3240,58 @@ static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
3104 spin_unlock_irq(mlx4_tlock(dev)); 3240 spin_unlock_irq(mlx4_tlock(dev));
3105} 3241}
3106 3242
3243static void rem_slave_fs_rule(struct mlx4_dev *dev, int slave)
3244{
3245 struct mlx4_priv *priv = mlx4_priv(dev);
3246 struct mlx4_resource_tracker *tracker =
3247 &priv->mfunc.master.res_tracker;
3248 struct list_head *fs_rule_list =
3249 &tracker->slave_list[slave].res_list[RES_FS_RULE];
3250 struct res_fs_rule *fs_rule;
3251 struct res_fs_rule *tmp;
3252 int state;
3253 u64 base;
3254 int err;
3255
3256 err = move_all_busy(dev, slave, RES_FS_RULE);
3257 if (err)
3258 mlx4_warn(dev, "rem_slave_fs_rule: Could not move all mtts to busy for slave %d\n",
3259 slave);
3260
3261 spin_lock_irq(mlx4_tlock(dev));
3262 list_for_each_entry_safe(fs_rule, tmp, fs_rule_list, com.list) {
3263 spin_unlock_irq(mlx4_tlock(dev));
3264 if (fs_rule->com.owner == slave) {
3265 base = fs_rule->com.res_id;
3266 state = fs_rule->com.from_state;
3267 while (state != 0) {
3268 switch (state) {
3269 case RES_FS_RULE_ALLOCATED:
3270 /* detach rule */
3271 err = mlx4_cmd(dev, base, 0, 0,
3272 MLX4_QP_FLOW_STEERING_DETACH,
3273 MLX4_CMD_TIME_CLASS_A,
3274 MLX4_CMD_NATIVE);
3275
3276 spin_lock_irq(mlx4_tlock(dev));
3277 rb_erase(&fs_rule->com.node,
3278 &tracker->res_tree[RES_FS_RULE]);
3279 list_del(&fs_rule->com.list);
3280 spin_unlock_irq(mlx4_tlock(dev));
3281 kfree(fs_rule);
3282 state = 0;
3283 break;
3284
3285 default:
3286 state = 0;
3287 }
3288 }
3289 }
3290 spin_lock_irq(mlx4_tlock(dev));
3291 }
3292 spin_unlock_irq(mlx4_tlock(dev));
3293}
3294
3107static void rem_slave_eqs(struct mlx4_dev *dev, int slave) 3295static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
3108{ 3296{
3109 struct mlx4_priv *priv = mlx4_priv(dev); 3297 struct mlx4_priv *priv = mlx4_priv(dev);
@@ -3133,8 +3321,8 @@ static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
3133 switch (state) { 3321 switch (state) {
3134 case RES_EQ_RESERVED: 3322 case RES_EQ_RESERVED:
3135 spin_lock_irq(mlx4_tlock(dev)); 3323 spin_lock_irq(mlx4_tlock(dev));
3136 radix_tree_delete(&tracker->res_tree[RES_EQ], 3324 rb_erase(&eq->com.node,
3137 eqn); 3325 &tracker->res_tree[RES_EQ]);
3138 list_del(&eq->com.list); 3326 list_del(&eq->com.list);
3139 spin_unlock_irq(mlx4_tlock(dev)); 3327 spin_unlock_irq(mlx4_tlock(dev));
3140 kfree(eq); 3328 kfree(eq);
@@ -3191,7 +3379,8 @@ static void rem_slave_counters(struct mlx4_dev *dev, int slave)
3191 list_for_each_entry_safe(counter, tmp, counter_list, com.list) { 3379 list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
3192 if (counter->com.owner == slave) { 3380 if (counter->com.owner == slave) {
3193 index = counter->com.res_id; 3381 index = counter->com.res_id;
3194 radix_tree_delete(&tracker->res_tree[RES_COUNTER], index); 3382 rb_erase(&counter->com.node,
3383 &tracker->res_tree[RES_COUNTER]);
3195 list_del(&counter->com.list); 3384 list_del(&counter->com.list);
3196 kfree(counter); 3385 kfree(counter);
3197 __mlx4_counter_free(dev, index); 3386 __mlx4_counter_free(dev, index);
@@ -3220,7 +3409,7 @@ static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
3220 list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) { 3409 list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) {
3221 if (xrcd->com.owner == slave) { 3410 if (xrcd->com.owner == slave) {
3222 xrcdn = xrcd->com.res_id; 3411 xrcdn = xrcd->com.res_id;
3223 radix_tree_delete(&tracker->res_tree[RES_XRCD], xrcdn); 3412 rb_erase(&xrcd->com.node, &tracker->res_tree[RES_XRCD]);
3224 list_del(&xrcd->com.list); 3413 list_del(&xrcd->com.list);
3225 kfree(xrcd); 3414 kfree(xrcd);
3226 __mlx4_xrcd_free(dev, xrcdn); 3415 __mlx4_xrcd_free(dev, xrcdn);
@@ -3244,5 +3433,6 @@ void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
3244 rem_slave_mtts(dev, slave); 3433 rem_slave_mtts(dev, slave);
3245 rem_slave_counters(dev, slave); 3434 rem_slave_counters(dev, slave);
3246 rem_slave_xrcdns(dev, slave); 3435 rem_slave_xrcdns(dev, slave);
3436 rem_slave_fs_rule(dev, slave);
3247 mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex); 3437 mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
3248} 3438}
diff --git a/drivers/net/ethernet/micrel/ks8851_mll.c b/drivers/net/ethernet/micrel/ks8851_mll.c
index 5ffde23ac8f..59ef568d5dd 100644
--- a/drivers/net/ethernet/micrel/ks8851_mll.c
+++ b/drivers/net/ethernet/micrel/ks8851_mll.c
@@ -16,8 +16,7 @@
16 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 16 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
17 */ 17 */
18 18
19/** 19/* Supports:
20 * Supports:
21 * KS8851 16bit MLL chip from Micrel Inc. 20 * KS8851 16bit MLL chip from Micrel Inc.
22 */ 21 */
23 22
@@ -35,7 +34,7 @@
35#include <linux/platform_device.h> 34#include <linux/platform_device.h>
36#include <linux/delay.h> 35#include <linux/delay.h>
37#include <linux/slab.h> 36#include <linux/slab.h>
38#include <asm/io.h> 37#include <linux/ks8851_mll.h>
39 38
40#define DRV_NAME "ks8851_mll" 39#define DRV_NAME "ks8851_mll"
41 40
@@ -465,8 +464,7 @@ static int msg_enable;
465#define BE1 0x2000 /* Byte Enable 1 */ 464#define BE1 0x2000 /* Byte Enable 1 */
466#define BE0 0x1000 /* Byte Enable 0 */ 465#define BE0 0x1000 /* Byte Enable 0 */
467 466
468/** 467/* register read/write calls.
469 * register read/write calls.
470 * 468 *
471 * All these calls issue transactions to access the chip's registers. They 469 * All these calls issue transactions to access the chip's registers. They
472 * all require that the necessary lock is held to prevent accesses when the 470 * all require that the necessary lock is held to prevent accesses when the
@@ -1103,7 +1101,7 @@ static void ks_set_grpaddr(struct ks_net *ks)
1103 } 1101 }
1104} /* ks_set_grpaddr */ 1102} /* ks_set_grpaddr */
1105 1103
1106/* 1104/**
1107* ks_clear_mcast - clear multicast information 1105* ks_clear_mcast - clear multicast information
1108* 1106*
1109* @ks : The chip information 1107* @ks : The chip information
@@ -1515,6 +1513,7 @@ static int __devinit ks8851_probe(struct platform_device *pdev)
1515 struct net_device *netdev; 1513 struct net_device *netdev;
1516 struct ks_net *ks; 1514 struct ks_net *ks;
1517 u16 id, data; 1515 u16 id, data;
1516 struct ks8851_mll_platform_data *pdata;
1518 1517
1519 io_d = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1518 io_d = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1520 io_c = platform_get_resource(pdev, IORESOURCE_MEM, 1); 1519 io_c = platform_get_resource(pdev, IORESOURCE_MEM, 1);
@@ -1596,17 +1595,27 @@ static int __devinit ks8851_probe(struct platform_device *pdev)
1596 ks_disable_qmu(ks); 1595 ks_disable_qmu(ks);
1597 ks_setup(ks); 1596 ks_setup(ks);
1598 ks_setup_int(ks); 1597 ks_setup_int(ks);
1599 memcpy(netdev->dev_addr, ks->mac_addr, 6);
1600 1598
1601 data = ks_rdreg16(ks, KS_OBCR); 1599 data = ks_rdreg16(ks, KS_OBCR);
1602 ks_wrreg16(ks, KS_OBCR, data | OBCR_ODS_16MA); 1600 ks_wrreg16(ks, KS_OBCR, data | OBCR_ODS_16MA);
1603 1601
1604 /** 1602 /* overwriting the default MAC address */
1605 * If you want to use the default MAC addr, 1603 pdata = pdev->dev.platform_data;
1606 * comment out the 2 functions below. 1604 if (!pdata) {
1607 */ 1605 netdev_err(netdev, "No platform data\n");
1606 err = -ENODEV;
1607 goto err_pdata;
1608 }
1609 memcpy(ks->mac_addr, pdata->mac_addr, 6);
1610 if (!is_valid_ether_addr(ks->mac_addr)) {
1611 /* Use random MAC address if none passed */
1612 random_ether_addr(ks->mac_addr);
1613 netdev_info(netdev, "Using random mac address\n");
1614 }
1615 netdev_info(netdev, "Mac address is: %pM\n", ks->mac_addr);
1616
1617 memcpy(netdev->dev_addr, ks->mac_addr, 6);
1608 1618
1609 random_ether_addr(netdev->dev_addr);
1610 ks_set_mac(ks, netdev->dev_addr); 1619 ks_set_mac(ks, netdev->dev_addr);
1611 1620
1612 id = ks_rdreg16(ks, KS_CIDER); 1621 id = ks_rdreg16(ks, KS_CIDER);
@@ -1615,6 +1624,8 @@ static int __devinit ks8851_probe(struct platform_device *pdev)
1615 (id >> 8) & 0xff, (id >> 4) & 0xf, (id >> 1) & 0x7); 1624 (id >> 8) & 0xff, (id >> 4) & 0xf, (id >> 1) & 0x7);
1616 return 0; 1625 return 0;
1617 1626
1627err_pdata:
1628 unregister_netdev(netdev);
1618err_register: 1629err_register:
1619err_get_irq: 1630err_get_irq:
1620 iounmap(ks->hw_addr_cmd); 1631 iounmap(ks->hw_addr_cmd);
diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c
index eaf9ff0262a..37b44b91950 100644
--- a/drivers/net/ethernet/micrel/ksz884x.c
+++ b/drivers/net/ethernet/micrel/ksz884x.c
@@ -3913,7 +3913,7 @@ static void hw_start_rx(struct ksz_hw *hw)
3913 hw->rx_stop = 2; 3913 hw->rx_stop = 2;
3914} 3914}
3915 3915
3916/* 3916/**
3917 * hw_stop_rx - stop receiving 3917 * hw_stop_rx - stop receiving
3918 * @hw: The hardware instance. 3918 * @hw: The hardware instance.
3919 * 3919 *
@@ -4480,14 +4480,12 @@ static void ksz_init_rx_buffers(struct dev_info *adapter)
4480 dma_buf->len = adapter->mtu; 4480 dma_buf->len = adapter->mtu;
4481 if (!dma_buf->skb) 4481 if (!dma_buf->skb)
4482 dma_buf->skb = alloc_skb(dma_buf->len, GFP_ATOMIC); 4482 dma_buf->skb = alloc_skb(dma_buf->len, GFP_ATOMIC);
4483 if (dma_buf->skb && !dma_buf->dma) { 4483 if (dma_buf->skb && !dma_buf->dma)
4484 dma_buf->skb->dev = adapter->dev;
4485 dma_buf->dma = pci_map_single( 4484 dma_buf->dma = pci_map_single(
4486 adapter->pdev, 4485 adapter->pdev,
4487 skb_tail_pointer(dma_buf->skb), 4486 skb_tail_pointer(dma_buf->skb),
4488 dma_buf->len, 4487 dma_buf->len,
4489 PCI_DMA_FROMDEVICE); 4488 PCI_DMA_FROMDEVICE);
4490 }
4491 4489
4492 /* Set descriptor. */ 4490 /* Set descriptor. */
4493 set_rx_buf(desc, dma_buf->dma); 4491 set_rx_buf(desc, dma_buf->dma);
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index 90153fc983c..fa85cf1353f 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -3775,7 +3775,7 @@ static void myri10ge_probe_slices(struct myri10ge_priv *mgp)
3775 3775
3776 mgp->num_slices = 1; 3776 mgp->num_slices = 1;
3777 msix_cap = pci_find_capability(pdev, PCI_CAP_ID_MSIX); 3777 msix_cap = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
3778 ncpus = num_online_cpus(); 3778 ncpus = netif_get_num_default_rss_queues();
3779 3779
3780 if (myri10ge_max_slices == 1 || msix_cap == 0 || 3780 if (myri10ge_max_slices == 1 || msix_cap == 0 ||
3781 (myri10ge_max_slices == -1 && ncpus < 2)) 3781 (myri10ge_max_slices == -1 && ncpus < 2))
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index bb367582c1e..d958c229937 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -3377,7 +3377,7 @@ static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit,
3377 } while (cnt < 20); 3377 } while (cnt < 20);
3378 return ret; 3378 return ret;
3379} 3379}
3380/* 3380/**
3381 * check_pci_device_id - Checks if the device id is supported 3381 * check_pci_device_id - Checks if the device id is supported
3382 * @id : device id 3382 * @id : device id
3383 * Description: Function to check if the pci device id is supported by driver. 3383 * Description: Function to check if the pci device id is supported by driver.
@@ -5238,7 +5238,7 @@ static u64 do_s2io_read_unicast_mc(struct s2io_nic *sp, int offset)
5238} 5238}
5239 5239
5240/** 5240/**
5241 * s2io_set_mac_addr driver entry point 5241 * s2io_set_mac_addr - driver entry point
5242 */ 5242 */
5243 5243
5244static int s2io_set_mac_addr(struct net_device *dev, void *p) 5244static int s2io_set_mac_addr(struct net_device *dev, void *p)
@@ -6088,7 +6088,7 @@ static int s2io_bist_test(struct s2io_nic *sp, uint64_t *data)
6088} 6088}
6089 6089
6090/** 6090/**
6091 * s2io-link_test - verifies the link state of the nic 6091 * s2io_link_test - verifies the link state of the nic
6092 * @sp ; private member of the device structure, which is a pointer to the 6092 * @sp ; private member of the device structure, which is a pointer to the
6093 * s2io_nic structure. 6093 * s2io_nic structure.
6094 * @data: variable that returns the result of each of the test conducted by 6094 * @data: variable that returns the result of each of the test conducted by
@@ -6116,9 +6116,9 @@ static int s2io_link_test(struct s2io_nic *sp, uint64_t *data)
6116 6116
6117/** 6117/**
6118 * s2io_rldram_test - offline test for access to the RldRam chip on the NIC 6118 * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
6119 * @sp - private member of the device structure, which is a pointer to the 6119 * @sp: private member of the device structure, which is a pointer to the
6120 * s2io_nic structure. 6120 * s2io_nic structure.
6121 * @data - variable that returns the result of each of the test 6121 * @data: variable that returns the result of each of the test
6122 * conducted by the driver. 6122 * conducted by the driver.
6123 * Description: 6123 * Description:
6124 * This is one of the offline test that tests the read and write 6124 * This is one of the offline test that tests the read and write
@@ -6946,9 +6946,9 @@ static int rxd_owner_bit_reset(struct s2io_nic *sp)
6946 if (sp->rxd_mode == RXD_MODE_3B) 6946 if (sp->rxd_mode == RXD_MODE_3B)
6947 ba = &ring->ba[j][k]; 6947 ba = &ring->ba[j][k];
6948 if (set_rxd_buffer_pointer(sp, rxdp, ba, &skb, 6948 if (set_rxd_buffer_pointer(sp, rxdp, ba, &skb,
6949 (u64 *)&temp0_64, 6949 &temp0_64,
6950 (u64 *)&temp1_64, 6950 &temp1_64,
6951 (u64 *)&temp2_64, 6951 &temp2_64,
6952 size) == -ENOMEM) { 6952 size) == -ENOMEM) {
6953 return 0; 6953 return 0;
6954 } 6954 }
@@ -7149,7 +7149,7 @@ static int s2io_card_up(struct s2io_nic *sp)
7149 int i, ret = 0; 7149 int i, ret = 0;
7150 struct config_param *config; 7150 struct config_param *config;
7151 struct mac_info *mac_control; 7151 struct mac_info *mac_control;
7152 struct net_device *dev = (struct net_device *)sp->dev; 7152 struct net_device *dev = sp->dev;
7153 u16 interruptible; 7153 u16 interruptible;
7154 7154
7155 /* Initialize the H/W I/O registers */ 7155 /* Initialize the H/W I/O registers */
@@ -7325,7 +7325,7 @@ static void s2io_tx_watchdog(struct net_device *dev)
7325static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp) 7325static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
7326{ 7326{
7327 struct s2io_nic *sp = ring_data->nic; 7327 struct s2io_nic *sp = ring_data->nic;
7328 struct net_device *dev = (struct net_device *)ring_data->dev; 7328 struct net_device *dev = ring_data->dev;
7329 struct sk_buff *skb = (struct sk_buff *) 7329 struct sk_buff *skb = (struct sk_buff *)
7330 ((unsigned long)rxdp->Host_Control); 7330 ((unsigned long)rxdp->Host_Control);
7331 int ring_no = ring_data->ring_no; 7331 int ring_no = ring_data->ring_no;
@@ -7508,7 +7508,7 @@ aggregate:
7508 7508
7509static void s2io_link(struct s2io_nic *sp, int link) 7509static void s2io_link(struct s2io_nic *sp, int link)
7510{ 7510{
7511 struct net_device *dev = (struct net_device *)sp->dev; 7511 struct net_device *dev = sp->dev;
7512 struct swStat *swstats = &sp->mac_control.stats_info->sw_stat; 7512 struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
7513 7513
7514 if (link != sp->last_link_state) { 7514 if (link != sp->last_link_state) {
@@ -8280,7 +8280,7 @@ static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip,
8280 return -1; 8280 return -1;
8281 } 8281 }
8282 8282
8283 *ip = (struct iphdr *)((u8 *)buffer + ip_off); 8283 *ip = (struct iphdr *)(buffer + ip_off);
8284 ip_len = (u8)((*ip)->ihl); 8284 ip_len = (u8)((*ip)->ihl);
8285 ip_len <<= 2; 8285 ip_len <<= 2;
8286 *tcp = (struct tcphdr *)((unsigned long)*ip + ip_len); 8286 *tcp = (struct tcphdr *)((unsigned long)*ip + ip_len);
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c
index 98e2c10ae08..32d06824fe3 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-config.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c
@@ -2346,7 +2346,7 @@ void __vxge_hw_blockpool_blocks_add(struct __vxge_hw_blockpool *blockpool)
2346 2346
2347 for (i = 0; i < nreq; i++) 2347 for (i = 0; i < nreq; i++)
2348 vxge_os_dma_malloc_async( 2348 vxge_os_dma_malloc_async(
2349 ((struct __vxge_hw_device *)blockpool->hldev)->pdev, 2349 (blockpool->hldev)->pdev,
2350 blockpool->hldev, VXGE_HW_BLOCK_SIZE); 2350 blockpool->hldev, VXGE_HW_BLOCK_SIZE);
2351} 2351}
2352 2352
@@ -2428,13 +2428,13 @@ __vxge_hw_blockpool_blocks_remove(struct __vxge_hw_blockpool *blockpool)
2428 break; 2428 break;
2429 2429
2430 pci_unmap_single( 2430 pci_unmap_single(
2431 ((struct __vxge_hw_device *)blockpool->hldev)->pdev, 2431 (blockpool->hldev)->pdev,
2432 ((struct __vxge_hw_blockpool_entry *)p)->dma_addr, 2432 ((struct __vxge_hw_blockpool_entry *)p)->dma_addr,
2433 ((struct __vxge_hw_blockpool_entry *)p)->length, 2433 ((struct __vxge_hw_blockpool_entry *)p)->length,
2434 PCI_DMA_BIDIRECTIONAL); 2434 PCI_DMA_BIDIRECTIONAL);
2435 2435
2436 vxge_os_dma_free( 2436 vxge_os_dma_free(
2437 ((struct __vxge_hw_device *)blockpool->hldev)->pdev, 2437 (blockpool->hldev)->pdev,
2438 ((struct __vxge_hw_blockpool_entry *)p)->memblock, 2438 ((struct __vxge_hw_blockpool_entry *)p)->memblock,
2439 &((struct __vxge_hw_blockpool_entry *)p)->acc_handle); 2439 &((struct __vxge_hw_blockpool_entry *)p)->acc_handle);
2440 2440
@@ -4059,7 +4059,7 @@ __vxge_hw_vpath_sw_reset(struct __vxge_hw_device *hldev, u32 vp_id)
4059 enum vxge_hw_status status = VXGE_HW_OK; 4059 enum vxge_hw_status status = VXGE_HW_OK;
4060 struct __vxge_hw_virtualpath *vpath; 4060 struct __vxge_hw_virtualpath *vpath;
4061 4061
4062 vpath = (struct __vxge_hw_virtualpath *)&hldev->virtual_paths[vp_id]; 4062 vpath = &hldev->virtual_paths[vp_id];
4063 4063
4064 if (vpath->ringh) { 4064 if (vpath->ringh) {
4065 status = __vxge_hw_ring_reset(vpath->ringh); 4065 status = __vxge_hw_ring_reset(vpath->ringh);
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.h b/drivers/net/ethernet/neterion/vxge/vxge-config.h
index 5046a64f0fe..9e0c1eed5dc 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-config.h
+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.h
@@ -1922,7 +1922,7 @@ realloc:
1922 /* misaligned, free current one and try allocating 1922 /* misaligned, free current one and try allocating
1923 * size + VXGE_CACHE_LINE_SIZE memory 1923 * size + VXGE_CACHE_LINE_SIZE memory
1924 */ 1924 */
1925 kfree((void *) vaddr); 1925 kfree(vaddr);
1926 size += VXGE_CACHE_LINE_SIZE; 1926 size += VXGE_CACHE_LINE_SIZE;
1927 realloc_flag = 1; 1927 realloc_flag = 1;
1928 goto realloc; 1928 goto realloc;
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c
index 51387c31914..4e20c5f0271 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c
@@ -1134,7 +1134,7 @@ static void vxge_set_multicast(struct net_device *dev)
1134 "%s:%d", __func__, __LINE__); 1134 "%s:%d", __func__, __LINE__);
1135 1135
1136 vdev = netdev_priv(dev); 1136 vdev = netdev_priv(dev);
1137 hldev = (struct __vxge_hw_device *)vdev->devh; 1137 hldev = vdev->devh;
1138 1138
1139 if (unlikely(!is_vxge_card_up(vdev))) 1139 if (unlikely(!is_vxge_card_up(vdev)))
1140 return; 1140 return;
@@ -3687,7 +3687,8 @@ static int __devinit vxge_config_vpaths(
3687 return 0; 3687 return 0;
3688 3688
3689 if (!driver_config->g_no_cpus) 3689 if (!driver_config->g_no_cpus)
3690 driver_config->g_no_cpus = num_online_cpus(); 3690 driver_config->g_no_cpus =
3691 netif_get_num_default_rss_queues();
3691 3692
3692 driver_config->vpath_per_dev = driver_config->g_no_cpus >> 1; 3693 driver_config->vpath_per_dev = driver_config->g_no_cpus >> 1;
3693 if (!driver_config->vpath_per_dev) 3694 if (!driver_config->vpath_per_dev)
@@ -3989,16 +3990,16 @@ static void __devinit vxge_print_parm(struct vxgedev *vdev, u64 vpath_mask)
3989 continue; 3990 continue;
3990 vxge_debug_ll_config(VXGE_TRACE, 3991 vxge_debug_ll_config(VXGE_TRACE,
3991 "%s: MTU size - %d", vdev->ndev->name, 3992 "%s: MTU size - %d", vdev->ndev->name,
3992 ((struct __vxge_hw_device *)(vdev->devh))-> 3993 ((vdev->devh))->
3993 config.vp_config[i].mtu); 3994 config.vp_config[i].mtu);
3994 vxge_debug_init(VXGE_TRACE, 3995 vxge_debug_init(VXGE_TRACE,
3995 "%s: VLAN tag stripping %s", vdev->ndev->name, 3996 "%s: VLAN tag stripping %s", vdev->ndev->name,
3996 ((struct __vxge_hw_device *)(vdev->devh))-> 3997 ((vdev->devh))->
3997 config.vp_config[i].rpa_strip_vlan_tag 3998 config.vp_config[i].rpa_strip_vlan_tag
3998 ? "Enabled" : "Disabled"); 3999 ? "Enabled" : "Disabled");
3999 vxge_debug_ll_config(VXGE_TRACE, 4000 vxge_debug_ll_config(VXGE_TRACE,
4000 "%s: Max frags : %d", vdev->ndev->name, 4001 "%s: Max frags : %d", vdev->ndev->name,
4001 ((struct __vxge_hw_device *)(vdev->devh))-> 4002 ((vdev->devh))->
4002 config.vp_config[i].fifo.max_frags); 4003 config.vp_config[i].fifo.max_frags);
4003 break; 4004 break;
4004 } 4005 }
@@ -4260,9 +4261,7 @@ static int vxge_probe_fw_update(struct vxgedev *vdev)
4260 if (VXGE_FW_VER(VXGE_CERT_FW_VER_MAJOR, VXGE_CERT_FW_VER_MINOR, 0) > 4261 if (VXGE_FW_VER(VXGE_CERT_FW_VER_MAJOR, VXGE_CERT_FW_VER_MINOR, 0) >
4261 VXGE_FW_VER(maj, min, 0)) { 4262 VXGE_FW_VER(maj, min, 0)) {
4262 vxge_debug_init(VXGE_ERR, "%s: Firmware %d.%d.%d is too old to" 4263 vxge_debug_init(VXGE_ERR, "%s: Firmware %d.%d.%d is too old to"
4263 " be used with this driver.\n" 4264 " be used with this driver.",
4264 "Please get the latest version from "
4265 "ftp://ftp.s2io.com/pub/X3100-Drivers/FIRMWARE",
4266 VXGE_DRIVER_NAME, maj, min, bld); 4265 VXGE_DRIVER_NAME, maj, min, bld);
4267 return -EINVAL; 4266 return -EINVAL;
4268 } 4267 }
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.h b/drivers/net/ethernet/neterion/vxge/vxge-main.h
index 35f3e7552ec..36ca40f8f24 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.h
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.h
@@ -430,8 +430,7 @@ void vxge_initialize_ethtool_ops(struct net_device *ndev);
430enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev); 430enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev);
431int vxge_fw_upgrade(struct vxgedev *vdev, char *fw_name, int override); 431int vxge_fw_upgrade(struct vxgedev *vdev, char *fw_name, int override);
432 432
433/** 433/* #define VXGE_DEBUG_INIT: debug for initialization functions
434 * #define VXGE_DEBUG_INIT: debug for initialization functions
435 * #define VXGE_DEBUG_TX : debug transmit related functions 434 * #define VXGE_DEBUG_TX : debug transmit related functions
436 * #define VXGE_DEBUG_RX : debug recevice related functions 435 * #define VXGE_DEBUG_RX : debug recevice related functions
437 * #define VXGE_DEBUG_MEM : debug memory module 436 * #define VXGE_DEBUG_MEM : debug memory module
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-traffic.c b/drivers/net/ethernet/neterion/vxge/vxge-traffic.c
index 5954fa264da..99749bd07d7 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-traffic.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-traffic.c
@@ -533,8 +533,7 @@ __vxge_hw_device_handle_error(struct __vxge_hw_device *hldev, u32 vp_id,
533 533
534 /* notify driver */ 534 /* notify driver */
535 if (hldev->uld_callbacks->crit_err) 535 if (hldev->uld_callbacks->crit_err)
536 hldev->uld_callbacks->crit_err( 536 hldev->uld_callbacks->crit_err(hldev,
537 (struct __vxge_hw_device *)hldev,
538 type, vp_id); 537 type, vp_id);
539out: 538out:
540 539
@@ -1322,7 +1321,7 @@ enum vxge_hw_status vxge_hw_ring_rxd_next_completed(
1322 /* check whether it is not the end */ 1321 /* check whether it is not the end */
1323 if (!own || *t_code == VXGE_HW_RING_T_CODE_FRM_DROP) { 1322 if (!own || *t_code == VXGE_HW_RING_T_CODE_FRM_DROP) {
1324 1323
1325 vxge_assert(((struct vxge_hw_ring_rxd_1 *)rxdp)->host_control != 1324 vxge_assert((rxdp)->host_control !=
1326 0); 1325 0);
1327 1326
1328 ++ring->cmpl_cnt; 1327 ++ring->cmpl_cnt;
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index 928913c4f3f..8b7c5129c7e 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -3218,7 +3218,7 @@ static void nv_force_linkspeed(struct net_device *dev, int speed, int duplex)
3218} 3218}
3219 3219
3220/** 3220/**
3221 * nv_update_linkspeed: Setup the MAC according to the link partner 3221 * nv_update_linkspeed - Setup the MAC according to the link partner
3222 * @dev: Network device to be configured 3222 * @dev: Network device to be configured
3223 * 3223 *
3224 * The function queries the PHY and checks if there is a link partner. 3224 * The function queries the PHY and checks if there is a link partner.
@@ -3552,8 +3552,7 @@ static irqreturn_t nv_nic_irq(int foo, void *data)
3552 return IRQ_HANDLED; 3552 return IRQ_HANDLED;
3553} 3553}
3554 3554
3555/** 3555/* All _optimized functions are used to help increase performance
3556 * All _optimized functions are used to help increase performance
3557 * (reduce CPU and increase throughput). They use descripter version 3, 3556 * (reduce CPU and increase throughput). They use descripter version 3,
3558 * compiler directives, and reduce memory accesses. 3557 * compiler directives, and reduce memory accesses.
3559 */ 3558 */
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index 083d6715335..e7d2496a473 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -52,7 +52,6 @@
52 52
53#define MODNAME "lpc-eth" 53#define MODNAME "lpc-eth"
54#define DRV_VERSION "1.00" 54#define DRV_VERSION "1.00"
55#define PHYDEF_ADDR 0x00
56 55
57#define ENET_MAXF_SIZE 1536 56#define ENET_MAXF_SIZE 1536
58#define ENET_RX_DESC 48 57#define ENET_RX_DESC 48
@@ -416,9 +415,6 @@ static bool use_iram_for_net(struct device *dev)
416#define TXDESC_CONTROL_LAST (1 << 30) 415#define TXDESC_CONTROL_LAST (1 << 30)
417#define TXDESC_CONTROL_INT (1 << 31) 416#define TXDESC_CONTROL_INT (1 << 31)
418 417
419static int lpc_eth_hard_start_xmit(struct sk_buff *skb,
420 struct net_device *ndev);
421
422/* 418/*
423 * Structure of a TX/RX descriptors and RX status 419 * Structure of a TX/RX descriptors and RX status
424 */ 420 */
@@ -440,7 +436,7 @@ struct netdata_local {
440 spinlock_t lock; 436 spinlock_t lock;
441 void __iomem *net_base; 437 void __iomem *net_base;
442 u32 msg_enable; 438 u32 msg_enable;
443 struct sk_buff *skb[ENET_TX_DESC]; 439 unsigned int skblen[ENET_TX_DESC];
444 unsigned int last_tx_idx; 440 unsigned int last_tx_idx;
445 unsigned int num_used_tx_buffs; 441 unsigned int num_used_tx_buffs;
446 struct mii_bus *mii_bus; 442 struct mii_bus *mii_bus;
@@ -903,12 +899,11 @@ err_out:
903static void __lpc_handle_xmit(struct net_device *ndev) 899static void __lpc_handle_xmit(struct net_device *ndev)
904{ 900{
905 struct netdata_local *pldat = netdev_priv(ndev); 901 struct netdata_local *pldat = netdev_priv(ndev);
906 struct sk_buff *skb;
907 u32 txcidx, *ptxstat, txstat; 902 u32 txcidx, *ptxstat, txstat;
908 903
909 txcidx = readl(LPC_ENET_TXCONSUMEINDEX(pldat->net_base)); 904 txcidx = readl(LPC_ENET_TXCONSUMEINDEX(pldat->net_base));
910 while (pldat->last_tx_idx != txcidx) { 905 while (pldat->last_tx_idx != txcidx) {
911 skb = pldat->skb[pldat->last_tx_idx]; 906 unsigned int skblen = pldat->skblen[pldat->last_tx_idx];
912 907
913 /* A buffer is available, get buffer status */ 908 /* A buffer is available, get buffer status */
914 ptxstat = &pldat->tx_stat_v[pldat->last_tx_idx]; 909 ptxstat = &pldat->tx_stat_v[pldat->last_tx_idx];
@@ -945,9 +940,8 @@ static void __lpc_handle_xmit(struct net_device *ndev)
945 } else { 940 } else {
946 /* Update stats */ 941 /* Update stats */
947 ndev->stats.tx_packets++; 942 ndev->stats.tx_packets++;
948 ndev->stats.tx_bytes += skb->len; 943 ndev->stats.tx_bytes += skblen;
949 } 944 }
950 dev_kfree_skb_irq(skb);
951 945
952 txcidx = readl(LPC_ENET_TXCONSUMEINDEX(pldat->net_base)); 946 txcidx = readl(LPC_ENET_TXCONSUMEINDEX(pldat->net_base));
953 } 947 }
@@ -1132,7 +1126,7 @@ static int lpc_eth_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1132 memcpy(pldat->tx_buff_v + txidx * ENET_MAXF_SIZE, skb->data, len); 1126 memcpy(pldat->tx_buff_v + txidx * ENET_MAXF_SIZE, skb->data, len);
1133 1127
1134 /* Save the buffer and increment the buffer counter */ 1128 /* Save the buffer and increment the buffer counter */
1135 pldat->skb[txidx] = skb; 1129 pldat->skblen[txidx] = len;
1136 pldat->num_used_tx_buffs++; 1130 pldat->num_used_tx_buffs++;
1137 1131
1138 /* Start transmit */ 1132 /* Start transmit */
@@ -1147,6 +1141,7 @@ static int lpc_eth_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1147 1141
1148 spin_unlock_irq(&pldat->lock); 1142 spin_unlock_irq(&pldat->lock);
1149 1143
1144 dev_kfree_skb(skb);
1150 return NETDEV_TX_OK; 1145 return NETDEV_TX_OK;
1151} 1146}
1152 1147
@@ -1442,7 +1437,7 @@ static int lpc_eth_drv_probe(struct platform_device *pdev)
1442 res->start); 1437 res->start);
1443 netdev_dbg(ndev, "IO address size :%d\n", 1438 netdev_dbg(ndev, "IO address size :%d\n",
1444 res->end - res->start + 1); 1439 res->end - res->start + 1);
1445 netdev_err(ndev, "IO address (mapped) :0x%p\n", 1440 netdev_dbg(ndev, "IO address (mapped) :0x%p\n",
1446 pldat->net_base); 1441 pldat->net_base);
1447 netdev_dbg(ndev, "IRQ number :%d\n", ndev->irq); 1442 netdev_dbg(ndev, "IRQ number :%d\n", ndev->irq);
1448 netdev_dbg(ndev, "DMA buffer size :%d\n", pldat->dma_buff_size); 1443 netdev_dbg(ndev, "DMA buffer size :%d\n", pldat->dma_buff_size);
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c
index e48f084ad22..5ae03e815ee 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c
@@ -60,7 +60,7 @@ static void pch_gbe_plat_get_bus_info(struct pch_gbe_hw *hw)
60/** 60/**
61 * pch_gbe_plat_init_hw - Initialize hardware 61 * pch_gbe_plat_init_hw - Initialize hardware
62 * @hw: Pointer to the HW structure 62 * @hw: Pointer to the HW structure
63 * Returns 63 * Returns:
64 * 0: Successfully 64 * 0: Successfully
65 * Negative value: Failed-EBUSY 65 * Negative value: Failed-EBUSY
66 */ 66 */
@@ -108,7 +108,7 @@ static void pch_gbe_plat_init_function_pointers(struct pch_gbe_hw *hw)
108/** 108/**
109 * pch_gbe_hal_setup_init_funcs - Initializes function pointers 109 * pch_gbe_hal_setup_init_funcs - Initializes function pointers
110 * @hw: Pointer to the HW structure 110 * @hw: Pointer to the HW structure
111 * Returns 111 * Returns:
112 * 0: Successfully 112 * 0: Successfully
113 * ENOSYS: Function is not registered 113 * ENOSYS: Function is not registered
114 */ 114 */
@@ -137,7 +137,7 @@ inline void pch_gbe_hal_get_bus_info(struct pch_gbe_hw *hw)
137/** 137/**
138 * pch_gbe_hal_init_hw - Initialize hardware 138 * pch_gbe_hal_init_hw - Initialize hardware
139 * @hw: Pointer to the HW structure 139 * @hw: Pointer to the HW structure
140 * Returns 140 * Returns:
141 * 0: Successfully 141 * 0: Successfully
142 * ENOSYS: Function is not registered 142 * ENOSYS: Function is not registered
143 */ 143 */
@@ -155,7 +155,7 @@ inline s32 pch_gbe_hal_init_hw(struct pch_gbe_hw *hw)
155 * @hw: Pointer to the HW structure 155 * @hw: Pointer to the HW structure
156 * @offset: The register to read 156 * @offset: The register to read
157 * @data: The buffer to store the 16-bit read. 157 * @data: The buffer to store the 16-bit read.
158 * Returns 158 * Returns:
159 * 0: Successfully 159 * 0: Successfully
160 * Negative value: Failed 160 * Negative value: Failed
161 */ 161 */
@@ -172,7 +172,7 @@ inline s32 pch_gbe_hal_read_phy_reg(struct pch_gbe_hw *hw, u32 offset,
172 * @hw: Pointer to the HW structure 172 * @hw: Pointer to the HW structure
173 * @offset: The register to read 173 * @offset: The register to read
174 * @data: The value to write. 174 * @data: The value to write.
175 * Returns 175 * Returns:
176 * 0: Successfully 176 * 0: Successfully
177 * Negative value: Failed 177 * Negative value: Failed
178 */ 178 */
@@ -211,7 +211,7 @@ inline void pch_gbe_hal_phy_sw_reset(struct pch_gbe_hw *hw)
211/** 211/**
212 * pch_gbe_hal_read_mac_addr - Reads MAC address 212 * pch_gbe_hal_read_mac_addr - Reads MAC address
213 * @hw: Pointer to the HW structure 213 * @hw: Pointer to the HW structure
214 * Returns 214 * Returns:
215 * 0: Successfully 215 * 0: Successfully
216 * ENOSYS: Function is not registered 216 * ENOSYS: Function is not registered
217 */ 217 */
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
index ac4e72d529e..9dbf38c10a6 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
@@ -77,7 +77,7 @@ static const struct pch_gbe_stats pch_gbe_gstrings_stats[] = {
77 * pch_gbe_get_settings - Get device-specific settings 77 * pch_gbe_get_settings - Get device-specific settings
78 * @netdev: Network interface device structure 78 * @netdev: Network interface device structure
79 * @ecmd: Ethtool command 79 * @ecmd: Ethtool command
80 * Returns 80 * Returns:
81 * 0: Successful. 81 * 0: Successful.
82 * Negative value: Failed. 82 * Negative value: Failed.
83 */ 83 */
@@ -100,7 +100,7 @@ static int pch_gbe_get_settings(struct net_device *netdev,
100 * pch_gbe_set_settings - Set device-specific settings 100 * pch_gbe_set_settings - Set device-specific settings
101 * @netdev: Network interface device structure 101 * @netdev: Network interface device structure
102 * @ecmd: Ethtool command 102 * @ecmd: Ethtool command
103 * Returns 103 * Returns:
104 * 0: Successful. 104 * 0: Successful.
105 * Negative value: Failed. 105 * Negative value: Failed.
106 */ 106 */
@@ -220,7 +220,7 @@ static void pch_gbe_get_wol(struct net_device *netdev,
220 * pch_gbe_set_wol - Turn Wake-on-Lan on or off 220 * pch_gbe_set_wol - Turn Wake-on-Lan on or off
221 * @netdev: Network interface device structure 221 * @netdev: Network interface device structure
222 * @wol: Pointer of wake-on-Lan information straucture 222 * @wol: Pointer of wake-on-Lan information straucture
223 * Returns 223 * Returns:
224 * 0: Successful. 224 * 0: Successful.
225 * Negative value: Failed. 225 * Negative value: Failed.
226 */ 226 */
@@ -248,7 +248,7 @@ static int pch_gbe_set_wol(struct net_device *netdev,
248/** 248/**
249 * pch_gbe_nway_reset - Restart autonegotiation 249 * pch_gbe_nway_reset - Restart autonegotiation
250 * @netdev: Network interface device structure 250 * @netdev: Network interface device structure
251 * Returns 251 * Returns:
252 * 0: Successful. 252 * 0: Successful.
253 * Negative value: Failed. 253 * Negative value: Failed.
254 */ 254 */
@@ -398,7 +398,7 @@ static void pch_gbe_get_pauseparam(struct net_device *netdev,
398 * pch_gbe_set_pauseparam - Set pause paramters 398 * pch_gbe_set_pauseparam - Set pause paramters
399 * @netdev: Network interface device structure 399 * @netdev: Network interface device structure
400 * @pause: Pause parameters structure 400 * @pause: Pause parameters structure
401 * Returns 401 * Returns:
402 * 0: Successful. 402 * 0: Successful.
403 * Negative value: Failed. 403 * Negative value: Failed.
404 */ 404 */
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
index 3787c64ee71..b1006563f73 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@ -301,7 +301,7 @@ inline void pch_gbe_mac_load_mac_addr(struct pch_gbe_hw *hw)
301/** 301/**
302 * pch_gbe_mac_read_mac_addr - Read MAC address 302 * pch_gbe_mac_read_mac_addr - Read MAC address
303 * @hw: Pointer to the HW structure 303 * @hw: Pointer to the HW structure
304 * Returns 304 * Returns:
305 * 0: Successful. 305 * 0: Successful.
306 */ 306 */
307s32 pch_gbe_mac_read_mac_addr(struct pch_gbe_hw *hw) 307s32 pch_gbe_mac_read_mac_addr(struct pch_gbe_hw *hw)
@@ -483,7 +483,7 @@ static void pch_gbe_mac_mc_addr_list_update(struct pch_gbe_hw *hw,
483/** 483/**
484 * pch_gbe_mac_force_mac_fc - Force the MAC's flow control settings 484 * pch_gbe_mac_force_mac_fc - Force the MAC's flow control settings
485 * @hw: Pointer to the HW structure 485 * @hw: Pointer to the HW structure
486 * Returns 486 * Returns:
487 * 0: Successful. 487 * 0: Successful.
488 * Negative value: Failed. 488 * Negative value: Failed.
489 */ 489 */
@@ -639,7 +639,7 @@ static void pch_gbe_mac_set_pause_packet(struct pch_gbe_hw *hw)
639/** 639/**
640 * pch_gbe_alloc_queues - Allocate memory for all rings 640 * pch_gbe_alloc_queues - Allocate memory for all rings
641 * @adapter: Board private structure to initialize 641 * @adapter: Board private structure to initialize
642 * Returns 642 * Returns:
643 * 0: Successfully 643 * 0: Successfully
644 * Negative value: Failed 644 * Negative value: Failed
645 */ 645 */
@@ -670,7 +670,7 @@ static void pch_gbe_init_stats(struct pch_gbe_adapter *adapter)
670/** 670/**
671 * pch_gbe_init_phy - Initialize PHY 671 * pch_gbe_init_phy - Initialize PHY
672 * @adapter: Board private structure to initialize 672 * @adapter: Board private structure to initialize
673 * Returns 673 * Returns:
674 * 0: Successfully 674 * 0: Successfully
675 * Negative value: Failed 675 * Negative value: Failed
676 */ 676 */
@@ -720,7 +720,7 @@ static int pch_gbe_init_phy(struct pch_gbe_adapter *adapter)
720 * @netdev: Network interface device structure 720 * @netdev: Network interface device structure
721 * @addr: Phy ID 721 * @addr: Phy ID
722 * @reg: Access location 722 * @reg: Access location
723 * Returns 723 * Returns:
724 * 0: Successfully 724 * 0: Successfully
725 * Negative value: Failed 725 * Negative value: Failed
726 */ 726 */
@@ -1364,7 +1364,7 @@ static void pch_gbe_start_receive(struct pch_gbe_hw *hw)
1364 * pch_gbe_intr - Interrupt Handler 1364 * pch_gbe_intr - Interrupt Handler
1365 * @irq: Interrupt number 1365 * @irq: Interrupt number
1366 * @data: Pointer to a network interface device structure 1366 * @data: Pointer to a network interface device structure
1367 * Returns 1367 * Returns:
1368 * - IRQ_HANDLED: Our interrupt 1368 * - IRQ_HANDLED: Our interrupt
1369 * - IRQ_NONE: Not our interrupt 1369 * - IRQ_NONE: Not our interrupt
1370 */ 1370 */
@@ -1566,7 +1566,7 @@ static void pch_gbe_alloc_tx_buffers(struct pch_gbe_adapter *adapter,
1566 * pch_gbe_clean_tx - Reclaim resources after transmit completes 1566 * pch_gbe_clean_tx - Reclaim resources after transmit completes
1567 * @adapter: Board private structure 1567 * @adapter: Board private structure
1568 * @tx_ring: Tx descriptor ring 1568 * @tx_ring: Tx descriptor ring
1569 * Returns 1569 * Returns:
1570 * true: Cleaned the descriptor 1570 * true: Cleaned the descriptor
1571 * false: Not cleaned the descriptor 1571 * false: Not cleaned the descriptor
1572 */ 1572 */
@@ -1660,7 +1660,7 @@ pch_gbe_clean_tx(struct pch_gbe_adapter *adapter,
1660 * @rx_ring: Rx descriptor ring 1660 * @rx_ring: Rx descriptor ring
1661 * @work_done: Completed count 1661 * @work_done: Completed count
1662 * @work_to_do: Request count 1662 * @work_to_do: Request count
1663 * Returns 1663 * Returns:
1664 * true: Cleaned the descriptor 1664 * true: Cleaned the descriptor
1665 * false: Not cleaned the descriptor 1665 * false: Not cleaned the descriptor
1666 */ 1666 */
@@ -1775,7 +1775,7 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,
1775 * pch_gbe_setup_tx_resources - Allocate Tx resources (Descriptors) 1775 * pch_gbe_setup_tx_resources - Allocate Tx resources (Descriptors)
1776 * @adapter: Board private structure 1776 * @adapter: Board private structure
1777 * @tx_ring: Tx descriptor ring (for a specific queue) to setup 1777 * @tx_ring: Tx descriptor ring (for a specific queue) to setup
1778 * Returns 1778 * Returns:
1779 * 0: Successfully 1779 * 0: Successfully
1780 * Negative value: Failed 1780 * Negative value: Failed
1781 */ 1781 */
@@ -1822,7 +1822,7 @@ int pch_gbe_setup_tx_resources(struct pch_gbe_adapter *adapter,
1822 * pch_gbe_setup_rx_resources - Allocate Rx resources (Descriptors) 1822 * pch_gbe_setup_rx_resources - Allocate Rx resources (Descriptors)
1823 * @adapter: Board private structure 1823 * @adapter: Board private structure
1824 * @rx_ring: Rx descriptor ring (for a specific queue) to setup 1824 * @rx_ring: Rx descriptor ring (for a specific queue) to setup
1825 * Returns 1825 * Returns:
1826 * 0: Successfully 1826 * 0: Successfully
1827 * Negative value: Failed 1827 * Negative value: Failed
1828 */ 1828 */
@@ -1899,7 +1899,7 @@ void pch_gbe_free_rx_resources(struct pch_gbe_adapter *adapter,
1899/** 1899/**
1900 * pch_gbe_request_irq - Allocate an interrupt line 1900 * pch_gbe_request_irq - Allocate an interrupt line
1901 * @adapter: Board private structure 1901 * @adapter: Board private structure
1902 * Returns 1902 * Returns:
1903 * 0: Successfully 1903 * 0: Successfully
1904 * Negative value: Failed 1904 * Negative value: Failed
1905 */ 1905 */
@@ -1932,7 +1932,7 @@ static int pch_gbe_request_irq(struct pch_gbe_adapter *adapter)
1932/** 1932/**
1933 * pch_gbe_up - Up GbE network device 1933 * pch_gbe_up - Up GbE network device
1934 * @adapter: Board private structure 1934 * @adapter: Board private structure
1935 * Returns 1935 * Returns:
1936 * 0: Successfully 1936 * 0: Successfully
1937 * Negative value: Failed 1937 * Negative value: Failed
1938 */ 1938 */
@@ -2018,7 +2018,7 @@ void pch_gbe_down(struct pch_gbe_adapter *adapter)
2018/** 2018/**
2019 * pch_gbe_sw_init - Initialize general software structures (struct pch_gbe_adapter) 2019 * pch_gbe_sw_init - Initialize general software structures (struct pch_gbe_adapter)
2020 * @adapter: Board private structure to initialize 2020 * @adapter: Board private structure to initialize
2021 * Returns 2021 * Returns:
2022 * 0: Successfully 2022 * 0: Successfully
2023 * Negative value: Failed 2023 * Negative value: Failed
2024 */ 2024 */
@@ -2057,7 +2057,7 @@ static int pch_gbe_sw_init(struct pch_gbe_adapter *adapter)
2057/** 2057/**
2058 * pch_gbe_open - Called when a network interface is made active 2058 * pch_gbe_open - Called when a network interface is made active
2059 * @netdev: Network interface device structure 2059 * @netdev: Network interface device structure
2060 * Returns 2060 * Returns:
2061 * 0: Successfully 2061 * 0: Successfully
2062 * Negative value: Failed 2062 * Negative value: Failed
2063 */ 2063 */
@@ -2097,7 +2097,7 @@ err_setup_tx:
2097/** 2097/**
2098 * pch_gbe_stop - Disables a network interface 2098 * pch_gbe_stop - Disables a network interface
2099 * @netdev: Network interface device structure 2099 * @netdev: Network interface device structure
2100 * Returns 2100 * Returns:
2101 * 0: Successfully 2101 * 0: Successfully
2102 */ 2102 */
2103static int pch_gbe_stop(struct net_device *netdev) 2103static int pch_gbe_stop(struct net_device *netdev)
@@ -2117,7 +2117,7 @@ static int pch_gbe_stop(struct net_device *netdev)
2117 * pch_gbe_xmit_frame - Packet transmitting start 2117 * pch_gbe_xmit_frame - Packet transmitting start
2118 * @skb: Socket buffer structure 2118 * @skb: Socket buffer structure
2119 * @netdev: Network interface device structure 2119 * @netdev: Network interface device structure
2120 * Returns 2120 * Returns:
2121 * - NETDEV_TX_OK: Normal end 2121 * - NETDEV_TX_OK: Normal end
2122 * - NETDEV_TX_BUSY: Error end 2122 * - NETDEV_TX_BUSY: Error end
2123 */ 2123 */
@@ -2225,7 +2225,7 @@ static void pch_gbe_set_multi(struct net_device *netdev)
2225 * pch_gbe_set_mac - Change the Ethernet Address of the NIC 2225 * pch_gbe_set_mac - Change the Ethernet Address of the NIC
2226 * @netdev: Network interface device structure 2226 * @netdev: Network interface device structure
2227 * @addr: Pointer to an address structure 2227 * @addr: Pointer to an address structure
2228 * Returns 2228 * Returns:
2229 * 0: Successfully 2229 * 0: Successfully
2230 * -EADDRNOTAVAIL: Failed 2230 * -EADDRNOTAVAIL: Failed
2231 */ 2231 */
@@ -2256,7 +2256,7 @@ static int pch_gbe_set_mac(struct net_device *netdev, void *addr)
2256 * pch_gbe_change_mtu - Change the Maximum Transfer Unit 2256 * pch_gbe_change_mtu - Change the Maximum Transfer Unit
2257 * @netdev: Network interface device structure 2257 * @netdev: Network interface device structure
2258 * @new_mtu: New value for maximum frame size 2258 * @new_mtu: New value for maximum frame size
2259 * Returns 2259 * Returns:
2260 * 0: Successfully 2260 * 0: Successfully
2261 * -EINVAL: Failed 2261 * -EINVAL: Failed
2262 */ 2262 */
@@ -2309,7 +2309,7 @@ static int pch_gbe_change_mtu(struct net_device *netdev, int new_mtu)
2309 * pch_gbe_set_features - Reset device after features changed 2309 * pch_gbe_set_features - Reset device after features changed
2310 * @netdev: Network interface device structure 2310 * @netdev: Network interface device structure
2311 * @features: New features 2311 * @features: New features
2312 * Returns 2312 * Returns:
2313 * 0: HW state updated successfully 2313 * 0: HW state updated successfully
2314 */ 2314 */
2315static int pch_gbe_set_features(struct net_device *netdev, 2315static int pch_gbe_set_features(struct net_device *netdev,
@@ -2334,7 +2334,7 @@ static int pch_gbe_set_features(struct net_device *netdev,
2334 * @netdev: Network interface device structure 2334 * @netdev: Network interface device structure
2335 * @ifr: Pointer to ifr structure 2335 * @ifr: Pointer to ifr structure
2336 * @cmd: Control command 2336 * @cmd: Control command
2337 * Returns 2337 * Returns:
2338 * 0: Successfully 2338 * 0: Successfully
2339 * Negative value: Failed 2339 * Negative value: Failed
2340 */ 2340 */
@@ -2369,7 +2369,7 @@ static void pch_gbe_tx_timeout(struct net_device *netdev)
2369 * pch_gbe_napi_poll - NAPI receive and transfer polling callback 2369 * pch_gbe_napi_poll - NAPI receive and transfer polling callback
2370 * @napi: Pointer of polling device struct 2370 * @napi: Pointer of polling device struct
2371 * @budget: The maximum number of a packet 2371 * @budget: The maximum number of a packet
2372 * Returns 2372 * Returns:
2373 * false: Exit the polling mode 2373 * false: Exit the polling mode
2374 * true: Continue the polling mode 2374 * true: Continue the polling mode
2375 */ 2375 */
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c
index 29e23bec809..8653c3b81f8 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c
@@ -139,7 +139,7 @@ MODULE_PARM_DESC(XsumTX, "Disable or enable Transmit Checksum offload");
139/** 139/**
140 * pch_gbe_option - Force the MAC's flow control settings 140 * pch_gbe_option - Force the MAC's flow control settings
141 * @hw: Pointer to the HW structure 141 * @hw: Pointer to the HW structure
142 * Returns 142 * Returns:
143 * 0: Successful. 143 * 0: Successful.
144 * Negative value: Failed. 144 * Negative value: Failed.
145 */ 145 */
@@ -220,7 +220,7 @@ static const struct pch_gbe_opt_list fc_list[] = {
220 * @value: value 220 * @value: value
221 * @opt: option 221 * @opt: option
222 * @adapter: Board private structure 222 * @adapter: Board private structure
223 * Returns 223 * Returns:
224 * 0: Successful. 224 * 0: Successful.
225 * Negative value: Failed. 225 * Negative value: Failed.
226 */ 226 */
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
index 37ccbe54e62..eb3dfdbb642 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
@@ -53,8 +53,8 @@
53 53
54#define _NETXEN_NIC_LINUX_MAJOR 4 54#define _NETXEN_NIC_LINUX_MAJOR 4
55#define _NETXEN_NIC_LINUX_MINOR 0 55#define _NETXEN_NIC_LINUX_MINOR 0
56#define _NETXEN_NIC_LINUX_SUBVERSION 79 56#define _NETXEN_NIC_LINUX_SUBVERSION 80
57#define NETXEN_NIC_LINUX_VERSIONID "4.0.79" 57#define NETXEN_NIC_LINUX_VERSIONID "4.0.80"
58 58
59#define NETXEN_VERSION_CODE(a, b, c) (((a) << 24) + ((b) << 16) + (c)) 59#define NETXEN_VERSION_CODE(a, b, c) (((a) << 24) + ((b) << 16) + (c))
60#define _major(v) (((v) >> 24) & 0xff) 60#define _major(v) (((v) >> 24) & 0xff)
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
index 39730403782..10468e7932d 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
@@ -489,7 +489,7 @@ netxen_nic_get_pauseparam(struct net_device *dev,
489 int port = adapter->physical_port; 489 int port = adapter->physical_port;
490 490
491 if (adapter->ahw.port_type == NETXEN_NIC_GBE) { 491 if (adapter->ahw.port_type == NETXEN_NIC_GBE) {
492 if ((port < 0) || (port > NETXEN_NIU_MAX_GBE_PORTS)) 492 if ((port < 0) || (port >= NETXEN_NIU_MAX_GBE_PORTS))
493 return; 493 return;
494 /* get flow control settings */ 494 /* get flow control settings */
495 val = NXRD32(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port)); 495 val = NXRD32(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port));
@@ -511,7 +511,7 @@ netxen_nic_get_pauseparam(struct net_device *dev,
511 break; 511 break;
512 } 512 }
513 } else if (adapter->ahw.port_type == NETXEN_NIC_XGBE) { 513 } else if (adapter->ahw.port_type == NETXEN_NIC_XGBE) {
514 if ((port < 0) || (port > NETXEN_NIU_MAX_XG_PORTS)) 514 if ((port < 0) || (port >= NETXEN_NIU_MAX_XG_PORTS))
515 return; 515 return;
516 pause->rx_pause = 1; 516 pause->rx_pause = 1;
517 val = NXRD32(adapter, NETXEN_NIU_XG_PAUSE_CTL); 517 val = NXRD32(adapter, NETXEN_NIU_XG_PAUSE_CTL);
@@ -534,7 +534,7 @@ netxen_nic_set_pauseparam(struct net_device *dev,
534 int port = adapter->physical_port; 534 int port = adapter->physical_port;
535 /* read mode */ 535 /* read mode */
536 if (adapter->ahw.port_type == NETXEN_NIC_GBE) { 536 if (adapter->ahw.port_type == NETXEN_NIC_GBE) {
537 if ((port < 0) || (port > NETXEN_NIU_MAX_GBE_PORTS)) 537 if ((port < 0) || (port >= NETXEN_NIU_MAX_GBE_PORTS))
538 return -EIO; 538 return -EIO;
539 /* set flow control */ 539 /* set flow control */
540 val = NXRD32(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port)); 540 val = NXRD32(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port));
@@ -577,7 +577,7 @@ netxen_nic_set_pauseparam(struct net_device *dev,
577 } 577 }
578 NXWR32(adapter, NETXEN_NIU_GB_PAUSE_CTL, val); 578 NXWR32(adapter, NETXEN_NIU_GB_PAUSE_CTL, val);
579 } else if (adapter->ahw.port_type == NETXEN_NIC_XGBE) { 579 } else if (adapter->ahw.port_type == NETXEN_NIC_XGBE) {
580 if ((port < 0) || (port > NETXEN_NIU_MAX_XG_PORTS)) 580 if ((port < 0) || (port >= NETXEN_NIU_MAX_XG_PORTS))
581 return -EIO; 581 return -EIO;
582 val = NXRD32(adapter, NETXEN_NIU_XG_PAUSE_CTL); 582 val = NXRD32(adapter, NETXEN_NIU_XG_PAUSE_CTL);
583 if (port == 0) { 583 if (port == 0) {
@@ -826,7 +826,12 @@ netxen_get_dump_flag(struct net_device *netdev, struct ethtool_dump *dump)
826 dump->len = mdump->md_dump_size; 826 dump->len = mdump->md_dump_size;
827 else 827 else
828 dump->len = 0; 828 dump->len = 0;
829 dump->flag = mdump->md_capture_mask; 829
830 if (!mdump->md_enabled)
831 dump->flag = ETH_FW_DUMP_DISABLE;
832 else
833 dump->flag = mdump->md_capture_mask;
834
830 dump->version = adapter->fw_version; 835 dump->version = adapter->fw_version;
831 return 0; 836 return 0;
832} 837}
@@ -840,8 +845,10 @@ netxen_set_dump(struct net_device *netdev, struct ethtool_dump *val)
840 845
841 switch (val->flag) { 846 switch (val->flag) {
842 case NX_FORCE_FW_DUMP_KEY: 847 case NX_FORCE_FW_DUMP_KEY:
843 if (!mdump->md_enabled) 848 if (!mdump->md_enabled) {
844 mdump->md_enabled = 1; 849 netdev_info(netdev, "FW dump not enabled\n");
850 return 0;
851 }
845 if (adapter->fw_mdump_rdy) { 852 if (adapter->fw_mdump_rdy) {
846 netdev_info(netdev, "Previous dump not cleared, not forcing dump\n"); 853 netdev_info(netdev, "Previous dump not cleared, not forcing dump\n");
847 return 0; 854 return 0;
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
index de96a948bb7..946160fa584 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
@@ -365,7 +365,7 @@ static int netxen_niu_disable_xg_port(struct netxen_adapter *adapter)
365 if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) 365 if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
366 return 0; 366 return 0;
367 367
368 if (port > NETXEN_NIU_MAX_XG_PORTS) 368 if (port >= NETXEN_NIU_MAX_XG_PORTS)
369 return -EINVAL; 369 return -EINVAL;
370 370
371 mac_cfg = 0; 371 mac_cfg = 0;
@@ -392,7 +392,7 @@ static int netxen_p2_nic_set_promisc(struct netxen_adapter *adapter, u32 mode)
392 u32 port = adapter->physical_port; 392 u32 port = adapter->physical_port;
393 u16 board_type = adapter->ahw.board_type; 393 u16 board_type = adapter->ahw.board_type;
394 394
395 if (port > NETXEN_NIU_MAX_XG_PORTS) 395 if (port >= NETXEN_NIU_MAX_XG_PORTS)
396 return -EINVAL; 396 return -EINVAL;
397 397
398 mac_cfg = NXRD32(adapter, NETXEN_NIU_XGE_CONFIG_0 + (0x10000 * port)); 398 mac_cfg = NXRD32(adapter, NETXEN_NIU_XGE_CONFIG_0 + (0x10000 * port));
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
index 8694124ef77..b2c1b676477 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
@@ -1532,8 +1532,6 @@ static struct sk_buff *netxen_process_rxbuf(struct netxen_adapter *adapter,
1532 } else 1532 } else
1533 skb->ip_summed = CHECKSUM_NONE; 1533 skb->ip_summed = CHECKSUM_NONE;
1534 1534
1535 skb->dev = adapter->netdev;
1536
1537 buffer->skb = NULL; 1535 buffer->skb = NULL;
1538no_skb: 1536no_skb:
1539 buffer->state = NETXEN_BUFFER_FREE; 1537 buffer->state = NETXEN_BUFFER_FREE;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index 8680a5dae4a..eaa1db9fec3 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -36,8 +36,8 @@
36 36
37#define _QLCNIC_LINUX_MAJOR 5 37#define _QLCNIC_LINUX_MAJOR 5
38#define _QLCNIC_LINUX_MINOR 0 38#define _QLCNIC_LINUX_MINOR 0
39#define _QLCNIC_LINUX_SUBVERSION 28 39#define _QLCNIC_LINUX_SUBVERSION 29
40#define QLCNIC_LINUX_VERSIONID "5.0.28" 40#define QLCNIC_LINUX_VERSIONID "5.0.29"
41#define QLCNIC_DRV_IDC_VER 0x01 41#define QLCNIC_DRV_IDC_VER 0x01
42#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\ 42#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\
43 (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION)) 43 (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
@@ -258,6 +258,8 @@ struct rcv_desc {
258 (((sts_data) >> 52) & 0x1) 258 (((sts_data) >> 52) & 0x1)
259#define qlcnic_get_lro_sts_seq_number(sts_data) \ 259#define qlcnic_get_lro_sts_seq_number(sts_data) \
260 ((sts_data) & 0x0FFFFFFFF) 260 ((sts_data) & 0x0FFFFFFFF)
261#define qlcnic_get_lro_sts_mss(sts_data1) \
262 ((sts_data1 >> 32) & 0x0FFFF)
261 263
262 264
263struct status_desc { 265struct status_desc {
@@ -610,7 +612,11 @@ struct qlcnic_recv_context {
610#define QLCNIC_CDRP_CMD_GET_MAC_STATS 0x00000037 612#define QLCNIC_CDRP_CMD_GET_MAC_STATS 0x00000037
611 613
612#define QLCNIC_RCODE_SUCCESS 0 614#define QLCNIC_RCODE_SUCCESS 0
615#define QLCNIC_RCODE_INVALID_ARGS 6
613#define QLCNIC_RCODE_NOT_SUPPORTED 9 616#define QLCNIC_RCODE_NOT_SUPPORTED 9
617#define QLCNIC_RCODE_NOT_PERMITTED 10
618#define QLCNIC_RCODE_NOT_IMPL 15
619#define QLCNIC_RCODE_INVALID 16
614#define QLCNIC_RCODE_TIMEOUT 17 620#define QLCNIC_RCODE_TIMEOUT 17
615#define QLCNIC_DESTROY_CTX_RESET 0 621#define QLCNIC_DESTROY_CTX_RESET 0
616 622
@@ -623,6 +629,7 @@ struct qlcnic_recv_context {
623#define QLCNIC_CAP0_JUMBO_CONTIGUOUS (1 << 7) 629#define QLCNIC_CAP0_JUMBO_CONTIGUOUS (1 << 7)
624#define QLCNIC_CAP0_LRO_CONTIGUOUS (1 << 8) 630#define QLCNIC_CAP0_LRO_CONTIGUOUS (1 << 8)
625#define QLCNIC_CAP0_VALIDOFF (1 << 11) 631#define QLCNIC_CAP0_VALIDOFF (1 << 11)
632#define QLCNIC_CAP0_LRO_MSS (1 << 21)
626 633
627/* 634/*
628 * Context state 635 * Context state
@@ -829,6 +836,9 @@ struct qlcnic_mac_list_s {
829#define QLCNIC_FW_CAPABILITY_FVLANTX BIT_9 836#define QLCNIC_FW_CAPABILITY_FVLANTX BIT_9
830#define QLCNIC_FW_CAPABILITY_HW_LRO BIT_10 837#define QLCNIC_FW_CAPABILITY_HW_LRO BIT_10
831#define QLCNIC_FW_CAPABILITY_MULTI_LOOPBACK BIT_27 838#define QLCNIC_FW_CAPABILITY_MULTI_LOOPBACK BIT_27
839#define QLCNIC_FW_CAPABILITY_MORE_CAPS BIT_31
840
841#define QLCNIC_FW_CAPABILITY_2_LRO_MAX_TCP_SEG BIT_2
832 842
833/* module types */ 843/* module types */
834#define LINKEVENT_MODULE_NOT_PRESENT 1 844#define LINKEVENT_MODULE_NOT_PRESENT 1
@@ -918,6 +928,7 @@ struct qlcnic_ipaddr {
918#define QLCNIC_NEED_FLR 0x1000 928#define QLCNIC_NEED_FLR 0x1000
919#define QLCNIC_FW_RESET_OWNER 0x2000 929#define QLCNIC_FW_RESET_OWNER 0x2000
920#define QLCNIC_FW_HANG 0x4000 930#define QLCNIC_FW_HANG 0x4000
931#define QLCNIC_FW_LRO_MSS_CAP 0x8000
921#define QLCNIC_IS_MSI_FAMILY(adapter) \ 932#define QLCNIC_IS_MSI_FAMILY(adapter) \
922 ((adapter)->flags & (QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED)) 933 ((adapter)->flags & (QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED))
923 934
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
index 8db85244e8a..b8ead696141 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
@@ -53,12 +53,39 @@ qlcnic_issue_cmd(struct qlcnic_adapter *adapter, struct qlcnic_cmd_args *cmd)
53 rsp = qlcnic_poll_rsp(adapter); 53 rsp = qlcnic_poll_rsp(adapter);
54 54
55 if (rsp == QLCNIC_CDRP_RSP_TIMEOUT) { 55 if (rsp == QLCNIC_CDRP_RSP_TIMEOUT) {
56 dev_err(&pdev->dev, "card response timeout.\n"); 56 dev_err(&pdev->dev, "CDRP response timeout.\n");
57 cmd->rsp.cmd = QLCNIC_RCODE_TIMEOUT; 57 cmd->rsp.cmd = QLCNIC_RCODE_TIMEOUT;
58 } else if (rsp == QLCNIC_CDRP_RSP_FAIL) { 58 } else if (rsp == QLCNIC_CDRP_RSP_FAIL) {
59 cmd->rsp.cmd = QLCRD32(adapter, QLCNIC_ARG1_CRB_OFFSET); 59 cmd->rsp.cmd = QLCRD32(adapter, QLCNIC_ARG1_CRB_OFFSET);
60 dev_err(&pdev->dev, "failed card response code:0x%x\n", 60 switch (cmd->rsp.cmd) {
61 case QLCNIC_RCODE_INVALID_ARGS:
62 dev_err(&pdev->dev, "CDRP invalid args: 0x%x.\n",
61 cmd->rsp.cmd); 63 cmd->rsp.cmd);
64 break;
65 case QLCNIC_RCODE_NOT_SUPPORTED:
66 case QLCNIC_RCODE_NOT_IMPL:
67 dev_err(&pdev->dev,
68 "CDRP command not supported: 0x%x.\n",
69 cmd->rsp.cmd);
70 break;
71 case QLCNIC_RCODE_NOT_PERMITTED:
72 dev_err(&pdev->dev,
73 "CDRP requested action not permitted: 0x%x.\n",
74 cmd->rsp.cmd);
75 break;
76 case QLCNIC_RCODE_INVALID:
77 dev_err(&pdev->dev,
78 "CDRP invalid or unknown cmd received: 0x%x.\n",
79 cmd->rsp.cmd);
80 break;
81 case QLCNIC_RCODE_TIMEOUT:
82 dev_err(&pdev->dev, "CDRP command timeout: 0x%x.\n",
83 cmd->rsp.cmd);
84 break;
85 default:
86 dev_err(&pdev->dev, "CDRP command failed: 0x%x.\n",
87 cmd->rsp.cmd);
88 }
62 } else if (rsp == QLCNIC_CDRP_RSP_OK) { 89 } else if (rsp == QLCNIC_CDRP_RSP_OK) {
63 cmd->rsp.cmd = QLCNIC_RCODE_SUCCESS; 90 cmd->rsp.cmd = QLCNIC_RCODE_SUCCESS;
64 if (cmd->rsp.arg2) 91 if (cmd->rsp.arg2)
@@ -237,6 +264,9 @@ qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
237 | QLCNIC_CAP0_VALIDOFF); 264 | QLCNIC_CAP0_VALIDOFF);
238 cap |= (QLCNIC_CAP0_JUMBO_CONTIGUOUS | QLCNIC_CAP0_LRO_CONTIGUOUS); 265 cap |= (QLCNIC_CAP0_JUMBO_CONTIGUOUS | QLCNIC_CAP0_LRO_CONTIGUOUS);
239 266
267 if (adapter->flags & QLCNIC_FW_LRO_MSS_CAP)
268 cap |= QLCNIC_CAP0_LRO_MSS;
269
240 prq->valid_field_offset = offsetof(struct qlcnic_hostrq_rx_ctx, 270 prq->valid_field_offset = offsetof(struct qlcnic_hostrq_rx_ctx,
241 msix_handler); 271 msix_handler);
242 prq->txrx_sds_binding = nsds_rings - 1; 272 prq->txrx_sds_binding = nsds_rings - 1;
@@ -954,9 +984,6 @@ int qlcnic_get_mac_stats(struct qlcnic_adapter *adapter,
954 mac_stats->mac_rx_jabber = le64_to_cpu(stats->mac_rx_jabber); 984 mac_stats->mac_rx_jabber = le64_to_cpu(stats->mac_rx_jabber);
955 mac_stats->mac_rx_dropped = le64_to_cpu(stats->mac_rx_dropped); 985 mac_stats->mac_rx_dropped = le64_to_cpu(stats->mac_rx_dropped);
956 mac_stats->mac_rx_crc_error = le64_to_cpu(stats->mac_rx_crc_error); 986 mac_stats->mac_rx_crc_error = le64_to_cpu(stats->mac_rx_crc_error);
957 } else {
958 dev_info(&adapter->pdev->dev,
959 "%s: Get mac stats failed =%d.\n", __func__, err);
960 } 987 }
961 988
962 dma_free_coherent(&adapter->pdev->dev, stats_size, stats_addr, 989 dma_free_coherent(&adapter->pdev->dev, stats_size, stats_addr,
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h
index 6ced3195aad..28a6b28192e 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h
@@ -588,6 +588,7 @@ enum {
588#define CRB_DRIVER_VERSION (QLCNIC_REG(0x2a0)) 588#define CRB_DRIVER_VERSION (QLCNIC_REG(0x2a0))
589 589
590#define CRB_FW_CAPABILITIES_1 (QLCNIC_CAM_RAM(0x128)) 590#define CRB_FW_CAPABILITIES_1 (QLCNIC_CAM_RAM(0x128))
591#define CRB_FW_CAPABILITIES_2 (QLCNIC_CAM_RAM(0x12c))
591#define CRB_MAC_BLOCK_START (QLCNIC_CAM_RAM(0x1c0)) 592#define CRB_MAC_BLOCK_START (QLCNIC_CAM_RAM(0x1c0))
592 593
593/* 594/*
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
index 799fd40ed03..0bcda9c51e9 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
@@ -1488,8 +1488,6 @@ static struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *adapter,
1488 skb_checksum_none_assert(skb); 1488 skb_checksum_none_assert(skb);
1489 } 1489 }
1490 1490
1491 skb->dev = adapter->netdev;
1492
1493 buffer->skb = NULL; 1491 buffer->skb = NULL;
1494 1492
1495 return skb; 1493 return skb;
@@ -1653,6 +1651,9 @@ qlcnic_process_lro(struct qlcnic_adapter *adapter,
1653 1651
1654 length = skb->len; 1652 length = skb->len;
1655 1653
1654 if (adapter->flags & QLCNIC_FW_LRO_MSS_CAP)
1655 skb_shinfo(skb)->gso_size = qlcnic_get_lro_sts_mss(sts_data1);
1656
1656 if (vid != 0xffff) 1657 if (vid != 0xffff)
1657 __vlan_hwaccel_put_tag(skb, vid); 1658 __vlan_hwaccel_put_tag(skb, vid);
1658 netif_receive_skb(skb); 1659 netif_receive_skb(skb);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index ad98f4d7919..212c1219327 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -1136,6 +1136,8 @@ static int
1136__qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev) 1136__qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
1137{ 1137{
1138 int ring; 1138 int ring;
1139 u32 capab2;
1140
1139 struct qlcnic_host_rds_ring *rds_ring; 1141 struct qlcnic_host_rds_ring *rds_ring;
1140 1142
1141 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC) 1143 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
@@ -1146,6 +1148,12 @@ __qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
1146 if (qlcnic_set_eswitch_port_config(adapter)) 1148 if (qlcnic_set_eswitch_port_config(adapter))
1147 return -EIO; 1149 return -EIO;
1148 1150
1151 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_MORE_CAPS) {
1152 capab2 = QLCRD32(adapter, CRB_FW_CAPABILITIES_2);
1153 if (capab2 & QLCNIC_FW_CAPABILITY_2_LRO_MAX_TCP_SEG)
1154 adapter->flags |= QLCNIC_FW_LRO_MSS_CAP;
1155 }
1156
1149 if (qlcnic_fw_create_ctx(adapter)) 1157 if (qlcnic_fw_create_ctx(adapter))
1150 return -EIO; 1158 return -EIO;
1151 1159
@@ -1215,6 +1223,7 @@ __qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
1215 qlcnic_napi_disable(adapter); 1223 qlcnic_napi_disable(adapter);
1216 1224
1217 qlcnic_fw_destroy_ctx(adapter); 1225 qlcnic_fw_destroy_ctx(adapter);
1226 adapter->flags &= ~QLCNIC_FW_LRO_MSS_CAP;
1218 1227
1219 qlcnic_reset_rx_buffers_list(adapter); 1228 qlcnic_reset_rx_buffers_list(adapter);
1220 qlcnic_release_tx_buffers(adapter); 1229 qlcnic_release_tx_buffers(adapter);
@@ -2024,6 +2033,7 @@ qlcnic_tx_pkt(struct qlcnic_adapter *adapter,
2024 vh = (struct vlan_ethhdr *)skb->data; 2033 vh = (struct vlan_ethhdr *)skb->data;
2025 flags = FLAGS_VLAN_TAGGED; 2034 flags = FLAGS_VLAN_TAGGED;
2026 vlan_tci = vh->h_vlan_TCI; 2035 vlan_tci = vh->h_vlan_TCI;
2036 protocol = ntohs(vh->h_vlan_encapsulated_proto);
2027 } else if (vlan_tx_tag_present(skb)) { 2037 } else if (vlan_tx_tag_present(skb)) {
2028 flags = FLAGS_VLAN_OOB; 2038 flags = FLAGS_VLAN_OOB;
2029 vlan_tci = vlan_tx_tag_get(skb); 2039 vlan_tci = vlan_tx_tag_get(skb);
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge.h b/drivers/net/ethernet/qlogic/qlge/qlge.h
index 5a639df33f1..a131d7b5d2f 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge.h
+++ b/drivers/net/ethernet/qlogic/qlge/qlge.h
@@ -18,13 +18,15 @@
18 */ 18 */
19#define DRV_NAME "qlge" 19#define DRV_NAME "qlge"
20#define DRV_STRING "QLogic 10 Gigabit PCI-E Ethernet Driver " 20#define DRV_STRING "QLogic 10 Gigabit PCI-E Ethernet Driver "
21#define DRV_VERSION "v1.00.00.30.00.00-01" 21#define DRV_VERSION "v1.00.00.31"
22 22
23#define WQ_ADDR_ALIGN 0x3 /* 4 byte alignment */ 23#define WQ_ADDR_ALIGN 0x3 /* 4 byte alignment */
24 24
25#define QLGE_VENDOR_ID 0x1077 25#define QLGE_VENDOR_ID 0x1077
26#define QLGE_DEVICE_ID_8012 0x8012 26#define QLGE_DEVICE_ID_8012 0x8012
27#define QLGE_DEVICE_ID_8000 0x8000 27#define QLGE_DEVICE_ID_8000 0x8000
28#define QLGE_MEZZ_SSYS_ID_068 0x0068
29#define QLGE_MEZZ_SSYS_ID_180 0x0180
28#define MAX_CPUS 8 30#define MAX_CPUS 8
29#define MAX_TX_RINGS MAX_CPUS 31#define MAX_TX_RINGS MAX_CPUS
30#define MAX_RX_RINGS ((MAX_CPUS * 2) + 1) 32#define MAX_RX_RINGS ((MAX_CPUS * 2) + 1)
@@ -1397,7 +1399,6 @@ struct tx_ring {
1397 struct tx_ring_desc *q; /* descriptor list for the queue */ 1399 struct tx_ring_desc *q; /* descriptor list for the queue */
1398 spinlock_t lock; 1400 spinlock_t lock;
1399 atomic_t tx_count; /* counts down for every outstanding IO */ 1401 atomic_t tx_count; /* counts down for every outstanding IO */
1400 atomic_t queue_stopped; /* Turns queue off when full. */
1401 struct delayed_work tx_work; 1402 struct delayed_work tx_work;
1402 struct ql_adapter *qdev; 1403 struct ql_adapter *qdev;
1403 u64 tx_packets; 1404 u64 tx_packets;
@@ -1535,6 +1536,14 @@ struct nic_stats {
1535 u64 rx_1024_to_1518_pkts; 1536 u64 rx_1024_to_1518_pkts;
1536 u64 rx_1519_to_max_pkts; 1537 u64 rx_1519_to_max_pkts;
1537 u64 rx_len_err_pkts; 1538 u64 rx_len_err_pkts;
1539 /* Receive Mac Err stats */
1540 u64 rx_code_err;
1541 u64 rx_oversize_err;
1542 u64 rx_undersize_err;
1543 u64 rx_preamble_err;
1544 u64 rx_frame_len_err;
1545 u64 rx_crc_err;
1546 u64 rx_err_count;
1538 /* 1547 /*
1539 * These stats come from offset 500h to 5C8h 1548 * These stats come from offset 500h to 5C8h
1540 * in the XGMAC register. 1549 * in the XGMAC register.
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c b/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c
index 8e2c2a74f3a..3d4462bd9de 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c
@@ -35,10 +35,152 @@
35 35
36#include "qlge.h" 36#include "qlge.h"
37 37
38struct ql_stats {
39 char stat_string[ETH_GSTRING_LEN];
40 int sizeof_stat;
41 int stat_offset;
42};
43
44#define QL_SIZEOF(m) FIELD_SIZEOF(struct ql_adapter, m)
45#define QL_OFF(m) offsetof(struct ql_adapter, m)
46
47static const struct ql_stats ql_gstrings_stats[] = {
48 {"tx_pkts", QL_SIZEOF(nic_stats.tx_pkts), QL_OFF(nic_stats.tx_pkts)},
49 {"tx_bytes", QL_SIZEOF(nic_stats.tx_bytes), QL_OFF(nic_stats.tx_bytes)},
50 {"tx_mcast_pkts", QL_SIZEOF(nic_stats.tx_mcast_pkts),
51 QL_OFF(nic_stats.tx_mcast_pkts)},
52 {"tx_bcast_pkts", QL_SIZEOF(nic_stats.tx_bcast_pkts),
53 QL_OFF(nic_stats.tx_bcast_pkts)},
54 {"tx_ucast_pkts", QL_SIZEOF(nic_stats.tx_ucast_pkts),
55 QL_OFF(nic_stats.tx_ucast_pkts)},
56 {"tx_ctl_pkts", QL_SIZEOF(nic_stats.tx_ctl_pkts),
57 QL_OFF(nic_stats.tx_ctl_pkts)},
58 {"tx_pause_pkts", QL_SIZEOF(nic_stats.tx_pause_pkts),
59 QL_OFF(nic_stats.tx_pause_pkts)},
60 {"tx_64_pkts", QL_SIZEOF(nic_stats.tx_64_pkt),
61 QL_OFF(nic_stats.tx_64_pkt)},
62 {"tx_65_to_127_pkts", QL_SIZEOF(nic_stats.tx_65_to_127_pkt),
63 QL_OFF(nic_stats.tx_65_to_127_pkt)},
64 {"tx_128_to_255_pkts", QL_SIZEOF(nic_stats.tx_128_to_255_pkt),
65 QL_OFF(nic_stats.tx_128_to_255_pkt)},
66 {"tx_256_511_pkts", QL_SIZEOF(nic_stats.tx_256_511_pkt),
67 QL_OFF(nic_stats.tx_256_511_pkt)},
68 {"tx_512_to_1023_pkts", QL_SIZEOF(nic_stats.tx_512_to_1023_pkt),
69 QL_OFF(nic_stats.tx_512_to_1023_pkt)},
70 {"tx_1024_to_1518_pkts", QL_SIZEOF(nic_stats.tx_1024_to_1518_pkt),
71 QL_OFF(nic_stats.tx_1024_to_1518_pkt)},
72 {"tx_1519_to_max_pkts", QL_SIZEOF(nic_stats.tx_1519_to_max_pkt),
73 QL_OFF(nic_stats.tx_1519_to_max_pkt)},
74 {"tx_undersize_pkts", QL_SIZEOF(nic_stats.tx_undersize_pkt),
75 QL_OFF(nic_stats.tx_undersize_pkt)},
76 {"tx_oversize_pkts", QL_SIZEOF(nic_stats.tx_oversize_pkt),
77 QL_OFF(nic_stats.tx_oversize_pkt)},
78 {"rx_bytes", QL_SIZEOF(nic_stats.rx_bytes), QL_OFF(nic_stats.rx_bytes)},
79 {"rx_bytes_ok", QL_SIZEOF(nic_stats.rx_bytes_ok),
80 QL_OFF(nic_stats.rx_bytes_ok)},
81 {"rx_pkts", QL_SIZEOF(nic_stats.rx_pkts), QL_OFF(nic_stats.rx_pkts)},
82 {"rx_pkts_ok", QL_SIZEOF(nic_stats.rx_pkts_ok),
83 QL_OFF(nic_stats.rx_pkts_ok)},
84 {"rx_bcast_pkts", QL_SIZEOF(nic_stats.rx_bcast_pkts),
85 QL_OFF(nic_stats.rx_bcast_pkts)},
86 {"rx_mcast_pkts", QL_SIZEOF(nic_stats.rx_mcast_pkts),
87 QL_OFF(nic_stats.rx_mcast_pkts)},
88 {"rx_ucast_pkts", QL_SIZEOF(nic_stats.rx_ucast_pkts),
89 QL_OFF(nic_stats.rx_ucast_pkts)},
90 {"rx_undersize_pkts", QL_SIZEOF(nic_stats.rx_undersize_pkts),
91 QL_OFF(nic_stats.rx_undersize_pkts)},
92 {"rx_oversize_pkts", QL_SIZEOF(nic_stats.rx_oversize_pkts),
93 QL_OFF(nic_stats.rx_oversize_pkts)},
94 {"rx_jabber_pkts", QL_SIZEOF(nic_stats.rx_jabber_pkts),
95 QL_OFF(nic_stats.rx_jabber_pkts)},
96 {"rx_undersize_fcerr_pkts",
97 QL_SIZEOF(nic_stats.rx_undersize_fcerr_pkts),
98 QL_OFF(nic_stats.rx_undersize_fcerr_pkts)},
99 {"rx_drop_events", QL_SIZEOF(nic_stats.rx_drop_events),
100 QL_OFF(nic_stats.rx_drop_events)},
101 {"rx_fcerr_pkts", QL_SIZEOF(nic_stats.rx_fcerr_pkts),
102 QL_OFF(nic_stats.rx_fcerr_pkts)},
103 {"rx_align_err", QL_SIZEOF(nic_stats.rx_align_err),
104 QL_OFF(nic_stats.rx_align_err)},
105 {"rx_symbol_err", QL_SIZEOF(nic_stats.rx_symbol_err),
106 QL_OFF(nic_stats.rx_symbol_err)},
107 {"rx_mac_err", QL_SIZEOF(nic_stats.rx_mac_err),
108 QL_OFF(nic_stats.rx_mac_err)},
109 {"rx_ctl_pkts", QL_SIZEOF(nic_stats.rx_ctl_pkts),
110 QL_OFF(nic_stats.rx_ctl_pkts)},
111 {"rx_pause_pkts", QL_SIZEOF(nic_stats.rx_pause_pkts),
112 QL_OFF(nic_stats.rx_pause_pkts)},
113 {"rx_64_pkts", QL_SIZEOF(nic_stats.rx_64_pkts),
114 QL_OFF(nic_stats.rx_64_pkts)},
115 {"rx_65_to_127_pkts", QL_SIZEOF(nic_stats.rx_65_to_127_pkts),
116 QL_OFF(nic_stats.rx_65_to_127_pkts)},
117 {"rx_128_255_pkts", QL_SIZEOF(nic_stats.rx_128_255_pkts),
118 QL_OFF(nic_stats.rx_128_255_pkts)},
119 {"rx_256_511_pkts", QL_SIZEOF(nic_stats.rx_256_511_pkts),
120 QL_OFF(nic_stats.rx_256_511_pkts)},
121 {"rx_512_to_1023_pkts", QL_SIZEOF(nic_stats.rx_512_to_1023_pkts),
122 QL_OFF(nic_stats.rx_512_to_1023_pkts)},
123 {"rx_1024_to_1518_pkts", QL_SIZEOF(nic_stats.rx_1024_to_1518_pkts),
124 QL_OFF(nic_stats.rx_1024_to_1518_pkts)},
125 {"rx_1519_to_max_pkts", QL_SIZEOF(nic_stats.rx_1519_to_max_pkts),
126 QL_OFF(nic_stats.rx_1519_to_max_pkts)},
127 {"rx_len_err_pkts", QL_SIZEOF(nic_stats.rx_len_err_pkts),
128 QL_OFF(nic_stats.rx_len_err_pkts)},
129 {"rx_code_err", QL_SIZEOF(nic_stats.rx_code_err),
130 QL_OFF(nic_stats.rx_code_err)},
131 {"rx_oversize_err", QL_SIZEOF(nic_stats.rx_oversize_err),
132 QL_OFF(nic_stats.rx_oversize_err)},
133 {"rx_undersize_err", QL_SIZEOF(nic_stats.rx_undersize_err),
134 QL_OFF(nic_stats.rx_undersize_err)},
135 {"rx_preamble_err", QL_SIZEOF(nic_stats.rx_preamble_err),
136 QL_OFF(nic_stats.rx_preamble_err)},
137 {"rx_frame_len_err", QL_SIZEOF(nic_stats.rx_frame_len_err),
138 QL_OFF(nic_stats.rx_frame_len_err)},
139 {"rx_crc_err", QL_SIZEOF(nic_stats.rx_crc_err),
140 QL_OFF(nic_stats.rx_crc_err)},
141 {"rx_err_count", QL_SIZEOF(nic_stats.rx_err_count),
142 QL_OFF(nic_stats.rx_err_count)},
143 {"tx_cbfc_pause_frames0", QL_SIZEOF(nic_stats.tx_cbfc_pause_frames0),
144 QL_OFF(nic_stats.tx_cbfc_pause_frames0)},
145 {"tx_cbfc_pause_frames1", QL_SIZEOF(nic_stats.tx_cbfc_pause_frames1),
146 QL_OFF(nic_stats.tx_cbfc_pause_frames1)},
147 {"tx_cbfc_pause_frames2", QL_SIZEOF(nic_stats.tx_cbfc_pause_frames2),
148 QL_OFF(nic_stats.tx_cbfc_pause_frames2)},
149 {"tx_cbfc_pause_frames3", QL_SIZEOF(nic_stats.tx_cbfc_pause_frames3),
150 QL_OFF(nic_stats.tx_cbfc_pause_frames3)},
151 {"tx_cbfc_pause_frames4", QL_SIZEOF(nic_stats.tx_cbfc_pause_frames4),
152 QL_OFF(nic_stats.tx_cbfc_pause_frames4)},
153 {"tx_cbfc_pause_frames5", QL_SIZEOF(nic_stats.tx_cbfc_pause_frames5),
154 QL_OFF(nic_stats.tx_cbfc_pause_frames5)},
155 {"tx_cbfc_pause_frames6", QL_SIZEOF(nic_stats.tx_cbfc_pause_frames6),
156 QL_OFF(nic_stats.tx_cbfc_pause_frames6)},
157 {"tx_cbfc_pause_frames7", QL_SIZEOF(nic_stats.tx_cbfc_pause_frames7),
158 QL_OFF(nic_stats.tx_cbfc_pause_frames7)},
159 {"rx_cbfc_pause_frames0", QL_SIZEOF(nic_stats.rx_cbfc_pause_frames0),
160 QL_OFF(nic_stats.rx_cbfc_pause_frames0)},
161 {"rx_cbfc_pause_frames1", QL_SIZEOF(nic_stats.rx_cbfc_pause_frames1),
162 QL_OFF(nic_stats.rx_cbfc_pause_frames1)},
163 {"rx_cbfc_pause_frames2", QL_SIZEOF(nic_stats.rx_cbfc_pause_frames2),
164 QL_OFF(nic_stats.rx_cbfc_pause_frames2)},
165 {"rx_cbfc_pause_frames3", QL_SIZEOF(nic_stats.rx_cbfc_pause_frames3),
166 QL_OFF(nic_stats.rx_cbfc_pause_frames3)},
167 {"rx_cbfc_pause_frames4", QL_SIZEOF(nic_stats.rx_cbfc_pause_frames4),
168 QL_OFF(nic_stats.rx_cbfc_pause_frames4)},
169 {"rx_cbfc_pause_frames5", QL_SIZEOF(nic_stats.rx_cbfc_pause_frames5),
170 QL_OFF(nic_stats.rx_cbfc_pause_frames5)},
171 {"rx_cbfc_pause_frames6", QL_SIZEOF(nic_stats.rx_cbfc_pause_frames6),
172 QL_OFF(nic_stats.rx_cbfc_pause_frames6)},
173 {"rx_cbfc_pause_frames7", QL_SIZEOF(nic_stats.rx_cbfc_pause_frames7),
174 QL_OFF(nic_stats.rx_cbfc_pause_frames7)},
175 {"rx_nic_fifo_drop", QL_SIZEOF(nic_stats.rx_nic_fifo_drop),
176 QL_OFF(nic_stats.rx_nic_fifo_drop)},
177};
178
38static const char ql_gstrings_test[][ETH_GSTRING_LEN] = { 179static const char ql_gstrings_test[][ETH_GSTRING_LEN] = {
39 "Loopback test (offline)" 180 "Loopback test (offline)"
40}; 181};
41#define QLGE_TEST_LEN (sizeof(ql_gstrings_test) / ETH_GSTRING_LEN) 182#define QLGE_TEST_LEN (sizeof(ql_gstrings_test) / ETH_GSTRING_LEN)
183#define QLGE_STATS_LEN ARRAY_SIZE(ql_gstrings_stats)
42 184
43static int ql_update_ring_coalescing(struct ql_adapter *qdev) 185static int ql_update_ring_coalescing(struct ql_adapter *qdev)
44{ 186{
@@ -183,73 +325,19 @@ quit:
183 QL_DUMP_STAT(qdev); 325 QL_DUMP_STAT(qdev);
184} 326}
185 327
186static char ql_stats_str_arr[][ETH_GSTRING_LEN] = {
187 {"tx_pkts"},
188 {"tx_bytes"},
189 {"tx_mcast_pkts"},
190 {"tx_bcast_pkts"},
191 {"tx_ucast_pkts"},
192 {"tx_ctl_pkts"},
193 {"tx_pause_pkts"},
194 {"tx_64_pkts"},
195 {"tx_65_to_127_pkts"},
196 {"tx_128_to_255_pkts"},
197 {"tx_256_511_pkts"},
198 {"tx_512_to_1023_pkts"},
199 {"tx_1024_to_1518_pkts"},
200 {"tx_1519_to_max_pkts"},
201 {"tx_undersize_pkts"},
202 {"tx_oversize_pkts"},
203 {"rx_bytes"},
204 {"rx_bytes_ok"},
205 {"rx_pkts"},
206 {"rx_pkts_ok"},
207 {"rx_bcast_pkts"},
208 {"rx_mcast_pkts"},
209 {"rx_ucast_pkts"},
210 {"rx_undersize_pkts"},
211 {"rx_oversize_pkts"},
212 {"rx_jabber_pkts"},
213 {"rx_undersize_fcerr_pkts"},
214 {"rx_drop_events"},
215 {"rx_fcerr_pkts"},
216 {"rx_align_err"},
217 {"rx_symbol_err"},
218 {"rx_mac_err"},
219 {"rx_ctl_pkts"},
220 {"rx_pause_pkts"},
221 {"rx_64_pkts"},
222 {"rx_65_to_127_pkts"},
223 {"rx_128_255_pkts"},
224 {"rx_256_511_pkts"},
225 {"rx_512_to_1023_pkts"},
226 {"rx_1024_to_1518_pkts"},
227 {"rx_1519_to_max_pkts"},
228 {"rx_len_err_pkts"},
229 {"tx_cbfc_pause_frames0"},
230 {"tx_cbfc_pause_frames1"},
231 {"tx_cbfc_pause_frames2"},
232 {"tx_cbfc_pause_frames3"},
233 {"tx_cbfc_pause_frames4"},
234 {"tx_cbfc_pause_frames5"},
235 {"tx_cbfc_pause_frames6"},
236 {"tx_cbfc_pause_frames7"},
237 {"rx_cbfc_pause_frames0"},
238 {"rx_cbfc_pause_frames1"},
239 {"rx_cbfc_pause_frames2"},
240 {"rx_cbfc_pause_frames3"},
241 {"rx_cbfc_pause_frames4"},
242 {"rx_cbfc_pause_frames5"},
243 {"rx_cbfc_pause_frames6"},
244 {"rx_cbfc_pause_frames7"},
245 {"rx_nic_fifo_drop"},
246};
247
248static void ql_get_strings(struct net_device *dev, u32 stringset, u8 *buf) 328static void ql_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
249{ 329{
330 int index;
250 switch (stringset) { 331 switch (stringset) {
332 case ETH_SS_TEST:
333 memcpy(buf, *ql_gstrings_test, QLGE_TEST_LEN * ETH_GSTRING_LEN);
334 break;
251 case ETH_SS_STATS: 335 case ETH_SS_STATS:
252 memcpy(buf, ql_stats_str_arr, sizeof(ql_stats_str_arr)); 336 for (index = 0; index < QLGE_STATS_LEN; index++) {
337 memcpy(buf + index * ETH_GSTRING_LEN,
338 ql_gstrings_stats[index].stat_string,
339 ETH_GSTRING_LEN);
340 }
253 break; 341 break;
254 } 342 }
255} 343}
@@ -260,7 +348,7 @@ static int ql_get_sset_count(struct net_device *dev, int sset)
260 case ETH_SS_TEST: 348 case ETH_SS_TEST:
261 return QLGE_TEST_LEN; 349 return QLGE_TEST_LEN;
262 case ETH_SS_STATS: 350 case ETH_SS_STATS:
263 return ARRAY_SIZE(ql_stats_str_arr); 351 return QLGE_STATS_LEN;
264 default: 352 default:
265 return -EOPNOTSUPP; 353 return -EOPNOTSUPP;
266 } 354 }
@@ -271,69 +359,17 @@ ql_get_ethtool_stats(struct net_device *ndev,
271 struct ethtool_stats *stats, u64 *data) 359 struct ethtool_stats *stats, u64 *data)
272{ 360{
273 struct ql_adapter *qdev = netdev_priv(ndev); 361 struct ql_adapter *qdev = netdev_priv(ndev);
274 struct nic_stats *s = &qdev->nic_stats; 362 int index, length;
275 363
364 length = QLGE_STATS_LEN;
276 ql_update_stats(qdev); 365 ql_update_stats(qdev);
277 366
278 *data++ = s->tx_pkts; 367 for (index = 0; index < length; index++) {
279 *data++ = s->tx_bytes; 368 char *p = (char *)qdev +
280 *data++ = s->tx_mcast_pkts; 369 ql_gstrings_stats[index].stat_offset;
281 *data++ = s->tx_bcast_pkts; 370 *data++ = (ql_gstrings_stats[index].sizeof_stat ==
282 *data++ = s->tx_ucast_pkts; 371 sizeof(u64)) ? *(u64 *)p : (*(u32 *)p);
283 *data++ = s->tx_ctl_pkts; 372 }
284 *data++ = s->tx_pause_pkts;
285 *data++ = s->tx_64_pkt;
286 *data++ = s->tx_65_to_127_pkt;
287 *data++ = s->tx_128_to_255_pkt;
288 *data++ = s->tx_256_511_pkt;
289 *data++ = s->tx_512_to_1023_pkt;
290 *data++ = s->tx_1024_to_1518_pkt;
291 *data++ = s->tx_1519_to_max_pkt;
292 *data++ = s->tx_undersize_pkt;
293 *data++ = s->tx_oversize_pkt;
294 *data++ = s->rx_bytes;
295 *data++ = s->rx_bytes_ok;
296 *data++ = s->rx_pkts;
297 *data++ = s->rx_pkts_ok;
298 *data++ = s->rx_bcast_pkts;
299 *data++ = s->rx_mcast_pkts;
300 *data++ = s->rx_ucast_pkts;
301 *data++ = s->rx_undersize_pkts;
302 *data++ = s->rx_oversize_pkts;
303 *data++ = s->rx_jabber_pkts;
304 *data++ = s->rx_undersize_fcerr_pkts;
305 *data++ = s->rx_drop_events;
306 *data++ = s->rx_fcerr_pkts;
307 *data++ = s->rx_align_err;
308 *data++ = s->rx_symbol_err;
309 *data++ = s->rx_mac_err;
310 *data++ = s->rx_ctl_pkts;
311 *data++ = s->rx_pause_pkts;
312 *data++ = s->rx_64_pkts;
313 *data++ = s->rx_65_to_127_pkts;
314 *data++ = s->rx_128_255_pkts;
315 *data++ = s->rx_256_511_pkts;
316 *data++ = s->rx_512_to_1023_pkts;
317 *data++ = s->rx_1024_to_1518_pkts;
318 *data++ = s->rx_1519_to_max_pkts;
319 *data++ = s->rx_len_err_pkts;
320 *data++ = s->tx_cbfc_pause_frames0;
321 *data++ = s->tx_cbfc_pause_frames1;
322 *data++ = s->tx_cbfc_pause_frames2;
323 *data++ = s->tx_cbfc_pause_frames3;
324 *data++ = s->tx_cbfc_pause_frames4;
325 *data++ = s->tx_cbfc_pause_frames5;
326 *data++ = s->tx_cbfc_pause_frames6;
327 *data++ = s->tx_cbfc_pause_frames7;
328 *data++ = s->rx_cbfc_pause_frames0;
329 *data++ = s->rx_cbfc_pause_frames1;
330 *data++ = s->rx_cbfc_pause_frames2;
331 *data++ = s->rx_cbfc_pause_frames3;
332 *data++ = s->rx_cbfc_pause_frames4;
333 *data++ = s->rx_cbfc_pause_frames5;
334 *data++ = s->rx_cbfc_pause_frames6;
335 *data++ = s->rx_cbfc_pause_frames7;
336 *data++ = s->rx_nic_fifo_drop;
337} 373}
338 374
339static int ql_get_settings(struct net_device *ndev, 375static int ql_get_settings(struct net_device *ndev,
@@ -388,30 +424,33 @@ static void ql_get_drvinfo(struct net_device *ndev,
388static void ql_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) 424static void ql_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
389{ 425{
390 struct ql_adapter *qdev = netdev_priv(ndev); 426 struct ql_adapter *qdev = netdev_priv(ndev);
391 /* What we support. */ 427 unsigned short ssys_dev = qdev->pdev->subsystem_device;
392 wol->supported = WAKE_MAGIC; 428
393 /* What we've currently got set. */ 429 /* WOL is only supported for mezz card. */
394 wol->wolopts = qdev->wol; 430 if (ssys_dev == QLGE_MEZZ_SSYS_ID_068 ||
431 ssys_dev == QLGE_MEZZ_SSYS_ID_180) {
432 wol->supported = WAKE_MAGIC;
433 wol->wolopts = qdev->wol;
434 }
395} 435}
396 436
397static int ql_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) 437static int ql_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
398{ 438{
399 struct ql_adapter *qdev = netdev_priv(ndev); 439 struct ql_adapter *qdev = netdev_priv(ndev);
400 int status; 440 unsigned short ssys_dev = qdev->pdev->subsystem_device;
401 441
442 /* WOL is only supported for mezz card. */
443 if (ssys_dev != QLGE_MEZZ_SSYS_ID_068 ||
444 ssys_dev != QLGE_MEZZ_SSYS_ID_180) {
445 netif_info(qdev, drv, qdev->ndev,
446 "WOL is only supported for mezz card\n");
447 return -EOPNOTSUPP;
448 }
402 if (wol->wolopts & ~WAKE_MAGIC) 449 if (wol->wolopts & ~WAKE_MAGIC)
403 return -EINVAL; 450 return -EINVAL;
404 qdev->wol = wol->wolopts; 451 qdev->wol = wol->wolopts;
405 452
406 netif_info(qdev, drv, qdev->ndev, "Set wol option 0x%x\n", qdev->wol); 453 netif_info(qdev, drv, qdev->ndev, "Set wol option 0x%x\n", qdev->wol);
407 if (!qdev->wol) {
408 u32 wol = 0;
409 status = ql_mb_wol_mode(qdev, wol);
410 netif_err(qdev, drv, qdev->ndev, "WOL %s (wol code 0x%x)\n",
411 status == 0 ? "cleared successfully" : "clear failed",
412 wol);
413 }
414
415 return 0; 454 return 0;
416} 455}
417 456
@@ -528,6 +567,8 @@ static void ql_self_test(struct net_device *ndev,
528{ 567{
529 struct ql_adapter *qdev = netdev_priv(ndev); 568 struct ql_adapter *qdev = netdev_priv(ndev);
530 569
570 memset(data, 0, sizeof(u64) * QLGE_TEST_LEN);
571
531 if (netif_running(ndev)) { 572 if (netif_running(ndev)) {
532 set_bit(QL_SELFTEST, &qdev->flags); 573 set_bit(QL_SELFTEST, &qdev->flags);
533 if (eth_test->flags == ETH_TEST_FL_OFFLINE) { 574 if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index 09d8d33171d..31cae42498a 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -1433,6 +1433,36 @@ map_error:
1433 return NETDEV_TX_BUSY; 1433 return NETDEV_TX_BUSY;
1434} 1434}
1435 1435
1436/* Categorizing receive firmware frame errors */
1437static void ql_categorize_rx_err(struct ql_adapter *qdev, u8 rx_err)
1438{
1439 struct nic_stats *stats = &qdev->nic_stats;
1440
1441 stats->rx_err_count++;
1442
1443 switch (rx_err & IB_MAC_IOCB_RSP_ERR_MASK) {
1444 case IB_MAC_IOCB_RSP_ERR_CODE_ERR:
1445 stats->rx_code_err++;
1446 break;
1447 case IB_MAC_IOCB_RSP_ERR_OVERSIZE:
1448 stats->rx_oversize_err++;
1449 break;
1450 case IB_MAC_IOCB_RSP_ERR_UNDERSIZE:
1451 stats->rx_undersize_err++;
1452 break;
1453 case IB_MAC_IOCB_RSP_ERR_PREAMBLE:
1454 stats->rx_preamble_err++;
1455 break;
1456 case IB_MAC_IOCB_RSP_ERR_FRAME_LEN:
1457 stats->rx_frame_len_err++;
1458 break;
1459 case IB_MAC_IOCB_RSP_ERR_CRC:
1460 stats->rx_crc_err++;
1461 default:
1462 break;
1463 }
1464}
1465
1436/* Process an inbound completion from an rx ring. */ 1466/* Process an inbound completion from an rx ring. */
1437static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev, 1467static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
1438 struct rx_ring *rx_ring, 1468 struct rx_ring *rx_ring,
@@ -1499,15 +1529,6 @@ static void ql_process_mac_rx_page(struct ql_adapter *qdev,
1499 addr = lbq_desc->p.pg_chunk.va; 1529 addr = lbq_desc->p.pg_chunk.va;
1500 prefetch(addr); 1530 prefetch(addr);
1501 1531
1502
1503 /* Frame error, so drop the packet. */
1504 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1505 netif_info(qdev, drv, qdev->ndev,
1506 "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
1507 rx_ring->rx_errors++;
1508 goto err_out;
1509 }
1510
1511 /* The max framesize filter on this chip is set higher than 1532 /* The max framesize filter on this chip is set higher than
1512 * MTU since FCoE uses 2k frames. 1533 * MTU since FCoE uses 2k frames.
1513 */ 1534 */
@@ -1593,15 +1614,6 @@ static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
1593 memcpy(skb_put(new_skb, length), skb->data, length); 1614 memcpy(skb_put(new_skb, length), skb->data, length);
1594 skb = new_skb; 1615 skb = new_skb;
1595 1616
1596 /* Frame error, so drop the packet. */
1597 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1598 netif_info(qdev, drv, qdev->ndev,
1599 "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
1600 dev_kfree_skb_any(skb);
1601 rx_ring->rx_errors++;
1602 return;
1603 }
1604
1605 /* loopback self test for ethtool */ 1617 /* loopback self test for ethtool */
1606 if (test_bit(QL_SELFTEST, &qdev->flags)) { 1618 if (test_bit(QL_SELFTEST, &qdev->flags)) {
1607 ql_check_lb_frame(qdev, skb); 1619 ql_check_lb_frame(qdev, skb);
@@ -1619,7 +1631,6 @@ static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
1619 } 1631 }
1620 1632
1621 prefetch(skb->data); 1633 prefetch(skb->data);
1622 skb->dev = ndev;
1623 if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) { 1634 if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1624 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, 1635 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1625 "%s Multicast.\n", 1636 "%s Multicast.\n",
@@ -1908,15 +1919,6 @@ static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
1908 return; 1919 return;
1909 } 1920 }
1910 1921
1911 /* Frame error, so drop the packet. */
1912 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1913 netif_info(qdev, drv, qdev->ndev,
1914 "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
1915 dev_kfree_skb_any(skb);
1916 rx_ring->rx_errors++;
1917 return;
1918 }
1919
1920 /* The max framesize filter on this chip is set higher than 1922 /* The max framesize filter on this chip is set higher than
1921 * MTU since FCoE uses 2k frames. 1923 * MTU since FCoE uses 2k frames.
1922 */ 1924 */
@@ -1934,7 +1936,6 @@ static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
1934 } 1936 }
1935 1937
1936 prefetch(skb->data); 1938 prefetch(skb->data);
1937 skb->dev = ndev;
1938 if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) { 1939 if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1939 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n", 1940 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n",
1940 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) == 1941 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
@@ -1999,6 +2000,12 @@ static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
1999 2000
2000 QL_DUMP_IB_MAC_RSP(ib_mac_rsp); 2001 QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
2001 2002
2003 /* Frame error, so drop the packet. */
2004 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
2005 ql_categorize_rx_err(qdev, ib_mac_rsp->flags2);
2006 return (unsigned long)length;
2007 }
2008
2002 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) { 2009 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
2003 /* The data and headers are split into 2010 /* The data and headers are split into
2004 * separate buffers. 2011 * separate buffers.
@@ -2173,8 +2180,7 @@ static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
2173 ql_write_cq_idx(rx_ring); 2180 ql_write_cq_idx(rx_ring);
2174 tx_ring = &qdev->tx_ring[net_rsp->txq_idx]; 2181 tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
2175 if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) { 2182 if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
2176 if (atomic_read(&tx_ring->queue_stopped) && 2183 if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
2177 (atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
2178 /* 2184 /*
2179 * The queue got stopped because the tx_ring was full. 2185 * The queue got stopped because the tx_ring was full.
2180 * Wake it up, because it's now at least 25% empty. 2186 * Wake it up, because it's now at least 25% empty.
@@ -2558,10 +2564,9 @@ static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
2558 2564
2559 if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) { 2565 if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
2560 netif_info(qdev, tx_queued, qdev->ndev, 2566 netif_info(qdev, tx_queued, qdev->ndev,
2561 "%s: shutting down tx queue %d du to lack of resources.\n", 2567 "%s: BUG! shutting down tx queue %d due to lack of resources.\n",
2562 __func__, tx_ring_idx); 2568 __func__, tx_ring_idx);
2563 netif_stop_subqueue(ndev, tx_ring->wq_id); 2569 netif_stop_subqueue(ndev, tx_ring->wq_id);
2564 atomic_inc(&tx_ring->queue_stopped);
2565 tx_ring->tx_errors++; 2570 tx_ring->tx_errors++;
2566 return NETDEV_TX_BUSY; 2571 return NETDEV_TX_BUSY;
2567 } 2572 }
@@ -2612,6 +2617,16 @@ static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
2612 tx_ring->prod_idx, skb->len); 2617 tx_ring->prod_idx, skb->len);
2613 2618
2614 atomic_dec(&tx_ring->tx_count); 2619 atomic_dec(&tx_ring->tx_count);
2620
2621 if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
2622 netif_stop_subqueue(ndev, tx_ring->wq_id);
2623 if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
2624 /*
2625 * The queue got stopped because the tx_ring was full.
2626 * Wake it up, because it's now at least 25% empty.
2627 */
2628 netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
2629 }
2615 return NETDEV_TX_OK; 2630 return NETDEV_TX_OK;
2616} 2631}
2617 2632
@@ -2680,7 +2695,6 @@ static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
2680 tx_ring_desc++; 2695 tx_ring_desc++;
2681 } 2696 }
2682 atomic_set(&tx_ring->tx_count, tx_ring->wq_len); 2697 atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
2683 atomic_set(&tx_ring->queue_stopped, 0);
2684} 2698}
2685 2699
2686static void ql_free_tx_resources(struct ql_adapter *qdev, 2700static void ql_free_tx_resources(struct ql_adapter *qdev,
@@ -2703,10 +2717,9 @@ static int ql_alloc_tx_resources(struct ql_adapter *qdev,
2703 &tx_ring->wq_base_dma); 2717 &tx_ring->wq_base_dma);
2704 2718
2705 if ((tx_ring->wq_base == NULL) || 2719 if ((tx_ring->wq_base == NULL) ||
2706 tx_ring->wq_base_dma & WQ_ADDR_ALIGN) { 2720 tx_ring->wq_base_dma & WQ_ADDR_ALIGN)
2707 netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n"); 2721 goto pci_alloc_err;
2708 return -ENOMEM; 2722
2709 }
2710 tx_ring->q = 2723 tx_ring->q =
2711 kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL); 2724 kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL);
2712 if (tx_ring->q == NULL) 2725 if (tx_ring->q == NULL)
@@ -2716,6 +2729,9 @@ static int ql_alloc_tx_resources(struct ql_adapter *qdev,
2716err: 2729err:
2717 pci_free_consistent(qdev->pdev, tx_ring->wq_size, 2730 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2718 tx_ring->wq_base, tx_ring->wq_base_dma); 2731 tx_ring->wq_base, tx_ring->wq_base_dma);
2732 tx_ring->wq_base = NULL;
2733pci_alloc_err:
2734 netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n");
2719 return -ENOMEM; 2735 return -ENOMEM;
2720} 2736}
2721 2737
@@ -4649,7 +4665,7 @@ static int __devinit qlge_probe(struct pci_dev *pdev,
4649 int err = 0; 4665 int err = 0;
4650 4666
4651 ndev = alloc_etherdev_mq(sizeof(struct ql_adapter), 4667 ndev = alloc_etherdev_mq(sizeof(struct ql_adapter),
4652 min(MAX_CPUS, (int)num_online_cpus())); 4668 min(MAX_CPUS, netif_get_num_default_rss_queues()));
4653 if (!ndev) 4669 if (!ndev)
4654 return -ENOMEM; 4670 return -ENOMEM;
4655 4671
diff --git a/drivers/net/ethernet/rdc/r6040.c b/drivers/net/ethernet/rdc/r6040.c
index d1827e887f4..557a26545d7 100644
--- a/drivers/net/ethernet/rdc/r6040.c
+++ b/drivers/net/ethernet/rdc/r6040.c
@@ -1256,7 +1256,6 @@ static void __devexit r6040_remove_one(struct pci_dev *pdev)
1256 kfree(lp->mii_bus->irq); 1256 kfree(lp->mii_bus->irq);
1257 mdiobus_free(lp->mii_bus); 1257 mdiobus_free(lp->mii_bus);
1258 netif_napi_del(&lp->napi); 1258 netif_napi_del(&lp->napi);
1259 pci_set_drvdata(pdev, NULL);
1260 pci_iounmap(pdev, lp->base); 1259 pci_iounmap(pdev, lp->base);
1261 pci_release_regions(pdev); 1260 pci_release_regions(pdev);
1262 free_netdev(dev); 1261 free_netdev(dev);
@@ -1278,17 +1277,4 @@ static struct pci_driver r6040_driver = {
1278 .remove = __devexit_p(r6040_remove_one), 1277 .remove = __devexit_p(r6040_remove_one),
1279}; 1278};
1280 1279
1281 1280module_pci_driver(r6040_driver);
1282static int __init r6040_init(void)
1283{
1284 return pci_register_driver(&r6040_driver);
1285}
1286
1287
1288static void __exit r6040_cleanup(void)
1289{
1290 pci_unregister_driver(&r6040_driver);
1291}
1292
1293module_init(r6040_init);
1294module_exit(r6040_cleanup);
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index d7a04e09110..7ff3423edb9 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -46,6 +46,8 @@
46#define FIRMWARE_8105E_1 "rtl_nic/rtl8105e-1.fw" 46#define FIRMWARE_8105E_1 "rtl_nic/rtl8105e-1.fw"
47#define FIRMWARE_8402_1 "rtl_nic/rtl8402-1.fw" 47#define FIRMWARE_8402_1 "rtl_nic/rtl8402-1.fw"
48#define FIRMWARE_8411_1 "rtl_nic/rtl8411-1.fw" 48#define FIRMWARE_8411_1 "rtl_nic/rtl8411-1.fw"
49#define FIRMWARE_8106E_1 "rtl_nic/rtl8106e-1.fw"
50#define FIRMWARE_8168G_1 "rtl_nic/rtl8168g-1.fw"
49 51
50#ifdef RTL8169_DEBUG 52#ifdef RTL8169_DEBUG
51#define assert(expr) \ 53#define assert(expr) \
@@ -141,6 +143,9 @@ enum mac_version {
141 RTL_GIGA_MAC_VER_36, 143 RTL_GIGA_MAC_VER_36,
142 RTL_GIGA_MAC_VER_37, 144 RTL_GIGA_MAC_VER_37,
143 RTL_GIGA_MAC_VER_38, 145 RTL_GIGA_MAC_VER_38,
146 RTL_GIGA_MAC_VER_39,
147 RTL_GIGA_MAC_VER_40,
148 RTL_GIGA_MAC_VER_41,
144 RTL_GIGA_MAC_NONE = 0xff, 149 RTL_GIGA_MAC_NONE = 0xff,
145}; 150};
146 151
@@ -259,6 +264,14 @@ static const struct {
259 [RTL_GIGA_MAC_VER_38] = 264 [RTL_GIGA_MAC_VER_38] =
260 _R("RTL8411", RTL_TD_1, FIRMWARE_8411_1, 265 _R("RTL8411", RTL_TD_1, FIRMWARE_8411_1,
261 JUMBO_9K, false), 266 JUMBO_9K, false),
267 [RTL_GIGA_MAC_VER_39] =
268 _R("RTL8106e", RTL_TD_1, FIRMWARE_8106E_1,
269 JUMBO_1K, true),
270 [RTL_GIGA_MAC_VER_40] =
271 _R("RTL8168g/8111g", RTL_TD_1, FIRMWARE_8168G_1,
272 JUMBO_9K, false),
273 [RTL_GIGA_MAC_VER_41] =
274 _R("RTL8168g/8111g", RTL_TD_1, NULL, JUMBO_9K, false),
262}; 275};
263#undef _R 276#undef _R
264 277
@@ -389,8 +402,12 @@ enum rtl8168_8101_registers {
389 TWSI = 0xd2, 402 TWSI = 0xd2,
390 MCU = 0xd3, 403 MCU = 0xd3,
391#define NOW_IS_OOB (1 << 7) 404#define NOW_IS_OOB (1 << 7)
405#define TX_EMPTY (1 << 5)
406#define RX_EMPTY (1 << 4)
407#define RXTX_EMPTY (TX_EMPTY | RX_EMPTY)
392#define EN_NDP (1 << 3) 408#define EN_NDP (1 << 3)
393#define EN_OOB_RESET (1 << 2) 409#define EN_OOB_RESET (1 << 2)
410#define LINK_LIST_RDY (1 << 1)
394 EFUSEAR = 0xdc, 411 EFUSEAR = 0xdc,
395#define EFUSEAR_FLAG 0x80000000 412#define EFUSEAR_FLAG 0x80000000
396#define EFUSEAR_WRITE_CMD 0x80000000 413#define EFUSEAR_WRITE_CMD 0x80000000
@@ -416,6 +433,7 @@ enum rtl8168_registers {
416#define ERIAR_MASK_SHIFT 12 433#define ERIAR_MASK_SHIFT 12
417#define ERIAR_MASK_0001 (0x1 << ERIAR_MASK_SHIFT) 434#define ERIAR_MASK_0001 (0x1 << ERIAR_MASK_SHIFT)
418#define ERIAR_MASK_0011 (0x3 << ERIAR_MASK_SHIFT) 435#define ERIAR_MASK_0011 (0x3 << ERIAR_MASK_SHIFT)
436#define ERIAR_MASK_0101 (0x5 << ERIAR_MASK_SHIFT)
419#define ERIAR_MASK_1111 (0xf << ERIAR_MASK_SHIFT) 437#define ERIAR_MASK_1111 (0xf << ERIAR_MASK_SHIFT)
420 EPHY_RXER_NUM = 0x7c, 438 EPHY_RXER_NUM = 0x7c,
421 OCPDR = 0xb0, /* OCP GPHY access */ 439 OCPDR = 0xb0, /* OCP GPHY access */
@@ -428,10 +446,14 @@ enum rtl8168_registers {
428#define OCPAR_FLAG 0x80000000 446#define OCPAR_FLAG 0x80000000
429#define OCPAR_GPHY_WRITE_CMD 0x8000f060 447#define OCPAR_GPHY_WRITE_CMD 0x8000f060
430#define OCPAR_GPHY_READ_CMD 0x0000f060 448#define OCPAR_GPHY_READ_CMD 0x0000f060
449 GPHY_OCP = 0xb8,
431 RDSAR1 = 0xd0, /* 8168c only. Undocumented on 8168dp */ 450 RDSAR1 = 0xd0, /* 8168c only. Undocumented on 8168dp */
432 MISC = 0xf0, /* 8168e only. */ 451 MISC = 0xf0, /* 8168e only. */
433#define TXPLA_RST (1 << 29) 452#define TXPLA_RST (1 << 29)
453#define DISABLE_LAN_EN (1 << 23) /* Enable GPIO pin */
434#define PWM_EN (1 << 22) 454#define PWM_EN (1 << 22)
455#define RXDV_GATED_EN (1 << 19)
456#define EARLY_TALLY_EN (1 << 16)
435}; 457};
436 458
437enum rtl_register_content { 459enum rtl_register_content {
@@ -721,8 +743,8 @@ struct rtl8169_private {
721 u16 event_slow; 743 u16 event_slow;
722 744
723 struct mdio_ops { 745 struct mdio_ops {
724 void (*write)(void __iomem *, int, int); 746 void (*write)(struct rtl8169_private *, int, int);
725 int (*read)(void __iomem *, int); 747 int (*read)(struct rtl8169_private *, int);
726 } mdio_ops; 748 } mdio_ops;
727 749
728 struct pll_power_ops { 750 struct pll_power_ops {
@@ -736,8 +758,8 @@ struct rtl8169_private {
736 } jumbo_ops; 758 } jumbo_ops;
737 759
738 struct csi_ops { 760 struct csi_ops {
739 void (*write)(void __iomem *, int, int); 761 void (*write)(struct rtl8169_private *, int, int);
740 u32 (*read)(void __iomem *, int); 762 u32 (*read)(struct rtl8169_private *, int);
741 } csi_ops; 763 } csi_ops;
742 764
743 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv); 765 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
@@ -774,6 +796,8 @@ struct rtl8169_private {
774 } phy_action; 796 } phy_action;
775 } *rtl_fw; 797 } *rtl_fw;
776#define RTL_FIRMWARE_UNKNOWN ERR_PTR(-EAGAIN) 798#define RTL_FIRMWARE_UNKNOWN ERR_PTR(-EAGAIN)
799
800 u32 ocp_base;
777}; 801};
778 802
779MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>"); 803MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>");
@@ -794,6 +818,8 @@ MODULE_FIRMWARE(FIRMWARE_8168F_1);
794MODULE_FIRMWARE(FIRMWARE_8168F_2); 818MODULE_FIRMWARE(FIRMWARE_8168F_2);
795MODULE_FIRMWARE(FIRMWARE_8402_1); 819MODULE_FIRMWARE(FIRMWARE_8402_1);
796MODULE_FIRMWARE(FIRMWARE_8411_1); 820MODULE_FIRMWARE(FIRMWARE_8411_1);
821MODULE_FIRMWARE(FIRMWARE_8106E_1);
822MODULE_FIRMWARE(FIRMWARE_8168G_1);
797 823
798static void rtl_lock_work(struct rtl8169_private *tp) 824static void rtl_lock_work(struct rtl8169_private *tp)
799{ 825{
@@ -818,47 +844,113 @@ static void rtl_tx_performance_tweak(struct pci_dev *pdev, u16 force)
818 } 844 }
819} 845}
820 846
847struct rtl_cond {
848 bool (*check)(struct rtl8169_private *);
849 const char *msg;
850};
851
852static void rtl_udelay(unsigned int d)
853{
854 udelay(d);
855}
856
857static bool rtl_loop_wait(struct rtl8169_private *tp, const struct rtl_cond *c,
858 void (*delay)(unsigned int), unsigned int d, int n,
859 bool high)
860{
861 int i;
862
863 for (i = 0; i < n; i++) {
864 delay(d);
865 if (c->check(tp) == high)
866 return true;
867 }
868 netif_err(tp, drv, tp->dev, c->msg);
869 return false;
870}
871
872static bool rtl_udelay_loop_wait_high(struct rtl8169_private *tp,
873 const struct rtl_cond *c,
874 unsigned int d, int n)
875{
876 return rtl_loop_wait(tp, c, rtl_udelay, d, n, true);
877}
878
879static bool rtl_udelay_loop_wait_low(struct rtl8169_private *tp,
880 const struct rtl_cond *c,
881 unsigned int d, int n)
882{
883 return rtl_loop_wait(tp, c, rtl_udelay, d, n, false);
884}
885
886static bool rtl_msleep_loop_wait_high(struct rtl8169_private *tp,
887 const struct rtl_cond *c,
888 unsigned int d, int n)
889{
890 return rtl_loop_wait(tp, c, msleep, d, n, true);
891}
892
893static bool rtl_msleep_loop_wait_low(struct rtl8169_private *tp,
894 const struct rtl_cond *c,
895 unsigned int d, int n)
896{
897 return rtl_loop_wait(tp, c, msleep, d, n, false);
898}
899
900#define DECLARE_RTL_COND(name) \
901static bool name ## _check(struct rtl8169_private *); \
902 \
903static const struct rtl_cond name = { \
904 .check = name ## _check, \
905 .msg = #name \
906}; \
907 \
908static bool name ## _check(struct rtl8169_private *tp)
909
910DECLARE_RTL_COND(rtl_ocpar_cond)
911{
912 void __iomem *ioaddr = tp->mmio_addr;
913
914 return RTL_R32(OCPAR) & OCPAR_FLAG;
915}
916
821static u32 ocp_read(struct rtl8169_private *tp, u8 mask, u16 reg) 917static u32 ocp_read(struct rtl8169_private *tp, u8 mask, u16 reg)
822{ 918{
823 void __iomem *ioaddr = tp->mmio_addr; 919 void __iomem *ioaddr = tp->mmio_addr;
824 int i;
825 920
826 RTL_W32(OCPAR, ((u32)mask & 0x0f) << 12 | (reg & 0x0fff)); 921 RTL_W32(OCPAR, ((u32)mask & 0x0f) << 12 | (reg & 0x0fff));
827 for (i = 0; i < 20; i++) { 922
828 udelay(100); 923 return rtl_udelay_loop_wait_high(tp, &rtl_ocpar_cond, 100, 20) ?
829 if (RTL_R32(OCPAR) & OCPAR_FLAG) 924 RTL_R32(OCPDR) : ~0;
830 break;
831 }
832 return RTL_R32(OCPDR);
833} 925}
834 926
835static void ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg, u32 data) 927static void ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg, u32 data)
836{ 928{
837 void __iomem *ioaddr = tp->mmio_addr; 929 void __iomem *ioaddr = tp->mmio_addr;
838 int i;
839 930
840 RTL_W32(OCPDR, data); 931 RTL_W32(OCPDR, data);
841 RTL_W32(OCPAR, OCPAR_FLAG | ((u32)mask & 0x0f) << 12 | (reg & 0x0fff)); 932 RTL_W32(OCPAR, OCPAR_FLAG | ((u32)mask & 0x0f) << 12 | (reg & 0x0fff));
842 for (i = 0; i < 20; i++) { 933
843 udelay(100); 934 rtl_udelay_loop_wait_low(tp, &rtl_ocpar_cond, 100, 20);
844 if ((RTL_R32(OCPAR) & OCPAR_FLAG) == 0) 935}
845 break; 936
846 } 937DECLARE_RTL_COND(rtl_eriar_cond)
938{
939 void __iomem *ioaddr = tp->mmio_addr;
940
941 return RTL_R32(ERIAR) & ERIAR_FLAG;
847} 942}
848 943
849static void rtl8168_oob_notify(struct rtl8169_private *tp, u8 cmd) 944static void rtl8168_oob_notify(struct rtl8169_private *tp, u8 cmd)
850{ 945{
851 void __iomem *ioaddr = tp->mmio_addr; 946 void __iomem *ioaddr = tp->mmio_addr;
852 int i;
853 947
854 RTL_W8(ERIDR, cmd); 948 RTL_W8(ERIDR, cmd);
855 RTL_W32(ERIAR, 0x800010e8); 949 RTL_W32(ERIAR, 0x800010e8);
856 msleep(2); 950 msleep(2);
857 for (i = 0; i < 5; i++) { 951
858 udelay(100); 952 if (!rtl_udelay_loop_wait_low(tp, &rtl_eriar_cond, 100, 5))
859 if (!(RTL_R32(ERIAR) & ERIAR_FLAG)) 953 return;
860 break;
861 }
862 954
863 ocp_write(tp, 0x1, 0x30, 0x00000001); 955 ocp_write(tp, 0x1, 0x30, 0x00000001);
864} 956}
@@ -872,36 +964,27 @@ static u16 rtl8168_get_ocp_reg(struct rtl8169_private *tp)
872 return (tp->mac_version == RTL_GIGA_MAC_VER_31) ? 0xb8 : 0x10; 964 return (tp->mac_version == RTL_GIGA_MAC_VER_31) ? 0xb8 : 0x10;
873} 965}
874 966
875static void rtl8168_driver_start(struct rtl8169_private *tp) 967DECLARE_RTL_COND(rtl_ocp_read_cond)
876{ 968{
877 u16 reg; 969 u16 reg;
878 int i;
879
880 rtl8168_oob_notify(tp, OOB_CMD_DRIVER_START);
881 970
882 reg = rtl8168_get_ocp_reg(tp); 971 reg = rtl8168_get_ocp_reg(tp);
883 972
884 for (i = 0; i < 10; i++) { 973 return ocp_read(tp, 0x0f, reg) & 0x00000800;
885 msleep(10);
886 if (ocp_read(tp, 0x0f, reg) & 0x00000800)
887 break;
888 }
889} 974}
890 975
891static void rtl8168_driver_stop(struct rtl8169_private *tp) 976static void rtl8168_driver_start(struct rtl8169_private *tp)
892{ 977{
893 u16 reg; 978 rtl8168_oob_notify(tp, OOB_CMD_DRIVER_START);
894 int i;
895 979
896 rtl8168_oob_notify(tp, OOB_CMD_DRIVER_STOP); 980 rtl_msleep_loop_wait_high(tp, &rtl_ocp_read_cond, 10, 10);
981}
897 982
898 reg = rtl8168_get_ocp_reg(tp); 983static void rtl8168_driver_stop(struct rtl8169_private *tp)
984{
985 rtl8168_oob_notify(tp, OOB_CMD_DRIVER_STOP);
899 986
900 for (i = 0; i < 10; i++) { 987 rtl_msleep_loop_wait_low(tp, &rtl_ocp_read_cond, 10, 10);
901 msleep(10);
902 if ((ocp_read(tp, 0x0f, reg) & 0x00000800) == 0)
903 break;
904 }
905} 988}
906 989
907static int r8168dp_check_dash(struct rtl8169_private *tp) 990static int r8168dp_check_dash(struct rtl8169_private *tp)
@@ -911,21 +994,124 @@ static int r8168dp_check_dash(struct rtl8169_private *tp)
911 return (ocp_read(tp, 0x0f, reg) & 0x00008000) ? 1 : 0; 994 return (ocp_read(tp, 0x0f, reg) & 0x00008000) ? 1 : 0;
912} 995}
913 996
914static void r8169_mdio_write(void __iomem *ioaddr, int reg_addr, int value) 997static bool rtl_ocp_reg_failure(struct rtl8169_private *tp, u32 reg)
915{ 998{
916 int i; 999 if (reg & 0xffff0001) {
1000 netif_err(tp, drv, tp->dev, "Invalid ocp reg %x!\n", reg);
1001 return true;
1002 }
1003 return false;
1004}
917 1005
918 RTL_W32(PHYAR, 0x80000000 | (reg_addr & 0x1f) << 16 | (value & 0xffff)); 1006DECLARE_RTL_COND(rtl_ocp_gphy_cond)
1007{
1008 void __iomem *ioaddr = tp->mmio_addr;
919 1009
920 for (i = 20; i > 0; i--) { 1010 return RTL_R32(GPHY_OCP) & OCPAR_FLAG;
921 /* 1011}
922 * Check if the RTL8169 has completed writing to the specified 1012
923 * MII register. 1013static void r8168_phy_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data)
924 */ 1014{
925 if (!(RTL_R32(PHYAR) & 0x80000000)) 1015 void __iomem *ioaddr = tp->mmio_addr;
926 break; 1016
927 udelay(25); 1017 if (rtl_ocp_reg_failure(tp, reg))
1018 return;
1019
1020 RTL_W32(GPHY_OCP, OCPAR_FLAG | (reg << 15) | data);
1021
1022 rtl_udelay_loop_wait_low(tp, &rtl_ocp_gphy_cond, 25, 10);
1023}
1024
1025static u16 r8168_phy_ocp_read(struct rtl8169_private *tp, u32 reg)
1026{
1027 void __iomem *ioaddr = tp->mmio_addr;
1028
1029 if (rtl_ocp_reg_failure(tp, reg))
1030 return 0;
1031
1032 RTL_W32(GPHY_OCP, reg << 15);
1033
1034 return rtl_udelay_loop_wait_high(tp, &rtl_ocp_gphy_cond, 25, 10) ?
1035 (RTL_R32(GPHY_OCP) & 0xffff) : ~0;
1036}
1037
1038static void rtl_w1w0_phy_ocp(struct rtl8169_private *tp, int reg, int p, int m)
1039{
1040 int val;
1041
1042 val = r8168_phy_ocp_read(tp, reg);
1043 r8168_phy_ocp_write(tp, reg, (val | p) & ~m);
1044}
1045
1046DECLARE_RTL_COND(rtl_ocpdr_cond)
1047{
1048 void __iomem *ioaddr = tp->mmio_addr;
1049
1050 return RTL_R32(OCPDR) & OCPAR_FLAG;
1051}
1052
1053static void r8168_mac_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data)
1054{
1055 void __iomem *ioaddr = tp->mmio_addr;
1056
1057 if (rtl_ocp_reg_failure(tp, reg))
1058 return;
1059
1060 RTL_W32(OCPDR, OCPAR_FLAG | (reg << 15) | data);
1061
1062 rtl_udelay_loop_wait_low(tp, &rtl_ocpdr_cond, 25, 10);
1063}
1064
1065static u16 r8168_mac_ocp_read(struct rtl8169_private *tp, u32 reg)
1066{
1067 void __iomem *ioaddr = tp->mmio_addr;
1068
1069 if (rtl_ocp_reg_failure(tp, reg))
1070 return 0;
1071
1072 RTL_W32(OCPDR, reg << 15);
1073
1074 return rtl_udelay_loop_wait_high(tp, &rtl_ocpdr_cond, 25, 10) ?
1075 RTL_R32(OCPDR) : ~0;
1076}
1077
1078#define OCP_STD_PHY_BASE 0xa400
1079
1080static void r8168g_mdio_write(struct rtl8169_private *tp, int reg, int value)
1081{
1082 if (reg == 0x1f) {
1083 tp->ocp_base = value ? value << 4 : OCP_STD_PHY_BASE;
1084 return;
928 } 1085 }
1086
1087 if (tp->ocp_base != OCP_STD_PHY_BASE)
1088 reg -= 0x10;
1089
1090 r8168_phy_ocp_write(tp, tp->ocp_base + reg * 2, value);
1091}
1092
1093static int r8168g_mdio_read(struct rtl8169_private *tp, int reg)
1094{
1095 if (tp->ocp_base != OCP_STD_PHY_BASE)
1096 reg -= 0x10;
1097
1098 return r8168_phy_ocp_read(tp, tp->ocp_base + reg * 2);
1099}
1100
1101DECLARE_RTL_COND(rtl_phyar_cond)
1102{
1103 void __iomem *ioaddr = tp->mmio_addr;
1104
1105 return RTL_R32(PHYAR) & 0x80000000;
1106}
1107
1108static void r8169_mdio_write(struct rtl8169_private *tp, int reg, int value)
1109{
1110 void __iomem *ioaddr = tp->mmio_addr;
1111
1112 RTL_W32(PHYAR, 0x80000000 | (reg & 0x1f) << 16 | (value & 0xffff));
1113
1114 rtl_udelay_loop_wait_low(tp, &rtl_phyar_cond, 25, 20);
929 /* 1115 /*
930 * According to hardware specs a 20us delay is required after write 1116 * According to hardware specs a 20us delay is required after write
931 * complete indication, but before sending next command. 1117 * complete indication, but before sending next command.
@@ -933,23 +1119,16 @@ static void r8169_mdio_write(void __iomem *ioaddr, int reg_addr, int value)
933 udelay(20); 1119 udelay(20);
934} 1120}
935 1121
936static int r8169_mdio_read(void __iomem *ioaddr, int reg_addr) 1122static int r8169_mdio_read(struct rtl8169_private *tp, int reg)
937{ 1123{
938 int i, value = -1; 1124 void __iomem *ioaddr = tp->mmio_addr;
1125 int value;
939 1126
940 RTL_W32(PHYAR, 0x0 | (reg_addr & 0x1f) << 16); 1127 RTL_W32(PHYAR, 0x0 | (reg & 0x1f) << 16);
1128
1129 value = rtl_udelay_loop_wait_high(tp, &rtl_phyar_cond, 25, 20) ?
1130 RTL_R32(PHYAR) & 0xffff : ~0;
941 1131
942 for (i = 20; i > 0; i--) {
943 /*
944 * Check if the RTL8169 has completed retrieving data from
945 * the specified MII register.
946 */
947 if (RTL_R32(PHYAR) & 0x80000000) {
948 value = RTL_R32(PHYAR) & 0xffff;
949 break;
950 }
951 udelay(25);
952 }
953 /* 1132 /*
954 * According to hardware specs a 20us delay is required after read 1133 * According to hardware specs a 20us delay is required after read
955 * complete indication, but before sending next command. 1134 * complete indication, but before sending next command.
@@ -959,45 +1138,35 @@ static int r8169_mdio_read(void __iomem *ioaddr, int reg_addr)
959 return value; 1138 return value;
960} 1139}
961 1140
962static void r8168dp_1_mdio_access(void __iomem *ioaddr, int reg_addr, u32 data) 1141static void r8168dp_1_mdio_access(struct rtl8169_private *tp, int reg, u32 data)
963{ 1142{
964 int i; 1143 void __iomem *ioaddr = tp->mmio_addr;
965 1144
966 RTL_W32(OCPDR, data | 1145 RTL_W32(OCPDR, data | ((reg & OCPDR_REG_MASK) << OCPDR_GPHY_REG_SHIFT));
967 ((reg_addr & OCPDR_REG_MASK) << OCPDR_GPHY_REG_SHIFT));
968 RTL_W32(OCPAR, OCPAR_GPHY_WRITE_CMD); 1146 RTL_W32(OCPAR, OCPAR_GPHY_WRITE_CMD);
969 RTL_W32(EPHY_RXER_NUM, 0); 1147 RTL_W32(EPHY_RXER_NUM, 0);
970 1148
971 for (i = 0; i < 100; i++) { 1149 rtl_udelay_loop_wait_low(tp, &rtl_ocpar_cond, 1000, 100);
972 mdelay(1);
973 if (!(RTL_R32(OCPAR) & OCPAR_FLAG))
974 break;
975 }
976} 1150}
977 1151
978static void r8168dp_1_mdio_write(void __iomem *ioaddr, int reg_addr, int value) 1152static void r8168dp_1_mdio_write(struct rtl8169_private *tp, int reg, int value)
979{ 1153{
980 r8168dp_1_mdio_access(ioaddr, reg_addr, OCPDR_WRITE_CMD | 1154 r8168dp_1_mdio_access(tp, reg,
981 (value & OCPDR_DATA_MASK)); 1155 OCPDR_WRITE_CMD | (value & OCPDR_DATA_MASK));
982} 1156}
983 1157
984static int r8168dp_1_mdio_read(void __iomem *ioaddr, int reg_addr) 1158static int r8168dp_1_mdio_read(struct rtl8169_private *tp, int reg)
985{ 1159{
986 int i; 1160 void __iomem *ioaddr = tp->mmio_addr;
987 1161
988 r8168dp_1_mdio_access(ioaddr, reg_addr, OCPDR_READ_CMD); 1162 r8168dp_1_mdio_access(tp, reg, OCPDR_READ_CMD);
989 1163
990 mdelay(1); 1164 mdelay(1);
991 RTL_W32(OCPAR, OCPAR_GPHY_READ_CMD); 1165 RTL_W32(OCPAR, OCPAR_GPHY_READ_CMD);
992 RTL_W32(EPHY_RXER_NUM, 0); 1166 RTL_W32(EPHY_RXER_NUM, 0);
993 1167
994 for (i = 0; i < 100; i++) { 1168 return rtl_udelay_loop_wait_high(tp, &rtl_ocpar_cond, 1000, 100) ?
995 mdelay(1); 1169 RTL_R32(OCPDR) & OCPDR_DATA_MASK : ~0;
996 if (RTL_R32(OCPAR) & OCPAR_FLAG)
997 break;
998 }
999
1000 return RTL_R32(OCPDR) & OCPDR_DATA_MASK;
1001} 1170}
1002 1171
1003#define R8168DP_1_MDIO_ACCESS_BIT 0x00020000 1172#define R8168DP_1_MDIO_ACCESS_BIT 0x00020000
@@ -1012,22 +1181,25 @@ static void r8168dp_2_mdio_stop(void __iomem *ioaddr)
1012 RTL_W32(0xd0, RTL_R32(0xd0) | R8168DP_1_MDIO_ACCESS_BIT); 1181 RTL_W32(0xd0, RTL_R32(0xd0) | R8168DP_1_MDIO_ACCESS_BIT);
1013} 1182}
1014 1183
1015static void r8168dp_2_mdio_write(void __iomem *ioaddr, int reg_addr, int value) 1184static void r8168dp_2_mdio_write(struct rtl8169_private *tp, int reg, int value)
1016{ 1185{
1186 void __iomem *ioaddr = tp->mmio_addr;
1187
1017 r8168dp_2_mdio_start(ioaddr); 1188 r8168dp_2_mdio_start(ioaddr);
1018 1189
1019 r8169_mdio_write(ioaddr, reg_addr, value); 1190 r8169_mdio_write(tp, reg, value);
1020 1191
1021 r8168dp_2_mdio_stop(ioaddr); 1192 r8168dp_2_mdio_stop(ioaddr);
1022} 1193}
1023 1194
1024static int r8168dp_2_mdio_read(void __iomem *ioaddr, int reg_addr) 1195static int r8168dp_2_mdio_read(struct rtl8169_private *tp, int reg)
1025{ 1196{
1197 void __iomem *ioaddr = tp->mmio_addr;
1026 int value; 1198 int value;
1027 1199
1028 r8168dp_2_mdio_start(ioaddr); 1200 r8168dp_2_mdio_start(ioaddr);
1029 1201
1030 value = r8169_mdio_read(ioaddr, reg_addr); 1202 value = r8169_mdio_read(tp, reg);
1031 1203
1032 r8168dp_2_mdio_stop(ioaddr); 1204 r8168dp_2_mdio_stop(ioaddr);
1033 1205
@@ -1036,12 +1208,12 @@ static int r8168dp_2_mdio_read(void __iomem *ioaddr, int reg_addr)
1036 1208
1037static void rtl_writephy(struct rtl8169_private *tp, int location, u32 val) 1209static void rtl_writephy(struct rtl8169_private *tp, int location, u32 val)
1038{ 1210{
1039 tp->mdio_ops.write(tp->mmio_addr, location, val); 1211 tp->mdio_ops.write(tp, location, val);
1040} 1212}
1041 1213
1042static int rtl_readphy(struct rtl8169_private *tp, int location) 1214static int rtl_readphy(struct rtl8169_private *tp, int location)
1043{ 1215{
1044 return tp->mdio_ops.read(tp->mmio_addr, location); 1216 return tp->mdio_ops.read(tp, location);
1045} 1217}
1046 1218
1047static void rtl_patchphy(struct rtl8169_private *tp, int reg_addr, int value) 1219static void rtl_patchphy(struct rtl8169_private *tp, int reg_addr, int value)
@@ -1072,79 +1244,64 @@ static int rtl_mdio_read(struct net_device *dev, int phy_id, int location)
1072 return rtl_readphy(tp, location); 1244 return rtl_readphy(tp, location);
1073} 1245}
1074 1246
1075static void rtl_ephy_write(void __iomem *ioaddr, int reg_addr, int value) 1247DECLARE_RTL_COND(rtl_ephyar_cond)
1076{ 1248{
1077 unsigned int i; 1249 void __iomem *ioaddr = tp->mmio_addr;
1250
1251 return RTL_R32(EPHYAR) & EPHYAR_FLAG;
1252}
1253
1254static void rtl_ephy_write(struct rtl8169_private *tp, int reg_addr, int value)
1255{
1256 void __iomem *ioaddr = tp->mmio_addr;
1078 1257
1079 RTL_W32(EPHYAR, EPHYAR_WRITE_CMD | (value & EPHYAR_DATA_MASK) | 1258 RTL_W32(EPHYAR, EPHYAR_WRITE_CMD | (value & EPHYAR_DATA_MASK) |
1080 (reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT); 1259 (reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT);
1081 1260
1082 for (i = 0; i < 100; i++) { 1261 rtl_udelay_loop_wait_low(tp, &rtl_ephyar_cond, 10, 100);
1083 if (!(RTL_R32(EPHYAR) & EPHYAR_FLAG)) 1262
1084 break; 1263 udelay(10);
1085 udelay(10);
1086 }
1087} 1264}
1088 1265
1089static u16 rtl_ephy_read(void __iomem *ioaddr, int reg_addr) 1266static u16 rtl_ephy_read(struct rtl8169_private *tp, int reg_addr)
1090{ 1267{
1091 u16 value = 0xffff; 1268 void __iomem *ioaddr = tp->mmio_addr;
1092 unsigned int i;
1093 1269
1094 RTL_W32(EPHYAR, (reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT); 1270 RTL_W32(EPHYAR, (reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT);
1095 1271
1096 for (i = 0; i < 100; i++) { 1272 return rtl_udelay_loop_wait_high(tp, &rtl_ephyar_cond, 10, 100) ?
1097 if (RTL_R32(EPHYAR) & EPHYAR_FLAG) { 1273 RTL_R32(EPHYAR) & EPHYAR_DATA_MASK : ~0;
1098 value = RTL_R32(EPHYAR) & EPHYAR_DATA_MASK;
1099 break;
1100 }
1101 udelay(10);
1102 }
1103
1104 return value;
1105} 1274}
1106 1275
1107static 1276static void rtl_eri_write(struct rtl8169_private *tp, int addr, u32 mask,
1108void rtl_eri_write(void __iomem *ioaddr, int addr, u32 mask, u32 val, int type) 1277 u32 val, int type)
1109{ 1278{
1110 unsigned int i; 1279 void __iomem *ioaddr = tp->mmio_addr;
1111 1280
1112 BUG_ON((addr & 3) || (mask == 0)); 1281 BUG_ON((addr & 3) || (mask == 0));
1113 RTL_W32(ERIDR, val); 1282 RTL_W32(ERIDR, val);
1114 RTL_W32(ERIAR, ERIAR_WRITE_CMD | type | mask | addr); 1283 RTL_W32(ERIAR, ERIAR_WRITE_CMD | type | mask | addr);
1115 1284
1116 for (i = 0; i < 100; i++) { 1285 rtl_udelay_loop_wait_low(tp, &rtl_eriar_cond, 100, 100);
1117 if (!(RTL_R32(ERIAR) & ERIAR_FLAG))
1118 break;
1119 udelay(100);
1120 }
1121} 1286}
1122 1287
1123static u32 rtl_eri_read(void __iomem *ioaddr, int addr, int type) 1288static u32 rtl_eri_read(struct rtl8169_private *tp, int addr, int type)
1124{ 1289{
1125 u32 value = ~0x00; 1290 void __iomem *ioaddr = tp->mmio_addr;
1126 unsigned int i;
1127 1291
1128 RTL_W32(ERIAR, ERIAR_READ_CMD | type | ERIAR_MASK_1111 | addr); 1292 RTL_W32(ERIAR, ERIAR_READ_CMD | type | ERIAR_MASK_1111 | addr);
1129 1293
1130 for (i = 0; i < 100; i++) { 1294 return rtl_udelay_loop_wait_high(tp, &rtl_eriar_cond, 100, 100) ?
1131 if (RTL_R32(ERIAR) & ERIAR_FLAG) { 1295 RTL_R32(ERIDR) : ~0;
1132 value = RTL_R32(ERIDR);
1133 break;
1134 }
1135 udelay(100);
1136 }
1137
1138 return value;
1139} 1296}
1140 1297
1141static void 1298static void rtl_w1w0_eri(struct rtl8169_private *tp, int addr, u32 mask, u32 p,
1142rtl_w1w0_eri(void __iomem *ioaddr, int addr, u32 mask, u32 p, u32 m, int type) 1299 u32 m, int type)
1143{ 1300{
1144 u32 val; 1301 u32 val;
1145 1302
1146 val = rtl_eri_read(ioaddr, addr, type); 1303 val = rtl_eri_read(tp, addr, type);
1147 rtl_eri_write(ioaddr, addr, mask, (val & ~m) | p, type); 1304 rtl_eri_write(tp, addr, mask, (val & ~m) | p, type);
1148} 1305}
1149 1306
1150struct exgmac_reg { 1307struct exgmac_reg {
@@ -1153,31 +1310,30 @@ struct exgmac_reg {
1153 u32 val; 1310 u32 val;
1154}; 1311};
1155 1312
1156static void rtl_write_exgmac_batch(void __iomem *ioaddr, 1313static void rtl_write_exgmac_batch(struct rtl8169_private *tp,
1157 const struct exgmac_reg *r, int len) 1314 const struct exgmac_reg *r, int len)
1158{ 1315{
1159 while (len-- > 0) { 1316 while (len-- > 0) {
1160 rtl_eri_write(ioaddr, r->addr, r->mask, r->val, ERIAR_EXGMAC); 1317 rtl_eri_write(tp, r->addr, r->mask, r->val, ERIAR_EXGMAC);
1161 r++; 1318 r++;
1162 } 1319 }
1163} 1320}
1164 1321
1165static u8 rtl8168d_efuse_read(void __iomem *ioaddr, int reg_addr) 1322DECLARE_RTL_COND(rtl_efusear_cond)
1166{ 1323{
1167 u8 value = 0xff; 1324 void __iomem *ioaddr = tp->mmio_addr;
1168 unsigned int i;
1169 1325
1170 RTL_W32(EFUSEAR, (reg_addr & EFUSEAR_REG_MASK) << EFUSEAR_REG_SHIFT); 1326 return RTL_R32(EFUSEAR) & EFUSEAR_FLAG;
1327}
1171 1328
1172 for (i = 0; i < 300; i++) { 1329static u8 rtl8168d_efuse_read(struct rtl8169_private *tp, int reg_addr)
1173 if (RTL_R32(EFUSEAR) & EFUSEAR_FLAG) { 1330{
1174 value = RTL_R32(EFUSEAR) & EFUSEAR_DATA_MASK; 1331 void __iomem *ioaddr = tp->mmio_addr;
1175 break;
1176 }
1177 udelay(100);
1178 }
1179 1332
1180 return value; 1333 RTL_W32(EFUSEAR, (reg_addr & EFUSEAR_REG_MASK) << EFUSEAR_REG_SHIFT);
1334
1335 return rtl_udelay_loop_wait_high(tp, &rtl_efusear_cond, 100, 300) ?
1336 RTL_R32(EFUSEAR) & EFUSEAR_DATA_MASK : ~0;
1181} 1337}
1182 1338
1183static u16 rtl_get_events(struct rtl8169_private *tp) 1339static u16 rtl_get_events(struct rtl8169_private *tp)
@@ -1276,48 +1432,48 @@ static void rtl_link_chg_patch(struct rtl8169_private *tp)
1276 if (tp->mac_version == RTL_GIGA_MAC_VER_34 || 1432 if (tp->mac_version == RTL_GIGA_MAC_VER_34 ||
1277 tp->mac_version == RTL_GIGA_MAC_VER_38) { 1433 tp->mac_version == RTL_GIGA_MAC_VER_38) {
1278 if (RTL_R8(PHYstatus) & _1000bpsF) { 1434 if (RTL_R8(PHYstatus) & _1000bpsF) {
1279 rtl_eri_write(ioaddr, 0x1bc, ERIAR_MASK_1111, 1435 rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011,
1280 0x00000011, ERIAR_EXGMAC); 1436 ERIAR_EXGMAC);
1281 rtl_eri_write(ioaddr, 0x1dc, ERIAR_MASK_1111, 1437 rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005,
1282 0x00000005, ERIAR_EXGMAC); 1438 ERIAR_EXGMAC);
1283 } else if (RTL_R8(PHYstatus) & _100bps) { 1439 } else if (RTL_R8(PHYstatus) & _100bps) {
1284 rtl_eri_write(ioaddr, 0x1bc, ERIAR_MASK_1111, 1440 rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f,
1285 0x0000001f, ERIAR_EXGMAC); 1441 ERIAR_EXGMAC);
1286 rtl_eri_write(ioaddr, 0x1dc, ERIAR_MASK_1111, 1442 rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005,
1287 0x00000005, ERIAR_EXGMAC); 1443 ERIAR_EXGMAC);
1288 } else { 1444 } else {
1289 rtl_eri_write(ioaddr, 0x1bc, ERIAR_MASK_1111, 1445 rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f,
1290 0x0000001f, ERIAR_EXGMAC); 1446 ERIAR_EXGMAC);
1291 rtl_eri_write(ioaddr, 0x1dc, ERIAR_MASK_1111, 1447 rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x0000003f,
1292 0x0000003f, ERIAR_EXGMAC); 1448 ERIAR_EXGMAC);
1293 } 1449 }
1294 /* Reset packet filter */ 1450 /* Reset packet filter */
1295 rtl_w1w0_eri(ioaddr, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, 1451 rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01,
1296 ERIAR_EXGMAC); 1452 ERIAR_EXGMAC);
1297 rtl_w1w0_eri(ioaddr, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, 1453 rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00,
1298 ERIAR_EXGMAC); 1454 ERIAR_EXGMAC);
1299 } else if (tp->mac_version == RTL_GIGA_MAC_VER_35 || 1455 } else if (tp->mac_version == RTL_GIGA_MAC_VER_35 ||
1300 tp->mac_version == RTL_GIGA_MAC_VER_36) { 1456 tp->mac_version == RTL_GIGA_MAC_VER_36) {
1301 if (RTL_R8(PHYstatus) & _1000bpsF) { 1457 if (RTL_R8(PHYstatus) & _1000bpsF) {
1302 rtl_eri_write(ioaddr, 0x1bc, ERIAR_MASK_1111, 1458 rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011,
1303 0x00000011, ERIAR_EXGMAC); 1459 ERIAR_EXGMAC);
1304 rtl_eri_write(ioaddr, 0x1dc, ERIAR_MASK_1111, 1460 rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005,
1305 0x00000005, ERIAR_EXGMAC); 1461 ERIAR_EXGMAC);
1306 } else { 1462 } else {
1307 rtl_eri_write(ioaddr, 0x1bc, ERIAR_MASK_1111, 1463 rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f,
1308 0x0000001f, ERIAR_EXGMAC); 1464 ERIAR_EXGMAC);
1309 rtl_eri_write(ioaddr, 0x1dc, ERIAR_MASK_1111, 1465 rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x0000003f,
1310 0x0000003f, ERIAR_EXGMAC); 1466 ERIAR_EXGMAC);
1311 } 1467 }
1312 } else if (tp->mac_version == RTL_GIGA_MAC_VER_37) { 1468 } else if (tp->mac_version == RTL_GIGA_MAC_VER_37) {
1313 if (RTL_R8(PHYstatus) & _10bps) { 1469 if (RTL_R8(PHYstatus) & _10bps) {
1314 rtl_eri_write(ioaddr, 0x1d0, ERIAR_MASK_0011, 1470 rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x4d02,
1315 0x4d02, ERIAR_EXGMAC); 1471 ERIAR_EXGMAC);
1316 rtl_eri_write(ioaddr, 0x1dc, ERIAR_MASK_0011, 1472 rtl_eri_write(tp, 0x1dc, ERIAR_MASK_0011, 0x0060,
1317 0x0060, ERIAR_EXGMAC); 1473 ERIAR_EXGMAC);
1318 } else { 1474 } else {
1319 rtl_eri_write(ioaddr, 0x1d0, ERIAR_MASK_0011, 1475 rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000,
1320 0x0000, ERIAR_EXGMAC); 1476 ERIAR_EXGMAC);
1321 } 1477 }
1322 } 1478 }
1323} 1479}
@@ -1784,6 +1940,13 @@ static int rtl8169_get_sset_count(struct net_device *dev, int sset)
1784 } 1940 }
1785} 1941}
1786 1942
1943DECLARE_RTL_COND(rtl_counters_cond)
1944{
1945 void __iomem *ioaddr = tp->mmio_addr;
1946
1947 return RTL_R32(CounterAddrLow) & CounterDump;
1948}
1949
1787static void rtl8169_update_counters(struct net_device *dev) 1950static void rtl8169_update_counters(struct net_device *dev)
1788{ 1951{
1789 struct rtl8169_private *tp = netdev_priv(dev); 1952 struct rtl8169_private *tp = netdev_priv(dev);
@@ -1792,7 +1955,6 @@ static void rtl8169_update_counters(struct net_device *dev)
1792 struct rtl8169_counters *counters; 1955 struct rtl8169_counters *counters;
1793 dma_addr_t paddr; 1956 dma_addr_t paddr;
1794 u32 cmd; 1957 u32 cmd;
1795 int wait = 1000;
1796 1958
1797 /* 1959 /*
1798 * Some chips are unable to dump tally counters when the receiver 1960 * Some chips are unable to dump tally counters when the receiver
@@ -1810,13 +1972,8 @@ static void rtl8169_update_counters(struct net_device *dev)
1810 RTL_W32(CounterAddrLow, cmd); 1972 RTL_W32(CounterAddrLow, cmd);
1811 RTL_W32(CounterAddrLow, cmd | CounterDump); 1973 RTL_W32(CounterAddrLow, cmd | CounterDump);
1812 1974
1813 while (wait--) { 1975 if (rtl_udelay_loop_wait_low(tp, &rtl_counters_cond, 10, 1000))
1814 if ((RTL_R32(CounterAddrLow) & CounterDump) == 0) { 1976 memcpy(&tp->counters, counters, sizeof(*counters));
1815 memcpy(&tp->counters, counters, sizeof(*counters));
1816 break;
1817 }
1818 udelay(10);
1819 }
1820 1977
1821 RTL_W32(CounterAddrLow, 0); 1978 RTL_W32(CounterAddrLow, 0);
1822 RTL_W32(CounterAddrHigh, 0); 1979 RTL_W32(CounterAddrHigh, 0);
@@ -1894,6 +2051,10 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp,
1894 u32 val; 2051 u32 val;
1895 int mac_version; 2052 int mac_version;
1896 } mac_info[] = { 2053 } mac_info[] = {
2054 /* 8168G family. */
2055 { 0x7cf00000, 0x4c100000, RTL_GIGA_MAC_VER_41 },
2056 { 0x7cf00000, 0x4c000000, RTL_GIGA_MAC_VER_40 },
2057
1897 /* 8168F family. */ 2058 /* 8168F family. */
1898 { 0x7c800000, 0x48800000, RTL_GIGA_MAC_VER_38 }, 2059 { 0x7c800000, 0x48800000, RTL_GIGA_MAC_VER_38 },
1899 { 0x7cf00000, 0x48100000, RTL_GIGA_MAC_VER_36 }, 2060 { 0x7cf00000, 0x48100000, RTL_GIGA_MAC_VER_36 },
@@ -1933,6 +2094,8 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp,
1933 { 0x7c800000, 0x30000000, RTL_GIGA_MAC_VER_11 }, 2094 { 0x7c800000, 0x30000000, RTL_GIGA_MAC_VER_11 },
1934 2095
1935 /* 8101 family. */ 2096 /* 8101 family. */
2097 { 0x7cf00000, 0x44900000, RTL_GIGA_MAC_VER_39 },
2098 { 0x7c800000, 0x44800000, RTL_GIGA_MAC_VER_39 },
1936 { 0x7c800000, 0x44000000, RTL_GIGA_MAC_VER_37 }, 2099 { 0x7c800000, 0x44000000, RTL_GIGA_MAC_VER_37 },
1937 { 0x7cf00000, 0x40b00000, RTL_GIGA_MAC_VER_30 }, 2100 { 0x7cf00000, 0x40b00000, RTL_GIGA_MAC_VER_30 },
1938 { 0x7cf00000, 0x40a00000, RTL_GIGA_MAC_VER_30 }, 2101 { 0x7cf00000, 0x40a00000, RTL_GIGA_MAC_VER_30 },
@@ -2186,7 +2349,7 @@ static void rtl_phy_write_fw(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
2186 index -= regno; 2349 index -= regno;
2187 break; 2350 break;
2188 case PHY_READ_EFUSE: 2351 case PHY_READ_EFUSE:
2189 predata = rtl8168d_efuse_read(tp->mmio_addr, regno); 2352 predata = rtl8168d_efuse_read(tp, regno);
2190 index++; 2353 index++;
2191 break; 2354 break;
2192 case PHY_CLEAR_READCOUNT: 2355 case PHY_CLEAR_READCOUNT:
@@ -2626,7 +2789,6 @@ static void rtl8168d_1_hw_phy_config(struct rtl8169_private *tp)
2626 { 0x1f, 0x0000 }, 2789 { 0x1f, 0x0000 },
2627 { 0x0d, 0xf880 } 2790 { 0x0d, 0xf880 }
2628 }; 2791 };
2629 void __iomem *ioaddr = tp->mmio_addr;
2630 2792
2631 rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0)); 2793 rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0));
2632 2794
@@ -2638,7 +2800,7 @@ static void rtl8168d_1_hw_phy_config(struct rtl8169_private *tp)
2638 rtl_w1w0_phy(tp, 0x0b, 0x0010, 0x00ef); 2800 rtl_w1w0_phy(tp, 0x0b, 0x0010, 0x00ef);
2639 rtl_w1w0_phy(tp, 0x0c, 0xa200, 0x5d00); 2801 rtl_w1w0_phy(tp, 0x0c, 0xa200, 0x5d00);
2640 2802
2641 if (rtl8168d_efuse_read(ioaddr, 0x01) == 0xb1) { 2803 if (rtl8168d_efuse_read(tp, 0x01) == 0xb1) {
2642 static const struct phy_reg phy_reg_init[] = { 2804 static const struct phy_reg phy_reg_init[] = {
2643 { 0x1f, 0x0002 }, 2805 { 0x1f, 0x0002 },
2644 { 0x05, 0x669a }, 2806 { 0x05, 0x669a },
@@ -2738,11 +2900,10 @@ static void rtl8168d_2_hw_phy_config(struct rtl8169_private *tp)
2738 { 0x1f, 0x0000 }, 2900 { 0x1f, 0x0000 },
2739 { 0x0d, 0xf880 } 2901 { 0x0d, 0xf880 }
2740 }; 2902 };
2741 void __iomem *ioaddr = tp->mmio_addr;
2742 2903
2743 rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0)); 2904 rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0));
2744 2905
2745 if (rtl8168d_efuse_read(ioaddr, 0x01) == 0xb1) { 2906 if (rtl8168d_efuse_read(tp, 0x01) == 0xb1) {
2746 static const struct phy_reg phy_reg_init[] = { 2907 static const struct phy_reg phy_reg_init[] = {
2747 { 0x1f, 0x0002 }, 2908 { 0x1f, 0x0002 },
2748 { 0x05, 0x669a }, 2909 { 0x05, 0x669a },
@@ -3010,8 +3171,7 @@ static void rtl8168e_2_hw_phy_config(struct rtl8169_private *tp)
3010 rtl_writephy(tp, 0x1f, 0x0000); 3171 rtl_writephy(tp, 0x1f, 0x0000);
3011 3172
3012 /* EEE setting */ 3173 /* EEE setting */
3013 rtl_w1w0_eri(tp->mmio_addr, 0x1b0, ERIAR_MASK_1111, 0x0000, 0x0003, 3174 rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_1111, 0x0000, 0x0003, ERIAR_EXGMAC);
3014 ERIAR_EXGMAC);
3015 rtl_writephy(tp, 0x1f, 0x0005); 3175 rtl_writephy(tp, 0x1f, 0x0005);
3016 rtl_writephy(tp, 0x05, 0x8b85); 3176 rtl_writephy(tp, 0x05, 0x8b85);
3017 rtl_w1w0_phy(tp, 0x06, 0x0000, 0x2000); 3177 rtl_w1w0_phy(tp, 0x06, 0x0000, 0x2000);
@@ -3115,7 +3275,6 @@ static void rtl8168f_2_hw_phy_config(struct rtl8169_private *tp)
3115 3275
3116static void rtl8411_hw_phy_config(struct rtl8169_private *tp) 3276static void rtl8411_hw_phy_config(struct rtl8169_private *tp)
3117{ 3277{
3118 void __iomem *ioaddr = tp->mmio_addr;
3119 static const struct phy_reg phy_reg_init[] = { 3278 static const struct phy_reg phy_reg_init[] = {
3120 /* Channel estimation fine tune */ 3279 /* Channel estimation fine tune */
3121 { 0x1f, 0x0003 }, 3280 { 0x1f, 0x0003 },
@@ -3189,7 +3348,7 @@ static void rtl8411_hw_phy_config(struct rtl8169_private *tp)
3189 rtl_writephy(tp, 0x1f, 0x0000); 3348 rtl_writephy(tp, 0x1f, 0x0000);
3190 3349
3191 /* eee setting */ 3350 /* eee setting */
3192 rtl_w1w0_eri(ioaddr, 0x1b0, ERIAR_MASK_0001, 0x00, 0x03, ERIAR_EXGMAC); 3351 rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_0001, 0x00, 0x03, ERIAR_EXGMAC);
3193 rtl_writephy(tp, 0x1f, 0x0005); 3352 rtl_writephy(tp, 0x1f, 0x0005);
3194 rtl_writephy(tp, 0x05, 0x8b85); 3353 rtl_writephy(tp, 0x05, 0x8b85);
3195 rtl_w1w0_phy(tp, 0x06, 0x0000, 0x2000); 3354 rtl_w1w0_phy(tp, 0x06, 0x0000, 0x2000);
@@ -3211,6 +3370,55 @@ static void rtl8411_hw_phy_config(struct rtl8169_private *tp)
3211 rtl_writephy(tp, 0x1f, 0x0000); 3370 rtl_writephy(tp, 0x1f, 0x0000);
3212} 3371}
3213 3372
3373static void rtl8168g_1_hw_phy_config(struct rtl8169_private *tp)
3374{
3375 static const u16 mac_ocp_patch[] = {
3376 0xe008, 0xe01b, 0xe01d, 0xe01f,
3377 0xe021, 0xe023, 0xe025, 0xe027,
3378 0x49d2, 0xf10d, 0x766c, 0x49e2,
3379 0xf00a, 0x1ec0, 0x8ee1, 0xc60a,
3380
3381 0x77c0, 0x4870, 0x9fc0, 0x1ea0,
3382 0xc707, 0x8ee1, 0x9d6c, 0xc603,
3383 0xbe00, 0xb416, 0x0076, 0xe86c,
3384 0xc602, 0xbe00, 0x0000, 0xc602,
3385
3386 0xbe00, 0x0000, 0xc602, 0xbe00,
3387 0x0000, 0xc602, 0xbe00, 0x0000,
3388 0xc602, 0xbe00, 0x0000, 0xc602,
3389 0xbe00, 0x0000, 0xc602, 0xbe00,
3390
3391 0x0000, 0x0000, 0x0000, 0x0000
3392 };
3393 u32 i;
3394
3395 /* Patch code for GPHY reset */
3396 for (i = 0; i < ARRAY_SIZE(mac_ocp_patch); i++)
3397 r8168_mac_ocp_write(tp, 0xf800 + 2*i, mac_ocp_patch[i]);
3398 r8168_mac_ocp_write(tp, 0xfc26, 0x8000);
3399 r8168_mac_ocp_write(tp, 0xfc28, 0x0075);
3400
3401 rtl_apply_firmware(tp);
3402
3403 if (r8168_phy_ocp_read(tp, 0xa460) & 0x0100)
3404 rtl_w1w0_phy_ocp(tp, 0xbcc4, 0x0000, 0x8000);
3405 else
3406 rtl_w1w0_phy_ocp(tp, 0xbcc4, 0x8000, 0x0000);
3407
3408 if (r8168_phy_ocp_read(tp, 0xa466) & 0x0100)
3409 rtl_w1w0_phy_ocp(tp, 0xc41a, 0x0002, 0x0000);
3410 else
3411 rtl_w1w0_phy_ocp(tp, 0xbcc4, 0x0000, 0x0002);
3412
3413 rtl_w1w0_phy_ocp(tp, 0xa442, 0x000c, 0x0000);
3414 rtl_w1w0_phy_ocp(tp, 0xa4b2, 0x0004, 0x0000);
3415
3416 r8168_phy_ocp_write(tp, 0xa436, 0x8012);
3417 rtl_w1w0_phy_ocp(tp, 0xa438, 0x8000, 0x0000);
3418
3419 rtl_w1w0_phy_ocp(tp, 0xc422, 0x4000, 0x2000);
3420}
3421
3214static void rtl8102e_hw_phy_config(struct rtl8169_private *tp) 3422static void rtl8102e_hw_phy_config(struct rtl8169_private *tp)
3215{ 3423{
3216 static const struct phy_reg phy_reg_init[] = { 3424 static const struct phy_reg phy_reg_init[] = {
@@ -3256,8 +3464,6 @@ static void rtl8105e_hw_phy_config(struct rtl8169_private *tp)
3256 3464
3257static void rtl8402_hw_phy_config(struct rtl8169_private *tp) 3465static void rtl8402_hw_phy_config(struct rtl8169_private *tp)
3258{ 3466{
3259 void __iomem *ioaddr = tp->mmio_addr;
3260
3261 /* Disable ALDPS before setting firmware */ 3467 /* Disable ALDPS before setting firmware */
3262 rtl_writephy(tp, 0x1f, 0x0000); 3468 rtl_writephy(tp, 0x1f, 0x0000);
3263 rtl_writephy(tp, 0x18, 0x0310); 3469 rtl_writephy(tp, 0x18, 0x0310);
@@ -3266,13 +3472,35 @@ static void rtl8402_hw_phy_config(struct rtl8169_private *tp)
3266 rtl_apply_firmware(tp); 3472 rtl_apply_firmware(tp);
3267 3473
3268 /* EEE setting */ 3474 /* EEE setting */
3269 rtl_eri_write(ioaddr, 0x1b0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); 3475 rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
3270 rtl_writephy(tp, 0x1f, 0x0004); 3476 rtl_writephy(tp, 0x1f, 0x0004);
3271 rtl_writephy(tp, 0x10, 0x401f); 3477 rtl_writephy(tp, 0x10, 0x401f);
3272 rtl_writephy(tp, 0x19, 0x7030); 3478 rtl_writephy(tp, 0x19, 0x7030);
3273 rtl_writephy(tp, 0x1f, 0x0000); 3479 rtl_writephy(tp, 0x1f, 0x0000);
3274} 3480}
3275 3481
3482static void rtl8106e_hw_phy_config(struct rtl8169_private *tp)
3483{
3484 static const struct phy_reg phy_reg_init[] = {
3485 { 0x1f, 0x0004 },
3486 { 0x10, 0xc07f },
3487 { 0x19, 0x7030 },
3488 { 0x1f, 0x0000 }
3489 };
3490
3491 /* Disable ALDPS before ram code */
3492 rtl_writephy(tp, 0x1f, 0x0000);
3493 rtl_writephy(tp, 0x18, 0x0310);
3494 msleep(100);
3495
3496 rtl_apply_firmware(tp);
3497
3498 rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
3499 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
3500
3501 rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
3502}
3503
3276static void rtl_hw_phy_config(struct net_device *dev) 3504static void rtl_hw_phy_config(struct net_device *dev)
3277{ 3505{
3278 struct rtl8169_private *tp = netdev_priv(dev); 3506 struct rtl8169_private *tp = netdev_priv(dev);
@@ -3369,6 +3597,15 @@ static void rtl_hw_phy_config(struct net_device *dev)
3369 rtl8411_hw_phy_config(tp); 3597 rtl8411_hw_phy_config(tp);
3370 break; 3598 break;
3371 3599
3600 case RTL_GIGA_MAC_VER_39:
3601 rtl8106e_hw_phy_config(tp);
3602 break;
3603
3604 case RTL_GIGA_MAC_VER_40:
3605 rtl8168g_1_hw_phy_config(tp);
3606 break;
3607
3608 case RTL_GIGA_MAC_VER_41:
3372 default: 3609 default:
3373 break; 3610 break;
3374 } 3611 }
@@ -3426,18 +3663,16 @@ static void rtl8169_release_board(struct pci_dev *pdev, struct net_device *dev,
3426 free_netdev(dev); 3663 free_netdev(dev);
3427} 3664}
3428 3665
3666DECLARE_RTL_COND(rtl_phy_reset_cond)
3667{
3668 return tp->phy_reset_pending(tp);
3669}
3670
3429static void rtl8169_phy_reset(struct net_device *dev, 3671static void rtl8169_phy_reset(struct net_device *dev,
3430 struct rtl8169_private *tp) 3672 struct rtl8169_private *tp)
3431{ 3673{
3432 unsigned int i;
3433
3434 tp->phy_reset_enable(tp); 3674 tp->phy_reset_enable(tp);
3435 for (i = 0; i < 100; i++) { 3675 rtl_msleep_loop_wait_low(tp, &rtl_phy_reset_cond, 1, 100);
3436 if (!tp->phy_reset_pending(tp))
3437 return;
3438 msleep(1);
3439 }
3440 netif_err(tp, link, dev, "PHY reset failed\n");
3441} 3676}
3442 3677
3443static bool rtl_tbi_enabled(struct rtl8169_private *tp) 3678static bool rtl_tbi_enabled(struct rtl8169_private *tp)
@@ -3512,7 +3747,7 @@ static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr)
3512 low >> 16 }, 3747 low >> 16 },
3513 }; 3748 };
3514 3749
3515 rtl_write_exgmac_batch(ioaddr, e, ARRAY_SIZE(e)); 3750 rtl_write_exgmac_batch(tp, e, ARRAY_SIZE(e));
3516 } 3751 }
3517 3752
3518 RTL_W8(Cfg9346, Cfg9346_Lock); 3753 RTL_W8(Cfg9346, Cfg9346_Lock);
@@ -3589,6 +3824,11 @@ static void __devinit rtl_init_mdio_ops(struct rtl8169_private *tp)
3589 ops->write = r8168dp_2_mdio_write; 3824 ops->write = r8168dp_2_mdio_write;
3590 ops->read = r8168dp_2_mdio_read; 3825 ops->read = r8168dp_2_mdio_read;
3591 break; 3826 break;
3827 case RTL_GIGA_MAC_VER_40:
3828 case RTL_GIGA_MAC_VER_41:
3829 ops->write = r8168g_mdio_write;
3830 ops->read = r8168g_mdio_read;
3831 break;
3592 default: 3832 default:
3593 ops->write = r8169_mdio_write; 3833 ops->write = r8169_mdio_write;
3594 ops->read = r8169_mdio_read; 3834 ops->read = r8169_mdio_read;
@@ -3608,6 +3848,9 @@ static void rtl_wol_suspend_quirk(struct rtl8169_private *tp)
3608 case RTL_GIGA_MAC_VER_34: 3848 case RTL_GIGA_MAC_VER_34:
3609 case RTL_GIGA_MAC_VER_37: 3849 case RTL_GIGA_MAC_VER_37:
3610 case RTL_GIGA_MAC_VER_38: 3850 case RTL_GIGA_MAC_VER_38:
3851 case RTL_GIGA_MAC_VER_39:
3852 case RTL_GIGA_MAC_VER_40:
3853 case RTL_GIGA_MAC_VER_41:
3611 RTL_W32(RxConfig, RTL_R32(RxConfig) | 3854 RTL_W32(RxConfig, RTL_R32(RxConfig) |
3612 AcceptBroadcast | AcceptMulticast | AcceptMyPhys); 3855 AcceptBroadcast | AcceptMulticast | AcceptMyPhys);
3613 break; 3856 break;
@@ -3761,7 +4004,7 @@ static void r8168_pll_power_down(struct rtl8169_private *tp)
3761 4004
3762 if (tp->mac_version == RTL_GIGA_MAC_VER_32 || 4005 if (tp->mac_version == RTL_GIGA_MAC_VER_32 ||
3763 tp->mac_version == RTL_GIGA_MAC_VER_33) 4006 tp->mac_version == RTL_GIGA_MAC_VER_33)
3764 rtl_ephy_write(ioaddr, 0x19, 0xff64); 4007 rtl_ephy_write(tp, 0x19, 0xff64);
3765 4008
3766 if (rtl_wol_pll_power_down(tp)) 4009 if (rtl_wol_pll_power_down(tp))
3767 return; 4010 return;
@@ -3830,6 +4073,7 @@ static void __devinit rtl_init_pll_power_ops(struct rtl8169_private *tp)
3830 case RTL_GIGA_MAC_VER_29: 4073 case RTL_GIGA_MAC_VER_29:
3831 case RTL_GIGA_MAC_VER_30: 4074 case RTL_GIGA_MAC_VER_30:
3832 case RTL_GIGA_MAC_VER_37: 4075 case RTL_GIGA_MAC_VER_37:
4076 case RTL_GIGA_MAC_VER_39:
3833 ops->down = r810x_pll_power_down; 4077 ops->down = r810x_pll_power_down;
3834 ops->up = r810x_pll_power_up; 4078 ops->up = r810x_pll_power_up;
3835 break; 4079 break;
@@ -3855,6 +4099,8 @@ static void __devinit rtl_init_pll_power_ops(struct rtl8169_private *tp)
3855 case RTL_GIGA_MAC_VER_35: 4099 case RTL_GIGA_MAC_VER_35:
3856 case RTL_GIGA_MAC_VER_36: 4100 case RTL_GIGA_MAC_VER_36:
3857 case RTL_GIGA_MAC_VER_38: 4101 case RTL_GIGA_MAC_VER_38:
4102 case RTL_GIGA_MAC_VER_40:
4103 case RTL_GIGA_MAC_VER_41:
3858 ops->down = r8168_pll_power_down; 4104 ops->down = r8168_pll_power_down;
3859 ops->up = r8168_pll_power_up; 4105 ops->up = r8168_pll_power_up;
3860 break; 4106 break;
@@ -4051,6 +4297,8 @@ static void __devinit rtl_init_jumbo_ops(struct rtl8169_private *tp)
4051 * No action needed for jumbo frames with 8169. 4297 * No action needed for jumbo frames with 8169.
4052 * No jumbo for 810x at all. 4298 * No jumbo for 810x at all.
4053 */ 4299 */
4300 case RTL_GIGA_MAC_VER_40:
4301 case RTL_GIGA_MAC_VER_41:
4054 default: 4302 default:
4055 ops->disable = NULL; 4303 ops->disable = NULL;
4056 ops->enable = NULL; 4304 ops->enable = NULL;
@@ -4058,20 +4306,20 @@ static void __devinit rtl_init_jumbo_ops(struct rtl8169_private *tp)
4058 } 4306 }
4059} 4307}
4060 4308
4309DECLARE_RTL_COND(rtl_chipcmd_cond)
4310{
4311 void __iomem *ioaddr = tp->mmio_addr;
4312
4313 return RTL_R8(ChipCmd) & CmdReset;
4314}
4315
4061static void rtl_hw_reset(struct rtl8169_private *tp) 4316static void rtl_hw_reset(struct rtl8169_private *tp)
4062{ 4317{
4063 void __iomem *ioaddr = tp->mmio_addr; 4318 void __iomem *ioaddr = tp->mmio_addr;
4064 int i;
4065 4319
4066 /* Soft reset the chip. */
4067 RTL_W8(ChipCmd, CmdReset); 4320 RTL_W8(ChipCmd, CmdReset);
4068 4321
4069 /* Check that the chip has finished the reset. */ 4322 rtl_udelay_loop_wait_low(tp, &rtl_chipcmd_cond, 100, 100);
4070 for (i = 0; i < 100; i++) {
4071 if ((RTL_R8(ChipCmd) & CmdReset) == 0)
4072 break;
4073 udelay(100);
4074 }
4075} 4323}
4076 4324
4077static void rtl_request_uncached_firmware(struct rtl8169_private *tp) 4325static void rtl_request_uncached_firmware(struct rtl8169_private *tp)
@@ -4125,6 +4373,20 @@ static void rtl_rx_close(struct rtl8169_private *tp)
4125 RTL_W32(RxConfig, RTL_R32(RxConfig) & ~RX_CONFIG_ACCEPT_MASK); 4373 RTL_W32(RxConfig, RTL_R32(RxConfig) & ~RX_CONFIG_ACCEPT_MASK);
4126} 4374}
4127 4375
4376DECLARE_RTL_COND(rtl_npq_cond)
4377{
4378 void __iomem *ioaddr = tp->mmio_addr;
4379
4380 return RTL_R8(TxPoll) & NPQ;
4381}
4382
4383DECLARE_RTL_COND(rtl_txcfg_empty_cond)
4384{
4385 void __iomem *ioaddr = tp->mmio_addr;
4386
4387 return RTL_R32(TxConfig) & TXCFG_EMPTY;
4388}
4389
4128static void rtl8169_hw_reset(struct rtl8169_private *tp) 4390static void rtl8169_hw_reset(struct rtl8169_private *tp)
4129{ 4391{
4130 void __iomem *ioaddr = tp->mmio_addr; 4392 void __iomem *ioaddr = tp->mmio_addr;
@@ -4137,16 +4399,16 @@ static void rtl8169_hw_reset(struct rtl8169_private *tp)
4137 if (tp->mac_version == RTL_GIGA_MAC_VER_27 || 4399 if (tp->mac_version == RTL_GIGA_MAC_VER_27 ||
4138 tp->mac_version == RTL_GIGA_MAC_VER_28 || 4400 tp->mac_version == RTL_GIGA_MAC_VER_28 ||
4139 tp->mac_version == RTL_GIGA_MAC_VER_31) { 4401 tp->mac_version == RTL_GIGA_MAC_VER_31) {
4140 while (RTL_R8(TxPoll) & NPQ) 4402 rtl_udelay_loop_wait_low(tp, &rtl_npq_cond, 20, 42*42);
4141 udelay(20);
4142 } else if (tp->mac_version == RTL_GIGA_MAC_VER_34 || 4403 } else if (tp->mac_version == RTL_GIGA_MAC_VER_34 ||
4143 tp->mac_version == RTL_GIGA_MAC_VER_35 || 4404 tp->mac_version == RTL_GIGA_MAC_VER_35 ||
4144 tp->mac_version == RTL_GIGA_MAC_VER_36 || 4405 tp->mac_version == RTL_GIGA_MAC_VER_36 ||
4145 tp->mac_version == RTL_GIGA_MAC_VER_37 || 4406 tp->mac_version == RTL_GIGA_MAC_VER_37 ||
4407 tp->mac_version == RTL_GIGA_MAC_VER_40 ||
4408 tp->mac_version == RTL_GIGA_MAC_VER_41 ||
4146 tp->mac_version == RTL_GIGA_MAC_VER_38) { 4409 tp->mac_version == RTL_GIGA_MAC_VER_38) {
4147 RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq); 4410 RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq);
4148 while (!(RTL_R32(TxConfig) & TXCFG_EMPTY)) 4411 rtl_udelay_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 666);
4149 udelay(100);
4150 } else { 4412 } else {
4151 RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq); 4413 RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq);
4152 udelay(100); 4414 udelay(100);
@@ -4352,15 +4614,12 @@ static void rtl_hw_start_8169(struct net_device *dev)
4352static void rtl_csi_write(struct rtl8169_private *tp, int addr, int value) 4614static void rtl_csi_write(struct rtl8169_private *tp, int addr, int value)
4353{ 4615{
4354 if (tp->csi_ops.write) 4616 if (tp->csi_ops.write)
4355 tp->csi_ops.write(tp->mmio_addr, addr, value); 4617 tp->csi_ops.write(tp, addr, value);
4356} 4618}
4357 4619
4358static u32 rtl_csi_read(struct rtl8169_private *tp, int addr) 4620static u32 rtl_csi_read(struct rtl8169_private *tp, int addr)
4359{ 4621{
4360 if (tp->csi_ops.read) 4622 return tp->csi_ops.read ? tp->csi_ops.read(tp, addr) : ~0;
4361 return tp->csi_ops.read(tp->mmio_addr, addr);
4362 else
4363 return ~0;
4364} 4623}
4365 4624
4366static void rtl_csi_access_enable(struct rtl8169_private *tp, u32 bits) 4625static void rtl_csi_access_enable(struct rtl8169_private *tp, u32 bits)
@@ -4381,73 +4640,56 @@ static void rtl_csi_access_enable_2(struct rtl8169_private *tp)
4381 rtl_csi_access_enable(tp, 0x27000000); 4640 rtl_csi_access_enable(tp, 0x27000000);
4382} 4641}
4383 4642
4384static void r8169_csi_write(void __iomem *ioaddr, int addr, int value) 4643DECLARE_RTL_COND(rtl_csiar_cond)
4385{ 4644{
4386 unsigned int i; 4645 void __iomem *ioaddr = tp->mmio_addr;
4646
4647 return RTL_R32(CSIAR) & CSIAR_FLAG;
4648}
4649
4650static void r8169_csi_write(struct rtl8169_private *tp, int addr, int value)
4651{
4652 void __iomem *ioaddr = tp->mmio_addr;
4387 4653
4388 RTL_W32(CSIDR, value); 4654 RTL_W32(CSIDR, value);
4389 RTL_W32(CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) | 4655 RTL_W32(CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) |
4390 CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT); 4656 CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);
4391 4657
4392 for (i = 0; i < 100; i++) { 4658 rtl_udelay_loop_wait_low(tp, &rtl_csiar_cond, 10, 100);
4393 if (!(RTL_R32(CSIAR) & CSIAR_FLAG))
4394 break;
4395 udelay(10);
4396 }
4397} 4659}
4398 4660
4399static u32 r8169_csi_read(void __iomem *ioaddr, int addr) 4661static u32 r8169_csi_read(struct rtl8169_private *tp, int addr)
4400{ 4662{
4401 u32 value = ~0x00; 4663 void __iomem *ioaddr = tp->mmio_addr;
4402 unsigned int i;
4403 4664
4404 RTL_W32(CSIAR, (addr & CSIAR_ADDR_MASK) | 4665 RTL_W32(CSIAR, (addr & CSIAR_ADDR_MASK) |
4405 CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT); 4666 CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);
4406 4667
4407 for (i = 0; i < 100; i++) { 4668 return rtl_udelay_loop_wait_high(tp, &rtl_csiar_cond, 10, 100) ?
4408 if (RTL_R32(CSIAR) & CSIAR_FLAG) { 4669 RTL_R32(CSIDR) : ~0;
4409 value = RTL_R32(CSIDR);
4410 break;
4411 }
4412 udelay(10);
4413 }
4414
4415 return value;
4416} 4670}
4417 4671
4418static void r8402_csi_write(void __iomem *ioaddr, int addr, int value) 4672static void r8402_csi_write(struct rtl8169_private *tp, int addr, int value)
4419{ 4673{
4420 unsigned int i; 4674 void __iomem *ioaddr = tp->mmio_addr;
4421 4675
4422 RTL_W32(CSIDR, value); 4676 RTL_W32(CSIDR, value);
4423 RTL_W32(CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) | 4677 RTL_W32(CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) |
4424 CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT | 4678 CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT |
4425 CSIAR_FUNC_NIC); 4679 CSIAR_FUNC_NIC);
4426 4680
4427 for (i = 0; i < 100; i++) { 4681 rtl_udelay_loop_wait_low(tp, &rtl_csiar_cond, 10, 100);
4428 if (!(RTL_R32(CSIAR) & CSIAR_FLAG))
4429 break;
4430 udelay(10);
4431 }
4432} 4682}
4433 4683
4434static u32 r8402_csi_read(void __iomem *ioaddr, int addr) 4684static u32 r8402_csi_read(struct rtl8169_private *tp, int addr)
4435{ 4685{
4436 u32 value = ~0x00; 4686 void __iomem *ioaddr = tp->mmio_addr;
4437 unsigned int i;
4438 4687
4439 RTL_W32(CSIAR, (addr & CSIAR_ADDR_MASK) | CSIAR_FUNC_NIC | 4688 RTL_W32(CSIAR, (addr & CSIAR_ADDR_MASK) | CSIAR_FUNC_NIC |
4440 CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT); 4689 CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);
4441 4690
4442 for (i = 0; i < 100; i++) { 4691 return rtl_udelay_loop_wait_high(tp, &rtl_csiar_cond, 10, 100) ?
4443 if (RTL_R32(CSIAR) & CSIAR_FLAG) { 4692 RTL_R32(CSIDR) : ~0;
4444 value = RTL_R32(CSIDR);
4445 break;
4446 }
4447 udelay(10);
4448 }
4449
4450 return value;
4451} 4693}
4452 4694
4453static void __devinit rtl_init_csi_ops(struct rtl8169_private *tp) 4695static void __devinit rtl_init_csi_ops(struct rtl8169_private *tp)
@@ -4492,13 +4734,14 @@ struct ephy_info {
4492 u16 bits; 4734 u16 bits;
4493}; 4735};
4494 4736
4495static void rtl_ephy_init(void __iomem *ioaddr, const struct ephy_info *e, int len) 4737static void rtl_ephy_init(struct rtl8169_private *tp, const struct ephy_info *e,
4738 int len)
4496{ 4739{
4497 u16 w; 4740 u16 w;
4498 4741
4499 while (len-- > 0) { 4742 while (len-- > 0) {
4500 w = (rtl_ephy_read(ioaddr, e->offset) & ~e->mask) | e->bits; 4743 w = (rtl_ephy_read(tp, e->offset) & ~e->mask) | e->bits;
4501 rtl_ephy_write(ioaddr, e->offset, w); 4744 rtl_ephy_write(tp, e->offset, w);
4502 e++; 4745 e++;
4503 } 4746 }
4504} 4747}
@@ -4582,7 +4825,6 @@ static void __rtl_hw_start_8168cp(struct rtl8169_private *tp)
4582 4825
4583static void rtl_hw_start_8168cp_1(struct rtl8169_private *tp) 4826static void rtl_hw_start_8168cp_1(struct rtl8169_private *tp)
4584{ 4827{
4585 void __iomem *ioaddr = tp->mmio_addr;
4586 static const struct ephy_info e_info_8168cp[] = { 4828 static const struct ephy_info e_info_8168cp[] = {
4587 { 0x01, 0, 0x0001 }, 4829 { 0x01, 0, 0x0001 },
4588 { 0x02, 0x0800, 0x1000 }, 4830 { 0x02, 0x0800, 0x1000 },
@@ -4593,7 +4835,7 @@ static void rtl_hw_start_8168cp_1(struct rtl8169_private *tp)
4593 4835
4594 rtl_csi_access_enable_2(tp); 4836 rtl_csi_access_enable_2(tp);
4595 4837
4596 rtl_ephy_init(ioaddr, e_info_8168cp, ARRAY_SIZE(e_info_8168cp)); 4838 rtl_ephy_init(tp, e_info_8168cp, ARRAY_SIZE(e_info_8168cp));
4597 4839
4598 __rtl_hw_start_8168cp(tp); 4840 __rtl_hw_start_8168cp(tp);
4599} 4841}
@@ -4644,14 +4886,13 @@ static void rtl_hw_start_8168c_1(struct rtl8169_private *tp)
4644 4886
4645 RTL_W8(DBG_REG, 0x06 | FIX_NAK_1 | FIX_NAK_2); 4887 RTL_W8(DBG_REG, 0x06 | FIX_NAK_1 | FIX_NAK_2);
4646 4888
4647 rtl_ephy_init(ioaddr, e_info_8168c_1, ARRAY_SIZE(e_info_8168c_1)); 4889 rtl_ephy_init(tp, e_info_8168c_1, ARRAY_SIZE(e_info_8168c_1));
4648 4890
4649 __rtl_hw_start_8168cp(tp); 4891 __rtl_hw_start_8168cp(tp);
4650} 4892}
4651 4893
4652static void rtl_hw_start_8168c_2(struct rtl8169_private *tp) 4894static void rtl_hw_start_8168c_2(struct rtl8169_private *tp)
4653{ 4895{
4654 void __iomem *ioaddr = tp->mmio_addr;
4655 static const struct ephy_info e_info_8168c_2[] = { 4896 static const struct ephy_info e_info_8168c_2[] = {
4656 { 0x01, 0, 0x0001 }, 4897 { 0x01, 0, 0x0001 },
4657 { 0x03, 0x0400, 0x0220 } 4898 { 0x03, 0x0400, 0x0220 }
@@ -4659,7 +4900,7 @@ static void rtl_hw_start_8168c_2(struct rtl8169_private *tp)
4659 4900
4660 rtl_csi_access_enable_2(tp); 4901 rtl_csi_access_enable_2(tp);
4661 4902
4662 rtl_ephy_init(ioaddr, e_info_8168c_2, ARRAY_SIZE(e_info_8168c_2)); 4903 rtl_ephy_init(tp, e_info_8168c_2, ARRAY_SIZE(e_info_8168c_2));
4663 4904
4664 __rtl_hw_start_8168cp(tp); 4905 __rtl_hw_start_8168cp(tp);
4665} 4906}
@@ -4727,8 +4968,8 @@ static void rtl_hw_start_8168d_4(struct rtl8169_private *tp)
4727 const struct ephy_info *e = e_info_8168d_4 + i; 4968 const struct ephy_info *e = e_info_8168d_4 + i;
4728 u16 w; 4969 u16 w;
4729 4970
4730 w = rtl_ephy_read(ioaddr, e->offset); 4971 w = rtl_ephy_read(tp, e->offset);
4731 rtl_ephy_write(ioaddr, 0x03, (w & e->mask) | e->bits); 4972 rtl_ephy_write(tp, 0x03, (w & e->mask) | e->bits);
4732 } 4973 }
4733 4974
4734 rtl_enable_clock_request(pdev); 4975 rtl_enable_clock_request(pdev);
@@ -4756,7 +4997,7 @@ static void rtl_hw_start_8168e_1(struct rtl8169_private *tp)
4756 4997
4757 rtl_csi_access_enable_2(tp); 4998 rtl_csi_access_enable_2(tp);
4758 4999
4759 rtl_ephy_init(ioaddr, e_info_8168e_1, ARRAY_SIZE(e_info_8168e_1)); 5000 rtl_ephy_init(tp, e_info_8168e_1, ARRAY_SIZE(e_info_8168e_1));
4760 5001
4761 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); 5002 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
4762 5003
@@ -4782,19 +5023,18 @@ static void rtl_hw_start_8168e_2(struct rtl8169_private *tp)
4782 5023
4783 rtl_csi_access_enable_1(tp); 5024 rtl_csi_access_enable_1(tp);
4784 5025
4785 rtl_ephy_init(ioaddr, e_info_8168e_2, ARRAY_SIZE(e_info_8168e_2)); 5026 rtl_ephy_init(tp, e_info_8168e_2, ARRAY_SIZE(e_info_8168e_2));
4786 5027
4787 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); 5028 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
4788 5029
4789 rtl_eri_write(ioaddr, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); 5030 rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
4790 rtl_eri_write(ioaddr, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); 5031 rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
4791 rtl_eri_write(ioaddr, 0xc8, ERIAR_MASK_1111, 0x00100002, ERIAR_EXGMAC); 5032 rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, 0x00100002, ERIAR_EXGMAC);
4792 rtl_eri_write(ioaddr, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC); 5033 rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);
4793 rtl_eri_write(ioaddr, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC); 5034 rtl_eri_write(tp, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC);
4794 rtl_eri_write(ioaddr, 0xd0, ERIAR_MASK_1111, 0x07ff0060, ERIAR_EXGMAC); 5035 rtl_eri_write(tp, 0xd0, ERIAR_MASK_1111, 0x07ff0060, ERIAR_EXGMAC);
4795 rtl_w1w0_eri(ioaddr, 0x1b0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC); 5036 rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
4796 rtl_w1w0_eri(ioaddr, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00, 5037 rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00, ERIAR_EXGMAC);
4797 ERIAR_EXGMAC);
4798 5038
4799 RTL_W8(MaxTxPacketSize, EarlySize); 5039 RTL_W8(MaxTxPacketSize, EarlySize);
4800 5040
@@ -4820,16 +5060,16 @@ static void rtl_hw_start_8168f(struct rtl8169_private *tp)
4820 5060
4821 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); 5061 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
4822 5062
4823 rtl_eri_write(ioaddr, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); 5063 rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
4824 rtl_eri_write(ioaddr, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); 5064 rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
4825 rtl_eri_write(ioaddr, 0xc8, ERIAR_MASK_1111, 0x00100002, ERIAR_EXGMAC); 5065 rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, 0x00100002, ERIAR_EXGMAC);
4826 rtl_eri_write(ioaddr, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC); 5066 rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);
4827 rtl_w1w0_eri(ioaddr, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC); 5067 rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
4828 rtl_w1w0_eri(ioaddr, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC); 5068 rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
4829 rtl_w1w0_eri(ioaddr, 0x1b0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC); 5069 rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
4830 rtl_w1w0_eri(ioaddr, 0x1d0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC); 5070 rtl_w1w0_eri(tp, 0x1d0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
4831 rtl_eri_write(ioaddr, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC); 5071 rtl_eri_write(tp, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC);
4832 rtl_eri_write(ioaddr, 0xd0, ERIAR_MASK_1111, 0x00000060, ERIAR_EXGMAC); 5072 rtl_eri_write(tp, 0xd0, ERIAR_MASK_1111, 0x00000060, ERIAR_EXGMAC);
4833 5073
4834 RTL_W8(MaxTxPacketSize, EarlySize); 5074 RTL_W8(MaxTxPacketSize, EarlySize);
4835 5075
@@ -4854,10 +5094,9 @@ static void rtl_hw_start_8168f_1(struct rtl8169_private *tp)
4854 5094
4855 rtl_hw_start_8168f(tp); 5095 rtl_hw_start_8168f(tp);
4856 5096
4857 rtl_ephy_init(ioaddr, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1)); 5097 rtl_ephy_init(tp, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1));
4858 5098
4859 rtl_w1w0_eri(ioaddr, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00, 5099 rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00, ERIAR_EXGMAC);
4860 ERIAR_EXGMAC);
4861 5100
4862 /* Adjust EEE LED frequency */ 5101 /* Adjust EEE LED frequency */
4863 RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07); 5102 RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07);
@@ -4865,7 +5104,6 @@ static void rtl_hw_start_8168f_1(struct rtl8169_private *tp)
4865 5104
4866static void rtl_hw_start_8411(struct rtl8169_private *tp) 5105static void rtl_hw_start_8411(struct rtl8169_private *tp)
4867{ 5106{
4868 void __iomem *ioaddr = tp->mmio_addr;
4869 static const struct ephy_info e_info_8168f_1[] = { 5107 static const struct ephy_info e_info_8168f_1[] = {
4870 { 0x06, 0x00c0, 0x0020 }, 5108 { 0x06, 0x00c0, 0x0020 },
4871 { 0x0f, 0xffff, 0x5200 }, 5109 { 0x0f, 0xffff, 0x5200 },
@@ -4875,10 +5113,39 @@ static void rtl_hw_start_8411(struct rtl8169_private *tp)
4875 5113
4876 rtl_hw_start_8168f(tp); 5114 rtl_hw_start_8168f(tp);
4877 5115
4878 rtl_ephy_init(ioaddr, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1)); 5116 rtl_ephy_init(tp, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1));
4879 5117
4880 rtl_w1w0_eri(ioaddr, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0x0000, 5118 rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0x0000, ERIAR_EXGMAC);
4881 ERIAR_EXGMAC); 5119}
5120
5121static void rtl_hw_start_8168g_1(struct rtl8169_private *tp)
5122{
5123 void __iomem *ioaddr = tp->mmio_addr;
5124 struct pci_dev *pdev = tp->pci_dev;
5125
5126 rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x080002, ERIAR_EXGMAC);
5127 rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, 0x38, ERIAR_EXGMAC);
5128 rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x48, ERIAR_EXGMAC);
5129 rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);
5130
5131 rtl_csi_access_enable_1(tp);
5132
5133 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
5134
5135 rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
5136 rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
5137
5138 RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
5139 RTL_W32(MISC, RTL_R32(MISC) & ~RXDV_GATED_EN);
5140 RTL_W8(MaxTxPacketSize, EarlySize);
5141
5142 rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
5143 rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
5144
5145 /* Adjust EEE LED frequency */
5146 RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07);
5147
5148 rtl_w1w0_eri(tp, 0x2fc, ERIAR_MASK_0001, 0x01, 0x02, ERIAR_EXGMAC);
4882} 5149}
4883 5150
4884static void rtl_hw_start_8168(struct net_device *dev) 5151static void rtl_hw_start_8168(struct net_device *dev)
@@ -4982,6 +5249,11 @@ static void rtl_hw_start_8168(struct net_device *dev)
4982 rtl_hw_start_8411(tp); 5249 rtl_hw_start_8411(tp);
4983 break; 5250 break;
4984 5251
5252 case RTL_GIGA_MAC_VER_40:
5253 case RTL_GIGA_MAC_VER_41:
5254 rtl_hw_start_8168g_1(tp);
5255 break;
5256
4985 default: 5257 default:
4986 printk(KERN_ERR PFX "%s: unknown chipset (mac_version = %d).\n", 5258 printk(KERN_ERR PFX "%s: unknown chipset (mac_version = %d).\n",
4987 dev->name, tp->mac_version); 5259 dev->name, tp->mac_version);
@@ -5036,7 +5308,7 @@ static void rtl_hw_start_8102e_1(struct rtl8169_private *tp)
5036 if ((cfg1 & LEDS0) && (cfg1 & LEDS1)) 5308 if ((cfg1 & LEDS0) && (cfg1 & LEDS1))
5037 RTL_W8(Config1, cfg1 & ~LEDS0); 5309 RTL_W8(Config1, cfg1 & ~LEDS0);
5038 5310
5039 rtl_ephy_init(ioaddr, e_info_8102e_1, ARRAY_SIZE(e_info_8102e_1)); 5311 rtl_ephy_init(tp, e_info_8102e_1, ARRAY_SIZE(e_info_8102e_1));
5040} 5312}
5041 5313
5042static void rtl_hw_start_8102e_2(struct rtl8169_private *tp) 5314static void rtl_hw_start_8102e_2(struct rtl8169_private *tp)
@@ -5056,7 +5328,7 @@ static void rtl_hw_start_8102e_3(struct rtl8169_private *tp)
5056{ 5328{
5057 rtl_hw_start_8102e_2(tp); 5329 rtl_hw_start_8102e_2(tp);
5058 5330
5059 rtl_ephy_write(tp->mmio_addr, 0x03, 0xc2f9); 5331 rtl_ephy_write(tp, 0x03, 0xc2f9);
5060} 5332}
5061 5333
5062static void rtl_hw_start_8105e_1(struct rtl8169_private *tp) 5334static void rtl_hw_start_8105e_1(struct rtl8169_private *tp)
@@ -5082,15 +5354,13 @@ static void rtl_hw_start_8105e_1(struct rtl8169_private *tp)
5082 RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET); 5354 RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET);
5083 RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN); 5355 RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
5084 5356
5085 rtl_ephy_init(ioaddr, e_info_8105e_1, ARRAY_SIZE(e_info_8105e_1)); 5357 rtl_ephy_init(tp, e_info_8105e_1, ARRAY_SIZE(e_info_8105e_1));
5086} 5358}
5087 5359
5088static void rtl_hw_start_8105e_2(struct rtl8169_private *tp) 5360static void rtl_hw_start_8105e_2(struct rtl8169_private *tp)
5089{ 5361{
5090 void __iomem *ioaddr = tp->mmio_addr;
5091
5092 rtl_hw_start_8105e_1(tp); 5362 rtl_hw_start_8105e_1(tp);
5093 rtl_ephy_write(ioaddr, 0x1e, rtl_ephy_read(ioaddr, 0x1e) | 0x8000); 5363 rtl_ephy_write(tp, 0x1e, rtl_ephy_read(tp, 0x1e) | 0x8000);
5094} 5364}
5095 5365
5096static void rtl_hw_start_8402(struct rtl8169_private *tp) 5366static void rtl_hw_start_8402(struct rtl8169_private *tp)
@@ -5109,18 +5379,29 @@ static void rtl_hw_start_8402(struct rtl8169_private *tp)
5109 RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO); 5379 RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
5110 RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB); 5380 RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);
5111 5381
5112 rtl_ephy_init(ioaddr, e_info_8402, ARRAY_SIZE(e_info_8402)); 5382 rtl_ephy_init(tp, e_info_8402, ARRAY_SIZE(e_info_8402));
5113 5383
5114 rtl_tx_performance_tweak(tp->pci_dev, 0x5 << MAX_READ_REQUEST_SHIFT); 5384 rtl_tx_performance_tweak(tp->pci_dev, 0x5 << MAX_READ_REQUEST_SHIFT);
5115 5385
5116 rtl_eri_write(ioaddr, 0xc8, ERIAR_MASK_1111, 0x00000002, ERIAR_EXGMAC); 5386 rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, 0x00000002, ERIAR_EXGMAC);
5117 rtl_eri_write(ioaddr, 0xe8, ERIAR_MASK_1111, 0x00000006, ERIAR_EXGMAC); 5387 rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00000006, ERIAR_EXGMAC);
5118 rtl_w1w0_eri(ioaddr, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC); 5388 rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
5119 rtl_w1w0_eri(ioaddr, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC); 5389 rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
5120 rtl_eri_write(ioaddr, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); 5390 rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
5121 rtl_eri_write(ioaddr, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); 5391 rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
5122 rtl_w1w0_eri(ioaddr, 0x0d4, ERIAR_MASK_0011, 0x0e00, 0xff00, 5392 rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0e00, 0xff00, ERIAR_EXGMAC);
5123 ERIAR_EXGMAC); 5393}
5394
5395static void rtl_hw_start_8106(struct rtl8169_private *tp)
5396{
5397 void __iomem *ioaddr = tp->mmio_addr;
5398
5399 /* Force LAN exit from ASPM if Rx/Tx are not idle */
5400 RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800);
5401
5402 RTL_W32(MISC, (RTL_R32(MISC) | DISABLE_LAN_EN) & ~EARLY_TALLY_EN);
5403 RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET);
5404 RTL_W8(DLLPR, RTL_R8(DLLPR) & ~PFM_EN);
5124} 5405}
5125 5406
5126static void rtl_hw_start_8101(struct net_device *dev) 5407static void rtl_hw_start_8101(struct net_device *dev)
@@ -5167,6 +5448,10 @@ static void rtl_hw_start_8101(struct net_device *dev)
5167 case RTL_GIGA_MAC_VER_37: 5448 case RTL_GIGA_MAC_VER_37:
5168 rtl_hw_start_8402(tp); 5449 rtl_hw_start_8402(tp);
5169 break; 5450 break;
5451
5452 case RTL_GIGA_MAC_VER_39:
5453 rtl_hw_start_8106(tp);
5454 break;
5170 } 5455 }
5171 5456
5172 RTL_W8(Cfg9346, Cfg9346_Lock); 5457 RTL_W8(Cfg9346, Cfg9346_Lock);
@@ -6435,6 +6720,67 @@ static unsigned rtl_try_msi(struct rtl8169_private *tp,
6435 return msi; 6720 return msi;
6436} 6721}
6437 6722
6723DECLARE_RTL_COND(rtl_link_list_ready_cond)
6724{
6725 void __iomem *ioaddr = tp->mmio_addr;
6726
6727 return RTL_R8(MCU) & LINK_LIST_RDY;
6728}
6729
6730DECLARE_RTL_COND(rtl_rxtx_empty_cond)
6731{
6732 void __iomem *ioaddr = tp->mmio_addr;
6733
6734 return (RTL_R8(MCU) & RXTX_EMPTY) == RXTX_EMPTY;
6735}
6736
6737static void __devinit rtl_hw_init_8168g(struct rtl8169_private *tp)
6738{
6739 void __iomem *ioaddr = tp->mmio_addr;
6740 u32 data;
6741
6742 tp->ocp_base = OCP_STD_PHY_BASE;
6743
6744 RTL_W32(MISC, RTL_R32(MISC) | RXDV_GATED_EN);
6745
6746 if (!rtl_udelay_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 42))
6747 return;
6748
6749 if (!rtl_udelay_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42))
6750 return;
6751
6752 RTL_W8(ChipCmd, RTL_R8(ChipCmd) & ~(CmdTxEnb | CmdRxEnb));
6753 msleep(1);
6754 RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);
6755
6756 data = r8168_mac_ocp_read(ioaddr, 0xe8de);
6757 data &= ~(1 << 14);
6758 r8168_mac_ocp_write(tp, 0xe8de, data);
6759
6760 if (!rtl_udelay_loop_wait_high(tp, &rtl_link_list_ready_cond, 100, 42))
6761 return;
6762
6763 data = r8168_mac_ocp_read(ioaddr, 0xe8de);
6764 data |= (1 << 15);
6765 r8168_mac_ocp_write(tp, 0xe8de, data);
6766
6767 if (!rtl_udelay_loop_wait_high(tp, &rtl_link_list_ready_cond, 100, 42))
6768 return;
6769}
6770
6771static void __devinit rtl_hw_initialize(struct rtl8169_private *tp)
6772{
6773 switch (tp->mac_version) {
6774 case RTL_GIGA_MAC_VER_40:
6775 case RTL_GIGA_MAC_VER_41:
6776 rtl_hw_init_8168g(tp);
6777 break;
6778
6779 default:
6780 break;
6781 }
6782}
6783
6438static int __devinit 6784static int __devinit
6439rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) 6785rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
6440{ 6786{
@@ -6544,6 +6890,8 @@ rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
6544 6890
6545 rtl_irq_disable(tp); 6891 rtl_irq_disable(tp);
6546 6892
6893 rtl_hw_initialize(tp);
6894
6547 rtl_hw_reset(tp); 6895 rtl_hw_reset(tp);
6548 6896
6549 rtl_ack_events(tp, 0xffff); 6897 rtl_ack_events(tp, 0xffff);
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 79bf09b4197..af0b867a6cf 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -49,6 +49,34 @@
49 NETIF_MSG_RX_ERR| \ 49 NETIF_MSG_RX_ERR| \
50 NETIF_MSG_TX_ERR) 50 NETIF_MSG_TX_ERR)
51 51
52#if defined(CONFIG_CPU_SUBTYPE_SH7734) || \
53 defined(CONFIG_CPU_SUBTYPE_SH7763) || \
54 defined(CONFIG_ARCH_R8A7740)
55static void sh_eth_select_mii(struct net_device *ndev)
56{
57 u32 value = 0x0;
58 struct sh_eth_private *mdp = netdev_priv(ndev);
59
60 switch (mdp->phy_interface) {
61 case PHY_INTERFACE_MODE_GMII:
62 value = 0x2;
63 break;
64 case PHY_INTERFACE_MODE_MII:
65 value = 0x1;
66 break;
67 case PHY_INTERFACE_MODE_RMII:
68 value = 0x0;
69 break;
70 default:
71 pr_warn("PHY interface mode was not setup. Set to MII.\n");
72 value = 0x1;
73 break;
74 }
75
76 sh_eth_write(ndev, value, RMII_MII);
77}
78#endif
79
52/* There is CPU dependent code */ 80/* There is CPU dependent code */
53#if defined(CONFIG_CPU_SUBTYPE_SH7724) 81#if defined(CONFIG_CPU_SUBTYPE_SH7724)
54#define SH_ETH_RESET_DEFAULT 1 82#define SH_ETH_RESET_DEFAULT 1
@@ -102,6 +130,8 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
102#elif defined(CONFIG_CPU_SUBTYPE_SH7757) 130#elif defined(CONFIG_CPU_SUBTYPE_SH7757)
103#define SH_ETH_HAS_BOTH_MODULES 1 131#define SH_ETH_HAS_BOTH_MODULES 1
104#define SH_ETH_HAS_TSU 1 132#define SH_ETH_HAS_TSU 1
133static int sh_eth_check_reset(struct net_device *ndev);
134
105static void sh_eth_set_duplex(struct net_device *ndev) 135static void sh_eth_set_duplex(struct net_device *ndev)
106{ 136{
107 struct sh_eth_private *mdp = netdev_priv(ndev); 137 struct sh_eth_private *mdp = netdev_priv(ndev);
@@ -176,23 +206,19 @@ static void sh_eth_chip_reset_giga(struct net_device *ndev)
176} 206}
177 207
178static int sh_eth_is_gether(struct sh_eth_private *mdp); 208static int sh_eth_is_gether(struct sh_eth_private *mdp);
179static void sh_eth_reset(struct net_device *ndev) 209static int sh_eth_reset(struct net_device *ndev)
180{ 210{
181 struct sh_eth_private *mdp = netdev_priv(ndev); 211 struct sh_eth_private *mdp = netdev_priv(ndev);
182 int cnt = 100; 212 int ret = 0;
183 213
184 if (sh_eth_is_gether(mdp)) { 214 if (sh_eth_is_gether(mdp)) {
185 sh_eth_write(ndev, 0x03, EDSR); 215 sh_eth_write(ndev, 0x03, EDSR);
186 sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER, 216 sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER,
187 EDMR); 217 EDMR);
188 while (cnt > 0) { 218
189 if (!(sh_eth_read(ndev, EDMR) & 0x3)) 219 ret = sh_eth_check_reset(ndev);
190 break; 220 if (ret)
191 mdelay(1); 221 goto out;
192 cnt--;
193 }
194 if (cnt < 0)
195 printk(KERN_ERR "Device reset fail\n");
196 222
197 /* Table Init */ 223 /* Table Init */
198 sh_eth_write(ndev, 0x0, TDLAR); 224 sh_eth_write(ndev, 0x0, TDLAR);
@@ -210,6 +236,9 @@ static void sh_eth_reset(struct net_device *ndev)
210 sh_eth_write(ndev, sh_eth_read(ndev, EDMR) & ~EDMR_SRST_ETHER, 236 sh_eth_write(ndev, sh_eth_read(ndev, EDMR) & ~EDMR_SRST_ETHER,
211 EDMR); 237 EDMR);
212 } 238 }
239
240out:
241 return ret;
213} 242}
214 243
215static void sh_eth_set_duplex_giga(struct net_device *ndev) 244static void sh_eth_set_duplex_giga(struct net_device *ndev)
@@ -282,7 +311,9 @@ static struct sh_eth_cpu_data *sh_eth_get_cpu_data(struct sh_eth_private *mdp)
282 311
283#elif defined(CONFIG_CPU_SUBTYPE_SH7734) || defined(CONFIG_CPU_SUBTYPE_SH7763) 312#elif defined(CONFIG_CPU_SUBTYPE_SH7734) || defined(CONFIG_CPU_SUBTYPE_SH7763)
284#define SH_ETH_HAS_TSU 1 313#define SH_ETH_HAS_TSU 1
314static int sh_eth_check_reset(struct net_device *ndev);
285static void sh_eth_reset_hw_crc(struct net_device *ndev); 315static void sh_eth_reset_hw_crc(struct net_device *ndev);
316
286static void sh_eth_chip_reset(struct net_device *ndev) 317static void sh_eth_chip_reset(struct net_device *ndev)
287{ 318{
288 struct sh_eth_private *mdp = netdev_priv(ndev); 319 struct sh_eth_private *mdp = netdev_priv(ndev);
@@ -292,35 +323,6 @@ static void sh_eth_chip_reset(struct net_device *ndev)
292 mdelay(1); 323 mdelay(1);
293} 324}
294 325
295static void sh_eth_reset(struct net_device *ndev)
296{
297 int cnt = 100;
298
299 sh_eth_write(ndev, EDSR_ENALL, EDSR);
300 sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER, EDMR);
301 while (cnt > 0) {
302 if (!(sh_eth_read(ndev, EDMR) & 0x3))
303 break;
304 mdelay(1);
305 cnt--;
306 }
307 if (cnt == 0)
308 printk(KERN_ERR "Device reset fail\n");
309
310 /* Table Init */
311 sh_eth_write(ndev, 0x0, TDLAR);
312 sh_eth_write(ndev, 0x0, TDFAR);
313 sh_eth_write(ndev, 0x0, TDFXR);
314 sh_eth_write(ndev, 0x0, TDFFR);
315 sh_eth_write(ndev, 0x0, RDLAR);
316 sh_eth_write(ndev, 0x0, RDFAR);
317 sh_eth_write(ndev, 0x0, RDFXR);
318 sh_eth_write(ndev, 0x0, RDFFR);
319
320 /* Reset HW CRC register */
321 sh_eth_reset_hw_crc(ndev);
322}
323
324static void sh_eth_set_duplex(struct net_device *ndev) 326static void sh_eth_set_duplex(struct net_device *ndev)
325{ 327{
326 struct sh_eth_private *mdp = netdev_priv(ndev); 328 struct sh_eth_private *mdp = netdev_priv(ndev);
@@ -377,9 +379,41 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
377 .tsu = 1, 379 .tsu = 1,
378#if defined(CONFIG_CPU_SUBTYPE_SH7734) 380#if defined(CONFIG_CPU_SUBTYPE_SH7734)
379 .hw_crc = 1, 381 .hw_crc = 1,
382 .select_mii = 1,
380#endif 383#endif
381}; 384};
382 385
386static int sh_eth_reset(struct net_device *ndev)
387{
388 int ret = 0;
389
390 sh_eth_write(ndev, EDSR_ENALL, EDSR);
391 sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER, EDMR);
392
393 ret = sh_eth_check_reset(ndev);
394 if (ret)
395 goto out;
396
397 /* Table Init */
398 sh_eth_write(ndev, 0x0, TDLAR);
399 sh_eth_write(ndev, 0x0, TDFAR);
400 sh_eth_write(ndev, 0x0, TDFXR);
401 sh_eth_write(ndev, 0x0, TDFFR);
402 sh_eth_write(ndev, 0x0, RDLAR);
403 sh_eth_write(ndev, 0x0, RDFAR);
404 sh_eth_write(ndev, 0x0, RDFXR);
405 sh_eth_write(ndev, 0x0, RDFFR);
406
407 /* Reset HW CRC register */
408 sh_eth_reset_hw_crc(ndev);
409
410 /* Select MII mode */
411 if (sh_eth_my_cpu_data.select_mii)
412 sh_eth_select_mii(ndev);
413out:
414 return ret;
415}
416
383static void sh_eth_reset_hw_crc(struct net_device *ndev) 417static void sh_eth_reset_hw_crc(struct net_device *ndev)
384{ 418{
385 if (sh_eth_my_cpu_data.hw_crc) 419 if (sh_eth_my_cpu_data.hw_crc)
@@ -388,44 +422,29 @@ static void sh_eth_reset_hw_crc(struct net_device *ndev)
388 422
389#elif defined(CONFIG_ARCH_R8A7740) 423#elif defined(CONFIG_ARCH_R8A7740)
390#define SH_ETH_HAS_TSU 1 424#define SH_ETH_HAS_TSU 1
425static int sh_eth_check_reset(struct net_device *ndev);
426
391static void sh_eth_chip_reset(struct net_device *ndev) 427static void sh_eth_chip_reset(struct net_device *ndev)
392{ 428{
393 struct sh_eth_private *mdp = netdev_priv(ndev); 429 struct sh_eth_private *mdp = netdev_priv(ndev);
394 unsigned long mii;
395 430
396 /* reset device */ 431 /* reset device */
397 sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR); 432 sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR);
398 mdelay(1); 433 mdelay(1);
399 434
400 switch (mdp->phy_interface) { 435 sh_eth_select_mii(ndev);
401 case PHY_INTERFACE_MODE_GMII:
402 mii = 2;
403 break;
404 case PHY_INTERFACE_MODE_MII:
405 mii = 1;
406 break;
407 case PHY_INTERFACE_MODE_RMII:
408 default:
409 mii = 0;
410 break;
411 }
412 sh_eth_write(ndev, mii, RMII_MII);
413} 436}
414 437
415static void sh_eth_reset(struct net_device *ndev) 438static int sh_eth_reset(struct net_device *ndev)
416{ 439{
417 int cnt = 100; 440 int ret = 0;
418 441
419 sh_eth_write(ndev, EDSR_ENALL, EDSR); 442 sh_eth_write(ndev, EDSR_ENALL, EDSR);
420 sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER, EDMR); 443 sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER, EDMR);
421 while (cnt > 0) { 444
422 if (!(sh_eth_read(ndev, EDMR) & 0x3)) 445 ret = sh_eth_check_reset(ndev);
423 break; 446 if (ret)
424 mdelay(1); 447 goto out;
425 cnt--;
426 }
427 if (cnt == 0)
428 printk(KERN_ERR "Device reset fail\n");
429 448
430 /* Table Init */ 449 /* Table Init */
431 sh_eth_write(ndev, 0x0, TDLAR); 450 sh_eth_write(ndev, 0x0, TDLAR);
@@ -436,6 +455,9 @@ static void sh_eth_reset(struct net_device *ndev)
436 sh_eth_write(ndev, 0x0, RDFAR); 455 sh_eth_write(ndev, 0x0, RDFAR);
437 sh_eth_write(ndev, 0x0, RDFXR); 456 sh_eth_write(ndev, 0x0, RDFXR);
438 sh_eth_write(ndev, 0x0, RDFFR); 457 sh_eth_write(ndev, 0x0, RDFFR);
458
459out:
460 return ret;
439} 461}
440 462
441static void sh_eth_set_duplex(struct net_device *ndev) 463static void sh_eth_set_duplex(struct net_device *ndev)
@@ -492,6 +514,7 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
492 .no_trimd = 1, 514 .no_trimd = 1,
493 .no_ade = 1, 515 .no_ade = 1,
494 .tsu = 1, 516 .tsu = 1,
517 .select_mii = 1,
495}; 518};
496 519
497#elif defined(CONFIG_CPU_SUBTYPE_SH7619) 520#elif defined(CONFIG_CPU_SUBTYPE_SH7619)
@@ -543,11 +566,31 @@ static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd)
543 566
544#if defined(SH_ETH_RESET_DEFAULT) 567#if defined(SH_ETH_RESET_DEFAULT)
545/* Chip Reset */ 568/* Chip Reset */
546static void sh_eth_reset(struct net_device *ndev) 569static int sh_eth_reset(struct net_device *ndev)
547{ 570{
548 sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_ETHER, EDMR); 571 sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_ETHER, EDMR);
549 mdelay(3); 572 mdelay(3);
550 sh_eth_write(ndev, sh_eth_read(ndev, EDMR) & ~EDMR_SRST_ETHER, EDMR); 573 sh_eth_write(ndev, sh_eth_read(ndev, EDMR) & ~EDMR_SRST_ETHER, EDMR);
574
575 return 0;
576}
577#else
578static int sh_eth_check_reset(struct net_device *ndev)
579{
580 int ret = 0;
581 int cnt = 100;
582
583 while (cnt > 0) {
584 if (!(sh_eth_read(ndev, EDMR) & 0x3))
585 break;
586 mdelay(1);
587 cnt--;
588 }
589 if (cnt < 0) {
590 printk(KERN_ERR "Device reset fail\n");
591 ret = -ETIMEDOUT;
592 }
593 return ret;
551} 594}
552#endif 595#endif
553 596
@@ -739,21 +782,23 @@ static void sh_eth_ring_free(struct net_device *ndev)
739 782
740 /* Free Rx skb ringbuffer */ 783 /* Free Rx skb ringbuffer */
741 if (mdp->rx_skbuff) { 784 if (mdp->rx_skbuff) {
742 for (i = 0; i < RX_RING_SIZE; i++) { 785 for (i = 0; i < mdp->num_rx_ring; i++) {
743 if (mdp->rx_skbuff[i]) 786 if (mdp->rx_skbuff[i])
744 dev_kfree_skb(mdp->rx_skbuff[i]); 787 dev_kfree_skb(mdp->rx_skbuff[i]);
745 } 788 }
746 } 789 }
747 kfree(mdp->rx_skbuff); 790 kfree(mdp->rx_skbuff);
791 mdp->rx_skbuff = NULL;
748 792
749 /* Free Tx skb ringbuffer */ 793 /* Free Tx skb ringbuffer */
750 if (mdp->tx_skbuff) { 794 if (mdp->tx_skbuff) {
751 for (i = 0; i < TX_RING_SIZE; i++) { 795 for (i = 0; i < mdp->num_tx_ring; i++) {
752 if (mdp->tx_skbuff[i]) 796 if (mdp->tx_skbuff[i])
753 dev_kfree_skb(mdp->tx_skbuff[i]); 797 dev_kfree_skb(mdp->tx_skbuff[i]);
754 } 798 }
755 } 799 }
756 kfree(mdp->tx_skbuff); 800 kfree(mdp->tx_skbuff);
801 mdp->tx_skbuff = NULL;
757} 802}
758 803
759/* format skb and descriptor buffer */ 804/* format skb and descriptor buffer */
@@ -764,8 +809,8 @@ static void sh_eth_ring_format(struct net_device *ndev)
764 struct sk_buff *skb; 809 struct sk_buff *skb;
765 struct sh_eth_rxdesc *rxdesc = NULL; 810 struct sh_eth_rxdesc *rxdesc = NULL;
766 struct sh_eth_txdesc *txdesc = NULL; 811 struct sh_eth_txdesc *txdesc = NULL;
767 int rx_ringsize = sizeof(*rxdesc) * RX_RING_SIZE; 812 int rx_ringsize = sizeof(*rxdesc) * mdp->num_rx_ring;
768 int tx_ringsize = sizeof(*txdesc) * TX_RING_SIZE; 813 int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring;
769 814
770 mdp->cur_rx = mdp->cur_tx = 0; 815 mdp->cur_rx = mdp->cur_tx = 0;
771 mdp->dirty_rx = mdp->dirty_tx = 0; 816 mdp->dirty_rx = mdp->dirty_tx = 0;
@@ -773,7 +818,7 @@ static void sh_eth_ring_format(struct net_device *ndev)
773 memset(mdp->rx_ring, 0, rx_ringsize); 818 memset(mdp->rx_ring, 0, rx_ringsize);
774 819
775 /* build Rx ring buffer */ 820 /* build Rx ring buffer */
776 for (i = 0; i < RX_RING_SIZE; i++) { 821 for (i = 0; i < mdp->num_rx_ring; i++) {
777 /* skb */ 822 /* skb */
778 mdp->rx_skbuff[i] = NULL; 823 mdp->rx_skbuff[i] = NULL;
779 skb = netdev_alloc_skb(ndev, mdp->rx_buf_sz); 824 skb = netdev_alloc_skb(ndev, mdp->rx_buf_sz);
@@ -799,7 +844,7 @@ static void sh_eth_ring_format(struct net_device *ndev)
799 } 844 }
800 } 845 }
801 846
802 mdp->dirty_rx = (u32) (i - RX_RING_SIZE); 847 mdp->dirty_rx = (u32) (i - mdp->num_rx_ring);
803 848
804 /* Mark the last entry as wrapping the ring. */ 849 /* Mark the last entry as wrapping the ring. */
805 rxdesc->status |= cpu_to_edmac(mdp, RD_RDEL); 850 rxdesc->status |= cpu_to_edmac(mdp, RD_RDEL);
@@ -807,7 +852,7 @@ static void sh_eth_ring_format(struct net_device *ndev)
807 memset(mdp->tx_ring, 0, tx_ringsize); 852 memset(mdp->tx_ring, 0, tx_ringsize);
808 853
809 /* build Tx ring buffer */ 854 /* build Tx ring buffer */
810 for (i = 0; i < TX_RING_SIZE; i++) { 855 for (i = 0; i < mdp->num_tx_ring; i++) {
811 mdp->tx_skbuff[i] = NULL; 856 mdp->tx_skbuff[i] = NULL;
812 txdesc = &mdp->tx_ring[i]; 857 txdesc = &mdp->tx_ring[i];
813 txdesc->status = cpu_to_edmac(mdp, TD_TFP); 858 txdesc->status = cpu_to_edmac(mdp, TD_TFP);
@@ -841,7 +886,7 @@ static int sh_eth_ring_init(struct net_device *ndev)
841 mdp->rx_buf_sz += NET_IP_ALIGN; 886 mdp->rx_buf_sz += NET_IP_ALIGN;
842 887
843 /* Allocate RX and TX skb rings */ 888 /* Allocate RX and TX skb rings */
844 mdp->rx_skbuff = kmalloc(sizeof(*mdp->rx_skbuff) * RX_RING_SIZE, 889 mdp->rx_skbuff = kmalloc(sizeof(*mdp->rx_skbuff) * mdp->num_rx_ring,
845 GFP_KERNEL); 890 GFP_KERNEL);
846 if (!mdp->rx_skbuff) { 891 if (!mdp->rx_skbuff) {
847 dev_err(&ndev->dev, "Cannot allocate Rx skb\n"); 892 dev_err(&ndev->dev, "Cannot allocate Rx skb\n");
@@ -849,7 +894,7 @@ static int sh_eth_ring_init(struct net_device *ndev)
849 return ret; 894 return ret;
850 } 895 }
851 896
852 mdp->tx_skbuff = kmalloc(sizeof(*mdp->tx_skbuff) * TX_RING_SIZE, 897 mdp->tx_skbuff = kmalloc(sizeof(*mdp->tx_skbuff) * mdp->num_tx_ring,
853 GFP_KERNEL); 898 GFP_KERNEL);
854 if (!mdp->tx_skbuff) { 899 if (!mdp->tx_skbuff) {
855 dev_err(&ndev->dev, "Cannot allocate Tx skb\n"); 900 dev_err(&ndev->dev, "Cannot allocate Tx skb\n");
@@ -858,7 +903,7 @@ static int sh_eth_ring_init(struct net_device *ndev)
858 } 903 }
859 904
860 /* Allocate all Rx descriptors. */ 905 /* Allocate all Rx descriptors. */
861 rx_ringsize = sizeof(struct sh_eth_rxdesc) * RX_RING_SIZE; 906 rx_ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
862 mdp->rx_ring = dma_alloc_coherent(NULL, rx_ringsize, &mdp->rx_desc_dma, 907 mdp->rx_ring = dma_alloc_coherent(NULL, rx_ringsize, &mdp->rx_desc_dma,
863 GFP_KERNEL); 908 GFP_KERNEL);
864 909
@@ -872,7 +917,7 @@ static int sh_eth_ring_init(struct net_device *ndev)
872 mdp->dirty_rx = 0; 917 mdp->dirty_rx = 0;
873 918
874 /* Allocate all Tx descriptors. */ 919 /* Allocate all Tx descriptors. */
875 tx_ringsize = sizeof(struct sh_eth_txdesc) * TX_RING_SIZE; 920 tx_ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
876 mdp->tx_ring = dma_alloc_coherent(NULL, tx_ringsize, &mdp->tx_desc_dma, 921 mdp->tx_ring = dma_alloc_coherent(NULL, tx_ringsize, &mdp->tx_desc_dma,
877 GFP_KERNEL); 922 GFP_KERNEL);
878 if (!mdp->tx_ring) { 923 if (!mdp->tx_ring) {
@@ -890,19 +935,41 @@ desc_ring_free:
890skb_ring_free: 935skb_ring_free:
891 /* Free Rx and Tx skb ring buffer */ 936 /* Free Rx and Tx skb ring buffer */
892 sh_eth_ring_free(ndev); 937 sh_eth_ring_free(ndev);
938 mdp->tx_ring = NULL;
939 mdp->rx_ring = NULL;
893 940
894 return ret; 941 return ret;
895} 942}
896 943
897static int sh_eth_dev_init(struct net_device *ndev) 944static void sh_eth_free_dma_buffer(struct sh_eth_private *mdp)
945{
946 int ringsize;
947
948 if (mdp->rx_ring) {
949 ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
950 dma_free_coherent(NULL, ringsize, mdp->rx_ring,
951 mdp->rx_desc_dma);
952 mdp->rx_ring = NULL;
953 }
954
955 if (mdp->tx_ring) {
956 ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
957 dma_free_coherent(NULL, ringsize, mdp->tx_ring,
958 mdp->tx_desc_dma);
959 mdp->tx_ring = NULL;
960 }
961}
962
963static int sh_eth_dev_init(struct net_device *ndev, bool start)
898{ 964{
899 int ret = 0; 965 int ret = 0;
900 struct sh_eth_private *mdp = netdev_priv(ndev); 966 struct sh_eth_private *mdp = netdev_priv(ndev);
901 u_int32_t rx_int_var, tx_int_var;
902 u32 val; 967 u32 val;
903 968
904 /* Soft Reset */ 969 /* Soft Reset */
905 sh_eth_reset(ndev); 970 ret = sh_eth_reset(ndev);
971 if (ret)
972 goto out;
906 973
907 /* Descriptor format */ 974 /* Descriptor format */
908 sh_eth_ring_format(ndev); 975 sh_eth_ring_format(ndev);
@@ -926,9 +993,7 @@ static int sh_eth_dev_init(struct net_device *ndev)
926 /* Frame recv control */ 993 /* Frame recv control */
927 sh_eth_write(ndev, mdp->cd->rmcr_value, RMCR); 994 sh_eth_write(ndev, mdp->cd->rmcr_value, RMCR);
928 995
929 rx_int_var = mdp->rx_int_var = DESC_I_RINT8 | DESC_I_RINT5; 996 sh_eth_write(ndev, DESC_I_RINT8 | DESC_I_RINT5 | DESC_I_TINT2, TRSCER);
930 tx_int_var = mdp->tx_int_var = DESC_I_TINT2;
931 sh_eth_write(ndev, rx_int_var | tx_int_var, TRSCER);
932 997
933 if (mdp->cd->bculr) 998 if (mdp->cd->bculr)
934 sh_eth_write(ndev, 0x800, BCULR); /* Burst sycle set */ 999 sh_eth_write(ndev, 0x800, BCULR); /* Burst sycle set */
@@ -943,7 +1008,8 @@ static int sh_eth_dev_init(struct net_device *ndev)
943 RFLR); 1008 RFLR);
944 1009
945 sh_eth_write(ndev, sh_eth_read(ndev, EESR), EESR); 1010 sh_eth_write(ndev, sh_eth_read(ndev, EESR), EESR);
946 sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR); 1011 if (start)
1012 sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
947 1013
948 /* PAUSE Prohibition */ 1014 /* PAUSE Prohibition */
949 val = (sh_eth_read(ndev, ECMR) & ECMR_DM) | 1015 val = (sh_eth_read(ndev, ECMR) & ECMR_DM) |
@@ -958,7 +1024,8 @@ static int sh_eth_dev_init(struct net_device *ndev)
958 sh_eth_write(ndev, mdp->cd->ecsr_value, ECSR); 1024 sh_eth_write(ndev, mdp->cd->ecsr_value, ECSR);
959 1025
960 /* E-MAC Interrupt Enable register */ 1026 /* E-MAC Interrupt Enable register */
961 sh_eth_write(ndev, mdp->cd->ecsipr_value, ECSIPR); 1027 if (start)
1028 sh_eth_write(ndev, mdp->cd->ecsipr_value, ECSIPR);
962 1029
963 /* Set MAC address */ 1030 /* Set MAC address */
964 update_mac_address(ndev); 1031 update_mac_address(ndev);
@@ -971,11 +1038,14 @@ static int sh_eth_dev_init(struct net_device *ndev)
971 if (mdp->cd->tpauser) 1038 if (mdp->cd->tpauser)
972 sh_eth_write(ndev, TPAUSER_UNLIMITED, TPAUSER); 1039 sh_eth_write(ndev, TPAUSER_UNLIMITED, TPAUSER);
973 1040
974 /* Setting the Rx mode will start the Rx process. */ 1041 if (start) {
975 sh_eth_write(ndev, EDRRR_R, EDRRR); 1042 /* Setting the Rx mode will start the Rx process. */
1043 sh_eth_write(ndev, EDRRR_R, EDRRR);
976 1044
977 netif_start_queue(ndev); 1045 netif_start_queue(ndev);
1046 }
978 1047
1048out:
979 return ret; 1049 return ret;
980} 1050}
981 1051
@@ -988,7 +1058,7 @@ static int sh_eth_txfree(struct net_device *ndev)
988 int entry = 0; 1058 int entry = 0;
989 1059
990 for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) { 1060 for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) {
991 entry = mdp->dirty_tx % TX_RING_SIZE; 1061 entry = mdp->dirty_tx % mdp->num_tx_ring;
992 txdesc = &mdp->tx_ring[entry]; 1062 txdesc = &mdp->tx_ring[entry];
993 if (txdesc->status & cpu_to_edmac(mdp, TD_TACT)) 1063 if (txdesc->status & cpu_to_edmac(mdp, TD_TACT))
994 break; 1064 break;
@@ -1001,7 +1071,7 @@ static int sh_eth_txfree(struct net_device *ndev)
1001 freeNum++; 1071 freeNum++;
1002 } 1072 }
1003 txdesc->status = cpu_to_edmac(mdp, TD_TFP); 1073 txdesc->status = cpu_to_edmac(mdp, TD_TFP);
1004 if (entry >= TX_RING_SIZE - 1) 1074 if (entry >= mdp->num_tx_ring - 1)
1005 txdesc->status |= cpu_to_edmac(mdp, TD_TDLE); 1075 txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);
1006 1076
1007 ndev->stats.tx_packets++; 1077 ndev->stats.tx_packets++;
@@ -1016,8 +1086,8 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status)
1016 struct sh_eth_private *mdp = netdev_priv(ndev); 1086 struct sh_eth_private *mdp = netdev_priv(ndev);
1017 struct sh_eth_rxdesc *rxdesc; 1087 struct sh_eth_rxdesc *rxdesc;
1018 1088
1019 int entry = mdp->cur_rx % RX_RING_SIZE; 1089 int entry = mdp->cur_rx % mdp->num_rx_ring;
1020 int boguscnt = (mdp->dirty_rx + RX_RING_SIZE) - mdp->cur_rx; 1090 int boguscnt = (mdp->dirty_rx + mdp->num_rx_ring) - mdp->cur_rx;
1021 struct sk_buff *skb; 1091 struct sk_buff *skb;
1022 u16 pkt_len = 0; 1092 u16 pkt_len = 0;
1023 u32 desc_status; 1093 u32 desc_status;
@@ -1068,13 +1138,13 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status)
1068 ndev->stats.rx_bytes += pkt_len; 1138 ndev->stats.rx_bytes += pkt_len;
1069 } 1139 }
1070 rxdesc->status |= cpu_to_edmac(mdp, RD_RACT); 1140 rxdesc->status |= cpu_to_edmac(mdp, RD_RACT);
1071 entry = (++mdp->cur_rx) % RX_RING_SIZE; 1141 entry = (++mdp->cur_rx) % mdp->num_rx_ring;
1072 rxdesc = &mdp->rx_ring[entry]; 1142 rxdesc = &mdp->rx_ring[entry];
1073 } 1143 }
1074 1144
1075 /* Refill the Rx ring buffers. */ 1145 /* Refill the Rx ring buffers. */
1076 for (; mdp->cur_rx - mdp->dirty_rx > 0; mdp->dirty_rx++) { 1146 for (; mdp->cur_rx - mdp->dirty_rx > 0; mdp->dirty_rx++) {
1077 entry = mdp->dirty_rx % RX_RING_SIZE; 1147 entry = mdp->dirty_rx % mdp->num_rx_ring;
1078 rxdesc = &mdp->rx_ring[entry]; 1148 rxdesc = &mdp->rx_ring[entry];
1079 /* The size of the buffer is 16 byte boundary. */ 1149 /* The size of the buffer is 16 byte boundary. */
1080 rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16); 1150 rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
@@ -1091,7 +1161,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status)
1091 skb_checksum_none_assert(skb); 1161 skb_checksum_none_assert(skb);
1092 rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4)); 1162 rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4));
1093 } 1163 }
1094 if (entry >= RX_RING_SIZE - 1) 1164 if (entry >= mdp->num_rx_ring - 1)
1095 rxdesc->status |= 1165 rxdesc->status |=
1096 cpu_to_edmac(mdp, RD_RACT | RD_RFP | RD_RDEL); 1166 cpu_to_edmac(mdp, RD_RACT | RD_RFP | RD_RDEL);
1097 else 1167 else
@@ -1293,14 +1363,6 @@ other_irq:
1293 return ret; 1363 return ret;
1294} 1364}
1295 1365
1296static void sh_eth_timer(unsigned long data)
1297{
1298 struct net_device *ndev = (struct net_device *)data;
1299 struct sh_eth_private *mdp = netdev_priv(ndev);
1300
1301 mod_timer(&mdp->timer, jiffies + (10 * HZ));
1302}
1303
1304/* PHY state control function */ 1366/* PHY state control function */
1305static void sh_eth_adjust_link(struct net_device *ndev) 1367static void sh_eth_adjust_link(struct net_device *ndev)
1306{ 1368{
@@ -1499,6 +1561,71 @@ static void sh_eth_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
1499 } 1561 }
1500} 1562}
1501 1563
1564static void sh_eth_get_ringparam(struct net_device *ndev,
1565 struct ethtool_ringparam *ring)
1566{
1567 struct sh_eth_private *mdp = netdev_priv(ndev);
1568
1569 ring->rx_max_pending = RX_RING_MAX;
1570 ring->tx_max_pending = TX_RING_MAX;
1571 ring->rx_pending = mdp->num_rx_ring;
1572 ring->tx_pending = mdp->num_tx_ring;
1573}
1574
1575static int sh_eth_set_ringparam(struct net_device *ndev,
1576 struct ethtool_ringparam *ring)
1577{
1578 struct sh_eth_private *mdp = netdev_priv(ndev);
1579 int ret;
1580
1581 if (ring->tx_pending > TX_RING_MAX ||
1582 ring->rx_pending > RX_RING_MAX ||
1583 ring->tx_pending < TX_RING_MIN ||
1584 ring->rx_pending < RX_RING_MIN)
1585 return -EINVAL;
1586 if (ring->rx_mini_pending || ring->rx_jumbo_pending)
1587 return -EINVAL;
1588
1589 if (netif_running(ndev)) {
1590 netif_tx_disable(ndev);
1591 /* Disable interrupts by clearing the interrupt mask. */
1592 sh_eth_write(ndev, 0x0000, EESIPR);
1593 /* Stop the chip's Tx and Rx processes. */
1594 sh_eth_write(ndev, 0, EDTRR);
1595 sh_eth_write(ndev, 0, EDRRR);
1596 synchronize_irq(ndev->irq);
1597 }
1598
1599 /* Free all the skbuffs in the Rx queue. */
1600 sh_eth_ring_free(ndev);
1601 /* Free DMA buffer */
1602 sh_eth_free_dma_buffer(mdp);
1603
1604 /* Set new parameters */
1605 mdp->num_rx_ring = ring->rx_pending;
1606 mdp->num_tx_ring = ring->tx_pending;
1607
1608 ret = sh_eth_ring_init(ndev);
1609 if (ret < 0) {
1610 dev_err(&ndev->dev, "%s: sh_eth_ring_init failed.\n", __func__);
1611 return ret;
1612 }
1613 ret = sh_eth_dev_init(ndev, false);
1614 if (ret < 0) {
1615 dev_err(&ndev->dev, "%s: sh_eth_dev_init failed.\n", __func__);
1616 return ret;
1617 }
1618
1619 if (netif_running(ndev)) {
1620 sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
1621 /* Setting the Rx mode will start the Rx process. */
1622 sh_eth_write(ndev, EDRRR_R, EDRRR);
1623 netif_wake_queue(ndev);
1624 }
1625
1626 return 0;
1627}
1628
1502static const struct ethtool_ops sh_eth_ethtool_ops = { 1629static const struct ethtool_ops sh_eth_ethtool_ops = {
1503 .get_settings = sh_eth_get_settings, 1630 .get_settings = sh_eth_get_settings,
1504 .set_settings = sh_eth_set_settings, 1631 .set_settings = sh_eth_set_settings,
@@ -1509,6 +1636,8 @@ static const struct ethtool_ops sh_eth_ethtool_ops = {
1509 .get_strings = sh_eth_get_strings, 1636 .get_strings = sh_eth_get_strings,
1510 .get_ethtool_stats = sh_eth_get_ethtool_stats, 1637 .get_ethtool_stats = sh_eth_get_ethtool_stats,
1511 .get_sset_count = sh_eth_get_sset_count, 1638 .get_sset_count = sh_eth_get_sset_count,
1639 .get_ringparam = sh_eth_get_ringparam,
1640 .set_ringparam = sh_eth_set_ringparam,
1512}; 1641};
1513 1642
1514/* network device open function */ 1643/* network device open function */
@@ -1539,7 +1668,7 @@ static int sh_eth_open(struct net_device *ndev)
1539 goto out_free_irq; 1668 goto out_free_irq;
1540 1669
1541 /* device init */ 1670 /* device init */
1542 ret = sh_eth_dev_init(ndev); 1671 ret = sh_eth_dev_init(ndev, true);
1543 if (ret) 1672 if (ret)
1544 goto out_free_irq; 1673 goto out_free_irq;
1545 1674
@@ -1548,11 +1677,6 @@ static int sh_eth_open(struct net_device *ndev)
1548 if (ret) 1677 if (ret)
1549 goto out_free_irq; 1678 goto out_free_irq;
1550 1679
1551 /* Set the timer to check for link beat. */
1552 init_timer(&mdp->timer);
1553 mdp->timer.expires = (jiffies + (24 * HZ)) / 10;/* 2.4 sec. */
1554 setup_timer(&mdp->timer, sh_eth_timer, (unsigned long)ndev);
1555
1556 return ret; 1680 return ret;
1557 1681
1558out_free_irq: 1682out_free_irq:
@@ -1577,11 +1701,8 @@ static void sh_eth_tx_timeout(struct net_device *ndev)
1577 /* tx_errors count up */ 1701 /* tx_errors count up */
1578 ndev->stats.tx_errors++; 1702 ndev->stats.tx_errors++;
1579 1703
1580 /* timer off */
1581 del_timer_sync(&mdp->timer);
1582
1583 /* Free all the skbuffs in the Rx queue. */ 1704 /* Free all the skbuffs in the Rx queue. */
1584 for (i = 0; i < RX_RING_SIZE; i++) { 1705 for (i = 0; i < mdp->num_rx_ring; i++) {
1585 rxdesc = &mdp->rx_ring[i]; 1706 rxdesc = &mdp->rx_ring[i];
1586 rxdesc->status = 0; 1707 rxdesc->status = 0;
1587 rxdesc->addr = 0xBADF00D0; 1708 rxdesc->addr = 0xBADF00D0;
@@ -1589,18 +1710,14 @@ static void sh_eth_tx_timeout(struct net_device *ndev)
1589 dev_kfree_skb(mdp->rx_skbuff[i]); 1710 dev_kfree_skb(mdp->rx_skbuff[i]);
1590 mdp->rx_skbuff[i] = NULL; 1711 mdp->rx_skbuff[i] = NULL;
1591 } 1712 }
1592 for (i = 0; i < TX_RING_SIZE; i++) { 1713 for (i = 0; i < mdp->num_tx_ring; i++) {
1593 if (mdp->tx_skbuff[i]) 1714 if (mdp->tx_skbuff[i])
1594 dev_kfree_skb(mdp->tx_skbuff[i]); 1715 dev_kfree_skb(mdp->tx_skbuff[i]);
1595 mdp->tx_skbuff[i] = NULL; 1716 mdp->tx_skbuff[i] = NULL;
1596 } 1717 }
1597 1718
1598 /* device init */ 1719 /* device init */
1599 sh_eth_dev_init(ndev); 1720 sh_eth_dev_init(ndev, true);
1600
1601 /* timer on */
1602 mdp->timer.expires = (jiffies + (24 * HZ)) / 10;/* 2.4 sec. */
1603 add_timer(&mdp->timer);
1604} 1721}
1605 1722
1606/* Packet transmit function */ 1723/* Packet transmit function */
@@ -1612,7 +1729,7 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1612 unsigned long flags; 1729 unsigned long flags;
1613 1730
1614 spin_lock_irqsave(&mdp->lock, flags); 1731 spin_lock_irqsave(&mdp->lock, flags);
1615 if ((mdp->cur_tx - mdp->dirty_tx) >= (TX_RING_SIZE - 4)) { 1732 if ((mdp->cur_tx - mdp->dirty_tx) >= (mdp->num_tx_ring - 4)) {
1616 if (!sh_eth_txfree(ndev)) { 1733 if (!sh_eth_txfree(ndev)) {
1617 if (netif_msg_tx_queued(mdp)) 1734 if (netif_msg_tx_queued(mdp))
1618 dev_warn(&ndev->dev, "TxFD exhausted.\n"); 1735 dev_warn(&ndev->dev, "TxFD exhausted.\n");
@@ -1623,7 +1740,7 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1623 } 1740 }
1624 spin_unlock_irqrestore(&mdp->lock, flags); 1741 spin_unlock_irqrestore(&mdp->lock, flags);
1625 1742
1626 entry = mdp->cur_tx % TX_RING_SIZE; 1743 entry = mdp->cur_tx % mdp->num_tx_ring;
1627 mdp->tx_skbuff[entry] = skb; 1744 mdp->tx_skbuff[entry] = skb;
1628 txdesc = &mdp->tx_ring[entry]; 1745 txdesc = &mdp->tx_ring[entry];
1629 /* soft swap. */ 1746 /* soft swap. */
@@ -1637,7 +1754,7 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1637 else 1754 else
1638 txdesc->buffer_length = skb->len; 1755 txdesc->buffer_length = skb->len;
1639 1756
1640 if (entry >= TX_RING_SIZE - 1) 1757 if (entry >= mdp->num_tx_ring - 1)
1641 txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE); 1758 txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE);
1642 else 1759 else
1643 txdesc->status |= cpu_to_edmac(mdp, TD_TACT); 1760 txdesc->status |= cpu_to_edmac(mdp, TD_TACT);
@@ -1654,7 +1771,6 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1654static int sh_eth_close(struct net_device *ndev) 1771static int sh_eth_close(struct net_device *ndev)
1655{ 1772{
1656 struct sh_eth_private *mdp = netdev_priv(ndev); 1773 struct sh_eth_private *mdp = netdev_priv(ndev);
1657 int ringsize;
1658 1774
1659 netif_stop_queue(ndev); 1775 netif_stop_queue(ndev);
1660 1776
@@ -1673,18 +1789,11 @@ static int sh_eth_close(struct net_device *ndev)
1673 1789
1674 free_irq(ndev->irq, ndev); 1790 free_irq(ndev->irq, ndev);
1675 1791
1676 del_timer_sync(&mdp->timer);
1677
1678 /* Free all the skbuffs in the Rx queue. */ 1792 /* Free all the skbuffs in the Rx queue. */
1679 sh_eth_ring_free(ndev); 1793 sh_eth_ring_free(ndev);
1680 1794
1681 /* free DMA buffer */ 1795 /* free DMA buffer */
1682 ringsize = sizeof(struct sh_eth_rxdesc) * RX_RING_SIZE; 1796 sh_eth_free_dma_buffer(mdp);
1683 dma_free_coherent(NULL, ringsize, mdp->rx_ring, mdp->rx_desc_dma);
1684
1685 /* free DMA buffer */
1686 ringsize = sizeof(struct sh_eth_txdesc) * TX_RING_SIZE;
1687 dma_free_coherent(NULL, ringsize, mdp->tx_ring, mdp->tx_desc_dma);
1688 1797
1689 pm_runtime_put_sync(&mdp->pdev->dev); 1798 pm_runtime_put_sync(&mdp->pdev->dev);
1690 1799
@@ -2275,6 +2384,8 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
2275 ether_setup(ndev); 2384 ether_setup(ndev);
2276 2385
2277 mdp = netdev_priv(ndev); 2386 mdp = netdev_priv(ndev);
2387 mdp->num_tx_ring = TX_RING_SIZE;
2388 mdp->num_rx_ring = RX_RING_SIZE;
2278 mdp->addr = ioremap(res->start, resource_size(res)); 2389 mdp->addr = ioremap(res->start, resource_size(res));
2279 if (mdp->addr == NULL) { 2390 if (mdp->addr == NULL) {
2280 ret = -ENOMEM; 2391 ret = -ENOMEM;
@@ -2312,8 +2423,6 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
2312 2423
2313 /* debug message level */ 2424 /* debug message level */
2314 mdp->msg_enable = SH_ETH_DEF_MSG_ENABLE; 2425 mdp->msg_enable = SH_ETH_DEF_MSG_ENABLE;
2315 mdp->post_rx = POST_RX >> (devno << 1);
2316 mdp->post_fw = POST_FW >> (devno << 1);
2317 2426
2318 /* read and set MAC address */ 2427 /* read and set MAC address */
2319 read_mac_address(ndev, pd->mac_addr); 2428 read_mac_address(ndev, pd->mac_addr);
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
index 57b8e1fc5d1..bae84fd2e73 100644
--- a/drivers/net/ethernet/renesas/sh_eth.h
+++ b/drivers/net/ethernet/renesas/sh_eth.h
@@ -27,6 +27,10 @@
27#define TX_TIMEOUT (5*HZ) 27#define TX_TIMEOUT (5*HZ)
28#define TX_RING_SIZE 64 /* Tx ring size */ 28#define TX_RING_SIZE 64 /* Tx ring size */
29#define RX_RING_SIZE 64 /* Rx ring size */ 29#define RX_RING_SIZE 64 /* Rx ring size */
30#define TX_RING_MIN 64
31#define RX_RING_MIN 64
32#define TX_RING_MAX 1024
33#define RX_RING_MAX 1024
30#define ETHERSMALL 60 34#define ETHERSMALL 60
31#define PKT_BUF_SZ 1538 35#define PKT_BUF_SZ 1538
32#define SH_ETH_TSU_TIMEOUT_MS 500 36#define SH_ETH_TSU_TIMEOUT_MS 500
@@ -585,71 +589,6 @@ enum RPADIR_BIT {
585/* FDR */ 589/* FDR */
586#define DEFAULT_FDR_INIT 0x00000707 590#define DEFAULT_FDR_INIT 0x00000707
587 591
588enum phy_offsets {
589 PHY_CTRL = 0, PHY_STAT = 1, PHY_IDT1 = 2, PHY_IDT2 = 3,
590 PHY_ANA = 4, PHY_ANL = 5, PHY_ANE = 6,
591 PHY_16 = 16,
592};
593
594/* PHY_CTRL */
595enum PHY_CTRL_BIT {
596 PHY_C_RESET = 0x8000, PHY_C_LOOPBK = 0x4000, PHY_C_SPEEDSL = 0x2000,
597 PHY_C_ANEGEN = 0x1000, PHY_C_PWRDN = 0x0800, PHY_C_ISO = 0x0400,
598 PHY_C_RANEG = 0x0200, PHY_C_DUPLEX = 0x0100, PHY_C_COLT = 0x0080,
599};
600#define DM9161_PHY_C_ANEGEN 0 /* auto nego special */
601
602/* PHY_STAT */
603enum PHY_STAT_BIT {
604 PHY_S_100T4 = 0x8000, PHY_S_100X_F = 0x4000, PHY_S_100X_H = 0x2000,
605 PHY_S_10T_F = 0x1000, PHY_S_10T_H = 0x0800, PHY_S_ANEGC = 0x0020,
606 PHY_S_RFAULT = 0x0010, PHY_S_ANEGA = 0x0008, PHY_S_LINK = 0x0004,
607 PHY_S_JAB = 0x0002, PHY_S_EXTD = 0x0001,
608};
609
610/* PHY_ANA */
611enum PHY_ANA_BIT {
612 PHY_A_NP = 0x8000, PHY_A_ACK = 0x4000, PHY_A_RF = 0x2000,
613 PHY_A_FCS = 0x0400, PHY_A_T4 = 0x0200, PHY_A_FDX = 0x0100,
614 PHY_A_HDX = 0x0080, PHY_A_10FDX = 0x0040, PHY_A_10HDX = 0x0020,
615 PHY_A_SEL = 0x001e,
616};
617/* PHY_ANL */
618enum PHY_ANL_BIT {
619 PHY_L_NP = 0x8000, PHY_L_ACK = 0x4000, PHY_L_RF = 0x2000,
620 PHY_L_FCS = 0x0400, PHY_L_T4 = 0x0200, PHY_L_FDX = 0x0100,
621 PHY_L_HDX = 0x0080, PHY_L_10FDX = 0x0040, PHY_L_10HDX = 0x0020,
622 PHY_L_SEL = 0x001f,
623};
624
625/* PHY_ANE */
626enum PHY_ANE_BIT {
627 PHY_E_PDF = 0x0010, PHY_E_LPNPA = 0x0008, PHY_E_NPA = 0x0004,
628 PHY_E_PRX = 0x0002, PHY_E_LPANEGA = 0x0001,
629};
630
631/* DM9161 */
632enum PHY_16_BIT {
633 PHY_16_BP4B45 = 0x8000, PHY_16_BPSCR = 0x4000, PHY_16_BPALIGN = 0x2000,
634 PHY_16_BP_ADPOK = 0x1000, PHY_16_Repeatmode = 0x0800,
635 PHY_16_TXselect = 0x0400,
636 PHY_16_Rsvd = 0x0200, PHY_16_RMIIEnable = 0x0100,
637 PHY_16_Force100LNK = 0x0080,
638 PHY_16_APDLED_CTL = 0x0040, PHY_16_COLLED_CTL = 0x0020,
639 PHY_16_RPDCTR_EN = 0x0010,
640 PHY_16_ResetStMch = 0x0008, PHY_16_PreamSupr = 0x0004,
641 PHY_16_Sleepmode = 0x0002,
642 PHY_16_RemoteLoopOut = 0x0001,
643};
644
645#define POST_RX 0x08
646#define POST_FW 0x04
647#define POST0_RX (POST_RX)
648#define POST0_FW (POST_FW)
649#define POST1_RX (POST_RX >> 2)
650#define POST1_FW (POST_FW >> 2)
651#define POST_ALL (POST0_RX | POST0_FW | POST1_RX | POST1_FW)
652
653/* ARSTR */ 592/* ARSTR */
654enum ARSTR_BIT { ARSTR_ARSTR = 0x00000001, }; 593enum ARSTR_BIT { ARSTR_ARSTR = 0x00000001, };
655 594
@@ -757,6 +696,7 @@ struct sh_eth_cpu_data {
757 unsigned no_trimd:1; /* E-DMAC DO NOT have TRIMD */ 696 unsigned no_trimd:1; /* E-DMAC DO NOT have TRIMD */
758 unsigned no_ade:1; /* E-DMAC DO NOT have ADE bit in EESR */ 697 unsigned no_ade:1; /* E-DMAC DO NOT have ADE bit in EESR */
759 unsigned hw_crc:1; /* E-DMAC have CSMR */ 698 unsigned hw_crc:1; /* E-DMAC have CSMR */
699 unsigned select_mii:1; /* EtherC have RMII_MII (MII select register) */
760}; 700};
761 701
762struct sh_eth_private { 702struct sh_eth_private {
@@ -765,13 +705,14 @@ struct sh_eth_private {
765 const u16 *reg_offset; 705 const u16 *reg_offset;
766 void __iomem *addr; 706 void __iomem *addr;
767 void __iomem *tsu_addr; 707 void __iomem *tsu_addr;
708 u32 num_rx_ring;
709 u32 num_tx_ring;
768 dma_addr_t rx_desc_dma; 710 dma_addr_t rx_desc_dma;
769 dma_addr_t tx_desc_dma; 711 dma_addr_t tx_desc_dma;
770 struct sh_eth_rxdesc *rx_ring; 712 struct sh_eth_rxdesc *rx_ring;
771 struct sh_eth_txdesc *tx_ring; 713 struct sh_eth_txdesc *tx_ring;
772 struct sk_buff **rx_skbuff; 714 struct sk_buff **rx_skbuff;
773 struct sk_buff **tx_skbuff; 715 struct sk_buff **tx_skbuff;
774 struct timer_list timer;
775 spinlock_t lock; 716 spinlock_t lock;
776 u32 cur_rx, dirty_rx; /* Producer/consumer ring indices */ 717 u32 cur_rx, dirty_rx; /* Producer/consumer ring indices */
777 u32 cur_tx, dirty_tx; 718 u32 cur_tx, dirty_tx;
@@ -786,10 +727,6 @@ struct sh_eth_private {
786 int msg_enable; 727 int msg_enable;
787 int speed; 728 int speed;
788 int duplex; 729 int duplex;
789 u32 rx_int_var, tx_int_var; /* interrupt control variables */
790 char post_rx; /* POST receive */
791 char post_fw; /* POST forward */
792 struct net_device_stats tsu_stats; /* TSU forward status */
793 int port; /* for TSU */ 730 int port; /* for TSU */
794 int vlan_num_ids; /* for VLAN tag filter */ 731 int vlan_num_ids; /* for VLAN tag filter */
795 732
diff --git a/drivers/net/ethernet/sfc/mcdi_pcol.h b/drivers/net/ethernet/sfc/mcdi_pcol.h
index 0310b9f08c9..db4beed9766 100644
--- a/drivers/net/ethernet/sfc/mcdi_pcol.h
+++ b/drivers/net/ethernet/sfc/mcdi_pcol.h
@@ -48,8 +48,7 @@
48 48
49/* Unused commands: 0x23, 0x27, 0x30, 0x31 */ 49/* Unused commands: 0x23, 0x27, 0x30, 0x31 */
50 50
51/** 51/* MCDI version 1
52 * MCDI version 1
53 * 52 *
54 * Each MCDI request starts with an MCDI_HEADER, which is a 32byte 53 * Each MCDI request starts with an MCDI_HEADER, which is a 32byte
55 * structure, filled in by the client. 54 * structure, filled in by the client.
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index 0e575359af1..a1965c07d1e 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -527,7 +527,7 @@ struct efx_phy_operations {
527}; 527};
528 528
529/** 529/**
530 * @enum efx_phy_mode - PHY operating mode flags 530 * enum efx_phy_mode - PHY operating mode flags
531 * @PHY_MODE_NORMAL: on and should pass traffic 531 * @PHY_MODE_NORMAL: on and should pass traffic
532 * @PHY_MODE_TX_DISABLED: on with TX disabled 532 * @PHY_MODE_TX_DISABLED: on with TX disabled
533 * @PHY_MODE_LOW_POWER: set to low power through MDIO 533 * @PHY_MODE_LOW_POWER: set to low power through MDIO
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index 243e91f3dff..fca61fea38e 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -336,6 +336,7 @@ static void efx_recycle_rx_buffer(struct efx_channel *channel,
336/** 336/**
337 * efx_fast_push_rx_descriptors - push new RX descriptors quickly 337 * efx_fast_push_rx_descriptors - push new RX descriptors quickly
338 * @rx_queue: RX descriptor queue 338 * @rx_queue: RX descriptor queue
339 *
339 * This will aim to fill the RX descriptor queue up to 340 * This will aim to fill the RX descriptor queue up to
340 * @rx_queue->@max_fill. If there is insufficient atomic 341 * @rx_queue->@max_fill. If there is insufficient atomic
341 * memory to do so, a slow fill will be scheduled. 342 * memory to do so, a slow fill will be scheduled.
diff --git a/drivers/net/ethernet/sgi/ioc3-eth.c b/drivers/net/ethernet/sgi/ioc3-eth.c
index ac149d99f78..b5ba3084c7f 100644
--- a/drivers/net/ethernet/sgi/ioc3-eth.c
+++ b/drivers/net/ethernet/sgi/ioc3-eth.c
@@ -583,7 +583,7 @@ static inline void ioc3_rx(struct net_device *dev)
583 unsigned long *rxr; 583 unsigned long *rxr;
584 u32 w0, err; 584 u32 w0, err;
585 585
586 rxr = (unsigned long *) ip->rxr; /* Ring base */ 586 rxr = ip->rxr; /* Ring base */
587 rx_entry = ip->rx_ci; /* RX consume index */ 587 rx_entry = ip->rx_ci; /* RX consume index */
588 n_entry = ip->rx_pi; 588 n_entry = ip->rx_pi;
589 589
@@ -903,7 +903,7 @@ static void ioc3_alloc_rings(struct net_device *dev)
903 if (ip->rxr == NULL) { 903 if (ip->rxr == NULL) {
904 /* Allocate and initialize rx ring. 4kb = 512 entries */ 904 /* Allocate and initialize rx ring. 4kb = 512 entries */
905 ip->rxr = (unsigned long *) get_zeroed_page(GFP_ATOMIC); 905 ip->rxr = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
906 rxr = (unsigned long *) ip->rxr; 906 rxr = ip->rxr;
907 if (!rxr) 907 if (!rxr)
908 printk("ioc3_alloc_rings(): get_zeroed_page() failed!\n"); 908 printk("ioc3_alloc_rings(): get_zeroed_page() failed!\n");
909 909
diff --git a/drivers/net/ethernet/smsc/smc911x.c b/drivers/net/ethernet/smsc/smc911x.c
index 8814b2f5d46..8d15f7a74b4 100644
--- a/drivers/net/ethernet/smsc/smc911x.c
+++ b/drivers/net/ethernet/smsc/smc911x.c
@@ -773,7 +773,7 @@ static int smc911x_phy_fixed(struct net_device *dev)
773 return 1; 773 return 1;
774} 774}
775 775
776/* 776/**
777 * smc911x_phy_reset - reset the phy 777 * smc911x_phy_reset - reset the phy
778 * @dev: net device 778 * @dev: net device
779 * @phy: phy address 779 * @phy: phy address
@@ -819,7 +819,7 @@ static int smc911x_phy_reset(struct net_device *dev, int phy)
819 return reg & PMT_CTRL_PHY_RST_; 819 return reg & PMT_CTRL_PHY_RST_;
820} 820}
821 821
822/* 822/**
823 * smc911x_phy_powerdown - powerdown phy 823 * smc911x_phy_powerdown - powerdown phy
824 * @dev: net device 824 * @dev: net device
825 * @phy: phy address 825 * @phy: phy address
@@ -837,7 +837,7 @@ static void smc911x_phy_powerdown(struct net_device *dev, int phy)
837 SMC_SET_PHY_BMCR(lp, phy, bmcr); 837 SMC_SET_PHY_BMCR(lp, phy, bmcr);
838} 838}
839 839
840/* 840/**
841 * smc911x_phy_check_media - check the media status and adjust BMCR 841 * smc911x_phy_check_media - check the media status and adjust BMCR
842 * @dev: net device 842 * @dev: net device
843 * @init: set true for initialisation 843 * @init: set true for initialisation
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index fee44935501..318adc935a5 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -942,7 +942,7 @@ static int smc_phy_fixed(struct net_device *dev)
942 return 1; 942 return 1;
943} 943}
944 944
945/* 945/**
946 * smc_phy_reset - reset the phy 946 * smc_phy_reset - reset the phy
947 * @dev: net device 947 * @dev: net device
948 * @phy: phy address 948 * @phy: phy address
@@ -976,7 +976,7 @@ static int smc_phy_reset(struct net_device *dev, int phy)
976 return bmcr & BMCR_RESET; 976 return bmcr & BMCR_RESET;
977} 977}
978 978
979/* 979/**
980 * smc_phy_powerdown - powerdown phy 980 * smc_phy_powerdown - powerdown phy
981 * @dev: net device 981 * @dev: net device
982 * 982 *
@@ -1000,7 +1000,7 @@ static void smc_phy_powerdown(struct net_device *dev)
1000 smc_phy_write(dev, phy, MII_BMCR, bmcr | BMCR_PDOWN); 1000 smc_phy_write(dev, phy, MII_BMCR, bmcr | BMCR_PDOWN);
1001} 1001}
1002 1002
1003/* 1003/**
1004 * smc_phy_check_media - check the media status and adjust TCR 1004 * smc_phy_check_media - check the media status and adjust TCR
1005 * @dev: net device 1005 * @dev: net device
1006 * @init: set true for initialisation 1006 * @init: set true for initialisation
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index 1466e5d2af4..54ca99dbb40 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -1442,6 +1442,14 @@ smsc911x_set_hw_mac_address(struct smsc911x_data *pdata, u8 dev_addr[6])
1442 smsc911x_mac_write(pdata, ADDRL, mac_low32); 1442 smsc911x_mac_write(pdata, ADDRL, mac_low32);
1443} 1443}
1444 1444
1445static void smsc911x_disable_irq_chip(struct net_device *dev)
1446{
1447 struct smsc911x_data *pdata = netdev_priv(dev);
1448
1449 smsc911x_reg_write(pdata, INT_EN, 0);
1450 smsc911x_reg_write(pdata, INT_STS, 0xFFFFFFFF);
1451}
1452
1445static int smsc911x_open(struct net_device *dev) 1453static int smsc911x_open(struct net_device *dev)
1446{ 1454{
1447 struct smsc911x_data *pdata = netdev_priv(dev); 1455 struct smsc911x_data *pdata = netdev_priv(dev);
@@ -1494,8 +1502,7 @@ static int smsc911x_open(struct net_device *dev)
1494 spin_unlock_irq(&pdata->mac_lock); 1502 spin_unlock_irq(&pdata->mac_lock);
1495 1503
1496 /* Initialise irqs, but leave all sources disabled */ 1504 /* Initialise irqs, but leave all sources disabled */
1497 smsc911x_reg_write(pdata, INT_EN, 0); 1505 smsc911x_disable_irq_chip(dev);
1498 smsc911x_reg_write(pdata, INT_STS, 0xFFFFFFFF);
1499 1506
1500 /* Set interrupt deassertion to 100uS */ 1507 /* Set interrupt deassertion to 100uS */
1501 intcfg = ((10 << 24) | INT_CFG_IRQ_EN_); 1508 intcfg = ((10 << 24) | INT_CFG_IRQ_EN_);
@@ -2215,9 +2222,6 @@ static int __devinit smsc911x_init(struct net_device *dev)
2215 if (smsc911x_soft_reset(pdata)) 2222 if (smsc911x_soft_reset(pdata))
2216 return -ENODEV; 2223 return -ENODEV;
2217 2224
2218 /* Disable all interrupt sources until we bring the device up */
2219 smsc911x_reg_write(pdata, INT_EN, 0);
2220
2221 ether_setup(dev); 2225 ether_setup(dev);
2222 dev->flags |= IFF_MULTICAST; 2226 dev->flags |= IFF_MULTICAST;
2223 netif_napi_add(dev, &pdata->napi, smsc911x_poll, SMSC_NAPI_WEIGHT); 2227 netif_napi_add(dev, &pdata->napi, smsc911x_poll, SMSC_NAPI_WEIGHT);
@@ -2434,8 +2438,7 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
2434 smsc911x_reg_write(pdata, INT_CFG, intcfg); 2438 smsc911x_reg_write(pdata, INT_CFG, intcfg);
2435 2439
2436 /* Ensure interrupts are globally disabled before connecting ISR */ 2440 /* Ensure interrupts are globally disabled before connecting ISR */
2437 smsc911x_reg_write(pdata, INT_EN, 0); 2441 smsc911x_disable_irq_chip(dev);
2438 smsc911x_reg_write(pdata, INT_STS, 0xFFFFFFFF);
2439 2442
2440 retval = request_irq(dev->irq, smsc911x_irqhandler, 2443 retval = request_irq(dev->irq, smsc911x_irqhandler,
2441 irq_flags | IRQF_SHARED, dev->name, dev); 2444 irq_flags | IRQF_SHARED, dev->name, dev);
diff --git a/drivers/net/ethernet/smsc/smsc9420.c b/drivers/net/ethernet/smsc/smsc9420.c
index fd33b21f6c9..1fcd914ec39 100644
--- a/drivers/net/ethernet/smsc/smsc9420.c
+++ b/drivers/net/ethernet/smsc/smsc9420.c
@@ -1640,8 +1640,7 @@ smsc9420_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1640 goto out_free_io_4; 1640 goto out_free_io_4;
1641 1641
1642 /* descriptors are aligned due to the nature of pci_alloc_consistent */ 1642 /* descriptors are aligned due to the nature of pci_alloc_consistent */
1643 pd->tx_ring = (struct smsc9420_dma_desc *) 1643 pd->tx_ring = (pd->rx_ring + RX_RING_SIZE);
1644 (pd->rx_ring + RX_RING_SIZE);
1645 pd->tx_dma_addr = pd->rx_dma_addr + 1644 pd->tx_dma_addr = pd->rx_dma_addr +
1646 sizeof(struct smsc9420_dma_desc) * RX_RING_SIZE; 1645 sizeof(struct smsc9420_dma_desc) * RX_RING_SIZE;
1647 1646
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index bcd54d6e94f..e2d083228f3 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -95,6 +95,16 @@ struct stmmac_extra_stats {
95 unsigned long poll_n; 95 unsigned long poll_n;
96 unsigned long sched_timer_n; 96 unsigned long sched_timer_n;
97 unsigned long normal_irq_n; 97 unsigned long normal_irq_n;
98 unsigned long mmc_tx_irq_n;
99 unsigned long mmc_rx_irq_n;
100 unsigned long mmc_rx_csum_offload_irq_n;
101 /* EEE */
102 unsigned long irq_receive_pmt_irq_n;
103 unsigned long irq_tx_path_in_lpi_mode_n;
104 unsigned long irq_tx_path_exit_lpi_mode_n;
105 unsigned long irq_rx_path_in_lpi_mode_n;
106 unsigned long irq_rx_path_exit_lpi_mode_n;
107 unsigned long phy_eee_wakeup_error_n;
98}; 108};
99 109
100/* CSR Frequency Access Defines*/ 110/* CSR Frequency Access Defines*/
@@ -162,6 +172,17 @@ enum tx_dma_irq_status {
162 handle_tx_rx = 3, 172 handle_tx_rx = 3,
163}; 173};
164 174
175enum core_specific_irq_mask {
176 core_mmc_tx_irq = 1,
177 core_mmc_rx_irq = 2,
178 core_mmc_rx_csum_offload_irq = 4,
179 core_irq_receive_pmt_irq = 8,
180 core_irq_tx_path_in_lpi_mode = 16,
181 core_irq_tx_path_exit_lpi_mode = 32,
182 core_irq_rx_path_in_lpi_mode = 64,
183 core_irq_rx_path_exit_lpi_mode = 128,
184};
185
165/* DMA HW capabilities */ 186/* DMA HW capabilities */
166struct dma_features { 187struct dma_features {
167 unsigned int mbps_10_100; 188 unsigned int mbps_10_100;
@@ -208,6 +229,10 @@ struct dma_features {
208#define MAC_ENABLE_TX 0x00000008 /* Transmitter Enable */ 229#define MAC_ENABLE_TX 0x00000008 /* Transmitter Enable */
209#define MAC_RNABLE_RX 0x00000004 /* Receiver Enable */ 230#define MAC_RNABLE_RX 0x00000004 /* Receiver Enable */
210 231
232/* Default LPI timers */
233#define STMMAC_DEFAULT_LIT_LS_TIMER 0x3E8
234#define STMMAC_DEFAULT_TWT_LS_TIMER 0x0
235
211struct stmmac_desc_ops { 236struct stmmac_desc_ops {
212 /* DMA RX descriptor ring initialization */ 237 /* DMA RX descriptor ring initialization */
213 void (*init_rx_desc) (struct dma_desc *p, unsigned int ring_size, 238 void (*init_rx_desc) (struct dma_desc *p, unsigned int ring_size,
@@ -278,7 +303,7 @@ struct stmmac_ops {
278 /* Dump MAC registers */ 303 /* Dump MAC registers */
279 void (*dump_regs) (void __iomem *ioaddr); 304 void (*dump_regs) (void __iomem *ioaddr);
280 /* Handle extra events on specific interrupts hw dependent */ 305 /* Handle extra events on specific interrupts hw dependent */
281 void (*host_irq_status) (void __iomem *ioaddr); 306 int (*host_irq_status) (void __iomem *ioaddr);
282 /* Multicast filter setting */ 307 /* Multicast filter setting */
283 void (*set_filter) (struct net_device *dev, int id); 308 void (*set_filter) (struct net_device *dev, int id);
284 /* Flow control setting */ 309 /* Flow control setting */
@@ -291,6 +316,10 @@ struct stmmac_ops {
291 unsigned int reg_n); 316 unsigned int reg_n);
292 void (*get_umac_addr) (void __iomem *ioaddr, unsigned char *addr, 317 void (*get_umac_addr) (void __iomem *ioaddr, unsigned char *addr,
293 unsigned int reg_n); 318 unsigned int reg_n);
319 void (*set_eee_mode) (void __iomem *ioaddr);
320 void (*reset_eee_mode) (void __iomem *ioaddr);
321 void (*set_eee_timer) (void __iomem *ioaddr, int ls, int tw);
322 void (*set_eee_pls) (void __iomem *ioaddr, int link);
294}; 323};
295 324
296struct mac_link { 325struct mac_link {
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
index 23478bf4ed7..f90fcb5f957 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
@@ -36,6 +36,7 @@
36 36
37#define GMAC_INT_STATUS 0x00000038 /* interrupt status register */ 37#define GMAC_INT_STATUS 0x00000038 /* interrupt status register */
38enum dwmac1000_irq_status { 38enum dwmac1000_irq_status {
39 lpiis_irq = 0x400,
39 time_stamp_irq = 0x0200, 40 time_stamp_irq = 0x0200,
40 mmc_rx_csum_offload_irq = 0x0080, 41 mmc_rx_csum_offload_irq = 0x0080,
41 mmc_tx_irq = 0x0040, 42 mmc_tx_irq = 0x0040,
@@ -60,6 +61,25 @@ enum power_event {
60 power_down = 0x00000001, 61 power_down = 0x00000001,
61}; 62};
62 63
64/* Energy Efficient Ethernet (EEE)
65 *
66 * LPI status, timer and control register offset
67 */
68#define LPI_CTRL_STATUS 0x0030
69#define LPI_TIMER_CTRL 0x0034
70
71/* LPI control and status defines */
72#define LPI_CTRL_STATUS_LPITXA 0x00080000 /* Enable LPI TX Automate */
73#define LPI_CTRL_STATUS_PLSEN 0x00040000 /* Enable PHY Link Status */
74#define LPI_CTRL_STATUS_PLS 0x00020000 /* PHY Link Status */
75#define LPI_CTRL_STATUS_LPIEN 0x00010000 /* LPI Enable */
76#define LPI_CTRL_STATUS_RLPIST 0x00000200 /* Receive LPI state */
77#define LPI_CTRL_STATUS_TLPIST 0x00000100 /* Transmit LPI state */
78#define LPI_CTRL_STATUS_RLPIEX 0x00000008 /* Receive LPI Exit */
79#define LPI_CTRL_STATUS_RLPIEN 0x00000004 /* Receive LPI Entry */
80#define LPI_CTRL_STATUS_TLPIEX 0x00000002 /* Transmit LPI Exit */
81#define LPI_CTRL_STATUS_TLPIEN 0x00000001 /* Transmit LPI Entry */
82
63/* GMAC HW ADDR regs */ 83/* GMAC HW ADDR regs */
64#define GMAC_ADDR_HIGH(reg) (((reg > 15) ? 0x00000800 : 0x00000040) + \ 84#define GMAC_ADDR_HIGH(reg) (((reg > 15) ? 0x00000800 : 0x00000040) + \
65 (reg * 8)) 85 (reg * 8))
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
index b5e4d02f15c..bfe02260549 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
@@ -194,26 +194,107 @@ static void dwmac1000_pmt(void __iomem *ioaddr, unsigned long mode)
194} 194}
195 195
196 196
197static void dwmac1000_irq_status(void __iomem *ioaddr) 197static int dwmac1000_irq_status(void __iomem *ioaddr)
198{ 198{
199 u32 intr_status = readl(ioaddr + GMAC_INT_STATUS); 199 u32 intr_status = readl(ioaddr + GMAC_INT_STATUS);
200 int status = 0;
200 201
201 /* Not used events (e.g. MMC interrupts) are not handled. */ 202 /* Not used events (e.g. MMC interrupts) are not handled. */
202 if ((intr_status & mmc_tx_irq)) 203 if ((intr_status & mmc_tx_irq)) {
203 CHIP_DBG(KERN_DEBUG "GMAC: MMC tx interrupt: 0x%08x\n", 204 CHIP_DBG(KERN_INFO "GMAC: MMC tx interrupt: 0x%08x\n",
204 readl(ioaddr + GMAC_MMC_TX_INTR)); 205 readl(ioaddr + GMAC_MMC_TX_INTR));
205 if (unlikely(intr_status & mmc_rx_irq)) 206 status |= core_mmc_tx_irq;
206 CHIP_DBG(KERN_DEBUG "GMAC: MMC rx interrupt: 0x%08x\n", 207 }
208 if (unlikely(intr_status & mmc_rx_irq)) {
209 CHIP_DBG(KERN_INFO "GMAC: MMC rx interrupt: 0x%08x\n",
207 readl(ioaddr + GMAC_MMC_RX_INTR)); 210 readl(ioaddr + GMAC_MMC_RX_INTR));
208 if (unlikely(intr_status & mmc_rx_csum_offload_irq)) 211 status |= core_mmc_rx_irq;
209 CHIP_DBG(KERN_DEBUG "GMAC: MMC rx csum offload: 0x%08x\n", 212 }
213 if (unlikely(intr_status & mmc_rx_csum_offload_irq)) {
214 CHIP_DBG(KERN_INFO "GMAC: MMC rx csum offload: 0x%08x\n",
210 readl(ioaddr + GMAC_MMC_RX_CSUM_OFFLOAD)); 215 readl(ioaddr + GMAC_MMC_RX_CSUM_OFFLOAD));
216 status |= core_mmc_rx_csum_offload_irq;
217 }
211 if (unlikely(intr_status & pmt_irq)) { 218 if (unlikely(intr_status & pmt_irq)) {
212 CHIP_DBG(KERN_DEBUG "GMAC: received Magic frame\n"); 219 CHIP_DBG(KERN_INFO "GMAC: received Magic frame\n");
213 /* clear the PMT bits 5 and 6 by reading the PMT 220 /* clear the PMT bits 5 and 6 by reading the PMT
214 * status register. */ 221 * status register. */
215 readl(ioaddr + GMAC_PMT); 222 readl(ioaddr + GMAC_PMT);
223 status |= core_irq_receive_pmt_irq;
216 } 224 }
225 /* MAC trx/rx EEE LPI entry/exit interrupts */
226 if (intr_status & lpiis_irq) {
227 /* Clean LPI interrupt by reading the Reg 12 */
228 u32 lpi_status = readl(ioaddr + LPI_CTRL_STATUS);
229
230 if (lpi_status & LPI_CTRL_STATUS_TLPIEN) {
231 CHIP_DBG(KERN_INFO "GMAC TX entered in LPI\n");
232 status |= core_irq_tx_path_in_lpi_mode;
233 }
234 if (lpi_status & LPI_CTRL_STATUS_TLPIEX) {
235 CHIP_DBG(KERN_INFO "GMAC TX exit from LPI\n");
236 status |= core_irq_tx_path_exit_lpi_mode;
237 }
238 if (lpi_status & LPI_CTRL_STATUS_RLPIEN) {
239 CHIP_DBG(KERN_INFO "GMAC RX entered in LPI\n");
240 status |= core_irq_rx_path_in_lpi_mode;
241 }
242 if (lpi_status & LPI_CTRL_STATUS_RLPIEX) {
243 CHIP_DBG(KERN_INFO "GMAC RX exit from LPI\n");
244 status |= core_irq_rx_path_exit_lpi_mode;
245 }
246 }
247
248 return status;
249}
250
251static void dwmac1000_set_eee_mode(void __iomem *ioaddr)
252{
253 u32 value;
254
255 /* Enable the link status receive on RGMII, SGMII ore SMII
256 * receive path and instruct the transmit to enter in LPI
257 * state. */
258 value = readl(ioaddr + LPI_CTRL_STATUS);
259 value |= LPI_CTRL_STATUS_LPIEN | LPI_CTRL_STATUS_LPITXA;
260 writel(value, ioaddr + LPI_CTRL_STATUS);
261}
262
263static void dwmac1000_reset_eee_mode(void __iomem *ioaddr)
264{
265 u32 value;
266
267 value = readl(ioaddr + LPI_CTRL_STATUS);
268 value &= ~(LPI_CTRL_STATUS_LPIEN | LPI_CTRL_STATUS_LPITXA);
269 writel(value, ioaddr + LPI_CTRL_STATUS);
270}
271
272static void dwmac1000_set_eee_pls(void __iomem *ioaddr, int link)
273{
274 u32 value;
275
276 value = readl(ioaddr + LPI_CTRL_STATUS);
277
278 if (link)
279 value |= LPI_CTRL_STATUS_PLS;
280 else
281 value &= ~LPI_CTRL_STATUS_PLS;
282
283 writel(value, ioaddr + LPI_CTRL_STATUS);
284}
285
286static void dwmac1000_set_eee_timer(void __iomem *ioaddr, int ls, int tw)
287{
288 int value = ((tw & 0xffff)) | ((ls & 0x7ff) << 16);
289
290 /* Program the timers in the LPI timer control register:
291 * LS: minimum time (ms) for which the link
292 * status from PHY should be ok before transmitting
293 * the LPI pattern.
294 * TW: minimum time (us) for which the core waits
295 * after it has stopped transmitting the LPI pattern.
296 */
297 writel(value, ioaddr + LPI_TIMER_CTRL);
217} 298}
218 299
219static const struct stmmac_ops dwmac1000_ops = { 300static const struct stmmac_ops dwmac1000_ops = {
@@ -226,6 +307,10 @@ static const struct stmmac_ops dwmac1000_ops = {
226 .pmt = dwmac1000_pmt, 307 .pmt = dwmac1000_pmt,
227 .set_umac_addr = dwmac1000_set_umac_addr, 308 .set_umac_addr = dwmac1000_set_umac_addr,
228 .get_umac_addr = dwmac1000_get_umac_addr, 309 .get_umac_addr = dwmac1000_get_umac_addr,
310 .set_eee_mode = dwmac1000_set_eee_mode,
311 .reset_eee_mode = dwmac1000_reset_eee_mode,
312 .set_eee_timer = dwmac1000_set_eee_timer,
313 .set_eee_pls = dwmac1000_set_eee_pls,
229}; 314};
230 315
231struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr) 316struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
index 19e0f4eed2b..f83210e7c22 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
@@ -72,9 +72,9 @@ static int dwmac100_rx_ipc_enable(void __iomem *ioaddr)
72 return 0; 72 return 0;
73} 73}
74 74
75static void dwmac100_irq_status(void __iomem *ioaddr) 75static int dwmac100_irq_status(void __iomem *ioaddr)
76{ 76{
77 return; 77 return 0;
78} 78}
79 79
80static void dwmac100_set_umac_addr(void __iomem *ioaddr, unsigned char *addr, 80static void dwmac100_set_umac_addr(void __iomem *ioaddr, unsigned char *addr,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
index 6e0360f9cfd..e678ce39d01 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
@@ -70,6 +70,7 @@
70#define DMA_INTR_DEFAULT_MASK (DMA_INTR_NORMAL | DMA_INTR_ABNORMAL) 70#define DMA_INTR_DEFAULT_MASK (DMA_INTR_NORMAL | DMA_INTR_ABNORMAL)
71 71
72/* DMA Status register defines */ 72/* DMA Status register defines */
73#define DMA_STATUS_GLPII 0x40000000 /* GMAC LPI interrupt */
73#define DMA_STATUS_GPI 0x10000000 /* PMT interrupt */ 74#define DMA_STATUS_GPI 0x10000000 /* PMT interrupt */
74#define DMA_STATUS_GMI 0x08000000 /* MMC interrupt */ 75#define DMA_STATUS_GMI 0x08000000 /* MMC interrupt */
75#define DMA_STATUS_GLI 0x04000000 /* GMAC Line interface int */ 76#define DMA_STATUS_GLI 0x04000000 /* GMAC Line interface int */
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index dc20c56efc9..ab4c376cb27 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -87,6 +87,12 @@ struct stmmac_priv {
87#endif 87#endif
88 int clk_csr; 88 int clk_csr;
89 int synopsys_id; 89 int synopsys_id;
90 struct timer_list eee_ctrl_timer;
91 bool tx_path_in_lpi_mode;
92 int lpi_irq;
93 int eee_enabled;
94 int eee_active;
95 int tx_lpi_timer;
90}; 96};
91 97
92extern int phyaddr; 98extern int phyaddr;
@@ -104,6 +110,8 @@ int stmmac_dvr_remove(struct net_device *ndev);
104struct stmmac_priv *stmmac_dvr_probe(struct device *device, 110struct stmmac_priv *stmmac_dvr_probe(struct device *device,
105 struct plat_stmmacenet_data *plat_dat, 111 struct plat_stmmacenet_data *plat_dat,
106 void __iomem *addr); 112 void __iomem *addr);
113void stmmac_disable_eee_mode(struct stmmac_priv *priv);
114bool stmmac_eee_init(struct stmmac_priv *priv);
107 115
108#ifdef CONFIG_HAVE_CLK 116#ifdef CONFIG_HAVE_CLK
109static inline int stmmac_clk_enable(struct stmmac_priv *priv) 117static inline int stmmac_clk_enable(struct stmmac_priv *priv)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index ce431846fc6..76fd61aa005 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -93,6 +93,16 @@ static const struct stmmac_stats stmmac_gstrings_stats[] = {
93 STMMAC_STAT(poll_n), 93 STMMAC_STAT(poll_n),
94 STMMAC_STAT(sched_timer_n), 94 STMMAC_STAT(sched_timer_n),
95 STMMAC_STAT(normal_irq_n), 95 STMMAC_STAT(normal_irq_n),
96 STMMAC_STAT(normal_irq_n),
97 STMMAC_STAT(mmc_tx_irq_n),
98 STMMAC_STAT(mmc_rx_irq_n),
99 STMMAC_STAT(mmc_rx_csum_offload_irq_n),
100 STMMAC_STAT(irq_receive_pmt_irq_n),
101 STMMAC_STAT(irq_tx_path_in_lpi_mode_n),
102 STMMAC_STAT(irq_tx_path_exit_lpi_mode_n),
103 STMMAC_STAT(irq_rx_path_in_lpi_mode_n),
104 STMMAC_STAT(irq_rx_path_exit_lpi_mode_n),
105 STMMAC_STAT(phy_eee_wakeup_error_n),
96}; 106};
97#define STMMAC_STATS_LEN ARRAY_SIZE(stmmac_gstrings_stats) 107#define STMMAC_STATS_LEN ARRAY_SIZE(stmmac_gstrings_stats)
98 108
@@ -366,6 +376,11 @@ static void stmmac_get_ethtool_stats(struct net_device *dev,
366 (*(u32 *)p); 376 (*(u32 *)p);
367 } 377 }
368 } 378 }
379 if (priv->eee_enabled) {
380 int val = phy_get_eee_err(priv->phydev);
381 if (val)
382 priv->xstats.phy_eee_wakeup_error_n = val;
383 }
369 } 384 }
370 for (i = 0; i < STMMAC_STATS_LEN; i++) { 385 for (i = 0; i < STMMAC_STATS_LEN; i++) {
371 char *p = (char *)priv + stmmac_gstrings_stats[i].stat_offset; 386 char *p = (char *)priv + stmmac_gstrings_stats[i].stat_offset;
@@ -464,6 +479,46 @@ static int stmmac_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
464 return 0; 479 return 0;
465} 480}
466 481
482static int stmmac_ethtool_op_get_eee(struct net_device *dev,
483 struct ethtool_eee *edata)
484{
485 struct stmmac_priv *priv = netdev_priv(dev);
486
487 if (!priv->dma_cap.eee)
488 return -EOPNOTSUPP;
489
490 edata->eee_enabled = priv->eee_enabled;
491 edata->eee_active = priv->eee_active;
492 edata->tx_lpi_timer = priv->tx_lpi_timer;
493
494 return phy_ethtool_get_eee(priv->phydev, edata);
495}
496
497static int stmmac_ethtool_op_set_eee(struct net_device *dev,
498 struct ethtool_eee *edata)
499{
500 struct stmmac_priv *priv = netdev_priv(dev);
501
502 priv->eee_enabled = edata->eee_enabled;
503
504 if (!priv->eee_enabled)
505 stmmac_disable_eee_mode(priv);
506 else {
507 /* We are asking for enabling the EEE but it is safe
508 * to verify all by invoking the eee_init function.
509 * In case of failure it will return an error.
510 */
511 priv->eee_enabled = stmmac_eee_init(priv);
512 if (!priv->eee_enabled)
513 return -EOPNOTSUPP;
514
515 /* Do not change tx_lpi_timer in case of failure */
516 priv->tx_lpi_timer = edata->tx_lpi_timer;
517 }
518
519 return phy_ethtool_set_eee(priv->phydev, edata);
520}
521
467static const struct ethtool_ops stmmac_ethtool_ops = { 522static const struct ethtool_ops stmmac_ethtool_ops = {
468 .begin = stmmac_check_if_running, 523 .begin = stmmac_check_if_running,
469 .get_drvinfo = stmmac_ethtool_getdrvinfo, 524 .get_drvinfo = stmmac_ethtool_getdrvinfo,
@@ -480,6 +535,8 @@ static const struct ethtool_ops stmmac_ethtool_ops = {
480 .get_strings = stmmac_get_strings, 535 .get_strings = stmmac_get_strings,
481 .get_wol = stmmac_get_wol, 536 .get_wol = stmmac_get_wol,
482 .set_wol = stmmac_set_wol, 537 .set_wol = stmmac_set_wol,
538 .get_eee = stmmac_ethtool_op_get_eee,
539 .set_eee = stmmac_ethtool_op_set_eee,
483 .get_sset_count = stmmac_get_sset_count, 540 .get_sset_count = stmmac_get_sset_count,
484 .get_ts_info = ethtool_op_get_ts_info, 541 .get_ts_info = ethtool_op_get_ts_info,
485}; 542};
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index ea3003edde1..f6b04c1a367 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -133,6 +133,12 @@ static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
133 NETIF_MSG_LINK | NETIF_MSG_IFUP | 133 NETIF_MSG_LINK | NETIF_MSG_IFUP |
134 NETIF_MSG_IFDOWN | NETIF_MSG_TIMER); 134 NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
135 135
136#define STMMAC_DEFAULT_LPI_TIMER 1000
137static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
138module_param(eee_timer, int, S_IRUGO | S_IWUSR);
139MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
140#define STMMAC_LPI_TIMER(x) (jiffies + msecs_to_jiffies(x))
141
136static irqreturn_t stmmac_interrupt(int irq, void *dev_id); 142static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
137 143
138#ifdef CONFIG_STMMAC_DEBUG_FS 144#ifdef CONFIG_STMMAC_DEBUG_FS
@@ -161,6 +167,8 @@ static void stmmac_verify_args(void)
161 flow_ctrl = FLOW_OFF; 167 flow_ctrl = FLOW_OFF;
162 if (unlikely((pause < 0) || (pause > 0xffff))) 168 if (unlikely((pause < 0) || (pause > 0xffff)))
163 pause = PAUSE_TIME; 169 pause = PAUSE_TIME;
170 if (eee_timer < 0)
171 eee_timer = STMMAC_DEFAULT_LPI_TIMER;
164} 172}
165 173
166static void stmmac_clk_csr_set(struct stmmac_priv *priv) 174static void stmmac_clk_csr_set(struct stmmac_priv *priv)
@@ -229,6 +237,85 @@ static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv)
229 phydev->speed); 237 phydev->speed);
230} 238}
231 239
240static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
241{
242 /* Check and enter in LPI mode */
243 if ((priv->dirty_tx == priv->cur_tx) &&
244 (priv->tx_path_in_lpi_mode == false))
245 priv->hw->mac->set_eee_mode(priv->ioaddr);
246}
247
248void stmmac_disable_eee_mode(struct stmmac_priv *priv)
249{
 250 /* Exit and disable EEE in case we are in LPI state. */
251 priv->hw->mac->reset_eee_mode(priv->ioaddr);
252 del_timer_sync(&priv->eee_ctrl_timer);
253 priv->tx_path_in_lpi_mode = false;
254}
255
256/**
257 * stmmac_eee_ctrl_timer
258 * @arg : data hook
259 * Description:
260 * If there is no data transfer and if we are not in LPI state,
261 * then MAC Transmitter can be moved to LPI state.
262 */
263static void stmmac_eee_ctrl_timer(unsigned long arg)
264{
265 struct stmmac_priv *priv = (struct stmmac_priv *)arg;
266
267 stmmac_enable_eee_mode(priv);
268 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_TIMER(eee_timer));
269}
270
271/**
272 * stmmac_eee_init
273 * @priv: private device pointer
274 * Description:
275 * If the EEE support has been enabled while configuring the driver,
276 * if the GMAC actually supports the EEE (from the HW cap reg) and the
277 * phy can also manage EEE, so enable the LPI state and start the timer
278 * to verify if the tx path can enter in LPI state.
279 */
280bool stmmac_eee_init(struct stmmac_priv *priv)
281{
282 bool ret = false;
283
284 /* MAC core supports the EEE feature. */
285 if (priv->dma_cap.eee) {
286 /* Check if the PHY supports EEE */
287 if (phy_init_eee(priv->phydev, 1))
288 goto out;
289
290 priv->eee_active = 1;
291 init_timer(&priv->eee_ctrl_timer);
292 priv->eee_ctrl_timer.function = stmmac_eee_ctrl_timer;
293 priv->eee_ctrl_timer.data = (unsigned long)priv;
294 priv->eee_ctrl_timer.expires = STMMAC_LPI_TIMER(eee_timer);
295 add_timer(&priv->eee_ctrl_timer);
296
297 priv->hw->mac->set_eee_timer(priv->ioaddr,
298 STMMAC_DEFAULT_LIT_LS_TIMER,
299 priv->tx_lpi_timer);
300
301 pr_info("stmmac: Energy-Efficient Ethernet initialized\n");
302
303 ret = true;
304 }
305out:
306 return ret;
307}
308
309static void stmmac_eee_adjust(struct stmmac_priv *priv)
310{
 311 /* When the EEE has already been initialised we have to
 312 * modify the PLS bit in the LPI ctrl & status reg according
 313 * to the PHY link status.
 314 */
315 if (priv->eee_enabled)
316 priv->hw->mac->set_eee_pls(priv->ioaddr, priv->phydev->link);
317}
318
232/** 319/**
233 * stmmac_adjust_link 320 * stmmac_adjust_link
234 * @dev: net device structure 321 * @dev: net device structure
@@ -249,6 +336,7 @@ static void stmmac_adjust_link(struct net_device *dev)
249 phydev->addr, phydev->link); 336 phydev->addr, phydev->link);
250 337
251 spin_lock_irqsave(&priv->lock, flags); 338 spin_lock_irqsave(&priv->lock, flags);
339
252 if (phydev->link) { 340 if (phydev->link) {
253 u32 ctrl = readl(priv->ioaddr + MAC_CTRL_REG); 341 u32 ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
254 342
@@ -315,6 +403,8 @@ static void stmmac_adjust_link(struct net_device *dev)
315 if (new_state && netif_msg_link(priv)) 403 if (new_state && netif_msg_link(priv))
316 phy_print_status(phydev); 404 phy_print_status(phydev);
317 405
406 stmmac_eee_adjust(priv);
407
318 spin_unlock_irqrestore(&priv->lock, flags); 408 spin_unlock_irqrestore(&priv->lock, flags);
319 409
320 DBG(probe, DEBUG, "stmmac_adjust_link: exiting\n"); 410 DBG(probe, DEBUG, "stmmac_adjust_link: exiting\n");
@@ -332,7 +422,7 @@ static int stmmac_init_phy(struct net_device *dev)
332{ 422{
333 struct stmmac_priv *priv = netdev_priv(dev); 423 struct stmmac_priv *priv = netdev_priv(dev);
334 struct phy_device *phydev; 424 struct phy_device *phydev;
335 char phy_id[MII_BUS_ID_SIZE + 3]; 425 char phy_id_fmt[MII_BUS_ID_SIZE + 3];
336 char bus_id[MII_BUS_ID_SIZE]; 426 char bus_id[MII_BUS_ID_SIZE];
337 int interface = priv->plat->interface; 427 int interface = priv->plat->interface;
338 priv->oldlink = 0; 428 priv->oldlink = 0;
@@ -346,11 +436,12 @@ static int stmmac_init_phy(struct net_device *dev)
346 snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x", 436 snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
347 priv->plat->bus_id); 437 priv->plat->bus_id);
348 438
349 snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id, 439 snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
350 priv->plat->phy_addr); 440 priv->plat->phy_addr);
351 pr_debug("stmmac_init_phy: trying to attach to %s\n", phy_id); 441 pr_debug("stmmac_init_phy: trying to attach to %s\n", phy_id_fmt);
352 442
353 phydev = phy_connect(dev, phy_id, &stmmac_adjust_link, 0, interface); 443 phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link, 0,
444 interface);
354 445
355 if (IS_ERR(phydev)) { 446 if (IS_ERR(phydev)) {
356 pr_err("%s: Could not attach to PHY\n", dev->name); 447 pr_err("%s: Could not attach to PHY\n", dev->name);
@@ -677,7 +768,7 @@ static void stmmac_tx(struct stmmac_priv *priv)
677 768
678 priv->hw->desc->release_tx_desc(p); 769 priv->hw->desc->release_tx_desc(p);
679 770
680 entry = (++priv->dirty_tx) % txsize; 771 priv->dirty_tx++;
681 } 772 }
682 if (unlikely(netif_queue_stopped(priv->dev) && 773 if (unlikely(netif_queue_stopped(priv->dev) &&
683 stmmac_tx_avail(priv) > STMMAC_TX_THRESH(priv))) { 774 stmmac_tx_avail(priv) > STMMAC_TX_THRESH(priv))) {
@@ -689,6 +780,11 @@ static void stmmac_tx(struct stmmac_priv *priv)
689 } 780 }
690 netif_tx_unlock(priv->dev); 781 netif_tx_unlock(priv->dev);
691 } 782 }
783
784 if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
785 stmmac_enable_eee_mode(priv);
786 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_TIMER(eee_timer));
787 }
692 spin_unlock(&priv->tx_lock); 788 spin_unlock(&priv->tx_lock);
693} 789}
694 790
@@ -1027,6 +1123,17 @@ static int stmmac_open(struct net_device *dev)
1027 } 1123 }
1028 } 1124 }
1029 1125
1126 /* Request the IRQ lines */
1127 if (priv->lpi_irq != -ENXIO) {
1128 ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED,
1129 dev->name, dev);
1130 if (unlikely(ret < 0)) {
1131 pr_err("%s: ERROR: allocating the LPI IRQ %d (%d)\n",
1132 __func__, priv->lpi_irq, ret);
1133 goto open_error_lpiirq;
1134 }
1135 }
1136
1030 /* Enable the MAC Rx/Tx */ 1137 /* Enable the MAC Rx/Tx */
1031 stmmac_set_mac(priv->ioaddr, true); 1138 stmmac_set_mac(priv->ioaddr, true);
1032 1139
@@ -1062,12 +1169,19 @@ static int stmmac_open(struct net_device *dev)
1062 if (priv->phydev) 1169 if (priv->phydev)
1063 phy_start(priv->phydev); 1170 phy_start(priv->phydev);
1064 1171
1172 priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS_TIMER;
1173 priv->eee_enabled = stmmac_eee_init(priv);
1174
1065 napi_enable(&priv->napi); 1175 napi_enable(&priv->napi);
1066 skb_queue_head_init(&priv->rx_recycle); 1176 skb_queue_head_init(&priv->rx_recycle);
1067 netif_start_queue(dev); 1177 netif_start_queue(dev);
1068 1178
1069 return 0; 1179 return 0;
1070 1180
1181open_error_lpiirq:
1182 if (priv->wol_irq != dev->irq)
1183 free_irq(priv->wol_irq, dev);
1184
1071open_error_wolirq: 1185open_error_wolirq:
1072 free_irq(dev->irq, dev); 1186 free_irq(dev->irq, dev);
1073 1187
@@ -1093,6 +1207,9 @@ static int stmmac_release(struct net_device *dev)
1093{ 1207{
1094 struct stmmac_priv *priv = netdev_priv(dev); 1208 struct stmmac_priv *priv = netdev_priv(dev);
1095 1209
1210 if (priv->eee_enabled)
1211 del_timer_sync(&priv->eee_ctrl_timer);
1212
1096 /* Stop and disconnect the PHY */ 1213 /* Stop and disconnect the PHY */
1097 if (priv->phydev) { 1214 if (priv->phydev) {
1098 phy_stop(priv->phydev); 1215 phy_stop(priv->phydev);
@@ -1115,6 +1232,8 @@ static int stmmac_release(struct net_device *dev)
1115 free_irq(dev->irq, dev); 1232 free_irq(dev->irq, dev);
1116 if (priv->wol_irq != dev->irq) 1233 if (priv->wol_irq != dev->irq)
1117 free_irq(priv->wol_irq, dev); 1234 free_irq(priv->wol_irq, dev);
1235 if (priv->lpi_irq != -ENXIO)
1236 free_irq(priv->lpi_irq, dev);
1118 1237
1119 /* Stop TX/RX DMA and clear the descriptors */ 1238 /* Stop TX/RX DMA and clear the descriptors */
1120 priv->hw->dma->stop_tx(priv->ioaddr); 1239 priv->hw->dma->stop_tx(priv->ioaddr);
@@ -1164,6 +1283,9 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
1164 1283
1165 spin_lock(&priv->tx_lock); 1284 spin_lock(&priv->tx_lock);
1166 1285
1286 if (priv->tx_path_in_lpi_mode)
1287 stmmac_disable_eee_mode(priv);
1288
1167 entry = priv->cur_tx % txsize; 1289 entry = priv->cur_tx % txsize;
1168 1290
1169#ifdef STMMAC_XMIT_DEBUG 1291#ifdef STMMAC_XMIT_DEBUG
@@ -1311,7 +1433,6 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
1311 display_ring(priv->dma_rx, rxsize); 1433 display_ring(priv->dma_rx, rxsize);
1312 } 1434 }
1313#endif 1435#endif
1314 count = 0;
1315 while (!priv->hw->desc->get_rx_owner(p)) { 1436 while (!priv->hw->desc->get_rx_owner(p)) {
1316 int status; 1437 int status;
1317 1438
@@ -1544,10 +1665,37 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
1544 return IRQ_NONE; 1665 return IRQ_NONE;
1545 } 1666 }
1546 1667
1547 if (priv->plat->has_gmac) 1668 /* To handle GMAC own interrupts */
1548 /* To handle GMAC own interrupts */ 1669 if (priv->plat->has_gmac) {
1549 priv->hw->mac->host_irq_status((void __iomem *) dev->base_addr); 1670 int status = priv->hw->mac->host_irq_status((void __iomem *)
1671 dev->base_addr);
1672 if (unlikely(status)) {
1673 if (status & core_mmc_tx_irq)
1674 priv->xstats.mmc_tx_irq_n++;
1675 if (status & core_mmc_rx_irq)
1676 priv->xstats.mmc_rx_irq_n++;
1677 if (status & core_mmc_rx_csum_offload_irq)
1678 priv->xstats.mmc_rx_csum_offload_irq_n++;
1679 if (status & core_irq_receive_pmt_irq)
1680 priv->xstats.irq_receive_pmt_irq_n++;
1681
1682 /* For LPI we need to save the tx status */
1683 if (status & core_irq_tx_path_in_lpi_mode) {
1684 priv->xstats.irq_tx_path_in_lpi_mode_n++;
1685 priv->tx_path_in_lpi_mode = true;
1686 }
1687 if (status & core_irq_tx_path_exit_lpi_mode) {
1688 priv->xstats.irq_tx_path_exit_lpi_mode_n++;
1689 priv->tx_path_in_lpi_mode = false;
1690 }
1691 if (status & core_irq_rx_path_in_lpi_mode)
1692 priv->xstats.irq_rx_path_in_lpi_mode_n++;
1693 if (status & core_irq_rx_path_exit_lpi_mode)
1694 priv->xstats.irq_rx_path_exit_lpi_mode_n++;
1695 }
1696 }
1550 1697
1698 /* To handle DMA interrupts */
1551 stmmac_dma_interrupt(priv); 1699 stmmac_dma_interrupt(priv);
1552 1700
1553 return IRQ_HANDLED; 1701 return IRQ_HANDLED;
@@ -2133,42 +2281,38 @@ static int __init stmmac_cmdline_opt(char *str)
2133 return -EINVAL; 2281 return -EINVAL;
2134 while ((opt = strsep(&str, ",")) != NULL) { 2282 while ((opt = strsep(&str, ",")) != NULL) {
2135 if (!strncmp(opt, "debug:", 6)) { 2283 if (!strncmp(opt, "debug:", 6)) {
2136 if (strict_strtoul(opt + 6, 0, (unsigned long *)&debug)) 2284 if (kstrtoint(opt + 6, 0, &debug))
2137 goto err; 2285 goto err;
2138 } else if (!strncmp(opt, "phyaddr:", 8)) { 2286 } else if (!strncmp(opt, "phyaddr:", 8)) {
2139 if (strict_strtoul(opt + 8, 0, 2287 if (kstrtoint(opt + 8, 0, &phyaddr))
2140 (unsigned long *)&phyaddr))
2141 goto err; 2288 goto err;
2142 } else if (!strncmp(opt, "dma_txsize:", 11)) { 2289 } else if (!strncmp(opt, "dma_txsize:", 11)) {
2143 if (strict_strtoul(opt + 11, 0, 2290 if (kstrtoint(opt + 11, 0, &dma_txsize))
2144 (unsigned long *)&dma_txsize))
2145 goto err; 2291 goto err;
2146 } else if (!strncmp(opt, "dma_rxsize:", 11)) { 2292 } else if (!strncmp(opt, "dma_rxsize:", 11)) {
2147 if (strict_strtoul(opt + 11, 0, 2293 if (kstrtoint(opt + 11, 0, &dma_rxsize))
2148 (unsigned long *)&dma_rxsize))
2149 goto err; 2294 goto err;
2150 } else if (!strncmp(opt, "buf_sz:", 7)) { 2295 } else if (!strncmp(opt, "buf_sz:", 7)) {
2151 if (strict_strtoul(opt + 7, 0, 2296 if (kstrtoint(opt + 7, 0, &buf_sz))
2152 (unsigned long *)&buf_sz))
2153 goto err; 2297 goto err;
2154 } else if (!strncmp(opt, "tc:", 3)) { 2298 } else if (!strncmp(opt, "tc:", 3)) {
2155 if (strict_strtoul(opt + 3, 0, (unsigned long *)&tc)) 2299 if (kstrtoint(opt + 3, 0, &tc))
2156 goto err; 2300 goto err;
2157 } else if (!strncmp(opt, "watchdog:", 9)) { 2301 } else if (!strncmp(opt, "watchdog:", 9)) {
2158 if (strict_strtoul(opt + 9, 0, 2302 if (kstrtoint(opt + 9, 0, &watchdog))
2159 (unsigned long *)&watchdog))
2160 goto err; 2303 goto err;
2161 } else if (!strncmp(opt, "flow_ctrl:", 10)) { 2304 } else if (!strncmp(opt, "flow_ctrl:", 10)) {
2162 if (strict_strtoul(opt + 10, 0, 2305 if (kstrtoint(opt + 10, 0, &flow_ctrl))
2163 (unsigned long *)&flow_ctrl))
2164 goto err; 2306 goto err;
2165 } else if (!strncmp(opt, "pause:", 6)) { 2307 } else if (!strncmp(opt, "pause:", 6)) {
2166 if (strict_strtoul(opt + 6, 0, (unsigned long *)&pause)) 2308 if (kstrtoint(opt + 6, 0, &pause))
2309 goto err;
2310 } else if (!strncmp(opt, "eee_timer:", 6)) {
2311 if (kstrtoint(opt + 10, 0, &eee_timer))
2167 goto err; 2312 goto err;
2168#ifdef CONFIG_STMMAC_TIMER 2313#ifdef CONFIG_STMMAC_TIMER
2169 } else if (!strncmp(opt, "tmrate:", 7)) { 2314 } else if (!strncmp(opt, "tmrate:", 7)) {
2170 if (strict_strtoul(opt + 7, 0, 2315 if (kstrtoint(opt + 7, 0, &tmrate))
2171 (unsigned long *)&tmrate))
2172 goto err; 2316 goto err;
2173#endif 2317#endif
2174 } 2318 }
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index cf826e6b6aa..13afb8edfad 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -125,7 +125,7 @@ err_out_req_reg_failed:
125} 125}
126 126
127/** 127/**
128 * stmmac_dvr_remove 128 * stmmac_pci_remove
129 * 129 *
130 * @pdev: platform device pointer 130 * @pdev: platform device pointer
131 * Description: this function calls the main to free the net resources 131 * Description: this function calls the main to free the net resources
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 680d2b8dfe2..7d36163d0d2 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -156,6 +156,8 @@ static int stmmac_pltfr_probe(struct platform_device *pdev)
156 if (priv->wol_irq == -ENXIO) 156 if (priv->wol_irq == -ENXIO)
157 priv->wol_irq = priv->dev->irq; 157 priv->wol_irq = priv->dev->irq;
158 158
159 priv->lpi_irq = platform_get_irq_byname(pdev, "eth_lpi");
160
159 platform_set_drvdata(pdev, priv->dev); 161 platform_set_drvdata(pdev, priv->dev);
160 162
161 pr_debug("STMMAC platform driver registration completed"); 163 pr_debug("STMMAC platform driver registration completed");
@@ -190,7 +192,7 @@ static int stmmac_pltfr_remove(struct platform_device *pdev)
190 192
191 platform_set_drvdata(pdev, NULL); 193 platform_set_drvdata(pdev, NULL);
192 194
193 iounmap((void *)priv->ioaddr); 195 iounmap((void __force __iomem *)priv->ioaddr);
194 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 196 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
195 release_mem_region(res->start, resource_size(res)); 197 release_mem_region(res->start, resource_size(res));
196 198
diff --git a/drivers/net/ethernet/sun/sunbmac.c b/drivers/net/ethernet/sun/sunbmac.c
index 2a83fc57edb..967fe8cb476 100644
--- a/drivers/net/ethernet/sun/sunbmac.c
+++ b/drivers/net/ethernet/sun/sunbmac.c
@@ -233,7 +233,6 @@ static void bigmac_init_rings(struct bigmac *bp, int from_irq)
233 continue; 233 continue;
234 234
235 bp->rx_skbs[i] = skb; 235 bp->rx_skbs[i] = skb;
236 skb->dev = dev;
237 236
238 /* Because we reserve afterwards. */ 237 /* Because we reserve afterwards. */
239 skb_put(skb, ETH_FRAME_LEN); 238 skb_put(skb, ETH_FRAME_LEN);
@@ -838,7 +837,6 @@ static void bigmac_rx(struct bigmac *bp)
838 RX_BUF_ALLOC_SIZE - 34, 837 RX_BUF_ALLOC_SIZE - 34,
839 DMA_FROM_DEVICE); 838 DMA_FROM_DEVICE);
840 bp->rx_skbs[elem] = new_skb; 839 bp->rx_skbs[elem] = new_skb;
841 new_skb->dev = bp->dev;
842 skb_put(new_skb, ETH_FRAME_LEN); 840 skb_put(new_skb, ETH_FRAME_LEN);
843 skb_reserve(new_skb, 34); 841 skb_reserve(new_skb, 34);
844 this->rx_addr = 842 this->rx_addr =
diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c
index 3cf4ab75583..9ae12d0c963 100644
--- a/drivers/net/ethernet/sun/sungem.c
+++ b/drivers/net/ethernet/sun/sungem.c
@@ -752,7 +752,6 @@ static __inline__ struct sk_buff *gem_alloc_skb(struct net_device *dev, int size
752 if (likely(skb)) { 752 if (likely(skb)) {
753 unsigned long offset = ALIGNED_RX_SKB_ADDR(skb->data); 753 unsigned long offset = ALIGNED_RX_SKB_ADDR(skb->data);
754 skb_reserve(skb, offset); 754 skb_reserve(skb, offset);
755 skb->dev = dev;
756 } 755 }
757 return skb; 756 return skb;
758} 757}
diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c
index dfc00c4683e..73f341b8bef 100644
--- a/drivers/net/ethernet/sun/sunhme.c
+++ b/drivers/net/ethernet/sun/sunhme.c
@@ -1249,7 +1249,6 @@ static void happy_meal_clean_rings(struct happy_meal *hp)
1249static void happy_meal_init_rings(struct happy_meal *hp) 1249static void happy_meal_init_rings(struct happy_meal *hp)
1250{ 1250{
1251 struct hmeal_init_block *hb = hp->happy_block; 1251 struct hmeal_init_block *hb = hp->happy_block;
1252 struct net_device *dev = hp->dev;
1253 int i; 1252 int i;
1254 1253
1255 HMD(("happy_meal_init_rings: counters to zero, ")); 1254 HMD(("happy_meal_init_rings: counters to zero, "));
@@ -1270,7 +1269,6 @@ static void happy_meal_init_rings(struct happy_meal *hp)
1270 continue; 1269 continue;
1271 } 1270 }
1272 hp->rx_skbs[i] = skb; 1271 hp->rx_skbs[i] = skb;
1273 skb->dev = dev;
1274 1272
1275 /* Because we reserve afterwards. */ 1273 /* Because we reserve afterwards. */
1276 skb_put(skb, (ETH_FRAME_LEN + RX_OFFSET + 4)); 1274 skb_put(skb, (ETH_FRAME_LEN + RX_OFFSET + 4));
@@ -2031,7 +2029,6 @@ static void happy_meal_rx(struct happy_meal *hp, struct net_device *dev)
2031 } 2029 }
2032 dma_unmap_single(hp->dma_dev, dma_addr, RX_BUF_ALLOC_SIZE, DMA_FROM_DEVICE); 2030 dma_unmap_single(hp->dma_dev, dma_addr, RX_BUF_ALLOC_SIZE, DMA_FROM_DEVICE);
2033 hp->rx_skbs[elem] = new_skb; 2031 hp->rx_skbs[elem] = new_skb;
2034 new_skb->dev = dev;
2035 skb_put(new_skb, (ETH_FRAME_LEN + RX_OFFSET + 4)); 2032 skb_put(new_skb, (ETH_FRAME_LEN + RX_OFFSET + 4));
2036 hme_write_rxd(hp, this, 2033 hme_write_rxd(hp, this,
2037 (RXFLAG_OWN|((RX_BUF_ALLOC_SIZE-RX_OFFSET)<<16)), 2034 (RXFLAG_OWN|((RX_BUF_ALLOC_SIZE-RX_OFFSET)<<16)),
diff --git a/drivers/net/ethernet/sun/sunqe.c b/drivers/net/ethernet/sun/sunqe.c
index 7d4a040d84a..aeded7ff1c8 100644
--- a/drivers/net/ethernet/sun/sunqe.c
+++ b/drivers/net/ethernet/sun/sunqe.c
@@ -441,7 +441,7 @@ static void qe_rx(struct sunqe *qep)
441 } else { 441 } else {
442 skb_reserve(skb, 2); 442 skb_reserve(skb, 2);
443 skb_put(skb, len); 443 skb_put(skb, len);
444 skb_copy_to_linear_data(skb, (unsigned char *) this_qbuf, 444 skb_copy_to_linear_data(skb, this_qbuf,
445 len); 445 len);
446 skb->protocol = eth_type_trans(skb, qep->dev); 446 skb->protocol = eth_type_trans(skb, qep->dev);
447 netif_rx(skb); 447 netif_rx(skb);
diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
index 447a6932cab..6ce9edd95c0 100644
--- a/drivers/net/ethernet/tehuti/tehuti.c
+++ b/drivers/net/ethernet/tehuti/tehuti.c
@@ -137,14 +137,15 @@ static void print_eth_id(struct net_device *ndev)
137#define bdx_disable_interrupts(priv) \ 137#define bdx_disable_interrupts(priv) \
138 do { WRITE_REG(priv, regIMR, 0); } while (0) 138 do { WRITE_REG(priv, regIMR, 0); } while (0)
139 139
140/* bdx_fifo_init 140/**
141 * create TX/RX descriptor fifo for host-NIC communication. 141 * bdx_fifo_init - create TX/RX descriptor fifo for host-NIC communication.
142 * @priv: NIC private structure
143 * @f: fifo to initialize
144 * @fsz_type: fifo size type: 0-4KB, 1-8KB, 2-16KB, 3-32KB
145 * @reg_XXX: offsets of registers relative to base address
146 *
142 * 1K extra space is allocated at the end of the fifo to simplify 147 * 1K extra space is allocated at the end of the fifo to simplify
143 * processing of descriptors that wraps around fifo's end 148 * processing of descriptors that wraps around fifo's end
144 * @priv - NIC private structure
145 * @f - fifo to initialize
146 * @fsz_type - fifo size type: 0-4KB, 1-8KB, 2-16KB, 3-32KB
147 * @reg_XXX - offsets of registers relative to base address
148 * 149 *
149 * Returns 0 on success, negative value on failure 150 * Returns 0 on success, negative value on failure
150 * 151 *
@@ -177,9 +178,10 @@ bdx_fifo_init(struct bdx_priv *priv, struct fifo *f, int fsz_type,
177 RET(0); 178 RET(0);
178} 179}
179 180
180/* bdx_fifo_free - free all resources used by fifo 181/**
181 * @priv - NIC private structure 182 * bdx_fifo_free - free all resources used by fifo
182 * @f - fifo to release 183 * @priv: NIC private structure
184 * @f: fifo to release
183 */ 185 */
184static void bdx_fifo_free(struct bdx_priv *priv, struct fifo *f) 186static void bdx_fifo_free(struct bdx_priv *priv, struct fifo *f)
185{ 187{
@@ -192,9 +194,9 @@ static void bdx_fifo_free(struct bdx_priv *priv, struct fifo *f)
192 RET(); 194 RET();
193} 195}
194 196
195/* 197/**
196 * bdx_link_changed - notifies OS about hw link state. 198 * bdx_link_changed - notifies OS about hw link state.
197 * @bdx_priv - hw adapter structure 199 * @priv: hw adapter structure
198 */ 200 */
199static void bdx_link_changed(struct bdx_priv *priv) 201static void bdx_link_changed(struct bdx_priv *priv)
200{ 202{
@@ -233,10 +235,10 @@ static void bdx_isr_extra(struct bdx_priv *priv, u32 isr)
233 235
234} 236}
235 237
236/* bdx_isr - Interrupt Service Routine for Bordeaux NIC 238/**
237 * @irq - interrupt number 239 * bdx_isr_napi - Interrupt Service Routine for Bordeaux NIC
238 * @ndev - network device 240 * @irq: interrupt number
239 * @regs - CPU registers 241 * @dev: network device
240 * 242 *
241 * Return IRQ_NONE if it was not our interrupt, IRQ_HANDLED - otherwise 243 * Return IRQ_NONE if it was not our interrupt, IRQ_HANDLED - otherwise
242 * 244 *
@@ -307,8 +309,10 @@ static int bdx_poll(struct napi_struct *napi, int budget)
307 return work_done; 309 return work_done;
308} 310}
309 311
310/* bdx_fw_load - loads firmware to NIC 312/**
311 * @priv - NIC private structure 313 * bdx_fw_load - loads firmware to NIC
314 * @priv: NIC private structure
315 *
312 * Firmware is loaded via TXD fifo, so it must be initialized first. 316 * Firmware is loaded via TXD fifo, so it must be initialized first.
 313 * Firmware must be loaded once per NIC not per PCI device provided by NIC (NIC 317 * Firmware must be loaded once per NIC not per PCI device provided by NIC (NIC
314 * can have few of them). So all drivers use semaphore register to choose one 318 * can have few of them). So all drivers use semaphore register to choose one
@@ -380,8 +384,9 @@ static void bdx_restore_mac(struct net_device *ndev, struct bdx_priv *priv)
380 RET(); 384 RET();
381} 385}
382 386
383/* bdx_hw_start - inits registers and starts HW's Rx and Tx engines 387/**
384 * @priv - NIC private structure 388 * bdx_hw_start - inits registers and starts HW's Rx and Tx engines
389 * @priv: NIC private structure
385 */ 390 */
386static int bdx_hw_start(struct bdx_priv *priv) 391static int bdx_hw_start(struct bdx_priv *priv)
387{ 392{
@@ -691,12 +696,13 @@ static int bdx_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
691 RET(-EOPNOTSUPP); 696 RET(-EOPNOTSUPP);
692} 697}
693 698
694/* 699/**
695 * __bdx_vlan_rx_vid - private helper for adding/killing VLAN vid 700 * __bdx_vlan_rx_vid - private helper for adding/killing VLAN vid
696 * by passing VLAN filter table to hardware 701 * @ndev: network device
697 * @ndev network device 702 * @vid: VLAN vid
698 * @vid VLAN vid 703 * @op: add or kill operation
699 * @op add or kill operation 704 *
705 * Passes VLAN filter table to hardware
700 */ 706 */
701static void __bdx_vlan_rx_vid(struct net_device *ndev, uint16_t vid, int enable) 707static void __bdx_vlan_rx_vid(struct net_device *ndev, uint16_t vid, int enable)
702{ 708{
@@ -722,10 +728,10 @@ static void __bdx_vlan_rx_vid(struct net_device *ndev, uint16_t vid, int enable)
722 RET(); 728 RET();
723} 729}
724 730
725/* 731/**
726 * bdx_vlan_rx_add_vid - kernel hook for adding VLAN vid to hw filtering table 732 * bdx_vlan_rx_add_vid - kernel hook for adding VLAN vid to hw filtering table
727 * @ndev network device 733 * @ndev: network device
728 * @vid VLAN vid to add 734 * @vid: VLAN vid to add
729 */ 735 */
730static int bdx_vlan_rx_add_vid(struct net_device *ndev, uint16_t vid) 736static int bdx_vlan_rx_add_vid(struct net_device *ndev, uint16_t vid)
731{ 737{
@@ -733,10 +739,10 @@ static int bdx_vlan_rx_add_vid(struct net_device *ndev, uint16_t vid)
733 return 0; 739 return 0;
734} 740}
735 741
736/* 742/**
737 * bdx_vlan_rx_kill_vid - kernel hook for killing VLAN vid in hw filtering table 743 * bdx_vlan_rx_kill_vid - kernel hook for killing VLAN vid in hw filtering table
738 * @ndev network device 744 * @ndev: network device
739 * @vid VLAN vid to kill 745 * @vid: VLAN vid to kill
740 */ 746 */
741static int bdx_vlan_rx_kill_vid(struct net_device *ndev, unsigned short vid) 747static int bdx_vlan_rx_kill_vid(struct net_device *ndev, unsigned short vid)
742{ 748{
@@ -974,8 +980,9 @@ static inline void bdx_rxdb_free_elem(struct rxdb *db, int n)
974 * Rx Init * 980 * Rx Init *
975 *************************************************************************/ 981 *************************************************************************/
976 982
977/* bdx_rx_init - initialize RX all related HW and SW resources 983/**
978 * @priv - NIC private structure 984 * bdx_rx_init - initialize RX all related HW and SW resources
985 * @priv: NIC private structure
979 * 986 *
980 * Returns 0 on success, negative value on failure 987 * Returns 0 on success, negative value on failure
981 * 988 *
@@ -1016,9 +1023,10 @@ err_mem:
1016 return -ENOMEM; 1023 return -ENOMEM;
1017} 1024}
1018 1025
1019/* bdx_rx_free_skbs - frees and unmaps all skbs allocated for the fifo 1026/**
1020 * @priv - NIC private structure 1027 * bdx_rx_free_skbs - frees and unmaps all skbs allocated for the fifo
1021 * @f - RXF fifo 1028 * @priv: NIC private structure
1029 * @f: RXF fifo
1022 */ 1030 */
1023static void bdx_rx_free_skbs(struct bdx_priv *priv, struct rxf_fifo *f) 1031static void bdx_rx_free_skbs(struct bdx_priv *priv, struct rxf_fifo *f)
1024{ 1032{
@@ -1045,8 +1053,10 @@ static void bdx_rx_free_skbs(struct bdx_priv *priv, struct rxf_fifo *f)
1045 } 1053 }
1046} 1054}
1047 1055
1048/* bdx_rx_free - release all Rx resources 1056/**
1049 * @priv - NIC private structure 1057 * bdx_rx_free - release all Rx resources
1058 * @priv: NIC private structure
1059 *
 1050 * It assumes that Rx is disabled in HW 1060 * It assumes that Rx is disabled in HW
1051 */ 1061 */
1052static void bdx_rx_free(struct bdx_priv *priv) 1062static void bdx_rx_free(struct bdx_priv *priv)
@@ -1067,9 +1077,11 @@ static void bdx_rx_free(struct bdx_priv *priv)
1067 * Rx Engine * 1077 * Rx Engine *
1068 *************************************************************************/ 1078 *************************************************************************/
1069 1079
1070/* bdx_rx_alloc_skbs - fill rxf fifo with new skbs 1080/**
1071 * @priv - nic's private structure 1081 * bdx_rx_alloc_skbs - fill rxf fifo with new skbs
1072 * @f - RXF fifo that needs skbs 1082 * @priv: nic's private structure
1083 * @f: RXF fifo that needs skbs
1084 *
1073 * It allocates skbs, build rxf descs and push it (rxf descr) into rxf fifo. 1085 * It allocates skbs, build rxf descs and push it (rxf descr) into rxf fifo.
1074 * skb's virtual and physical addresses are stored in skb db. 1086 * skb's virtual and physical addresses are stored in skb db.
1075 * To calculate free space, func uses cached values of RPTR and WPTR 1087 * To calculate free space, func uses cached values of RPTR and WPTR
@@ -1179,13 +1191,15 @@ static void bdx_recycle_skb(struct bdx_priv *priv, struct rxd_desc *rxdd)
1179 RET(); 1191 RET();
1180} 1192}
1181 1193
1182/* bdx_rx_receive - receives full packets from RXD fifo and pass them to OS 1194/**
1195 * bdx_rx_receive - receives full packets from RXD fifo and pass them to OS
1183 * NOTE: a special treatment is given to non-continuous descriptors 1196 * NOTE: a special treatment is given to non-continuous descriptors
1184 * that start near the end, wraps around and continue at the beginning. a second 1197 * that start near the end, wraps around and continue at the beginning. a second
1185 * part is copied right after the first, and then descriptor is interpreted as 1198 * part is copied right after the first, and then descriptor is interpreted as
1186 * normal. fifo has an extra space to allow such operations 1199 * normal. fifo has an extra space to allow such operations
1187 * @priv - nic's private structure 1200 * @priv: nic's private structure
1188 * @f - RXF fifo that needs skbs 1201 * @f: RXF fifo that needs skbs
1202 * @budget: maximum number of packets to receive
1189 */ 1203 */
1190 1204
 1191 /* TBD: replace memcpy func call by explicit inline asm */ 1205 /* TBD: replace memcpy func call by explicit inline asm */
@@ -1375,9 +1389,10 @@ static inline int bdx_tx_db_size(struct txdb *db)
1375 return db->size - taken; 1389 return db->size - taken;
1376} 1390}
1377 1391
1378/* __bdx_tx_ptr_next - helper function, increment read/write pointer + wrap 1392/**
1379 * @d - tx data base 1393 * __bdx_tx_db_ptr_next - helper function, increment read/write pointer + wrap
1380 * @ptr - read or write pointer 1394 * @db: tx data base
1395 * @pptr: read or write pointer
1381 */ 1396 */
1382static inline void __bdx_tx_db_ptr_next(struct txdb *db, struct tx_map **pptr) 1397static inline void __bdx_tx_db_ptr_next(struct txdb *db, struct tx_map **pptr)
1383{ 1398{
@@ -1394,8 +1409,9 @@ static inline void __bdx_tx_db_ptr_next(struct txdb *db, struct tx_map **pptr)
1394 *pptr = db->start; 1409 *pptr = db->start;
1395} 1410}
1396 1411
1397/* bdx_tx_db_inc_rptr - increment read pointer 1412/**
1398 * @d - tx data base 1413 * bdx_tx_db_inc_rptr - increment read pointer
1414 * @db: tx data base
1399 */ 1415 */
1400static inline void bdx_tx_db_inc_rptr(struct txdb *db) 1416static inline void bdx_tx_db_inc_rptr(struct txdb *db)
1401{ 1417{
@@ -1403,8 +1419,9 @@ static inline void bdx_tx_db_inc_rptr(struct txdb *db)
1403 __bdx_tx_db_ptr_next(db, &db->rptr); 1419 __bdx_tx_db_ptr_next(db, &db->rptr);
1404} 1420}
1405 1421
1406/* bdx_tx_db_inc_rptr - increment write pointer 1422/**
1407 * @d - tx data base 1423 * bdx_tx_db_inc_wptr - increment write pointer
1424 * @db: tx data base
1408 */ 1425 */
1409static inline void bdx_tx_db_inc_wptr(struct txdb *db) 1426static inline void bdx_tx_db_inc_wptr(struct txdb *db)
1410{ 1427{
@@ -1413,9 +1430,11 @@ static inline void bdx_tx_db_inc_wptr(struct txdb *db)
1413 a result of write */ 1430 a result of write */
1414} 1431}
1415 1432
1416/* bdx_tx_db_init - creates and initializes tx db 1433/**
1417 * @d - tx data base 1434 * bdx_tx_db_init - creates and initializes tx db
1418 * @sz_type - size of tx fifo 1435 * @d: tx data base
1436 * @sz_type: size of tx fifo
1437 *
1419 * Returns 0 on success, error code otherwise 1438 * Returns 0 on success, error code otherwise
1420 */ 1439 */
1421static int bdx_tx_db_init(struct txdb *d, int sz_type) 1440static int bdx_tx_db_init(struct txdb *d, int sz_type)
@@ -1441,8 +1460,9 @@ static int bdx_tx_db_init(struct txdb *d, int sz_type)
1441 return 0; 1460 return 0;
1442} 1461}
1443 1462
1444/* bdx_tx_db_close - closes tx db and frees all memory 1463/**
1445 * @d - tx data base 1464 * bdx_tx_db_close - closes tx db and frees all memory
1465 * @d: tx data base
1446 */ 1466 */
1447static void bdx_tx_db_close(struct txdb *d) 1467static void bdx_tx_db_close(struct txdb *d)
1448{ 1468{
@@ -1463,9 +1483,11 @@ static struct {
1463 u16 qwords; /* qword = 64 bit */ 1483 u16 qwords; /* qword = 64 bit */
1464} txd_sizes[MAX_SKB_FRAGS + 1]; 1484} txd_sizes[MAX_SKB_FRAGS + 1];
1465 1485
1466/* txdb_map_skb - creates and stores dma mappings for skb's data blocks 1486/**
1467 * @priv - NIC private structure 1487 * bdx_tx_map_skb - creates and stores dma mappings for skb's data blocks
1468 * @skb - socket buffer to map 1488 * @priv: NIC private structure
1489 * @skb: socket buffer to map
1490 * @txdd: TX descriptor to use
1469 * 1491 *
1470 * It makes dma mappings for skb's data blocks and writes them to PBL of 1492 * It makes dma mappings for skb's data blocks and writes them to PBL of
1471 * new tx descriptor. It also stores them in the tx db, so they could be 1493 * new tx descriptor. It also stores them in the tx db, so they could be
@@ -1562,9 +1584,10 @@ err_mem:
1562 return -ENOMEM; 1584 return -ENOMEM;
1563} 1585}
1564 1586
1565/* 1587/**
1566 * bdx_tx_space - calculates available space in TX fifo 1588 * bdx_tx_space - calculates available space in TX fifo
1567 * @priv - NIC private structure 1589 * @priv: NIC private structure
1590 *
1568 * Returns available space in TX fifo in bytes 1591 * Returns available space in TX fifo in bytes
1569 */ 1592 */
1570static inline int bdx_tx_space(struct bdx_priv *priv) 1593static inline int bdx_tx_space(struct bdx_priv *priv)
@@ -1579,9 +1602,10 @@ static inline int bdx_tx_space(struct bdx_priv *priv)
1579 return fsize; 1602 return fsize;
1580} 1603}
1581 1604
1582/* bdx_tx_transmit - send packet to NIC 1605/**
1583 * @skb - packet to send 1606 * bdx_tx_transmit - send packet to NIC
1584 * ndev - network device assigned to NIC 1607 * @skb: packet to send
1608 * @ndev: network device assigned to NIC
1585 * Return codes: 1609 * Return codes:
1586 * o NETDEV_TX_OK everything ok. 1610 * o NETDEV_TX_OK everything ok.
1587 * o NETDEV_TX_BUSY Cannot transmit packet, try later 1611 * o NETDEV_TX_BUSY Cannot transmit packet, try later
@@ -1699,8 +1723,10 @@ static netdev_tx_t bdx_tx_transmit(struct sk_buff *skb,
1699 return NETDEV_TX_OK; 1723 return NETDEV_TX_OK;
1700} 1724}
1701 1725
1702/* bdx_tx_cleanup - clean TXF fifo, run in the context of IRQ. 1726/**
1703 * @priv - bdx adapter 1727 * bdx_tx_cleanup - clean TXF fifo, run in the context of IRQ.
1728 * @priv: bdx adapter
1729 *
1704 * It scans TXF fifo for descriptors, frees DMA mappings and reports to OS 1730 * It scans TXF fifo for descriptors, frees DMA mappings and reports to OS
1705 * that those packets were sent 1731 * that those packets were sent
1706 */ 1732 */
@@ -1761,7 +1787,8 @@ static void bdx_tx_cleanup(struct bdx_priv *priv)
1761 spin_unlock(&priv->tx_lock); 1787 spin_unlock(&priv->tx_lock);
1762} 1788}
1763 1789
1764/* bdx_tx_free_skbs - frees all skbs from TXD fifo. 1790/**
1791 * bdx_tx_free_skbs - frees all skbs from TXD fifo.
1765 * It gets called when OS stops this dev, eg upon "ifconfig down" or rmmod 1792 * It gets called when OS stops this dev, eg upon "ifconfig down" or rmmod
1766 */ 1793 */
1767static void bdx_tx_free_skbs(struct bdx_priv *priv) 1794static void bdx_tx_free_skbs(struct bdx_priv *priv)
@@ -1790,10 +1817,11 @@ static void bdx_tx_free(struct bdx_priv *priv)
1790 bdx_tx_db_close(&priv->txdb); 1817 bdx_tx_db_close(&priv->txdb);
1791} 1818}
1792 1819
1793/* bdx_tx_push_desc - push descriptor to TxD fifo 1820/**
1794 * @priv - NIC private structure 1821 * bdx_tx_push_desc - push descriptor to TxD fifo
1795 * @data - desc's data 1822 * @priv: NIC private structure
1796 * @size - desc's size 1823 * @data: desc's data
1824 * @size: desc's size
1797 * 1825 *
1798 * Pushes desc to TxD fifo and overlaps it if needed. 1826 * Pushes desc to TxD fifo and overlaps it if needed.
1799 * NOTE: this func does not check for available space. this is responsibility 1827 * NOTE: this func does not check for available space. this is responsibility
@@ -1819,10 +1847,11 @@ static void bdx_tx_push_desc(struct bdx_priv *priv, void *data, int size)
1819 WRITE_REG(priv, f->m.reg_WPTR, f->m.wptr & TXF_WPTR_WR_PTR); 1847 WRITE_REG(priv, f->m.reg_WPTR, f->m.wptr & TXF_WPTR_WR_PTR);
1820} 1848}
1821 1849
1822/* bdx_tx_push_desc_safe - push descriptor to TxD fifo in a safe way 1850/**
1823 * @priv - NIC private structure 1851 * bdx_tx_push_desc_safe - push descriptor to TxD fifo in a safe way
1824 * @data - desc's data 1852 * @priv: NIC private structure
1825 * @size - desc's size 1853 * @data: desc's data
1854 * @size: desc's size
1826 * 1855 *
1827 * NOTE: this func does check for available space and, if necessary, waits for 1856 * NOTE: this func does check for available space and, if necessary, waits for
1828 * NIC to read existing data before writing new one. 1857 * NIC to read existing data before writing new one.
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index 4da93a5d7ec..ab0bbb78699 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -375,7 +375,7 @@ static char *emac_rxhost_errcodes[16] = {
375#define emac_ctrl_write(reg, val) iowrite32(val, (priv->ctrl_base + (reg))) 375#define emac_ctrl_write(reg, val) iowrite32(val, (priv->ctrl_base + (reg)))
376 376
377/** 377/**
378 * emac_dump_regs: Dump important EMAC registers to debug terminal 378 * emac_dump_regs - Dump important EMAC registers to debug terminal
379 * @priv: The DaVinci EMAC private adapter structure 379 * @priv: The DaVinci EMAC private adapter structure
380 * 380 *
381 * Executes ethtool set cmd & sets phy mode 381 * Executes ethtool set cmd & sets phy mode
@@ -466,7 +466,7 @@ static void emac_dump_regs(struct emac_priv *priv)
466} 466}
467 467
468/** 468/**
469 * emac_get_drvinfo: Get EMAC driver information 469 * emac_get_drvinfo - Get EMAC driver information
470 * @ndev: The DaVinci EMAC network adapter 470 * @ndev: The DaVinci EMAC network adapter
471 * @info: ethtool info structure containing name and version 471 * @info: ethtool info structure containing name and version
472 * 472 *
@@ -481,7 +481,7 @@ static void emac_get_drvinfo(struct net_device *ndev,
481} 481}
482 482
483/** 483/**
484 * emac_get_settings: Get EMAC settings 484 * emac_get_settings - Get EMAC settings
485 * @ndev: The DaVinci EMAC network adapter 485 * @ndev: The DaVinci EMAC network adapter
486 * @ecmd: ethtool command 486 * @ecmd: ethtool command
487 * 487 *
@@ -500,7 +500,7 @@ static int emac_get_settings(struct net_device *ndev,
500} 500}
501 501
502/** 502/**
503 * emac_set_settings: Set EMAC settings 503 * emac_set_settings - Set EMAC settings
504 * @ndev: The DaVinci EMAC network adapter 504 * @ndev: The DaVinci EMAC network adapter
505 * @ecmd: ethtool command 505 * @ecmd: ethtool command
506 * 506 *
@@ -518,7 +518,7 @@ static int emac_set_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
518} 518}
519 519
520/** 520/**
521 * emac_get_coalesce : Get interrupt coalesce settings for this device 521 * emac_get_coalesce - Get interrupt coalesce settings for this device
522 * @ndev : The DaVinci EMAC network adapter 522 * @ndev : The DaVinci EMAC network adapter
523 * @coal : ethtool coalesce settings structure 523 * @coal : ethtool coalesce settings structure
524 * 524 *
@@ -536,7 +536,7 @@ static int emac_get_coalesce(struct net_device *ndev,
536} 536}
537 537
538/** 538/**
539 * emac_set_coalesce : Set interrupt coalesce settings for this device 539 * emac_set_coalesce - Set interrupt coalesce settings for this device
540 * @ndev : The DaVinci EMAC network adapter 540 * @ndev : The DaVinci EMAC network adapter
541 * @coal : ethtool coalesce settings structure 541 * @coal : ethtool coalesce settings structure
542 * 542 *
@@ -614,11 +614,9 @@ static int emac_set_coalesce(struct net_device *ndev,
614} 614}
615 615
616 616
617/** 617/* ethtool_ops: DaVinci EMAC Ethtool structure
618 * ethtool_ops: DaVinci EMAC Ethtool structure
619 * 618 *
620 * Ethtool support for EMAC adapter 619 * Ethtool support for EMAC adapter
621 *
622 */ 620 */
623static const struct ethtool_ops ethtool_ops = { 621static const struct ethtool_ops ethtool_ops = {
624 .get_drvinfo = emac_get_drvinfo, 622 .get_drvinfo = emac_get_drvinfo,
@@ -631,7 +629,7 @@ static const struct ethtool_ops ethtool_ops = {
631}; 629};
632 630
633/** 631/**
634 * emac_update_phystatus: Update Phy status 632 * emac_update_phystatus - Update Phy status
635 * @priv: The DaVinci EMAC private adapter structure 633 * @priv: The DaVinci EMAC private adapter structure
636 * 634 *
637 * Updates phy status and takes action for network queue if required 635 * Updates phy status and takes action for network queue if required
@@ -697,7 +695,7 @@ static void emac_update_phystatus(struct emac_priv *priv)
697} 695}
698 696
699/** 697/**
700 * hash_get: Calculate hash value from mac address 698 * hash_get - Calculate hash value from mac address
701 * @addr: mac address to delete from hash table 699 * @addr: mac address to delete from hash table
702 * 700 *
703 * Calculates hash value from mac address 701 * Calculates hash value from mac address
@@ -723,9 +721,9 @@ static u32 hash_get(u8 *addr)
723} 721}
724 722
725/** 723/**
726 * hash_add: Hash function to add mac addr from hash table 724 * hash_add - Hash function to add mac addr from hash table
727 * @priv: The DaVinci EMAC private adapter structure 725 * @priv: The DaVinci EMAC private adapter structure
728 * mac_addr: mac address to delete from hash table 726 * @mac_addr: mac address to delete from hash table
729 * 727 *
730 * Adds mac address to the internal hash table 728 * Adds mac address to the internal hash table
731 * 729 *
@@ -765,9 +763,9 @@ static int hash_add(struct emac_priv *priv, u8 *mac_addr)
765} 763}
766 764
767/** 765/**
768 * hash_del: Hash function to delete mac addr from hash table 766 * hash_del - Hash function to delete mac addr from hash table
769 * @priv: The DaVinci EMAC private adapter structure 767 * @priv: The DaVinci EMAC private adapter structure
770 * mac_addr: mac address to delete from hash table 768 * @mac_addr: mac address to delete from hash table
771 * 769 *
772 * Removes mac address from the internal hash table 770 * Removes mac address from the internal hash table
773 * 771 *
@@ -807,7 +805,7 @@ static int hash_del(struct emac_priv *priv, u8 *mac_addr)
807#define EMAC_ALL_MULTI_CLR 3 805#define EMAC_ALL_MULTI_CLR 3
808 806
809/** 807/**
810 * emac_add_mcast: Set multicast address in the EMAC adapter (Internal) 808 * emac_add_mcast - Set multicast address in the EMAC adapter (Internal)
811 * @priv: The DaVinci EMAC private adapter structure 809 * @priv: The DaVinci EMAC private adapter structure
812 * @action: multicast operation to perform 810 * @action: multicast operation to perform
813 * mac_addr: mac address to set 811 * mac_addr: mac address to set
@@ -855,7 +853,7 @@ static void emac_add_mcast(struct emac_priv *priv, u32 action, u8 *mac_addr)
855} 853}
856 854
857/** 855/**
858 * emac_dev_mcast_set: Set multicast address in the EMAC adapter 856 * emac_dev_mcast_set - Set multicast address in the EMAC adapter
859 * @ndev: The DaVinci EMAC network adapter 857 * @ndev: The DaVinci EMAC network adapter
860 * 858 *
861 * Set multicast addresses in EMAC adapter 859 * Set multicast addresses in EMAC adapter
@@ -901,7 +899,7 @@ static void emac_dev_mcast_set(struct net_device *ndev)
901 *************************************************************************/ 899 *************************************************************************/
902 900
903/** 901/**
904 * emac_int_disable: Disable EMAC module interrupt (from adapter) 902 * emac_int_disable - Disable EMAC module interrupt (from adapter)
905 * @priv: The DaVinci EMAC private adapter structure 903 * @priv: The DaVinci EMAC private adapter structure
906 * 904 *
907 * Disable EMAC interrupt on the adapter 905 * Disable EMAC interrupt on the adapter
@@ -931,7 +929,7 @@ static void emac_int_disable(struct emac_priv *priv)
931} 929}
932 930
933/** 931/**
934 * emac_int_enable: Enable EMAC module interrupt (from adapter) 932 * emac_int_enable - Enable EMAC module interrupt (from adapter)
935 * @priv: The DaVinci EMAC private adapter structure 933 * @priv: The DaVinci EMAC private adapter structure
936 * 934 *
937 * Enable EMAC interrupt on the adapter 935 * Enable EMAC interrupt on the adapter
@@ -967,7 +965,7 @@ static void emac_int_enable(struct emac_priv *priv)
967} 965}
968 966
969/** 967/**
970 * emac_irq: EMAC interrupt handler 968 * emac_irq - EMAC interrupt handler
971 * @irq: interrupt number 969 * @irq: interrupt number
972 * @dev_id: EMAC network adapter data structure ptr 970 * @dev_id: EMAC network adapter data structure ptr
973 * 971 *
@@ -1060,7 +1058,7 @@ static void emac_tx_handler(void *token, int len, int status)
1060} 1058}
1061 1059
1062/** 1060/**
1063 * emac_dev_xmit: EMAC Transmit function 1061 * emac_dev_xmit - EMAC Transmit function
1064 * @skb: SKB pointer 1062 * @skb: SKB pointer
1065 * @ndev: The DaVinci EMAC network adapter 1063 * @ndev: The DaVinci EMAC network adapter
1066 * 1064 *
@@ -1111,7 +1109,7 @@ fail_tx:
1111} 1109}
1112 1110
1113/** 1111/**
1114 * emac_dev_tx_timeout: EMAC Transmit timeout function 1112 * emac_dev_tx_timeout - EMAC Transmit timeout function
1115 * @ndev: The DaVinci EMAC network adapter 1113 * @ndev: The DaVinci EMAC network adapter
1116 * 1114 *
1117 * Called when system detects that a skb timeout period has expired 1115 * Called when system detects that a skb timeout period has expired
@@ -1138,7 +1136,7 @@ static void emac_dev_tx_timeout(struct net_device *ndev)
1138} 1136}
1139 1137
1140/** 1138/**
1141 * emac_set_type0addr: Set EMAC Type0 mac address 1139 * emac_set_type0addr - Set EMAC Type0 mac address
1142 * @priv: The DaVinci EMAC private adapter structure 1140 * @priv: The DaVinci EMAC private adapter structure
1143 * @ch: RX channel number 1141 * @ch: RX channel number
1144 * @mac_addr: MAC address to set in device 1142 * @mac_addr: MAC address to set in device
@@ -1165,7 +1163,7 @@ static void emac_set_type0addr(struct emac_priv *priv, u32 ch, char *mac_addr)
1165} 1163}
1166 1164
1167/** 1165/**
1168 * emac_set_type1addr: Set EMAC Type1 mac address 1166 * emac_set_type1addr - Set EMAC Type1 mac address
1169 * @priv: The DaVinci EMAC private adapter structure 1167 * @priv: The DaVinci EMAC private adapter structure
1170 * @ch: RX channel number 1168 * @ch: RX channel number
1171 * @mac_addr: MAC address to set in device 1169 * @mac_addr: MAC address to set in device
@@ -1187,7 +1185,7 @@ static void emac_set_type1addr(struct emac_priv *priv, u32 ch, char *mac_addr)
1187} 1185}
1188 1186
1189/** 1187/**
1190 * emac_set_type2addr: Set EMAC Type2 mac address 1188 * emac_set_type2addr - Set EMAC Type2 mac address
1191 * @priv: The DaVinci EMAC private adapter structure 1189 * @priv: The DaVinci EMAC private adapter structure
1192 * @ch: RX channel number 1190 * @ch: RX channel number
1193 * @mac_addr: MAC address to set in device 1191 * @mac_addr: MAC address to set in device
@@ -1213,7 +1211,7 @@ static void emac_set_type2addr(struct emac_priv *priv, u32 ch,
1213} 1211}
1214 1212
1215/** 1213/**
1216 * emac_setmac: Set mac address in the adapter (internal function) 1214 * emac_setmac - Set mac address in the adapter (internal function)
1217 * @priv: The DaVinci EMAC private adapter structure 1215 * @priv: The DaVinci EMAC private adapter structure
1218 * @ch: RX channel number 1216 * @ch: RX channel number
1219 * @mac_addr: MAC address to set in device 1217 * @mac_addr: MAC address to set in device
@@ -1242,7 +1240,7 @@ static void emac_setmac(struct emac_priv *priv, u32 ch, char *mac_addr)
1242} 1240}
1243 1241
1244/** 1242/**
1245 * emac_dev_setmac_addr: Set mac address in the adapter 1243 * emac_dev_setmac_addr - Set mac address in the adapter
1246 * @ndev: The DaVinci EMAC network adapter 1244 * @ndev: The DaVinci EMAC network adapter
1247 * @addr: MAC address to set in device 1245 * @addr: MAC address to set in device
1248 * 1246 *
@@ -1277,7 +1275,7 @@ static int emac_dev_setmac_addr(struct net_device *ndev, void *addr)
1277} 1275}
1278 1276
1279/** 1277/**
1280 * emac_hw_enable: Enable EMAC hardware for packet transmission/reception 1278 * emac_hw_enable - Enable EMAC hardware for packet transmission/reception
1281 * @priv: The DaVinci EMAC private adapter structure 1279 * @priv: The DaVinci EMAC private adapter structure
1282 * 1280 *
1283 * Enables EMAC hardware for packet processing - enables PHY, enables RX 1281 * Enables EMAC hardware for packet processing - enables PHY, enables RX
@@ -1347,7 +1345,7 @@ static int emac_hw_enable(struct emac_priv *priv)
1347} 1345}
1348 1346
1349/** 1347/**
1350 * emac_poll: EMAC NAPI Poll function 1348 * emac_poll - EMAC NAPI Poll function
1351 * @ndev: The DaVinci EMAC network adapter 1349 * @ndev: The DaVinci EMAC network adapter
1352 * @budget: Number of receive packets to process (as told by NAPI layer) 1350 * @budget: Number of receive packets to process (as told by NAPI layer)
1353 * 1351 *
@@ -1430,7 +1428,7 @@ static int emac_poll(struct napi_struct *napi, int budget)
1430 1428
1431#ifdef CONFIG_NET_POLL_CONTROLLER 1429#ifdef CONFIG_NET_POLL_CONTROLLER
1432/** 1430/**
1433 * emac_poll_controller: EMAC Poll controller function 1431 * emac_poll_controller - EMAC Poll controller function
1434 * @ndev: The DaVinci EMAC network adapter 1432 * @ndev: The DaVinci EMAC network adapter
1435 * 1433 *
1436 * Polled functionality used by netconsole and others in non interrupt mode 1434 * Polled functionality used by netconsole and others in non interrupt mode
@@ -1489,7 +1487,7 @@ static void emac_adjust_link(struct net_device *ndev)
1489 *************************************************************************/ 1487 *************************************************************************/
1490 1488
1491/** 1489/**
1492 * emac_devioctl: EMAC adapter ioctl 1490 * emac_devioctl - EMAC adapter ioctl
1493 * @ndev: The DaVinci EMAC network adapter 1491 * @ndev: The DaVinci EMAC network adapter
1494 * @ifrq: request parameter 1492 * @ifrq: request parameter
1495 * @cmd: command parameter 1493 * @cmd: command parameter
@@ -1516,7 +1514,7 @@ static int match_first_device(struct device *dev, void *data)
1516} 1514}
1517 1515
1518/** 1516/**
1519 * emac_dev_open: EMAC device open 1517 * emac_dev_open - EMAC device open
1520 * @ndev: The DaVinci EMAC network adapter 1518 * @ndev: The DaVinci EMAC network adapter
1521 * 1519 *
1522 * Called when system wants to start the interface. We init TX/RX channels 1520 * Called when system wants to start the interface. We init TX/RX channels
@@ -1649,7 +1647,7 @@ rollback:
1649} 1647}
1650 1648
1651/** 1649/**
1652 * emac_dev_stop: EMAC device stop 1650 * emac_dev_stop - EMAC device stop
1653 * @ndev: The DaVinci EMAC network adapter 1651 * @ndev: The DaVinci EMAC network adapter
1654 * 1652 *
1655 * Called when system wants to stop or down the interface. We stop the network 1653 * Called when system wants to stop or down the interface. We stop the network
@@ -1691,7 +1689,7 @@ static int emac_dev_stop(struct net_device *ndev)
1691} 1689}
1692 1690
1693/** 1691/**
1694 * emac_dev_getnetstats: EMAC get statistics function 1692 * emac_dev_getnetstats - EMAC get statistics function
1695 * @ndev: The DaVinci EMAC network adapter 1693 * @ndev: The DaVinci EMAC network adapter
1696 * 1694 *
1697 * Called when system wants to get statistics from the device. 1695 * Called when system wants to get statistics from the device.
@@ -1763,7 +1761,7 @@ static const struct net_device_ops emac_netdev_ops = {
1763}; 1761};
1764 1762
1765/** 1763/**
1766 * davinci_emac_probe: EMAC device probe 1764 * davinci_emac_probe - EMAC device probe
1767 * @pdev: The DaVinci EMAC device that we are removing 1765 * @pdev: The DaVinci EMAC device that we are removing
1768 * 1766 *
1769 * Called when probing for emac devicesr. We get details of instances and 1767 * Called when probing for emac devicesr. We get details of instances and
@@ -1949,7 +1947,7 @@ free_clk:
1949} 1947}
1950 1948
1951/** 1949/**
1952 * davinci_emac_remove: EMAC device remove 1950 * davinci_emac_remove - EMAC device remove
1953 * @pdev: The DaVinci EMAC device that we are removing 1951 * @pdev: The DaVinci EMAC device that we are removing
1954 * 1952 *
1955 * Called when removing the device driver. We disable clock usage and release 1953 * Called when removing the device driver. We disable clock usage and release
@@ -2015,9 +2013,7 @@ static const struct dev_pm_ops davinci_emac_pm_ops = {
2015 .resume = davinci_emac_resume, 2013 .resume = davinci_emac_resume,
2016}; 2014};
2017 2015
2018/** 2016/* davinci_emac_driver: EMAC platform driver structure */
2019 * davinci_emac_driver: EMAC platform driver structure
2020 */
2021static struct platform_driver davinci_emac_driver = { 2017static struct platform_driver davinci_emac_driver = {
2022 .driver = { 2018 .driver = {
2023 .name = "davinci_emac", 2019 .name = "davinci_emac",
@@ -2029,7 +2025,7 @@ static struct platform_driver davinci_emac_driver = {
2029}; 2025};
2030 2026
2031/** 2027/**
2032 * davinci_emac_init: EMAC driver module init 2028 * davinci_emac_init - EMAC driver module init
2033 * 2029 *
2034 * Called when initializing the driver. We register the driver with 2030 * Called when initializing the driver. We register the driver with
2035 * the platform. 2031 * the platform.
@@ -2041,7 +2037,7 @@ static int __init davinci_emac_init(void)
2041late_initcall(davinci_emac_init); 2037late_initcall(davinci_emac_init);
2042 2038
2043/** 2039/**
2044 * davinci_emac_exit: EMAC driver module exit 2040 * davinci_emac_exit - EMAC driver module exit
2045 * 2041 *
2046 * Called when exiting the driver completely. We unregister the driver with 2042 * Called when exiting the driver completely. We unregister the driver with
2047 * the platform and exit 2043 * the platform and exit
diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c
index 6199f6b387b..c1ebfe9efcb 100644
--- a/drivers/net/ethernet/toshiba/spider_net.c
+++ b/drivers/net/ethernet/toshiba/spider_net.c
@@ -114,7 +114,8 @@ spider_net_write_reg(struct spider_net_card *card, u32 reg, u32 value)
114 out_be32(card->regs + reg, value); 114 out_be32(card->regs + reg, value);
115} 115}
116 116
117/** spider_net_write_phy - write to phy register 117/**
118 * spider_net_write_phy - write to phy register
118 * @netdev: adapter to be written to 119 * @netdev: adapter to be written to
119 * @mii_id: id of MII 120 * @mii_id: id of MII
120 * @reg: PHY register 121 * @reg: PHY register
@@ -137,7 +138,8 @@ spider_net_write_phy(struct net_device *netdev, int mii_id,
137 spider_net_write_reg(card, SPIDER_NET_GPCWOPCMD, writevalue); 138 spider_net_write_reg(card, SPIDER_NET_GPCWOPCMD, writevalue);
138} 139}
139 140
140/** spider_net_read_phy - read from phy register 141/**
142 * spider_net_read_phy - read from phy register
141 * @netdev: network device to be read from 143 * @netdev: network device to be read from
142 * @mii_id: id of MII 144 * @mii_id: id of MII
143 * @reg: PHY register 145 * @reg: PHY register
diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c
index ea3e0a21ba7..a46c1985968 100644
--- a/drivers/net/ethernet/via/via-velocity.c
+++ b/drivers/net/ethernet/via/via-velocity.c
@@ -486,7 +486,7 @@ static void __devinit velocity_get_options(struct velocity_opt *opts, int index,
486 velocity_set_bool_opt(&opts->flags, IP_byte_align[index], IP_ALIG_DEF, VELOCITY_FLAGS_IP_ALIGN, "IP_byte_align", devname); 486 velocity_set_bool_opt(&opts->flags, IP_byte_align[index], IP_ALIG_DEF, VELOCITY_FLAGS_IP_ALIGN, "IP_byte_align", devname);
487 velocity_set_bool_opt(&opts->flags, ValPktLen[index], VAL_PKT_LEN_DEF, VELOCITY_FLAGS_VAL_PKT_LEN, "ValPktLen", devname); 487 velocity_set_bool_opt(&opts->flags, ValPktLen[index], VAL_PKT_LEN_DEF, VELOCITY_FLAGS_VAL_PKT_LEN, "ValPktLen", devname);
488 velocity_set_int_opt((int *) &opts->spd_dpx, speed_duplex[index], MED_LNK_MIN, MED_LNK_MAX, MED_LNK_DEF, "Media link mode", devname); 488 velocity_set_int_opt((int *) &opts->spd_dpx, speed_duplex[index], MED_LNK_MIN, MED_LNK_MAX, MED_LNK_DEF, "Media link mode", devname);
489 velocity_set_int_opt((int *) &opts->wol_opts, wol_opts[index], WOL_OPT_MIN, WOL_OPT_MAX, WOL_OPT_DEF, "Wake On Lan options", devname); 489 velocity_set_int_opt(&opts->wol_opts, wol_opts[index], WOL_OPT_MIN, WOL_OPT_MAX, WOL_OPT_DEF, "Wake On Lan options", devname);
490 opts->numrx = (opts->numrx & ~3); 490 opts->numrx = (opts->numrx & ~3);
491} 491}
492 492
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index 1eaf7128afe..f8e35188011 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -197,7 +197,7 @@ static int temac_dcr_setup(struct temac_local *lp, struct platform_device *op,
197#endif 197#endif
198 198
199/** 199/**
200 * * temac_dma_bd_release - Release buffer descriptor rings 200 * temac_dma_bd_release - Release buffer descriptor rings
201 */ 201 */
202static void temac_dma_bd_release(struct net_device *ndev) 202static void temac_dma_bd_release(struct net_device *ndev)
203{ 203{
@@ -768,7 +768,6 @@ static void ll_temac_recv(struct net_device *ndev)
768 DMA_FROM_DEVICE); 768 DMA_FROM_DEVICE);
769 769
770 skb_put(skb, length); 770 skb_put(skb, length);
771 skb->dev = ndev;
772 skb->protocol = eth_type_trans(skb, ndev); 771 skb->protocol = eth_type_trans(skb, ndev);
773 skb_checksum_none_assert(skb); 772 skb_checksum_none_assert(skb);
774 773
diff --git a/drivers/net/fddi/defxx.c b/drivers/net/fddi/defxx.c
index 4ad80f77109..6695a1dadf4 100644
--- a/drivers/net/fddi/defxx.c
+++ b/drivers/net/fddi/defxx.c
@@ -2962,7 +2962,7 @@ static int dfx_rcv_init(DFX_board_t *bp, int get_buffers)
2962 bp->descr_block_virt->rcv_data[i+j].long_0 = (u32) (PI_RCV_DESCR_M_SOP | 2962 bp->descr_block_virt->rcv_data[i+j].long_0 = (u32) (PI_RCV_DESCR_M_SOP |
2963 ((PI_RCV_DATA_K_SIZE_MAX / PI_ALIGN_K_RCV_DATA_BUFF) << PI_RCV_DESCR_V_SEG_LEN)); 2963 ((PI_RCV_DATA_K_SIZE_MAX / PI_ALIGN_K_RCV_DATA_BUFF) << PI_RCV_DESCR_V_SEG_LEN));
2964 bp->descr_block_virt->rcv_data[i+j].long_1 = (u32) (bp->rcv_block_phys + (i * PI_RCV_DATA_K_SIZE_MAX)); 2964 bp->descr_block_virt->rcv_data[i+j].long_1 = (u32) (bp->rcv_block_phys + (i * PI_RCV_DATA_K_SIZE_MAX));
2965 bp->p_rcv_buff_va[i+j] = (char *) (bp->rcv_block_virt + (i * PI_RCV_DATA_K_SIZE_MAX)); 2965 bp->p_rcv_buff_va[i+j] = (bp->rcv_block_virt + (i * PI_RCV_DATA_K_SIZE_MAX));
2966 } 2966 }
2967#endif 2967#endif
2968 } 2968 }
@@ -3030,7 +3030,7 @@ static void dfx_rcv_queue_process(
3030#ifdef DYNAMIC_BUFFERS 3030#ifdef DYNAMIC_BUFFERS
3031 p_buff = (char *) (((struct sk_buff *)bp->p_rcv_buff_va[entry])->data); 3031 p_buff = (char *) (((struct sk_buff *)bp->p_rcv_buff_va[entry])->data);
3032#else 3032#else
3033 p_buff = (char *) bp->p_rcv_buff_va[entry]; 3033 p_buff = bp->p_rcv_buff_va[entry];
3034#endif 3034#endif
3035 memcpy(&descr, p_buff + RCV_BUFF_K_DESCR, sizeof(u32)); 3035 memcpy(&descr, p_buff + RCV_BUFF_K_DESCR, sizeof(u32));
3036 3036
diff --git a/drivers/net/fddi/skfp/pmf.c b/drivers/net/fddi/skfp/pmf.c
index 9ac4665d741..24d8566cfd8 100644
--- a/drivers/net/fddi/skfp/pmf.c
+++ b/drivers/net/fddi/skfp/pmf.c
@@ -1242,7 +1242,7 @@ static int smt_set_para(struct s_smc *smc, struct smt_para *pa, int index,
1242 if (len < 8) 1242 if (len < 8)
1243 goto len_error ; 1243 goto len_error ;
1244 if (set) 1244 if (set)
1245 memcpy((char *) to,(char *) from+2,6) ; 1245 memcpy(to,from+2,6) ;
1246 to += 8 ; 1246 to += 8 ;
1247 from += 8 ; 1247 from += 8 ;
1248 len -= 8 ; 1248 len -= 8 ;
@@ -1251,7 +1251,7 @@ static int smt_set_para(struct s_smc *smc, struct smt_para *pa, int index,
1251 if (len < 4) 1251 if (len < 4)
1252 goto len_error ; 1252 goto len_error ;
1253 if (set) 1253 if (set)
1254 memcpy((char *) to,(char *) from,4) ; 1254 memcpy(to,from,4) ;
1255 to += 4 ; 1255 to += 4 ;
1256 from += 4 ; 1256 from += 4 ;
1257 len -= 4 ; 1257 len -= 4 ;
@@ -1260,7 +1260,7 @@ static int smt_set_para(struct s_smc *smc, struct smt_para *pa, int index,
1260 if (len < 8) 1260 if (len < 8)
1261 goto len_error ; 1261 goto len_error ;
1262 if (set) 1262 if (set)
1263 memcpy((char *) to,(char *) from,8) ; 1263 memcpy(to,from,8) ;
1264 to += 8 ; 1264 to += 8 ;
1265 from += 8 ; 1265 from += 8 ;
1266 len -= 8 ; 1266 len -= 8 ;
@@ -1269,7 +1269,7 @@ static int smt_set_para(struct s_smc *smc, struct smt_para *pa, int index,
1269 if (len < 32) 1269 if (len < 32)
1270 goto len_error ; 1270 goto len_error ;
1271 if (set) 1271 if (set)
1272 memcpy((char *) to,(char *) from,32) ; 1272 memcpy(to,from,32) ;
1273 to += 32 ; 1273 to += 32 ;
1274 from += 32 ; 1274 from += 32 ;
1275 len -= 32 ; 1275 len -= 32 ;
diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c
index aed1a6105b2..2c0894a92ab 100644
--- a/drivers/net/hamradio/mkiss.c
+++ b/drivers/net/hamradio/mkiss.c
@@ -485,7 +485,7 @@ static void ax_encaps(struct net_device *dev, unsigned char *icp, int len)
485 485
486 return; 486 return;
487 default: 487 default:
488 count = kiss_esc(p, (unsigned char *)ax->xbuff, len); 488 count = kiss_esc(p, ax->xbuff, len);
489 } 489 }
490 } else { 490 } else {
491 unsigned short crc; 491 unsigned short crc;
@@ -497,7 +497,7 @@ static void ax_encaps(struct net_device *dev, unsigned char *icp, int len)
497 case CRC_MODE_SMACK: 497 case CRC_MODE_SMACK:
498 *p |= 0x80; 498 *p |= 0x80;
499 crc = swab16(crc16(0, p, len)); 499 crc = swab16(crc16(0, p, len));
500 count = kiss_esc_crc(p, (unsigned char *)ax->xbuff, crc, len+2); 500 count = kiss_esc_crc(p, ax->xbuff, crc, len+2);
501 break; 501 break;
502 case CRC_MODE_FLEX_TEST: 502 case CRC_MODE_FLEX_TEST:
503 ax->crcmode = CRC_MODE_NONE; 503 ax->crcmode = CRC_MODE_NONE;
@@ -506,11 +506,11 @@ static void ax_encaps(struct net_device *dev, unsigned char *icp, int len)
506 case CRC_MODE_FLEX: 506 case CRC_MODE_FLEX:
507 *p |= 0x20; 507 *p |= 0x20;
508 crc = calc_crc_flex(p, len); 508 crc = calc_crc_flex(p, len);
509 count = kiss_esc_crc(p, (unsigned char *)ax->xbuff, crc, len+2); 509 count = kiss_esc_crc(p, ax->xbuff, crc, len+2);
510 break; 510 break;
511 511
512 default: 512 default:
513 count = kiss_esc(p, (unsigned char *)ax->xbuff, len); 513 count = kiss_esc(p, ax->xbuff, len);
514 } 514 }
515 } 515 }
516 spin_unlock_bh(&ax->buflock); 516 spin_unlock_bh(&ax->buflock);
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 0c569831db5..6cee2917eb0 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -614,7 +614,7 @@ retry_send_cmplt:
614static void netvsc_receive_completion(void *context) 614static void netvsc_receive_completion(void *context)
615{ 615{
616 struct hv_netvsc_packet *packet = context; 616 struct hv_netvsc_packet *packet = context;
617 struct hv_device *device = (struct hv_device *)packet->device; 617 struct hv_device *device = packet->device;
618 struct netvsc_device *net_device; 618 struct netvsc_device *net_device;
619 u64 transaction_id = 0; 619 u64 transaction_id = 0;
620 bool fsend_receive_comp = false; 620 bool fsend_receive_comp = false;
diff --git a/drivers/net/irda/ali-ircc.c b/drivers/net/irda/ali-ircc.c
index dcc80d652b7..84872043b5c 100644
--- a/drivers/net/irda/ali-ircc.c
+++ b/drivers/net/irda/ali-ircc.c
@@ -1017,7 +1017,7 @@ static void ali_ircc_fir_change_speed(struct ali_ircc_cb *priv, __u32 baud)
1017{ 1017{
1018 1018
1019 int iobase; 1019 int iobase;
1020 struct ali_ircc_cb *self = (struct ali_ircc_cb *) priv; 1020 struct ali_ircc_cb *self = priv;
1021 struct net_device *dev; 1021 struct net_device *dev;
1022 1022
1023 IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __func__ ); 1023 IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __func__ );
@@ -1052,7 +1052,7 @@ static void ali_ircc_fir_change_speed(struct ali_ircc_cb *priv, __u32 baud)
1052 */ 1052 */
1053static void ali_ircc_sir_change_speed(struct ali_ircc_cb *priv, __u32 speed) 1053static void ali_ircc_sir_change_speed(struct ali_ircc_cb *priv, __u32 speed)
1054{ 1054{
1055 struct ali_ircc_cb *self = (struct ali_ircc_cb *) priv; 1055 struct ali_ircc_cb *self = priv;
1056 unsigned long flags; 1056 unsigned long flags;
1057 int iobase; 1057 int iobase;
1058 int fcr; /* FIFO control reg */ 1058 int fcr; /* FIFO control reg */
@@ -1121,7 +1121,7 @@ static void ali_ircc_sir_change_speed(struct ali_ircc_cb *priv, __u32 speed)
1121static void ali_ircc_change_dongle_speed(struct ali_ircc_cb *priv, int speed) 1121static void ali_ircc_change_dongle_speed(struct ali_ircc_cb *priv, int speed)
1122{ 1122{
1123 1123
1124 struct ali_ircc_cb *self = (struct ali_ircc_cb *) priv; 1124 struct ali_ircc_cb *self = priv;
1125 int iobase,dongle_id; 1125 int iobase,dongle_id;
1126 int tmp = 0; 1126 int tmp = 0;
1127 1127
diff --git a/drivers/net/irda/au1k_ir.c b/drivers/net/irda/au1k_ir.c
index fc503aa5288..e09417df8f3 100644
--- a/drivers/net/irda/au1k_ir.c
+++ b/drivers/net/irda/au1k_ir.c
@@ -794,7 +794,7 @@ static int __devinit au1k_irda_net_init(struct net_device *dev)
794 794
795 /* allocate the data buffers */ 795 /* allocate the data buffers */
796 aup->db[0].vaddr = 796 aup->db[0].vaddr =
797 (void *)dma_alloc(MAX_BUF_SIZE * 2 * NUM_IR_DESC, &temp); 797 dma_alloc(MAX_BUF_SIZE * 2 * NUM_IR_DESC, &temp);
798 if (!aup->db[0].vaddr) 798 if (!aup->db[0].vaddr)
799 goto out3; 799 goto out3;
800 800
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 2ee56de7b0c..0737bd4d166 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -847,13 +847,12 @@ static ssize_t macvtap_do_read(struct macvtap_queue *q, struct kiocb *iocb,
847 const struct iovec *iv, unsigned long len, 847 const struct iovec *iv, unsigned long len,
848 int noblock) 848 int noblock)
849{ 849{
850 DECLARE_WAITQUEUE(wait, current); 850 DEFINE_WAIT(wait);
851 struct sk_buff *skb; 851 struct sk_buff *skb;
852 ssize_t ret = 0; 852 ssize_t ret = 0;
853 853
854 add_wait_queue(sk_sleep(&q->sk), &wait);
855 while (len) { 854 while (len) {
856 current->state = TASK_INTERRUPTIBLE; 855 prepare_to_wait(sk_sleep(&q->sk), &wait, TASK_INTERRUPTIBLE);
857 856
858 /* Read frames from the queue */ 857 /* Read frames from the queue */
859 skb = skb_dequeue(&q->sk.sk_receive_queue); 858 skb = skb_dequeue(&q->sk.sk_receive_queue);
@@ -875,8 +874,7 @@ static ssize_t macvtap_do_read(struct macvtap_queue *q, struct kiocb *iocb,
875 break; 874 break;
876 } 875 }
877 876
878 current->state = TASK_RUNNING; 877 finish_wait(sk_sleep(&q->sk), &wait);
879 remove_wait_queue(sk_sleep(&q->sk), &wait);
880 return ret; 878 return ret;
881} 879}
882 880
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 944cdfb80fe..3090dc65a6f 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -67,6 +67,11 @@ config BCM63XX_PHY
67 ---help--- 67 ---help---
68 Currently supports the 6348 and 6358 PHYs. 68 Currently supports the 6348 and 6358 PHYs.
69 69
70config BCM87XX_PHY
71 tristate "Driver for Broadcom BCM8706 and BCM8727 PHYs"
72 help
73 Currently supports the BCM8706 and BCM8727 10G Ethernet PHYs.
74
70config ICPLUS_PHY 75config ICPLUS_PHY
71 tristate "Drivers for ICPlus PHYs" 76 tristate "Drivers for ICPlus PHYs"
72 ---help--- 77 ---help---
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index f51af688ef8..6d2dc6c94f2 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -12,6 +12,7 @@ obj-$(CONFIG_SMSC_PHY) += smsc.o
12obj-$(CONFIG_VITESSE_PHY) += vitesse.o 12obj-$(CONFIG_VITESSE_PHY) += vitesse.o
13obj-$(CONFIG_BROADCOM_PHY) += broadcom.o 13obj-$(CONFIG_BROADCOM_PHY) += broadcom.o
14obj-$(CONFIG_BCM63XX_PHY) += bcm63xx.o 14obj-$(CONFIG_BCM63XX_PHY) += bcm63xx.o
15obj-$(CONFIG_BCM87XX_PHY) += bcm87xx.o
15obj-$(CONFIG_ICPLUS_PHY) += icplus.o 16obj-$(CONFIG_ICPLUS_PHY) += icplus.o
16obj-$(CONFIG_REALTEK_PHY) += realtek.o 17obj-$(CONFIG_REALTEK_PHY) += realtek.o
17obj-$(CONFIG_LSI_ET1011C_PHY) += et1011c.o 18obj-$(CONFIG_LSI_ET1011C_PHY) += et1011c.o
diff --git a/drivers/net/phy/amd.c b/drivers/net/phy/amd.c
index cfabd5fe537..a3fb5ceb648 100644
--- a/drivers/net/phy/amd.c
+++ b/drivers/net/phy/amd.c
@@ -77,13 +77,7 @@ static struct phy_driver am79c_driver = {
77 77
78static int __init am79c_init(void) 78static int __init am79c_init(void)
79{ 79{
80 int ret; 80 return phy_driver_register(&am79c_driver);
81
82 ret = phy_driver_register(&am79c_driver);
83 if (ret)
84 return ret;
85
86 return 0;
87} 81}
88 82
89static void __exit am79c_exit(void) 83static void __exit am79c_exit(void)
diff --git a/drivers/net/phy/bcm63xx.c b/drivers/net/phy/bcm63xx.c
index cd802eb25fd..84c7a39b1c6 100644
--- a/drivers/net/phy/bcm63xx.c
+++ b/drivers/net/phy/bcm63xx.c
@@ -71,7 +71,8 @@ static int bcm63xx_config_intr(struct phy_device *phydev)
71 return err; 71 return err;
72} 72}
73 73
74static struct phy_driver bcm63xx_1_driver = { 74static struct phy_driver bcm63xx_driver[] = {
75{
75 .phy_id = 0x00406000, 76 .phy_id = 0x00406000,
76 .phy_id_mask = 0xfffffc00, 77 .phy_id_mask = 0xfffffc00,
77 .name = "Broadcom BCM63XX (1)", 78 .name = "Broadcom BCM63XX (1)",
@@ -84,10 +85,8 @@ static struct phy_driver bcm63xx_1_driver = {
84 .ack_interrupt = bcm63xx_ack_interrupt, 85 .ack_interrupt = bcm63xx_ack_interrupt,
85 .config_intr = bcm63xx_config_intr, 86 .config_intr = bcm63xx_config_intr,
86 .driver = { .owner = THIS_MODULE }, 87 .driver = { .owner = THIS_MODULE },
87}; 88}, {
88 89 /* same phy as above, with just a different OUI */
89/* same phy as above, with just a different OUI */
90static struct phy_driver bcm63xx_2_driver = {
91 .phy_id = 0x002bdc00, 90 .phy_id = 0x002bdc00,
92 .phy_id_mask = 0xfffffc00, 91 .phy_id_mask = 0xfffffc00,
93 .name = "Broadcom BCM63XX (2)", 92 .name = "Broadcom BCM63XX (2)",
@@ -99,30 +98,18 @@ static struct phy_driver bcm63xx_2_driver = {
99 .ack_interrupt = bcm63xx_ack_interrupt, 98 .ack_interrupt = bcm63xx_ack_interrupt,
100 .config_intr = bcm63xx_config_intr, 99 .config_intr = bcm63xx_config_intr,
101 .driver = { .owner = THIS_MODULE }, 100 .driver = { .owner = THIS_MODULE },
102}; 101} };
103 102
104static int __init bcm63xx_phy_init(void) 103static int __init bcm63xx_phy_init(void)
105{ 104{
106 int ret; 105 return phy_drivers_register(bcm63xx_driver,
107 106 ARRAY_SIZE(bcm63xx_driver));
108 ret = phy_driver_register(&bcm63xx_1_driver);
109 if (ret)
110 goto out_63xx_1;
111 ret = phy_driver_register(&bcm63xx_2_driver);
112 if (ret)
113 goto out_63xx_2;
114 return ret;
115
116out_63xx_2:
117 phy_driver_unregister(&bcm63xx_1_driver);
118out_63xx_1:
119 return ret;
120} 107}
121 108
122static void __exit bcm63xx_phy_exit(void) 109static void __exit bcm63xx_phy_exit(void)
123{ 110{
124 phy_driver_unregister(&bcm63xx_1_driver); 111 phy_drivers_unregister(bcm63xx_driver,
125 phy_driver_unregister(&bcm63xx_2_driver); 112 ARRAY_SIZE(bcm63xx_driver));
126} 113}
127 114
128module_init(bcm63xx_phy_init); 115module_init(bcm63xx_phy_init);
diff --git a/drivers/net/phy/bcm87xx.c b/drivers/net/phy/bcm87xx.c
new file mode 100644
index 00000000000..2346b38b983
--- /dev/null
+++ b/drivers/net/phy/bcm87xx.c
@@ -0,0 +1,231 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2011 - 2012 Cavium, Inc.
7 */
8
9#include <linux/module.h>
10#include <linux/phy.h>
11#include <linux/of.h>
12
13#define PHY_ID_BCM8706 0x0143bdc1
14#define PHY_ID_BCM8727 0x0143bff0
15
16#define BCM87XX_PMD_RX_SIGNAL_DETECT (MII_ADDR_C45 | 0x1000a)
17#define BCM87XX_10GBASER_PCS_STATUS (MII_ADDR_C45 | 0x30020)
18#define BCM87XX_XGXS_LANE_STATUS (MII_ADDR_C45 | 0x40018)
19
20#define BCM87XX_LASI_CONTROL (MII_ADDR_C45 | 0x39002)
21#define BCM87XX_LASI_STATUS (MII_ADDR_C45 | 0x39005)
22
23#if IS_ENABLED(CONFIG_OF_MDIO)
24/* Set and/or override some configuration registers based on the
25 * broadcom,c45-reg-init property stored in the of_node for the phydev.
26 *
27 * broadcom,c45-reg-init = <devid reg mask value>,...;
28 *
29 * There may be one or more sets of <devid reg mask value>:
30 *
31 * devid: which sub-device to use.
32 * reg: the register.
33 * mask: if non-zero, ANDed with existing register value.
34 * value: ORed with the masked value and written to the regiser.
35 *
36 */
37static int bcm87xx_of_reg_init(struct phy_device *phydev)
38{
39 const __be32 *paddr;
40 const __be32 *paddr_end;
41 int len, ret;
42
43 if (!phydev->dev.of_node)
44 return 0;
45
46 paddr = of_get_property(phydev->dev.of_node,
47 "broadcom,c45-reg-init", &len);
48 if (!paddr)
49 return 0;
50
51 paddr_end = paddr + (len /= sizeof(*paddr));
52
53 ret = 0;
54
55 while (paddr + 3 < paddr_end) {
56 u16 devid = be32_to_cpup(paddr++);
57 u16 reg = be32_to_cpup(paddr++);
58 u16 mask = be32_to_cpup(paddr++);
59 u16 val_bits = be32_to_cpup(paddr++);
60 int val;
61 u32 regnum = MII_ADDR_C45 | (devid << 16) | reg;
62 val = 0;
63 if (mask) {
64 val = phy_read(phydev, regnum);
65 if (val < 0) {
66 ret = val;
67 goto err;
68 }
69 val &= mask;
70 }
71 val |= val_bits;
72
73 ret = phy_write(phydev, regnum, val);
74 if (ret < 0)
75 goto err;
76 }
77err:
78 return ret;
79}
80#else
81static int bcm87xx_of_reg_init(struct phy_device *phydev)
82{
83 return 0;
84}
85#endif /* CONFIG_OF_MDIO */
86
87static int bcm87xx_config_init(struct phy_device *phydev)
88{
89 phydev->supported = SUPPORTED_10000baseR_FEC;
90 phydev->advertising = ADVERTISED_10000baseR_FEC;
91 phydev->state = PHY_NOLINK;
92 phydev->autoneg = AUTONEG_DISABLE;
93
94 bcm87xx_of_reg_init(phydev);
95
96 return 0;
97}
98
99static int bcm87xx_config_aneg(struct phy_device *phydev)
100{
101 return -EINVAL;
102}
103
104static int bcm87xx_read_status(struct phy_device *phydev)
105{
106 int rx_signal_detect;
107 int pcs_status;
108 int xgxs_lane_status;
109
110 rx_signal_detect = phy_read(phydev, BCM87XX_PMD_RX_SIGNAL_DETECT);
111 if (rx_signal_detect < 0)
112 return rx_signal_detect;
113
114 if ((rx_signal_detect & 1) == 0)
115 goto no_link;
116
117 pcs_status = phy_read(phydev, BCM87XX_10GBASER_PCS_STATUS);
118 if (pcs_status < 0)
119 return pcs_status;
120
121 if ((pcs_status & 1) == 0)
122 goto no_link;
123
124 xgxs_lane_status = phy_read(phydev, BCM87XX_XGXS_LANE_STATUS);
125 if (xgxs_lane_status < 0)
126 return xgxs_lane_status;
127
128 if ((xgxs_lane_status & 0x1000) == 0)
129 goto no_link;
130
131 phydev->speed = 10000;
132 phydev->link = 1;
133 phydev->duplex = 1;
134 return 0;
135
136no_link:
137 phydev->link = 0;
138 return 0;
139}
140
141static int bcm87xx_config_intr(struct phy_device *phydev)
142{
143 int reg, err;
144
145 reg = phy_read(phydev, BCM87XX_LASI_CONTROL);
146
147 if (reg < 0)
148 return reg;
149
150 if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
151 reg |= 1;
152 else
153 reg &= ~1;
154
155 err = phy_write(phydev, BCM87XX_LASI_CONTROL, reg);
156 return err;
157}
158
159static int bcm87xx_did_interrupt(struct phy_device *phydev)
160{
161 int reg;
162
163 reg = phy_read(phydev, BCM87XX_LASI_STATUS);
164
165 if (reg < 0) {
166 dev_err(&phydev->dev,
167 "Error: Read of BCM87XX_LASI_STATUS failed: %d\n", reg);
168 return 0;
169 }
170 return (reg & 1) != 0;
171}
172
173static int bcm87xx_ack_interrupt(struct phy_device *phydev)
174{
175 /* Reading the LASI status clears it. */
176 bcm87xx_did_interrupt(phydev);
177 return 0;
178}
179
180static int bcm8706_match_phy_device(struct phy_device *phydev)
181{
182 return phydev->c45_ids.device_ids[4] == PHY_ID_BCM8706;
183}
184
185static int bcm8727_match_phy_device(struct phy_device *phydev)
186{
187 return phydev->c45_ids.device_ids[4] == PHY_ID_BCM8727;
188}
189
190static struct phy_driver bcm87xx_driver[] = {
191{
192 .phy_id = PHY_ID_BCM8706,
193 .phy_id_mask = 0xffffffff,
194 .name = "Broadcom BCM8706",
195 .flags = PHY_HAS_INTERRUPT,
196 .config_init = bcm87xx_config_init,
197 .config_aneg = bcm87xx_config_aneg,
198 .read_status = bcm87xx_read_status,
199 .ack_interrupt = bcm87xx_ack_interrupt,
200 .config_intr = bcm87xx_config_intr,
201 .did_interrupt = bcm87xx_did_interrupt,
202 .match_phy_device = bcm8706_match_phy_device,
203 .driver = { .owner = THIS_MODULE },
204}, {
205 .phy_id = PHY_ID_BCM8727,
206 .phy_id_mask = 0xffffffff,
207 .name = "Broadcom BCM8727",
208 .flags = PHY_HAS_INTERRUPT,
209 .config_init = bcm87xx_config_init,
210 .config_aneg = bcm87xx_config_aneg,
211 .read_status = bcm87xx_read_status,
212 .ack_interrupt = bcm87xx_ack_interrupt,
213 .config_intr = bcm87xx_config_intr,
214 .did_interrupt = bcm87xx_did_interrupt,
215 .match_phy_device = bcm8727_match_phy_device,
216 .driver = { .owner = THIS_MODULE },
217} };
218
219static int __init bcm87xx_init(void)
220{
221 return phy_drivers_register(bcm87xx_driver,
222 ARRAY_SIZE(bcm87xx_driver));
223}
224module_init(bcm87xx_init);
225
226static void __exit bcm87xx_exit(void)
227{
228 phy_drivers_unregister(bcm87xx_driver,
229 ARRAY_SIZE(bcm87xx_driver));
230}
231module_exit(bcm87xx_exit);
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index 60338ff6309..f8c90ea7510 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -682,7 +682,8 @@ static int brcm_fet_config_intr(struct phy_device *phydev)
682 return err; 682 return err;
683} 683}
684 684
685static struct phy_driver bcm5411_driver = { 685static struct phy_driver broadcom_drivers[] = {
686{
686 .phy_id = PHY_ID_BCM5411, 687 .phy_id = PHY_ID_BCM5411,
687 .phy_id_mask = 0xfffffff0, 688 .phy_id_mask = 0xfffffff0,
688 .name = "Broadcom BCM5411", 689 .name = "Broadcom BCM5411",
@@ -695,9 +696,7 @@ static struct phy_driver bcm5411_driver = {
695 .ack_interrupt = bcm54xx_ack_interrupt, 696 .ack_interrupt = bcm54xx_ack_interrupt,
696 .config_intr = bcm54xx_config_intr, 697 .config_intr = bcm54xx_config_intr,
697 .driver = { .owner = THIS_MODULE }, 698 .driver = { .owner = THIS_MODULE },
698}; 699}, {
699
700static struct phy_driver bcm5421_driver = {
701 .phy_id = PHY_ID_BCM5421, 700 .phy_id = PHY_ID_BCM5421,
702 .phy_id_mask = 0xfffffff0, 701 .phy_id_mask = 0xfffffff0,
703 .name = "Broadcom BCM5421", 702 .name = "Broadcom BCM5421",
@@ -710,9 +709,7 @@ static struct phy_driver bcm5421_driver = {
710 .ack_interrupt = bcm54xx_ack_interrupt, 709 .ack_interrupt = bcm54xx_ack_interrupt,
711 .config_intr = bcm54xx_config_intr, 710 .config_intr = bcm54xx_config_intr,
712 .driver = { .owner = THIS_MODULE }, 711 .driver = { .owner = THIS_MODULE },
713}; 712}, {
714
715static struct phy_driver bcm5461_driver = {
716 .phy_id = PHY_ID_BCM5461, 713 .phy_id = PHY_ID_BCM5461,
717 .phy_id_mask = 0xfffffff0, 714 .phy_id_mask = 0xfffffff0,
718 .name = "Broadcom BCM5461", 715 .name = "Broadcom BCM5461",
@@ -725,9 +722,7 @@ static struct phy_driver bcm5461_driver = {
725 .ack_interrupt = bcm54xx_ack_interrupt, 722 .ack_interrupt = bcm54xx_ack_interrupt,
726 .config_intr = bcm54xx_config_intr, 723 .config_intr = bcm54xx_config_intr,
727 .driver = { .owner = THIS_MODULE }, 724 .driver = { .owner = THIS_MODULE },
728}; 725}, {
729
730static struct phy_driver bcm5464_driver = {
731 .phy_id = PHY_ID_BCM5464, 726 .phy_id = PHY_ID_BCM5464,
732 .phy_id_mask = 0xfffffff0, 727 .phy_id_mask = 0xfffffff0,
733 .name = "Broadcom BCM5464", 728 .name = "Broadcom BCM5464",
@@ -740,9 +735,7 @@ static struct phy_driver bcm5464_driver = {
740 .ack_interrupt = bcm54xx_ack_interrupt, 735 .ack_interrupt = bcm54xx_ack_interrupt,
741 .config_intr = bcm54xx_config_intr, 736 .config_intr = bcm54xx_config_intr,
742 .driver = { .owner = THIS_MODULE }, 737 .driver = { .owner = THIS_MODULE },
743}; 738}, {
744
745static struct phy_driver bcm5481_driver = {
746 .phy_id = PHY_ID_BCM5481, 739 .phy_id = PHY_ID_BCM5481,
747 .phy_id_mask = 0xfffffff0, 740 .phy_id_mask = 0xfffffff0,
748 .name = "Broadcom BCM5481", 741 .name = "Broadcom BCM5481",
@@ -755,9 +748,7 @@ static struct phy_driver bcm5481_driver = {
755 .ack_interrupt = bcm54xx_ack_interrupt, 748 .ack_interrupt = bcm54xx_ack_interrupt,
756 .config_intr = bcm54xx_config_intr, 749 .config_intr = bcm54xx_config_intr,
757 .driver = { .owner = THIS_MODULE }, 750 .driver = { .owner = THIS_MODULE },
758}; 751}, {
759
760static struct phy_driver bcm5482_driver = {
761 .phy_id = PHY_ID_BCM5482, 752 .phy_id = PHY_ID_BCM5482,
762 .phy_id_mask = 0xfffffff0, 753 .phy_id_mask = 0xfffffff0,
763 .name = "Broadcom BCM5482", 754 .name = "Broadcom BCM5482",
@@ -770,9 +761,7 @@ static struct phy_driver bcm5482_driver = {
770 .ack_interrupt = bcm54xx_ack_interrupt, 761 .ack_interrupt = bcm54xx_ack_interrupt,
771 .config_intr = bcm54xx_config_intr, 762 .config_intr = bcm54xx_config_intr,
772 .driver = { .owner = THIS_MODULE }, 763 .driver = { .owner = THIS_MODULE },
773}; 764}, {
774
775static struct phy_driver bcm50610_driver = {
776 .phy_id = PHY_ID_BCM50610, 765 .phy_id = PHY_ID_BCM50610,
777 .phy_id_mask = 0xfffffff0, 766 .phy_id_mask = 0xfffffff0,
778 .name = "Broadcom BCM50610", 767 .name = "Broadcom BCM50610",
@@ -785,9 +774,7 @@ static struct phy_driver bcm50610_driver = {
785 .ack_interrupt = bcm54xx_ack_interrupt, 774 .ack_interrupt = bcm54xx_ack_interrupt,
786 .config_intr = bcm54xx_config_intr, 775 .config_intr = bcm54xx_config_intr,
787 .driver = { .owner = THIS_MODULE }, 776 .driver = { .owner = THIS_MODULE },
788}; 777}, {
789
790static struct phy_driver bcm50610m_driver = {
791 .phy_id = PHY_ID_BCM50610M, 778 .phy_id = PHY_ID_BCM50610M,
792 .phy_id_mask = 0xfffffff0, 779 .phy_id_mask = 0xfffffff0,
793 .name = "Broadcom BCM50610M", 780 .name = "Broadcom BCM50610M",
@@ -800,9 +787,7 @@ static struct phy_driver bcm50610m_driver = {
800 .ack_interrupt = bcm54xx_ack_interrupt, 787 .ack_interrupt = bcm54xx_ack_interrupt,
801 .config_intr = bcm54xx_config_intr, 788 .config_intr = bcm54xx_config_intr,
802 .driver = { .owner = THIS_MODULE }, 789 .driver = { .owner = THIS_MODULE },
803}; 790}, {
804
805static struct phy_driver bcm57780_driver = {
806 .phy_id = PHY_ID_BCM57780, 791 .phy_id = PHY_ID_BCM57780,
807 .phy_id_mask = 0xfffffff0, 792 .phy_id_mask = 0xfffffff0,
808 .name = "Broadcom BCM57780", 793 .name = "Broadcom BCM57780",
@@ -815,9 +800,7 @@ static struct phy_driver bcm57780_driver = {
815 .ack_interrupt = bcm54xx_ack_interrupt, 800 .ack_interrupt = bcm54xx_ack_interrupt,
816 .config_intr = bcm54xx_config_intr, 801 .config_intr = bcm54xx_config_intr,
817 .driver = { .owner = THIS_MODULE }, 802 .driver = { .owner = THIS_MODULE },
818}; 803}, {
819
820static struct phy_driver bcmac131_driver = {
821 .phy_id = PHY_ID_BCMAC131, 804 .phy_id = PHY_ID_BCMAC131,
822 .phy_id_mask = 0xfffffff0, 805 .phy_id_mask = 0xfffffff0,
823 .name = "Broadcom BCMAC131", 806 .name = "Broadcom BCMAC131",
@@ -830,9 +813,7 @@ static struct phy_driver bcmac131_driver = {
830 .ack_interrupt = brcm_fet_ack_interrupt, 813 .ack_interrupt = brcm_fet_ack_interrupt,
831 .config_intr = brcm_fet_config_intr, 814 .config_intr = brcm_fet_config_intr,
832 .driver = { .owner = THIS_MODULE }, 815 .driver = { .owner = THIS_MODULE },
833}; 816}, {
834
835static struct phy_driver bcm5241_driver = {
836 .phy_id = PHY_ID_BCM5241, 817 .phy_id = PHY_ID_BCM5241,
837 .phy_id_mask = 0xfffffff0, 818 .phy_id_mask = 0xfffffff0,
838 .name = "Broadcom BCM5241", 819 .name = "Broadcom BCM5241",
@@ -845,84 +826,18 @@ static struct phy_driver bcm5241_driver = {
845 .ack_interrupt = brcm_fet_ack_interrupt, 826 .ack_interrupt = brcm_fet_ack_interrupt,
846 .config_intr = brcm_fet_config_intr, 827 .config_intr = brcm_fet_config_intr,
847 .driver = { .owner = THIS_MODULE }, 828 .driver = { .owner = THIS_MODULE },
848}; 829} };
849 830
850static int __init broadcom_init(void) 831static int __init broadcom_init(void)
851{ 832{
852 int ret; 833 return phy_drivers_register(broadcom_drivers,
853 834 ARRAY_SIZE(broadcom_drivers));
854 ret = phy_driver_register(&bcm5411_driver);
855 if (ret)
856 goto out_5411;
857 ret = phy_driver_register(&bcm5421_driver);
858 if (ret)
859 goto out_5421;
860 ret = phy_driver_register(&bcm5461_driver);
861 if (ret)
862 goto out_5461;
863 ret = phy_driver_register(&bcm5464_driver);
864 if (ret)
865 goto out_5464;
866 ret = phy_driver_register(&bcm5481_driver);
867 if (ret)
868 goto out_5481;
869 ret = phy_driver_register(&bcm5482_driver);
870 if (ret)
871 goto out_5482;
872 ret = phy_driver_register(&bcm50610_driver);
873 if (ret)
874 goto out_50610;
875 ret = phy_driver_register(&bcm50610m_driver);
876 if (ret)
877 goto out_50610m;
878 ret = phy_driver_register(&bcm57780_driver);
879 if (ret)
880 goto out_57780;
881 ret = phy_driver_register(&bcmac131_driver);
882 if (ret)
883 goto out_ac131;
884 ret = phy_driver_register(&bcm5241_driver);
885 if (ret)
886 goto out_5241;
887 return ret;
888
889out_5241:
890 phy_driver_unregister(&bcmac131_driver);
891out_ac131:
892 phy_driver_unregister(&bcm57780_driver);
893out_57780:
894 phy_driver_unregister(&bcm50610m_driver);
895out_50610m:
896 phy_driver_unregister(&bcm50610_driver);
897out_50610:
898 phy_driver_unregister(&bcm5482_driver);
899out_5482:
900 phy_driver_unregister(&bcm5481_driver);
901out_5481:
902 phy_driver_unregister(&bcm5464_driver);
903out_5464:
904 phy_driver_unregister(&bcm5461_driver);
905out_5461:
906 phy_driver_unregister(&bcm5421_driver);
907out_5421:
908 phy_driver_unregister(&bcm5411_driver);
909out_5411:
910 return ret;
911} 835}
912 836
913static void __exit broadcom_exit(void) 837static void __exit broadcom_exit(void)
914{ 838{
915 phy_driver_unregister(&bcm5241_driver); 839 phy_drivers_unregister(broadcom_drivers,
916 phy_driver_unregister(&bcmac131_driver); 840 ARRAY_SIZE(broadcom_drivers));
917 phy_driver_unregister(&bcm57780_driver);
918 phy_driver_unregister(&bcm50610m_driver);
919 phy_driver_unregister(&bcm50610_driver);
920 phy_driver_unregister(&bcm5482_driver);
921 phy_driver_unregister(&bcm5481_driver);
922 phy_driver_unregister(&bcm5464_driver);
923 phy_driver_unregister(&bcm5461_driver);
924 phy_driver_unregister(&bcm5421_driver);
925 phy_driver_unregister(&bcm5411_driver);
926} 841}
927 842
928module_init(broadcom_init); 843module_init(broadcom_init);
diff --git a/drivers/net/phy/cicada.c b/drivers/net/phy/cicada.c
index d28173161c2..db472ffb6e8 100644
--- a/drivers/net/phy/cicada.c
+++ b/drivers/net/phy/cicada.c
@@ -102,7 +102,8 @@ static int cis820x_config_intr(struct phy_device *phydev)
102} 102}
103 103
104/* Cicada 8201, a.k.a Vitesse VSC8201 */ 104/* Cicada 8201, a.k.a Vitesse VSC8201 */
105static struct phy_driver cis8201_driver = { 105static struct phy_driver cis820x_driver[] = {
106{
106 .phy_id = 0x000fc410, 107 .phy_id = 0x000fc410,
107 .name = "Cicada Cis8201", 108 .name = "Cicada Cis8201",
108 .phy_id_mask = 0x000ffff0, 109 .phy_id_mask = 0x000ffff0,
@@ -113,11 +114,8 @@ static struct phy_driver cis8201_driver = {
113 .read_status = &genphy_read_status, 114 .read_status = &genphy_read_status,
114 .ack_interrupt = &cis820x_ack_interrupt, 115 .ack_interrupt = &cis820x_ack_interrupt,
115 .config_intr = &cis820x_config_intr, 116 .config_intr = &cis820x_config_intr,
116 .driver = { .owner = THIS_MODULE,}, 117 .driver = { .owner = THIS_MODULE,},
117}; 118}, {
118
119/* Cicada 8204 */
120static struct phy_driver cis8204_driver = {
121 .phy_id = 0x000fc440, 119 .phy_id = 0x000fc440,
122 .name = "Cicada Cis8204", 120 .name = "Cicada Cis8204",
123 .phy_id_mask = 0x000fffc0, 121 .phy_id_mask = 0x000fffc0,
@@ -128,32 +126,19 @@ static struct phy_driver cis8204_driver = {
128 .read_status = &genphy_read_status, 126 .read_status = &genphy_read_status,
129 .ack_interrupt = &cis820x_ack_interrupt, 127 .ack_interrupt = &cis820x_ack_interrupt,
130 .config_intr = &cis820x_config_intr, 128 .config_intr = &cis820x_config_intr,
131 .driver = { .owner = THIS_MODULE,}, 129 .driver = { .owner = THIS_MODULE,},
132}; 130} };
133 131
134static int __init cicada_init(void) 132static int __init cicada_init(void)
135{ 133{
136 int ret; 134 return phy_drivers_register(cis820x_driver,
137 135 ARRAY_SIZE(cis820x_driver));
138 ret = phy_driver_register(&cis8204_driver);
139 if (ret)
140 goto err1;
141
142 ret = phy_driver_register(&cis8201_driver);
143 if (ret)
144 goto err2;
145 return 0;
146
147err2:
148 phy_driver_unregister(&cis8204_driver);
149err1:
150 return ret;
151} 136}
152 137
153static void __exit cicada_exit(void) 138static void __exit cicada_exit(void)
154{ 139{
155 phy_driver_unregister(&cis8204_driver); 140 phy_drivers_unregister(cis820x_driver,
156 phy_driver_unregister(&cis8201_driver); 141 ARRAY_SIZE(cis820x_driver));
157} 142}
158 143
159module_init(cicada_init); 144module_init(cicada_init);
diff --git a/drivers/net/phy/davicom.c b/drivers/net/phy/davicom.c
index 5f59cc06477..81c7bc010dd 100644
--- a/drivers/net/phy/davicom.c
+++ b/drivers/net/phy/davicom.c
@@ -144,7 +144,8 @@ static int dm9161_ack_interrupt(struct phy_device *phydev)
144 return (err < 0) ? err : 0; 144 return (err < 0) ? err : 0;
145} 145}
146 146
147static struct phy_driver dm9161e_driver = { 147static struct phy_driver dm91xx_driver[] = {
148{
148 .phy_id = 0x0181b880, 149 .phy_id = 0x0181b880,
149 .name = "Davicom DM9161E", 150 .name = "Davicom DM9161E",
150 .phy_id_mask = 0x0ffffff0, 151 .phy_id_mask = 0x0ffffff0,
@@ -153,9 +154,7 @@ static struct phy_driver dm9161e_driver = {
153 .config_aneg = dm9161_config_aneg, 154 .config_aneg = dm9161_config_aneg,
154 .read_status = genphy_read_status, 155 .read_status = genphy_read_status,
155 .driver = { .owner = THIS_MODULE,}, 156 .driver = { .owner = THIS_MODULE,},
156}; 157}, {
157
158static struct phy_driver dm9161a_driver = {
159 .phy_id = 0x0181b8a0, 158 .phy_id = 0x0181b8a0,
160 .name = "Davicom DM9161A", 159 .name = "Davicom DM9161A",
161 .phy_id_mask = 0x0ffffff0, 160 .phy_id_mask = 0x0ffffff0,
@@ -164,9 +163,7 @@ static struct phy_driver dm9161a_driver = {
164 .config_aneg = dm9161_config_aneg, 163 .config_aneg = dm9161_config_aneg,
165 .read_status = genphy_read_status, 164 .read_status = genphy_read_status,
166 .driver = { .owner = THIS_MODULE,}, 165 .driver = { .owner = THIS_MODULE,},
167}; 166}, {
168
169static struct phy_driver dm9131_driver = {
170 .phy_id = 0x00181b80, 167 .phy_id = 0x00181b80,
171 .name = "Davicom DM9131", 168 .name = "Davicom DM9131",
172 .phy_id_mask = 0x0ffffff0, 169 .phy_id_mask = 0x0ffffff0,
@@ -177,38 +174,18 @@ static struct phy_driver dm9131_driver = {
177 .ack_interrupt = dm9161_ack_interrupt, 174 .ack_interrupt = dm9161_ack_interrupt,
178 .config_intr = dm9161_config_intr, 175 .config_intr = dm9161_config_intr,
179 .driver = { .owner = THIS_MODULE,}, 176 .driver = { .owner = THIS_MODULE,},
180}; 177} };
181 178
182static int __init davicom_init(void) 179static int __init davicom_init(void)
183{ 180{
184 int ret; 181 return phy_drivers_register(dm91xx_driver,
185 182 ARRAY_SIZE(dm91xx_driver));
186 ret = phy_driver_register(&dm9161e_driver);
187 if (ret)
188 goto err1;
189
190 ret = phy_driver_register(&dm9161a_driver);
191 if (ret)
192 goto err2;
193
194 ret = phy_driver_register(&dm9131_driver);
195 if (ret)
196 goto err3;
197 return 0;
198
199 err3:
200 phy_driver_unregister(&dm9161a_driver);
201 err2:
202 phy_driver_unregister(&dm9161e_driver);
203 err1:
204 return ret;
205} 183}
206 184
207static void __exit davicom_exit(void) 185static void __exit davicom_exit(void)
208{ 186{
209 phy_driver_unregister(&dm9161e_driver); 187 phy_drivers_unregister(dm91xx_driver,
210 phy_driver_unregister(&dm9161a_driver); 188 ARRAY_SIZE(dm91xx_driver));
211 phy_driver_unregister(&dm9131_driver);
212} 189}
213 190
214module_init(davicom_init); 191module_init(davicom_init);
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index 940b29022d0..b0da0226661 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -17,6 +17,9 @@
17 * along with this program; if not, write to the Free Software 17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 18 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 */ 19 */
20
21#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
22
20#include <linux/ethtool.h> 23#include <linux/ethtool.h>
21#include <linux/kernel.h> 24#include <linux/kernel.h>
22#include <linux/list.h> 25#include <linux/list.h>
@@ -453,16 +456,16 @@ static void enable_status_frames(struct phy_device *phydev, bool on)
453 ext_write(0, phydev, PAGE6, PSF_CFG1, ver); 456 ext_write(0, phydev, PAGE6, PSF_CFG1, ver);
454 457
455 if (!phydev->attached_dev) { 458 if (!phydev->attached_dev) {
456 pr_warning("dp83640: expected to find an attached netdevice\n"); 459 pr_warn("expected to find an attached netdevice\n");
457 return; 460 return;
458 } 461 }
459 462
460 if (on) { 463 if (on) {
461 if (dev_mc_add(phydev->attached_dev, status_frame_dst)) 464 if (dev_mc_add(phydev->attached_dev, status_frame_dst))
462 pr_warning("dp83640: failed to add mc address\n"); 465 pr_warn("failed to add mc address\n");
463 } else { 466 } else {
464 if (dev_mc_del(phydev->attached_dev, status_frame_dst)) 467 if (dev_mc_del(phydev->attached_dev, status_frame_dst))
465 pr_warning("dp83640: failed to delete mc address\n"); 468 pr_warn("failed to delete mc address\n");
466 } 469 }
467} 470}
468 471
@@ -582,9 +585,9 @@ static void recalibrate(struct dp83640_clock *clock)
582 * read out and correct offsets 585 * read out and correct offsets
583 */ 586 */
584 val = ext_read(master, PAGE4, PTP_STS); 587 val = ext_read(master, PAGE4, PTP_STS);
585 pr_info("master PTP_STS 0x%04hx", val); 588 pr_info("master PTP_STS 0x%04hx\n", val);
586 val = ext_read(master, PAGE4, PTP_ESTS); 589 val = ext_read(master, PAGE4, PTP_ESTS);
587 pr_info("master PTP_ESTS 0x%04hx", val); 590 pr_info("master PTP_ESTS 0x%04hx\n", val);
588 event_ts.ns_lo = ext_read(master, PAGE4, PTP_EDATA); 591 event_ts.ns_lo = ext_read(master, PAGE4, PTP_EDATA);
589 event_ts.ns_hi = ext_read(master, PAGE4, PTP_EDATA); 592 event_ts.ns_hi = ext_read(master, PAGE4, PTP_EDATA);
590 event_ts.sec_lo = ext_read(master, PAGE4, PTP_EDATA); 593 event_ts.sec_lo = ext_read(master, PAGE4, PTP_EDATA);
@@ -594,9 +597,9 @@ static void recalibrate(struct dp83640_clock *clock)
594 list_for_each(this, &clock->phylist) { 597 list_for_each(this, &clock->phylist) {
595 tmp = list_entry(this, struct dp83640_private, list); 598 tmp = list_entry(this, struct dp83640_private, list);
596 val = ext_read(tmp->phydev, PAGE4, PTP_STS); 599 val = ext_read(tmp->phydev, PAGE4, PTP_STS);
597 pr_info("slave PTP_STS 0x%04hx", val); 600 pr_info("slave PTP_STS 0x%04hx\n", val);
598 val = ext_read(tmp->phydev, PAGE4, PTP_ESTS); 601 val = ext_read(tmp->phydev, PAGE4, PTP_ESTS);
599 pr_info("slave PTP_ESTS 0x%04hx", val); 602 pr_info("slave PTP_ESTS 0x%04hx\n", val);
600 event_ts.ns_lo = ext_read(tmp->phydev, PAGE4, PTP_EDATA); 603 event_ts.ns_lo = ext_read(tmp->phydev, PAGE4, PTP_EDATA);
601 event_ts.ns_hi = ext_read(tmp->phydev, PAGE4, PTP_EDATA); 604 event_ts.ns_hi = ext_read(tmp->phydev, PAGE4, PTP_EDATA);
602 event_ts.sec_lo = ext_read(tmp->phydev, PAGE4, PTP_EDATA); 605 event_ts.sec_lo = ext_read(tmp->phydev, PAGE4, PTP_EDATA);
@@ -686,7 +689,7 @@ static void decode_rxts(struct dp83640_private *dp83640,
686 prune_rx_ts(dp83640); 689 prune_rx_ts(dp83640);
687 690
688 if (list_empty(&dp83640->rxpool)) { 691 if (list_empty(&dp83640->rxpool)) {
689 pr_debug("dp83640: rx timestamp pool is empty\n"); 692 pr_debug("rx timestamp pool is empty\n");
690 goto out; 693 goto out;
691 } 694 }
692 rxts = list_first_entry(&dp83640->rxpool, struct rxts, list); 695 rxts = list_first_entry(&dp83640->rxpool, struct rxts, list);
@@ -709,7 +712,7 @@ static void decode_txts(struct dp83640_private *dp83640,
709 skb = skb_dequeue(&dp83640->tx_queue); 712 skb = skb_dequeue(&dp83640->tx_queue);
710 713
711 if (!skb) { 714 if (!skb) {
712 pr_debug("dp83640: have timestamp but tx_queue empty\n"); 715 pr_debug("have timestamp but tx_queue empty\n");
713 return; 716 return;
714 } 717 }
715 ns = phy2txts(phy_txts); 718 ns = phy2txts(phy_txts);
@@ -847,7 +850,7 @@ static void dp83640_free_clocks(void)
847 list_for_each_safe(this, next, &phyter_clocks) { 850 list_for_each_safe(this, next, &phyter_clocks) {
848 clock = list_entry(this, struct dp83640_clock, list); 851 clock = list_entry(this, struct dp83640_clock, list);
849 if (!list_empty(&clock->phylist)) { 852 if (!list_empty(&clock->phylist)) {
850 pr_warning("phy list non-empty while unloading"); 853 pr_warn("phy list non-empty while unloading\n");
851 BUG(); 854 BUG();
852 } 855 }
853 list_del(&clock->list); 856 list_del(&clock->list);
diff --git a/drivers/net/phy/fixed.c b/drivers/net/phy/fixed.c
index 633680d0828..ba55adfc7aa 100644
--- a/drivers/net/phy/fixed.c
+++ b/drivers/net/phy/fixed.c
@@ -70,7 +70,7 @@ static int fixed_phy_update_regs(struct fixed_phy *fp)
70 lpa |= LPA_10FULL; 70 lpa |= LPA_10FULL;
71 break; 71 break;
72 default: 72 default:
73 printk(KERN_WARNING "fixed phy: unknown speed\n"); 73 pr_warn("fixed phy: unknown speed\n");
74 return -EINVAL; 74 return -EINVAL;
75 } 75 }
76 } else { 76 } else {
@@ -90,7 +90,7 @@ static int fixed_phy_update_regs(struct fixed_phy *fp)
90 lpa |= LPA_10HALF; 90 lpa |= LPA_10HALF;
91 break; 91 break;
92 default: 92 default:
93 printk(KERN_WARNING "fixed phy: unknown speed\n"); 93 pr_warn("fixed phy: unknown speed\n");
94 return -EINVAL; 94 return -EINVAL;
95 } 95 }
96 } 96 }
diff --git a/drivers/net/phy/icplus.c b/drivers/net/phy/icplus.c
index 47f8e893926..d5199cb4cae 100644
--- a/drivers/net/phy/icplus.c
+++ b/drivers/net/phy/icplus.c
@@ -202,7 +202,8 @@ static int ip101a_g_ack_interrupt(struct phy_device *phydev)
202 return 0; 202 return 0;
203} 203}
204 204
205static struct phy_driver ip175c_driver = { 205static struct phy_driver icplus_driver[] = {
206{
206 .phy_id = 0x02430d80, 207 .phy_id = 0x02430d80,
207 .name = "ICPlus IP175C", 208 .name = "ICPlus IP175C",
208 .phy_id_mask = 0x0ffffff0, 209 .phy_id_mask = 0x0ffffff0,
@@ -213,9 +214,7 @@ static struct phy_driver ip175c_driver = {
213 .suspend = genphy_suspend, 214 .suspend = genphy_suspend,
214 .resume = genphy_resume, 215 .resume = genphy_resume,
215 .driver = { .owner = THIS_MODULE,}, 216 .driver = { .owner = THIS_MODULE,},
216}; 217}, {
217
218static struct phy_driver ip1001_driver = {
219 .phy_id = 0x02430d90, 218 .phy_id = 0x02430d90,
220 .name = "ICPlus IP1001", 219 .name = "ICPlus IP1001",
221 .phy_id_mask = 0x0ffffff0, 220 .phy_id_mask = 0x0ffffff0,
@@ -227,9 +226,7 @@ static struct phy_driver ip1001_driver = {
227 .suspend = genphy_suspend, 226 .suspend = genphy_suspend,
228 .resume = genphy_resume, 227 .resume = genphy_resume,
229 .driver = { .owner = THIS_MODULE,}, 228 .driver = { .owner = THIS_MODULE,},
230}; 229}, {
231
232static struct phy_driver ip101a_g_driver = {
233 .phy_id = 0x02430c54, 230 .phy_id = 0x02430c54,
234 .name = "ICPlus IP101A/G", 231 .name = "ICPlus IP101A/G",
235 .phy_id_mask = 0x0ffffff0, 232 .phy_id_mask = 0x0ffffff0,
@@ -243,28 +240,18 @@ static struct phy_driver ip101a_g_driver = {
243 .suspend = genphy_suspend, 240 .suspend = genphy_suspend,
244 .resume = genphy_resume, 241 .resume = genphy_resume,
245 .driver = { .owner = THIS_MODULE,}, 242 .driver = { .owner = THIS_MODULE,},
246}; 243} };
247 244
248static int __init icplus_init(void) 245static int __init icplus_init(void)
249{ 246{
250 int ret = 0; 247 return phy_drivers_register(icplus_driver,
251 248 ARRAY_SIZE(icplus_driver));
252 ret = phy_driver_register(&ip1001_driver);
253 if (ret < 0)
254 return -ENODEV;
255
256 ret = phy_driver_register(&ip101a_g_driver);
257 if (ret < 0)
258 return -ENODEV;
259
260 return phy_driver_register(&ip175c_driver);
261} 249}
262 250
263static void __exit icplus_exit(void) 251static void __exit icplus_exit(void)
264{ 252{
265 phy_driver_unregister(&ip1001_driver); 253 phy_drivers_unregister(icplus_driver,
266 phy_driver_unregister(&ip101a_g_driver); 254 ARRAY_SIZE(icplus_driver));
267 phy_driver_unregister(&ip175c_driver);
268} 255}
269 256
270module_init(icplus_init); 257module_init(icplus_init);
diff --git a/drivers/net/phy/lxt.c b/drivers/net/phy/lxt.c
index 6f6e8b616a6..6d1e3fcc43e 100644
--- a/drivers/net/phy/lxt.c
+++ b/drivers/net/phy/lxt.c
@@ -149,7 +149,8 @@ static int lxt973_config_aneg(struct phy_device *phydev)
149 return phydev->priv ? 0 : genphy_config_aneg(phydev); 149 return phydev->priv ? 0 : genphy_config_aneg(phydev);
150} 150}
151 151
152static struct phy_driver lxt970_driver = { 152static struct phy_driver lxt97x_driver[] = {
153{
153 .phy_id = 0x78100000, 154 .phy_id = 0x78100000,
154 .name = "LXT970", 155 .name = "LXT970",
155 .phy_id_mask = 0xfffffff0, 156 .phy_id_mask = 0xfffffff0,
@@ -160,10 +161,8 @@ static struct phy_driver lxt970_driver = {
160 .read_status = genphy_read_status, 161 .read_status = genphy_read_status,
161 .ack_interrupt = lxt970_ack_interrupt, 162 .ack_interrupt = lxt970_ack_interrupt,
162 .config_intr = lxt970_config_intr, 163 .config_intr = lxt970_config_intr,
163 .driver = { .owner = THIS_MODULE,}, 164 .driver = { .owner = THIS_MODULE,},
164}; 165}, {
165
166static struct phy_driver lxt971_driver = {
167 .phy_id = 0x001378e0, 166 .phy_id = 0x001378e0,
168 .name = "LXT971", 167 .name = "LXT971",
169 .phy_id_mask = 0xfffffff0, 168 .phy_id_mask = 0xfffffff0,
@@ -173,10 +172,8 @@ static struct phy_driver lxt971_driver = {
173 .read_status = genphy_read_status, 172 .read_status = genphy_read_status,
174 .ack_interrupt = lxt971_ack_interrupt, 173 .ack_interrupt = lxt971_ack_interrupt,
175 .config_intr = lxt971_config_intr, 174 .config_intr = lxt971_config_intr,
176 .driver = { .owner = THIS_MODULE,}, 175 .driver = { .owner = THIS_MODULE,},
177}; 176}, {
178
179static struct phy_driver lxt973_driver = {
180 .phy_id = 0x00137a10, 177 .phy_id = 0x00137a10,
181 .name = "LXT973", 178 .name = "LXT973",
182 .phy_id_mask = 0xfffffff0, 179 .phy_id_mask = 0xfffffff0,
@@ -185,39 +182,19 @@ static struct phy_driver lxt973_driver = {
185 .probe = lxt973_probe, 182 .probe = lxt973_probe,
186 .config_aneg = lxt973_config_aneg, 183 .config_aneg = lxt973_config_aneg,
187 .read_status = genphy_read_status, 184 .read_status = genphy_read_status,
188 .driver = { .owner = THIS_MODULE,}, 185 .driver = { .owner = THIS_MODULE,},
189}; 186} };
190 187
191static int __init lxt_init(void) 188static int __init lxt_init(void)
192{ 189{
193 int ret; 190 return phy_drivers_register(lxt97x_driver,
194 191 ARRAY_SIZE(lxt97x_driver));
195 ret = phy_driver_register(&lxt970_driver);
196 if (ret)
197 goto err1;
198
199 ret = phy_driver_register(&lxt971_driver);
200 if (ret)
201 goto err2;
202
203 ret = phy_driver_register(&lxt973_driver);
204 if (ret)
205 goto err3;
206 return 0;
207
208 err3:
209 phy_driver_unregister(&lxt971_driver);
210 err2:
211 phy_driver_unregister(&lxt970_driver);
212 err1:
213 return ret;
214} 192}
215 193
216static void __exit lxt_exit(void) 194static void __exit lxt_exit(void)
217{ 195{
218 phy_driver_unregister(&lxt970_driver); 196 phy_drivers_unregister(lxt97x_driver,
219 phy_driver_unregister(&lxt971_driver); 197 ARRAY_SIZE(lxt97x_driver));
220 phy_driver_unregister(&lxt973_driver);
221} 198}
222 199
223module_init(lxt_init); 200module_init(lxt_init);
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 418928d644b..5d2a3f21588 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -826,28 +826,14 @@ static struct phy_driver marvell_drivers[] = {
826 826
827static int __init marvell_init(void) 827static int __init marvell_init(void)
828{ 828{
829 int ret; 829 return phy_drivers_register(marvell_drivers,
830 int i; 830 ARRAY_SIZE(marvell_drivers));
831
832 for (i = 0; i < ARRAY_SIZE(marvell_drivers); i++) {
833 ret = phy_driver_register(&marvell_drivers[i]);
834
835 if (ret) {
836 while (i-- > 0)
837 phy_driver_unregister(&marvell_drivers[i]);
838 return ret;
839 }
840 }
841
842 return 0;
843} 831}
844 832
845static void __exit marvell_exit(void) 833static void __exit marvell_exit(void)
846{ 834{
847 int i; 835 phy_drivers_unregister(marvell_drivers,
848 836 ARRAY_SIZE(marvell_drivers));
849 for (i = 0; i < ARRAY_SIZE(marvell_drivers); i++)
850 phy_driver_unregister(&marvell_drivers[i]);
851} 837}
852 838
853module_init(marvell_init); 839module_init(marvell_init);
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 5061608f408..170eb411ab5 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -13,6 +13,9 @@
13 * option) any later version. 13 * option) any later version.
14 * 14 *
15 */ 15 */
16
17#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18
16#include <linux/kernel.h> 19#include <linux/kernel.h>
17#include <linux/string.h> 20#include <linux/string.h>
18#include <linux/errno.h> 21#include <linux/errno.h>
@@ -22,6 +25,7 @@
22#include <linux/init.h> 25#include <linux/init.h>
23#include <linux/delay.h> 26#include <linux/delay.h>
24#include <linux/device.h> 27#include <linux/device.h>
28#include <linux/of_device.h>
25#include <linux/netdevice.h> 29#include <linux/netdevice.h>
26#include <linux/etherdevice.h> 30#include <linux/etherdevice.h>
27#include <linux/skbuff.h> 31#include <linux/skbuff.h>
@@ -148,7 +152,7 @@ int mdiobus_register(struct mii_bus *bus)
148 152
149 err = device_register(&bus->dev); 153 err = device_register(&bus->dev);
150 if (err) { 154 if (err) {
151 printk(KERN_ERR "mii_bus %s failed to register\n", bus->id); 155 pr_err("mii_bus %s failed to register\n", bus->id);
152 return -EINVAL; 156 return -EINVAL;
153 } 157 }
154 158
@@ -229,7 +233,7 @@ struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr)
229 struct phy_device *phydev; 233 struct phy_device *phydev;
230 int err; 234 int err;
231 235
232 phydev = get_phy_device(bus, addr); 236 phydev = get_phy_device(bus, addr, false);
233 if (IS_ERR(phydev) || phydev == NULL) 237 if (IS_ERR(phydev) || phydev == NULL)
234 return phydev; 238 return phydev;
235 239
@@ -305,6 +309,12 @@ static int mdio_bus_match(struct device *dev, struct device_driver *drv)
305 struct phy_device *phydev = to_phy_device(dev); 309 struct phy_device *phydev = to_phy_device(dev);
306 struct phy_driver *phydrv = to_phy_driver(drv); 310 struct phy_driver *phydrv = to_phy_driver(drv);
307 311
312 if (of_driver_match_device(dev, drv))
313 return 1;
314
315 if (phydrv->match_phy_device)
316 return phydrv->match_phy_device(phydev);
317
308 return ((phydrv->phy_id & phydrv->phy_id_mask) == 318 return ((phydrv->phy_id & phydrv->phy_id_mask) ==
309 (phydev->phy_id & phydrv->phy_id_mask)); 319 (phydev->phy_id & phydrv->phy_id_mask));
310} 320}
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 9d6c80c8a0c..cf287e0eb40 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -114,7 +114,8 @@ static int ks8051_config_init(struct phy_device *phydev)
114 return 0; 114 return 0;
115} 115}
116 116
117static struct phy_driver ks8737_driver = { 117static struct phy_driver ksphy_driver[] = {
118{
118 .phy_id = PHY_ID_KS8737, 119 .phy_id = PHY_ID_KS8737,
119 .phy_id_mask = 0x00fffff0, 120 .phy_id_mask = 0x00fffff0,
120 .name = "Micrel KS8737", 121 .name = "Micrel KS8737",
@@ -126,9 +127,7 @@ static struct phy_driver ks8737_driver = {
126 .ack_interrupt = kszphy_ack_interrupt, 127 .ack_interrupt = kszphy_ack_interrupt,
127 .config_intr = ks8737_config_intr, 128 .config_intr = ks8737_config_intr,
128 .driver = { .owner = THIS_MODULE,}, 129 .driver = { .owner = THIS_MODULE,},
129}; 130}, {
130
131static struct phy_driver ks8041_driver = {
132 .phy_id = PHY_ID_KS8041, 131 .phy_id = PHY_ID_KS8041,
133 .phy_id_mask = 0x00fffff0, 132 .phy_id_mask = 0x00fffff0,
134 .name = "Micrel KS8041", 133 .name = "Micrel KS8041",
@@ -141,9 +140,7 @@ static struct phy_driver ks8041_driver = {
141 .ack_interrupt = kszphy_ack_interrupt, 140 .ack_interrupt = kszphy_ack_interrupt,
142 .config_intr = kszphy_config_intr, 141 .config_intr = kszphy_config_intr,
143 .driver = { .owner = THIS_MODULE,}, 142 .driver = { .owner = THIS_MODULE,},
144}; 143}, {
145
146static struct phy_driver ks8051_driver = {
147 .phy_id = PHY_ID_KS8051, 144 .phy_id = PHY_ID_KS8051,
148 .phy_id_mask = 0x00fffff0, 145 .phy_id_mask = 0x00fffff0,
149 .name = "Micrel KS8051", 146 .name = "Micrel KS8051",
@@ -156,9 +153,7 @@ static struct phy_driver ks8051_driver = {
156 .ack_interrupt = kszphy_ack_interrupt, 153 .ack_interrupt = kszphy_ack_interrupt,
157 .config_intr = kszphy_config_intr, 154 .config_intr = kszphy_config_intr,
158 .driver = { .owner = THIS_MODULE,}, 155 .driver = { .owner = THIS_MODULE,},
159}; 156}, {
160
161static struct phy_driver ks8001_driver = {
162 .phy_id = PHY_ID_KS8001, 157 .phy_id = PHY_ID_KS8001,
163 .name = "Micrel KS8001 or KS8721", 158 .name = "Micrel KS8001 or KS8721",
164 .phy_id_mask = 0x00ffffff, 159 .phy_id_mask = 0x00ffffff,
@@ -170,9 +165,7 @@ static struct phy_driver ks8001_driver = {
170 .ack_interrupt = kszphy_ack_interrupt, 165 .ack_interrupt = kszphy_ack_interrupt,
171 .config_intr = kszphy_config_intr, 166 .config_intr = kszphy_config_intr,
172 .driver = { .owner = THIS_MODULE,}, 167 .driver = { .owner = THIS_MODULE,},
173}; 168}, {
174
175static struct phy_driver ksz9021_driver = {
176 .phy_id = PHY_ID_KSZ9021, 169 .phy_id = PHY_ID_KSZ9021,
177 .phy_id_mask = 0x000ffffe, 170 .phy_id_mask = 0x000ffffe,
178 .name = "Micrel KSZ9021 Gigabit PHY", 171 .name = "Micrel KSZ9021 Gigabit PHY",
@@ -185,51 +178,18 @@ static struct phy_driver ksz9021_driver = {
185 .ack_interrupt = kszphy_ack_interrupt, 178 .ack_interrupt = kszphy_ack_interrupt,
186 .config_intr = ksz9021_config_intr, 179 .config_intr = ksz9021_config_intr,
187 .driver = { .owner = THIS_MODULE, }, 180 .driver = { .owner = THIS_MODULE, },
188}; 181} };
189 182
190static int __init ksphy_init(void) 183static int __init ksphy_init(void)
191{ 184{
192 int ret; 185 return phy_drivers_register(ksphy_driver,
193 186 ARRAY_SIZE(ksphy_driver));
194 ret = phy_driver_register(&ks8001_driver);
195 if (ret)
196 goto err1;
197
198 ret = phy_driver_register(&ksz9021_driver);
199 if (ret)
200 goto err2;
201
202 ret = phy_driver_register(&ks8737_driver);
203 if (ret)
204 goto err3;
205 ret = phy_driver_register(&ks8041_driver);
206 if (ret)
207 goto err4;
208 ret = phy_driver_register(&ks8051_driver);
209 if (ret)
210 goto err5;
211
212 return 0;
213
214err5:
215 phy_driver_unregister(&ks8041_driver);
216err4:
217 phy_driver_unregister(&ks8737_driver);
218err3:
219 phy_driver_unregister(&ksz9021_driver);
220err2:
221 phy_driver_unregister(&ks8001_driver);
222err1:
223 return ret;
224} 187}
225 188
226static void __exit ksphy_exit(void) 189static void __exit ksphy_exit(void)
227{ 190{
228 phy_driver_unregister(&ks8001_driver); 191 phy_drivers_unregister(ksphy_driver,
229 phy_driver_unregister(&ks8737_driver); 192 ARRAY_SIZE(ksphy_driver));
230 phy_driver_unregister(&ksz9021_driver);
231 phy_driver_unregister(&ks8041_driver);
232 phy_driver_unregister(&ks8051_driver);
233} 193}
234 194
235module_init(ksphy_init); 195module_init(ksphy_init);
diff --git a/drivers/net/phy/national.c b/drivers/net/phy/national.c
index 04bb8fcc0cb..9a5f234d95b 100644
--- a/drivers/net/phy/national.c
+++ b/drivers/net/phy/national.c
@@ -15,6 +15,8 @@
15 * 15 *
16 */ 16 */
17 17
18#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
18#include <linux/kernel.h> 20#include <linux/kernel.h>
19#include <linux/module.h> 21#include <linux/module.h>
20#include <linux/mii.h> 22#include <linux/mii.h>
@@ -22,6 +24,8 @@
22#include <linux/phy.h> 24#include <linux/phy.h>
23#include <linux/netdevice.h> 25#include <linux/netdevice.h>
24 26
27#define DEBUG
28
25/* DP83865 phy identifier values */ 29/* DP83865 phy identifier values */
26#define DP83865_PHY_ID 0x20005c7a 30#define DP83865_PHY_ID 0x20005c7a
27 31
@@ -112,8 +116,8 @@ static void ns_10_base_t_hdx_loopack(struct phy_device *phydev, int disable)
112 ns_exp_write(phydev, 0x1c0, 116 ns_exp_write(phydev, 0x1c0,
113 ns_exp_read(phydev, 0x1c0) & 0xfffe); 117 ns_exp_read(phydev, 0x1c0) & 0xfffe);
114 118
115 printk(KERN_DEBUG "DP83865 PHY: 10BASE-T HDX loopback %s\n", 119 pr_debug("10BASE-T HDX loopback %s\n",
116 (ns_exp_read(phydev, 0x1c0) & 0x0001) ? "off" : "on"); 120 (ns_exp_read(phydev, 0x1c0) & 0x0001) ? "off" : "on");
117} 121}
118 122
119static int ns_config_init(struct phy_device *phydev) 123static int ns_config_init(struct phy_device *phydev)
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 3cbda0851f8..7ca2ff97c36 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -15,6 +15,9 @@
15 * option) any later version. 15 * option) any later version.
16 * 16 *
17 */ 17 */
18
19#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
20
18#include <linux/kernel.h> 21#include <linux/kernel.h>
19#include <linux/string.h> 22#include <linux/string.h>
20#include <linux/errno.h> 23#include <linux/errno.h>
@@ -32,6 +35,7 @@
32#include <linux/phy.h> 35#include <linux/phy.h>
33#include <linux/timer.h> 36#include <linux/timer.h>
34#include <linux/workqueue.h> 37#include <linux/workqueue.h>
38#include <linux/mdio.h>
35 39
36#include <linux/atomic.h> 40#include <linux/atomic.h>
37#include <asm/io.h> 41#include <asm/io.h>
@@ -44,18 +48,16 @@
44 */ 48 */
45void phy_print_status(struct phy_device *phydev) 49void phy_print_status(struct phy_device *phydev)
46{ 50{
47 pr_info("PHY: %s - Link is %s", dev_name(&phydev->dev),
48 phydev->link ? "Up" : "Down");
49 if (phydev->link) 51 if (phydev->link)
50 printk(KERN_CONT " - %d/%s", phydev->speed, 52 pr_info("%s - Link is Up - %d/%s\n",
51 DUPLEX_FULL == phydev->duplex ? 53 dev_name(&phydev->dev),
52 "Full" : "Half"); 54 phydev->speed,
53 55 DUPLEX_FULL == phydev->duplex ? "Full" : "Half");
54 printk(KERN_CONT "\n"); 56 else
57 pr_info("%s - Link is Down\n", dev_name(&phydev->dev));
55} 58}
56EXPORT_SYMBOL(phy_print_status); 59EXPORT_SYMBOL(phy_print_status);
57 60
58
59/** 61/**
60 * phy_clear_interrupt - Ack the phy device's interrupt 62 * phy_clear_interrupt - Ack the phy device's interrupt
61 * @phydev: the phy_device struct 63 * @phydev: the phy_device struct
@@ -482,9 +484,8 @@ static void phy_force_reduction(struct phy_device *phydev)
482 phydev->speed = settings[idx].speed; 484 phydev->speed = settings[idx].speed;
483 phydev->duplex = settings[idx].duplex; 485 phydev->duplex = settings[idx].duplex;
484 486
485 pr_info("Trying %d/%s\n", phydev->speed, 487 pr_info("Trying %d/%s\n",
486 DUPLEX_FULL == phydev->duplex ? 488 phydev->speed, DUPLEX_FULL == phydev->duplex ? "FULL" : "HALF");
487 "FULL" : "HALF");
488} 489}
489 490
490 491
@@ -598,9 +599,8 @@ int phy_start_interrupts(struct phy_device *phydev)
598 IRQF_SHARED, 599 IRQF_SHARED,
599 "phy_interrupt", 600 "phy_interrupt",
600 phydev) < 0) { 601 phydev) < 0) {
601 printk(KERN_WARNING "%s: Can't get IRQ %d (PHY)\n", 602 pr_warn("%s: Can't get IRQ %d (PHY)\n",
602 phydev->bus->name, 603 phydev->bus->name, phydev->irq);
603 phydev->irq);
604 phydev->irq = PHY_POLL; 604 phydev->irq = PHY_POLL;
605 return 0; 605 return 0;
606 } 606 }
@@ -838,10 +838,10 @@ void phy_state_machine(struct work_struct *work)
838 838
839 phydev->autoneg = AUTONEG_DISABLE; 839 phydev->autoneg = AUTONEG_DISABLE;
840 840
841 pr_info("Trying %d/%s\n", phydev->speed, 841 pr_info("Trying %d/%s\n",
842 DUPLEX_FULL == 842 phydev->speed,
843 phydev->duplex ? 843 DUPLEX_FULL == phydev->duplex ?
844 "FULL" : "HALF"); 844 "FULL" : "HALF");
845 } 845 }
846 break; 846 break;
847 case PHY_NOLINK: 847 case PHY_NOLINK:
@@ -968,3 +968,283 @@ void phy_state_machine(struct work_struct *work)
968 968
969 schedule_delayed_work(&phydev->state_queue, PHY_STATE_TIME * HZ); 969 schedule_delayed_work(&phydev->state_queue, PHY_STATE_TIME * HZ);
970} 970}
971
972static inline void mmd_phy_indirect(struct mii_bus *bus, int prtad, int devad,
973 int addr)
974{
975 /* Write the desired MMD Devad */
976 bus->write(bus, addr, MII_MMD_CTRL, devad);
977
978 /* Write the desired MMD register address */
979 bus->write(bus, addr, MII_MMD_DATA, prtad);
980
981 /* Select the Function : DATA with no post increment */
982 bus->write(bus, addr, MII_MMD_CTRL, (devad | MII_MMD_CTRL_NOINCR));
983}
984
985/**
986 * phy_read_mmd_indirect - reads data from the MMD registers
987 * @bus: the target MII bus
988 * @prtad: MMD Address
989 * @devad: MMD DEVAD
990 * @addr: PHY address on the MII bus
991 *
992 * Description: it reads data from the MMD registers (clause 22 to access to
993 * clause 45) of the specified phy address.
994 * To read these register we have:
995 * 1) Write reg 13 // DEVAD
996 * 2) Write reg 14 // MMD Address
997 * 3) Write reg 13 // MMD Data Command for MMD DEVAD
998 * 3) Read reg 14 // Read MMD data
999 */
1000static int phy_read_mmd_indirect(struct mii_bus *bus, int prtad, int devad,
1001 int addr)
1002{
1003 u32 ret;
1004
1005 mmd_phy_indirect(bus, prtad, devad, addr);
1006
1007 /* Read the content of the MMD's selected register */
1008 ret = bus->read(bus, addr, MII_MMD_DATA);
1009
1010 return ret;
1011}
1012
1013/**
1014 * phy_write_mmd_indirect - writes data to the MMD registers
1015 * @bus: the target MII bus
1016 * @prtad: MMD Address
1017 * @devad: MMD DEVAD
1018 * @addr: PHY address on the MII bus
1019 * @data: data to write in the MMD register
1020 *
1021 * Description: Write data from the MMD registers of the specified
1022 * phy address.
1023 * To write these register we have:
1024 * 1) Write reg 13 // DEVAD
1025 * 2) Write reg 14 // MMD Address
1026 * 3) Write reg 13 // MMD Data Command for MMD DEVAD
1027 * 3) Write reg 14 // Write MMD data
1028 */
1029static void phy_write_mmd_indirect(struct mii_bus *bus, int prtad, int devad,
1030 int addr, u32 data)
1031{
1032 mmd_phy_indirect(bus, prtad, devad, addr);
1033
1034 /* Write the data into MMD's selected register */
1035 bus->write(bus, addr, MII_MMD_DATA, data);
1036}
1037
1038static u32 phy_eee_to_adv(u16 eee_adv)
1039{
1040 u32 adv = 0;
1041
1042 if (eee_adv & MDIO_EEE_100TX)
1043 adv |= ADVERTISED_100baseT_Full;
1044 if (eee_adv & MDIO_EEE_1000T)
1045 adv |= ADVERTISED_1000baseT_Full;
1046 if (eee_adv & MDIO_EEE_10GT)
1047 adv |= ADVERTISED_10000baseT_Full;
1048 if (eee_adv & MDIO_EEE_1000KX)
1049 adv |= ADVERTISED_1000baseKX_Full;
1050 if (eee_adv & MDIO_EEE_10GKX4)
1051 adv |= ADVERTISED_10000baseKX4_Full;
1052 if (eee_adv & MDIO_EEE_10GKR)
1053 adv |= ADVERTISED_10000baseKR_Full;
1054
1055 return adv;
1056}
1057
1058static u32 phy_eee_to_supported(u16 eee_caported)
1059{
1060 u32 supported = 0;
1061
1062 if (eee_caported & MDIO_EEE_100TX)
1063 supported |= SUPPORTED_100baseT_Full;
1064 if (eee_caported & MDIO_EEE_1000T)
1065 supported |= SUPPORTED_1000baseT_Full;
1066 if (eee_caported & MDIO_EEE_10GT)
1067 supported |= SUPPORTED_10000baseT_Full;
1068 if (eee_caported & MDIO_EEE_1000KX)
1069 supported |= SUPPORTED_1000baseKX_Full;
1070 if (eee_caported & MDIO_EEE_10GKX4)
1071 supported |= SUPPORTED_10000baseKX4_Full;
1072 if (eee_caported & MDIO_EEE_10GKR)
1073 supported |= SUPPORTED_10000baseKR_Full;
1074
1075 return supported;
1076}
1077
1078static u16 phy_adv_to_eee(u32 adv)
1079{
1080 u16 reg = 0;
1081
1082 if (adv & ADVERTISED_100baseT_Full)
1083 reg |= MDIO_EEE_100TX;
1084 if (adv & ADVERTISED_1000baseT_Full)
1085 reg |= MDIO_EEE_1000T;
1086 if (adv & ADVERTISED_10000baseT_Full)
1087 reg |= MDIO_EEE_10GT;
1088 if (adv & ADVERTISED_1000baseKX_Full)
1089 reg |= MDIO_EEE_1000KX;
1090 if (adv & ADVERTISED_10000baseKX4_Full)
1091 reg |= MDIO_EEE_10GKX4;
1092 if (adv & ADVERTISED_10000baseKR_Full)
1093 reg |= MDIO_EEE_10GKR;
1094
1095 return reg;
1096}
1097
1098/**
1099 * phy_init_eee - init and check the EEE feature
1100 * @phydev: target phy_device struct
1101 * @clk_stop_enable: PHY may stop the clock during LPI
1102 *
1103 * Description: it checks if the Energy-Efficient Ethernet (EEE)
1104 * is supported by looking at the MMD registers 3.20 and 7.60/61
1105 * and it programs the MMD register 3.0 setting the "Clock stop enable"
1106 * bit if required.
1107 */
1108int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
1109{
1110 int ret = -EPROTONOSUPPORT;
1111
1112 /* According to 802.3az,the EEE is supported only in full duplex-mode.
1113 * Also EEE feature is active when core is operating with MII, GMII
1114 * or RGMII.
1115 */
1116 if ((phydev->duplex == DUPLEX_FULL) &&
1117 ((phydev->interface == PHY_INTERFACE_MODE_MII) ||
1118 (phydev->interface == PHY_INTERFACE_MODE_GMII) ||
1119 (phydev->interface == PHY_INTERFACE_MODE_RGMII))) {
1120 int eee_lp, eee_cap, eee_adv;
1121 u32 lp, cap, adv;
1122 int idx, status;
1123
1124 /* Read phy status to properly get the right settings */
1125 status = phy_read_status(phydev);
1126 if (status)
1127 return status;
1128
1129 /* First check if the EEE ability is supported */
1130 eee_cap = phy_read_mmd_indirect(phydev->bus, MDIO_PCS_EEE_ABLE,
1131 MDIO_MMD_PCS, phydev->addr);
1132 if (eee_cap < 0)
1133 return eee_cap;
1134
1135 cap = phy_eee_to_supported(eee_cap);
1136 if (!cap)
1137 goto eee_exit;
1138
1139 /* Check which link settings negotiated and verify it in
1140 * the EEE advertising registers.
1141 */
1142 eee_lp = phy_read_mmd_indirect(phydev->bus, MDIO_AN_EEE_LPABLE,
1143 MDIO_MMD_AN, phydev->addr);
1144 if (eee_lp < 0)
1145 return eee_lp;
1146
1147 eee_adv = phy_read_mmd_indirect(phydev->bus, MDIO_AN_EEE_ADV,
1148 MDIO_MMD_AN, phydev->addr);
1149 if (eee_adv < 0)
1150 return eee_adv;
1151
1152 adv = phy_eee_to_adv(eee_adv);
1153 lp = phy_eee_to_adv(eee_lp);
1154 idx = phy_find_setting(phydev->speed, phydev->duplex);
1155 if ((lp & adv & settings[idx].setting))
1156 goto eee_exit;
1157
1158 if (clk_stop_enable) {
1159 /* Configure the PHY to stop receiving xMII
1160 * clock while it is signaling LPI.
1161 */
1162 int val = phy_read_mmd_indirect(phydev->bus, MDIO_CTRL1,
1163 MDIO_MMD_PCS,
1164 phydev->addr);
1165 if (val < 0)
1166 return val;
1167
1168 val |= MDIO_PCS_CTRL1_CLKSTOP_EN;
1169 phy_write_mmd_indirect(phydev->bus, MDIO_CTRL1,
1170 MDIO_MMD_PCS, phydev->addr, val);
1171 }
1172
1173 ret = 0; /* EEE supported */
1174 }
1175
1176eee_exit:
1177 return ret;
1178}
1179EXPORT_SYMBOL(phy_init_eee);
1180
1181/**
1182 * phy_get_eee_err - report the EEE wake error count
1183 * @phydev: target phy_device struct
1184 *
1185 * Description: it is to report the number of time where the PHY
1186 * failed to complete its normal wake sequence.
1187 */
1188int phy_get_eee_err(struct phy_device *phydev)
1189{
1190 return phy_read_mmd_indirect(phydev->bus, MDIO_PCS_EEE_WK_ERR,
1191 MDIO_MMD_PCS, phydev->addr);
1192
1193}
1194EXPORT_SYMBOL(phy_get_eee_err);
1195
1196/**
1197 * phy_ethtool_get_eee - get EEE supported and status
1198 * @phydev: target phy_device struct
1199 * @data: ethtool_eee data
1200 *
1201 * Description: it reportes the Supported/Advertisement/LP Advertisement
1202 * capabilities.
1203 */
1204int phy_ethtool_get_eee(struct phy_device *phydev, struct ethtool_eee *data)
1205{
1206 int val;
1207
1208 /* Get Supported EEE */
1209 val = phy_read_mmd_indirect(phydev->bus, MDIO_PCS_EEE_ABLE,
1210 MDIO_MMD_PCS, phydev->addr);
1211 if (val < 0)
1212 return val;
1213 data->supported = phy_eee_to_supported(val);
1214
1215 /* Get advertisement EEE */
1216 val = phy_read_mmd_indirect(phydev->bus, MDIO_AN_EEE_ADV,
1217 MDIO_MMD_AN, phydev->addr);
1218 if (val < 0)
1219 return val;
1220 data->advertised = phy_eee_to_adv(val);
1221
1222 /* Get LP advertisement EEE */
1223 val = phy_read_mmd_indirect(phydev->bus, MDIO_AN_EEE_LPABLE,
1224 MDIO_MMD_AN, phydev->addr);
1225 if (val < 0)
1226 return val;
1227 data->lp_advertised = phy_eee_to_adv(val);
1228
1229 return 0;
1230}
1231EXPORT_SYMBOL(phy_ethtool_get_eee);
1232
1233/**
1234 * phy_ethtool_set_eee - set EEE supported and status
1235 * @phydev: target phy_device struct
1236 * @data: ethtool_eee data
1237 *
1238 * Description: it is to program the Advertisement EEE register.
1239 */
1240int phy_ethtool_set_eee(struct phy_device *phydev, struct ethtool_eee *data)
1241{
1242 int val;
1243
1244 val = phy_adv_to_eee(data->advertised);
1245 phy_write_mmd_indirect(phydev->bus, MDIO_AN_EEE_ADV, MDIO_MMD_AN,
1246 phydev->addr, val);
1247
1248 return 0;
1249}
1250EXPORT_SYMBOL(phy_ethtool_set_eee);
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index de86a558222..8af46e88a18 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -14,6 +14,9 @@
14 * option) any later version. 14 * option) any later version.
15 * 15 *
16 */ 16 */
17
18#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
17#include <linux/kernel.h> 20#include <linux/kernel.h>
18#include <linux/string.h> 21#include <linux/string.h>
19#include <linux/errno.h> 22#include <linux/errno.h>
@@ -149,8 +152,8 @@ int phy_scan_fixups(struct phy_device *phydev)
149} 152}
150EXPORT_SYMBOL(phy_scan_fixups); 153EXPORT_SYMBOL(phy_scan_fixups);
151 154
152static struct phy_device* phy_device_create(struct mii_bus *bus, 155struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id,
153 int addr, int phy_id) 156 bool is_c45, struct phy_c45_device_ids *c45_ids)
154{ 157{
155 struct phy_device *dev; 158 struct phy_device *dev;
156 159
@@ -171,8 +174,11 @@ static struct phy_device* phy_device_create(struct mii_bus *bus,
171 174
172 dev->autoneg = AUTONEG_ENABLE; 175 dev->autoneg = AUTONEG_ENABLE;
173 176
177 dev->is_c45 = is_c45;
174 dev->addr = addr; 178 dev->addr = addr;
175 dev->phy_id = phy_id; 179 dev->phy_id = phy_id;
180 if (c45_ids)
181 dev->c45_ids = *c45_ids;
176 dev->bus = bus; 182 dev->bus = bus;
177 dev->dev.parent = bus->parent; 183 dev->dev.parent = bus->parent;
178 dev->dev.bus = &mdio_bus_type; 184 dev->dev.bus = &mdio_bus_type;
@@ -197,20 +203,99 @@ static struct phy_device* phy_device_create(struct mii_bus *bus,
197 203
198 return dev; 204 return dev;
199} 205}
206EXPORT_SYMBOL(phy_device_create);
207
208/**
209 * get_phy_c45_ids - reads the specified addr for its 802.3-c45 IDs.
210 * @bus: the target MII bus
211 * @addr: PHY address on the MII bus
212 * @phy_id: where to store the ID retrieved.
213 * @c45_ids: where to store the c45 ID information.
214 *
215 * If the PHY devices-in-package appears to be valid, it and the
216 * corresponding identifiers are stored in @c45_ids, zero is stored
217 * in @phy_id. Otherwise 0xffffffff is stored in @phy_id. Returns
218 * zero on success.
219 *
220 */
221static int get_phy_c45_ids(struct mii_bus *bus, int addr, u32 *phy_id,
222 struct phy_c45_device_ids *c45_ids) {
223 int phy_reg;
224 int i, reg_addr;
225 const int num_ids = ARRAY_SIZE(c45_ids->device_ids);
226
227 /* Find first non-zero Devices In package. Device
228 * zero is reserved, so don't probe it.
229 */
230 for (i = 1;
231 i < num_ids && c45_ids->devices_in_package == 0;
232 i++) {
233 reg_addr = MII_ADDR_C45 | i << 16 | 6;
234 phy_reg = mdiobus_read(bus, addr, reg_addr);
235 if (phy_reg < 0)
236 return -EIO;
237 c45_ids->devices_in_package = (phy_reg & 0xffff) << 16;
238
239 reg_addr = MII_ADDR_C45 | i << 16 | 5;
240 phy_reg = mdiobus_read(bus, addr, reg_addr);
241 if (phy_reg < 0)
242 return -EIO;
243 c45_ids->devices_in_package |= (phy_reg & 0xffff);
244
245 /* If mostly Fs, there is no device there,
246 * let's get out of here.
247 */
248 if ((c45_ids->devices_in_package & 0x1fffffff) == 0x1fffffff) {
249 *phy_id = 0xffffffff;
250 return 0;
251 }
252 }
253
254 /* Now probe Device Identifiers for each device present. */
255 for (i = 1; i < num_ids; i++) {
256 if (!(c45_ids->devices_in_package & (1 << i)))
257 continue;
258
259 reg_addr = MII_ADDR_C45 | i << 16 | MII_PHYSID1;
260 phy_reg = mdiobus_read(bus, addr, reg_addr);
261 if (phy_reg < 0)
262 return -EIO;
263 c45_ids->device_ids[i] = (phy_reg & 0xffff) << 16;
264
265 reg_addr = MII_ADDR_C45 | i << 16 | MII_PHYSID2;
266 phy_reg = mdiobus_read(bus, addr, reg_addr);
267 if (phy_reg < 0)
268 return -EIO;
269 c45_ids->device_ids[i] |= (phy_reg & 0xffff);
270 }
271 *phy_id = 0;
272 return 0;
273}
200 274
201/** 275/**
202 * get_phy_id - reads the specified addr for its ID. 276 * get_phy_id - reads the specified addr for its ID.
203 * @bus: the target MII bus 277 * @bus: the target MII bus
204 * @addr: PHY address on the MII bus 278 * @addr: PHY address on the MII bus
205 * @phy_id: where to store the ID retrieved. 279 * @phy_id: where to store the ID retrieved.
280 * @is_c45: If true the PHY uses the 802.3 clause 45 protocol
281 * @c45_ids: where to store the c45 ID information.
282 *
283 * Description: In the case of a 802.3-c22 PHY, reads the ID registers
284 * of the PHY at @addr on the @bus, stores it in @phy_id and returns
285 * zero on success.
286 *
287 * In the case of a 802.3-c45 PHY, get_phy_c45_ids() is invoked, and
288 * its return value is in turn returned.
206 * 289 *
207 * Description: Reads the ID registers of the PHY at @addr on the
208 * @bus, stores it in @phy_id and returns zero on success.
209 */ 290 */
210static int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id) 291static int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id,
292 bool is_c45, struct phy_c45_device_ids *c45_ids)
211{ 293{
212 int phy_reg; 294 int phy_reg;
213 295
296 if (is_c45)
297 return get_phy_c45_ids(bus, addr, phy_id, c45_ids);
298
214 /* Grab the bits from PHYIR1, and put them 299 /* Grab the bits from PHYIR1, and put them
215 * in the upper half */ 300 * in the upper half */
216 phy_reg = mdiobus_read(bus, addr, MII_PHYSID1); 301 phy_reg = mdiobus_read(bus, addr, MII_PHYSID1);
@@ -235,17 +320,19 @@ static int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id)
235 * get_phy_device - reads the specified PHY device and returns its @phy_device struct 320 * get_phy_device - reads the specified PHY device and returns its @phy_device struct
236 * @bus: the target MII bus 321 * @bus: the target MII bus
237 * @addr: PHY address on the MII bus 322 * @addr: PHY address on the MII bus
323 * @is_c45: If true the PHY uses the 802.3 clause 45 protocol
238 * 324 *
239 * Description: Reads the ID registers of the PHY at @addr on the 325 * Description: Reads the ID registers of the PHY at @addr on the
240 * @bus, then allocates and returns the phy_device to represent it. 326 * @bus, then allocates and returns the phy_device to represent it.
241 */ 327 */
242struct phy_device * get_phy_device(struct mii_bus *bus, int addr) 328struct phy_device *get_phy_device(struct mii_bus *bus, int addr, bool is_c45)
243{ 329{
330 struct phy_c45_device_ids c45_ids = {0};
244 struct phy_device *dev = NULL; 331 struct phy_device *dev = NULL;
245 u32 phy_id; 332 u32 phy_id = 0;
246 int r; 333 int r;
247 334
248 r = get_phy_id(bus, addr, &phy_id); 335 r = get_phy_id(bus, addr, &phy_id, is_c45, &c45_ids);
249 if (r) 336 if (r)
250 return ERR_PTR(r); 337 return ERR_PTR(r);
251 338
@@ -253,7 +340,7 @@ struct phy_device * get_phy_device(struct mii_bus *bus, int addr)
253 if ((phy_id & 0x1fffffff) == 0x1fffffff) 340 if ((phy_id & 0x1fffffff) == 0x1fffffff)
254 return NULL; 341 return NULL;
255 342
256 dev = phy_device_create(bus, addr, phy_id); 343 dev = phy_device_create(bus, addr, phy_id, is_c45, &c45_ids);
257 344
258 return dev; 345 return dev;
259} 346}
@@ -446,6 +533,11 @@ static int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
446 /* Assume that if there is no driver, that it doesn't 533 /* Assume that if there is no driver, that it doesn't
447 * exist, and we should use the genphy driver. */ 534 * exist, and we should use the genphy driver. */
448 if (NULL == d->driver) { 535 if (NULL == d->driver) {
536 if (phydev->is_c45) {
537 pr_err("No driver for phy %x\n", phydev->phy_id);
538 return -ENODEV;
539 }
540
449 d->driver = &genphy_driver.driver; 541 d->driver = &genphy_driver.driver;
450 542
451 err = d->driver->probe(d); 543 err = d->driver->probe(d);
@@ -975,8 +1067,8 @@ int phy_driver_register(struct phy_driver *new_driver)
975 retval = driver_register(&new_driver->driver); 1067 retval = driver_register(&new_driver->driver);
976 1068
977 if (retval) { 1069 if (retval) {
978 printk(KERN_ERR "%s: Error %d in registering driver\n", 1070 pr_err("%s: Error %d in registering driver\n",
979 new_driver->name, retval); 1071 new_driver->name, retval);
980 1072
981 return retval; 1073 return retval;
982 } 1074 }
@@ -987,12 +1079,37 @@ int phy_driver_register(struct phy_driver *new_driver)
987} 1079}
988EXPORT_SYMBOL(phy_driver_register); 1080EXPORT_SYMBOL(phy_driver_register);
989 1081
1082int phy_drivers_register(struct phy_driver *new_driver, int n)
1083{
1084 int i, ret = 0;
1085
1086 for (i = 0; i < n; i++) {
1087 ret = phy_driver_register(new_driver + i);
1088 if (ret) {
1089 while (i-- > 0)
1090 phy_driver_unregister(new_driver + i);
1091 break;
1092 }
1093 }
1094 return ret;
1095}
1096EXPORT_SYMBOL(phy_drivers_register);
1097
990void phy_driver_unregister(struct phy_driver *drv) 1098void phy_driver_unregister(struct phy_driver *drv)
991{ 1099{
992 driver_unregister(&drv->driver); 1100 driver_unregister(&drv->driver);
993} 1101}
994EXPORT_SYMBOL(phy_driver_unregister); 1102EXPORT_SYMBOL(phy_driver_unregister);
995 1103
1104void phy_drivers_unregister(struct phy_driver *drv, int n)
1105{
1106 int i;
1107 for (i = 0; i < n; i++) {
1108 phy_driver_unregister(drv + i);
1109 }
1110}
1111EXPORT_SYMBOL(phy_drivers_unregister);
1112
996static struct phy_driver genphy_driver = { 1113static struct phy_driver genphy_driver = {
997 .phy_id = 0xffffffff, 1114 .phy_id = 0xffffffff,
998 .phy_id_mask = 0xffffffff, 1115 .phy_id_mask = 0xffffffff,
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
index f414ffb5b72..72f93470ea3 100644
--- a/drivers/net/phy/realtek.c
+++ b/drivers/net/phy/realtek.c
@@ -65,11 +65,7 @@ static struct phy_driver rtl821x_driver = {
65 65
66static int __init realtek_init(void) 66static int __init realtek_init(void)
67{ 67{
68 int ret; 68 return phy_driver_register(&rtl821x_driver);
69
70 ret = phy_driver_register(&rtl821x_driver);
71
72 return ret;
73} 69}
74 70
75static void __exit realtek_exit(void) 71static void __exit realtek_exit(void)
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
index fc3e7e96c88..c6b06d311fe 100644
--- a/drivers/net/phy/smsc.c
+++ b/drivers/net/phy/smsc.c
@@ -61,7 +61,8 @@ static int lan911x_config_init(struct phy_device *phydev)
61 return smsc_phy_ack_interrupt(phydev); 61 return smsc_phy_ack_interrupt(phydev);
62} 62}
63 63
64static struct phy_driver lan83c185_driver = { 64static struct phy_driver smsc_phy_driver[] = {
65{
65 .phy_id = 0x0007c0a0, /* OUI=0x00800f, Model#=0x0a */ 66 .phy_id = 0x0007c0a0, /* OUI=0x00800f, Model#=0x0a */
66 .phy_id_mask = 0xfffffff0, 67 .phy_id_mask = 0xfffffff0,
67 .name = "SMSC LAN83C185", 68 .name = "SMSC LAN83C185",
@@ -83,9 +84,7 @@ static struct phy_driver lan83c185_driver = {
83 .resume = genphy_resume, 84 .resume = genphy_resume,
84 85
85 .driver = { .owner = THIS_MODULE, } 86 .driver = { .owner = THIS_MODULE, }
86}; 87}, {
87
88static struct phy_driver lan8187_driver = {
89 .phy_id = 0x0007c0b0, /* OUI=0x00800f, Model#=0x0b */ 88 .phy_id = 0x0007c0b0, /* OUI=0x00800f, Model#=0x0b */
90 .phy_id_mask = 0xfffffff0, 89 .phy_id_mask = 0xfffffff0,
91 .name = "SMSC LAN8187", 90 .name = "SMSC LAN8187",
@@ -107,9 +106,7 @@ static struct phy_driver lan8187_driver = {
107 .resume = genphy_resume, 106 .resume = genphy_resume,
108 107
109 .driver = { .owner = THIS_MODULE, } 108 .driver = { .owner = THIS_MODULE, }
110}; 109}, {
111
112static struct phy_driver lan8700_driver = {
113 .phy_id = 0x0007c0c0, /* OUI=0x00800f, Model#=0x0c */ 110 .phy_id = 0x0007c0c0, /* OUI=0x00800f, Model#=0x0c */
114 .phy_id_mask = 0xfffffff0, 111 .phy_id_mask = 0xfffffff0,
115 .name = "SMSC LAN8700", 112 .name = "SMSC LAN8700",
@@ -131,9 +128,7 @@ static struct phy_driver lan8700_driver = {
131 .resume = genphy_resume, 128 .resume = genphy_resume,
132 129
133 .driver = { .owner = THIS_MODULE, } 130 .driver = { .owner = THIS_MODULE, }
134}; 131}, {
135
136static struct phy_driver lan911x_int_driver = {
137 .phy_id = 0x0007c0d0, /* OUI=0x00800f, Model#=0x0d */ 132 .phy_id = 0x0007c0d0, /* OUI=0x00800f, Model#=0x0d */
138 .phy_id_mask = 0xfffffff0, 133 .phy_id_mask = 0xfffffff0,
139 .name = "SMSC LAN911x Internal PHY", 134 .name = "SMSC LAN911x Internal PHY",
@@ -155,9 +150,7 @@ static struct phy_driver lan911x_int_driver = {
155 .resume = genphy_resume, 150 .resume = genphy_resume,
156 151
157 .driver = { .owner = THIS_MODULE, } 152 .driver = { .owner = THIS_MODULE, }
158}; 153}, {
159
160static struct phy_driver lan8710_driver = {
161 .phy_id = 0x0007c0f0, /* OUI=0x00800f, Model#=0x0f */ 154 .phy_id = 0x0007c0f0, /* OUI=0x00800f, Model#=0x0f */
162 .phy_id_mask = 0xfffffff0, 155 .phy_id_mask = 0xfffffff0,
163 .name = "SMSC LAN8710/LAN8720", 156 .name = "SMSC LAN8710/LAN8720",
@@ -179,53 +172,18 @@ static struct phy_driver lan8710_driver = {
179 .resume = genphy_resume, 172 .resume = genphy_resume,
180 173
181 .driver = { .owner = THIS_MODULE, } 174 .driver = { .owner = THIS_MODULE, }
182}; 175} };
183 176
184static int __init smsc_init(void) 177static int __init smsc_init(void)
185{ 178{
186 int ret; 179 return phy_drivers_register(smsc_phy_driver,
187 180 ARRAY_SIZE(smsc_phy_driver));
188 ret = phy_driver_register (&lan83c185_driver);
189 if (ret)
190 goto err1;
191
192 ret = phy_driver_register (&lan8187_driver);
193 if (ret)
194 goto err2;
195
196 ret = phy_driver_register (&lan8700_driver);
197 if (ret)
198 goto err3;
199
200 ret = phy_driver_register (&lan911x_int_driver);
201 if (ret)
202 goto err4;
203
204 ret = phy_driver_register (&lan8710_driver);
205 if (ret)
206 goto err5;
207
208 return 0;
209
210err5:
211 phy_driver_unregister (&lan911x_int_driver);
212err4:
213 phy_driver_unregister (&lan8700_driver);
214err3:
215 phy_driver_unregister (&lan8187_driver);
216err2:
217 phy_driver_unregister (&lan83c185_driver);
218err1:
219 return ret;
220} 181}
221 182
222static void __exit smsc_exit(void) 183static void __exit smsc_exit(void)
223{ 184{
224 phy_driver_unregister (&lan8710_driver); 185 return phy_drivers_unregister(smsc_phy_driver,
225 phy_driver_unregister (&lan911x_int_driver); 186 ARRAY_SIZE(smsc_phy_driver));
226 phy_driver_unregister (&lan8700_driver);
227 phy_driver_unregister (&lan8187_driver);
228 phy_driver_unregister (&lan83c185_driver);
229} 187}
230 188
231MODULE_DESCRIPTION("SMSC PHY driver"); 189MODULE_DESCRIPTION("SMSC PHY driver");
diff --git a/drivers/net/phy/spi_ks8995.c b/drivers/net/phy/spi_ks8995.c
index 4eb98bc52a0..1c3abce78b6 100644
--- a/drivers/net/phy/spi_ks8995.c
+++ b/drivers/net/phy/spi_ks8995.c
@@ -11,6 +11,8 @@
11 * by the Free Software Foundation. 11 * by the Free Software Foundation.
12 */ 12 */
13 13
14#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
15
14#include <linux/types.h> 16#include <linux/types.h>
15#include <linux/kernel.h> 17#include <linux/kernel.h>
16#include <linux/init.h> 18#include <linux/init.h>
@@ -356,7 +358,7 @@ static struct spi_driver ks8995_driver = {
356 358
357static int __init ks8995_init(void) 359static int __init ks8995_init(void)
358{ 360{
359 printk(KERN_INFO DRV_DESC " version " DRV_VERSION"\n"); 361 pr_info(DRV_DESC " version " DRV_VERSION "\n");
360 362
361 return spi_register_driver(&ks8995_driver); 363 return spi_register_driver(&ks8995_driver);
362} 364}
diff --git a/drivers/net/phy/ste10Xp.c b/drivers/net/phy/ste10Xp.c
index 187a2fa814f..5e1eb138916 100644
--- a/drivers/net/phy/ste10Xp.c
+++ b/drivers/net/phy/ste10Xp.c
@@ -81,7 +81,8 @@ static int ste10Xp_ack_interrupt(struct phy_device *phydev)
81 return 0; 81 return 0;
82} 82}
83 83
84static struct phy_driver ste101p_pdriver = { 84static struct phy_driver ste10xp_pdriver[] = {
85{
85 .phy_id = STE101P_PHY_ID, 86 .phy_id = STE101P_PHY_ID,
86 .phy_id_mask = 0xfffffff0, 87 .phy_id_mask = 0xfffffff0,
87 .name = "STe101p", 88 .name = "STe101p",
@@ -95,9 +96,7 @@ static struct phy_driver ste101p_pdriver = {
95 .suspend = genphy_suspend, 96 .suspend = genphy_suspend,
96 .resume = genphy_resume, 97 .resume = genphy_resume,
97 .driver = {.owner = THIS_MODULE,} 98 .driver = {.owner = THIS_MODULE,}
98}; 99}, {
99
100static struct phy_driver ste100p_pdriver = {
101 .phy_id = STE100P_PHY_ID, 100 .phy_id = STE100P_PHY_ID,
102 .phy_id_mask = 0xffffffff, 101 .phy_id_mask = 0xffffffff,
103 .name = "STe100p", 102 .name = "STe100p",
@@ -111,22 +110,18 @@ static struct phy_driver ste100p_pdriver = {
111 .suspend = genphy_suspend, 110 .suspend = genphy_suspend,
112 .resume = genphy_resume, 111 .resume = genphy_resume,
113 .driver = {.owner = THIS_MODULE,} 112 .driver = {.owner = THIS_MODULE,}
114}; 113} };
115 114
116static int __init ste10Xp_init(void) 115static int __init ste10Xp_init(void)
117{ 116{
118 int retval; 117 return phy_drivers_register(ste10xp_pdriver,
119 118 ARRAY_SIZE(ste10xp_pdriver));
120 retval = phy_driver_register(&ste100p_pdriver);
121 if (retval < 0)
122 return retval;
123 return phy_driver_register(&ste101p_pdriver);
124} 119}
125 120
126static void __exit ste10Xp_exit(void) 121static void __exit ste10Xp_exit(void)
127{ 122{
128 phy_driver_unregister(&ste100p_pdriver); 123 phy_drivers_unregister(ste10xp_pdriver,
129 phy_driver_unregister(&ste101p_pdriver); 124 ARRAY_SIZE(ste10xp_pdriver));
130} 125}
131 126
132module_init(ste10Xp_init); 127module_init(ste10Xp_init);
diff --git a/drivers/net/phy/vitesse.c b/drivers/net/phy/vitesse.c
index 0ec8e09cc2a..2585c383e62 100644
--- a/drivers/net/phy/vitesse.c
+++ b/drivers/net/phy/vitesse.c
@@ -138,21 +138,6 @@ static int vsc82xx_config_intr(struct phy_device *phydev)
138 return err; 138 return err;
139} 139}
140 140
141/* Vitesse 824x */
142static struct phy_driver vsc8244_driver = {
143 .phy_id = PHY_ID_VSC8244,
144 .name = "Vitesse VSC8244",
145 .phy_id_mask = 0x000fffc0,
146 .features = PHY_GBIT_FEATURES,
147 .flags = PHY_HAS_INTERRUPT,
148 .config_init = &vsc824x_config_init,
149 .config_aneg = &genphy_config_aneg,
150 .read_status = &genphy_read_status,
151 .ack_interrupt = &vsc824x_ack_interrupt,
152 .config_intr = &vsc82xx_config_intr,
153 .driver = { .owner = THIS_MODULE,},
154};
155
156static int vsc8221_config_init(struct phy_device *phydev) 141static int vsc8221_config_init(struct phy_device *phydev)
157{ 142{
158 int err; 143 int err;
@@ -165,8 +150,22 @@ static int vsc8221_config_init(struct phy_device *phydev)
165 Options are 802.3Z SerDes or SGMII */ 150 Options are 802.3Z SerDes or SGMII */
166} 151}
167 152
168/* Vitesse 8221 */ 153/* Vitesse 824x */
169static struct phy_driver vsc8221_driver = { 154static struct phy_driver vsc82xx_driver[] = {
155{
156 .phy_id = PHY_ID_VSC8244,
157 .name = "Vitesse VSC8244",
158 .phy_id_mask = 0x000fffc0,
159 .features = PHY_GBIT_FEATURES,
160 .flags = PHY_HAS_INTERRUPT,
161 .config_init = &vsc824x_config_init,
162 .config_aneg = &genphy_config_aneg,
163 .read_status = &genphy_read_status,
164 .ack_interrupt = &vsc824x_ack_interrupt,
165 .config_intr = &vsc82xx_config_intr,
166 .driver = { .owner = THIS_MODULE,},
167}, {
168 /* Vitesse 8221 */
170 .phy_id = PHY_ID_VSC8221, 169 .phy_id = PHY_ID_VSC8221,
171 .phy_id_mask = 0x000ffff0, 170 .phy_id_mask = 0x000ffff0,
172 .name = "Vitesse VSC8221", 171 .name = "Vitesse VSC8221",
@@ -177,26 +176,19 @@ static struct phy_driver vsc8221_driver = {
177 .read_status = &genphy_read_status, 176 .read_status = &genphy_read_status,
178 .ack_interrupt = &vsc824x_ack_interrupt, 177 .ack_interrupt = &vsc824x_ack_interrupt,
179 .config_intr = &vsc82xx_config_intr, 178 .config_intr = &vsc82xx_config_intr,
180 .driver = { .owner = THIS_MODULE,}, 179 .driver = { .owner = THIS_MODULE,},
181}; 180} };
182 181
183static int __init vsc82xx_init(void) 182static int __init vsc82xx_init(void)
184{ 183{
185 int err; 184 return phy_drivers_register(vsc82xx_driver,
186 185 ARRAY_SIZE(vsc82xx_driver));
187 err = phy_driver_register(&vsc8244_driver);
188 if (err < 0)
189 return err;
190 err = phy_driver_register(&vsc8221_driver);
191 if (err < 0)
192 phy_driver_unregister(&vsc8244_driver);
193 return err;
194} 186}
195 187
196static void __exit vsc82xx_exit(void) 188static void __exit vsc82xx_exit(void)
197{ 189{
198 phy_driver_unregister(&vsc8244_driver); 190 return phy_drivers_unregister(vsc82xx_driver,
199 phy_driver_unregister(&vsc8221_driver); 191 ARRAY_SIZE(vsc82xx_driver));
200} 192}
201 193
202module_init(vsc82xx_init); 194module_init(vsc82xx_init);
diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c
index d4c9db3da22..a34d6bf5e43 100644
--- a/drivers/net/slip/slip.c
+++ b/drivers/net/slip/slip.c
@@ -390,10 +390,10 @@ static void sl_encaps(struct slip *sl, unsigned char *icp, int len)
390#endif 390#endif
391#ifdef CONFIG_SLIP_MODE_SLIP6 391#ifdef CONFIG_SLIP_MODE_SLIP6
392 if (sl->mode & SL_MODE_SLIP6) 392 if (sl->mode & SL_MODE_SLIP6)
393 count = slip_esc6(p, (unsigned char *) sl->xbuff, len); 393 count = slip_esc6(p, sl->xbuff, len);
394 else 394 else
395#endif 395#endif
396 count = slip_esc(p, (unsigned char *) sl->xbuff, len); 396 count = slip_esc(p, sl->xbuff, len);
397 397
398 /* Order of next two lines is *very* important. 398 /* Order of next two lines is *very* important.
399 * When we are sending a little amount of data, 399 * When we are sending a little amount of data,
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index c61ae35a53c..9b94f53a9d4 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * net/drivers/team/team.c - Network team device driver 2 * drivers/net/team/team.c - Network team device driver
3 * Copyright (c) 2011 Jiri Pirko <jpirko@redhat.com> 3 * Copyright (c) 2011 Jiri Pirko <jpirko@redhat.com>
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
@@ -82,14 +82,16 @@ static void team_refresh_port_linkup(struct team_port *port)
82 port->state.linkup; 82 port->state.linkup;
83} 83}
84 84
85
85/******************* 86/*******************
86 * Options handling 87 * Options handling
87 *******************/ 88 *******************/
88 89
89struct team_option_inst { /* One for each option instance */ 90struct team_option_inst { /* One for each option instance */
90 struct list_head list; 91 struct list_head list;
92 struct list_head tmp_list;
91 struct team_option *option; 93 struct team_option *option;
92 struct team_port *port; /* != NULL if per-port */ 94 struct team_option_inst_info info;
93 bool changed; 95 bool changed;
94 bool removed; 96 bool removed;
95}; 97};
@@ -106,22 +108,6 @@ static struct team_option *__team_find_option(struct team *team,
106 return NULL; 108 return NULL;
107} 109}
108 110
109static int __team_option_inst_add(struct team *team, struct team_option *option,
110 struct team_port *port)
111{
112 struct team_option_inst *opt_inst;
113
114 opt_inst = kmalloc(sizeof(*opt_inst), GFP_KERNEL);
115 if (!opt_inst)
116 return -ENOMEM;
117 opt_inst->option = option;
118 opt_inst->port = port;
119 opt_inst->changed = true;
120 opt_inst->removed = false;
121 list_add_tail(&opt_inst->list, &team->option_inst_list);
122 return 0;
123}
124
125static void __team_option_inst_del(struct team_option_inst *opt_inst) 111static void __team_option_inst_del(struct team_option_inst *opt_inst)
126{ 112{
127 list_del(&opt_inst->list); 113 list_del(&opt_inst->list);
@@ -139,14 +125,49 @@ static void __team_option_inst_del_option(struct team *team,
139 } 125 }
140} 126}
141 127
128static int __team_option_inst_add(struct team *team, struct team_option *option,
129 struct team_port *port)
130{
131 struct team_option_inst *opt_inst;
132 unsigned int array_size;
133 unsigned int i;
134 int err;
135
136 array_size = option->array_size;
137 if (!array_size)
138 array_size = 1; /* No array but still need one instance */
139
140 for (i = 0; i < array_size; i++) {
141 opt_inst = kmalloc(sizeof(*opt_inst), GFP_KERNEL);
142 if (!opt_inst)
143 return -ENOMEM;
144 opt_inst->option = option;
145 opt_inst->info.port = port;
146 opt_inst->info.array_index = i;
147 opt_inst->changed = true;
148 opt_inst->removed = false;
149 list_add_tail(&opt_inst->list, &team->option_inst_list);
150 if (option->init) {
151 err = option->init(team, &opt_inst->info);
152 if (err)
153 return err;
154 }
155
156 }
157 return 0;
158}
159
142static int __team_option_inst_add_option(struct team *team, 160static int __team_option_inst_add_option(struct team *team,
143 struct team_option *option) 161 struct team_option *option)
144{ 162{
145 struct team_port *port; 163 struct team_port *port;
146 int err; 164 int err;
147 165
148 if (!option->per_port) 166 if (!option->per_port) {
149 return __team_option_inst_add(team, option, 0); 167 err = __team_option_inst_add(team, option, NULL);
168 if (err)
169 goto inst_del_option;
170 }
150 171
151 list_for_each_entry(port, &team->port_list, list) { 172 list_for_each_entry(port, &team->port_list, list) {
152 err = __team_option_inst_add(team, option, port); 173 err = __team_option_inst_add(team, option, port);
@@ -180,7 +201,7 @@ static void __team_option_inst_del_port(struct team *team,
180 201
181 list_for_each_entry_safe(opt_inst, tmp, &team->option_inst_list, list) { 202 list_for_each_entry_safe(opt_inst, tmp, &team->option_inst_list, list) {
182 if (opt_inst->option->per_port && 203 if (opt_inst->option->per_port &&
183 opt_inst->port == port) 204 opt_inst->info.port == port)
184 __team_option_inst_del(opt_inst); 205 __team_option_inst_del(opt_inst);
185 } 206 }
186} 207}
@@ -211,7 +232,7 @@ static void __team_option_inst_mark_removed_port(struct team *team,
211 struct team_option_inst *opt_inst; 232 struct team_option_inst *opt_inst;
212 233
213 list_for_each_entry(opt_inst, &team->option_inst_list, list) { 234 list_for_each_entry(opt_inst, &team->option_inst_list, list) {
214 if (opt_inst->port == port) { 235 if (opt_inst->info.port == port) {
215 opt_inst->changed = true; 236 opt_inst->changed = true;
216 opt_inst->removed = true; 237 opt_inst->removed = true;
217 } 238 }
@@ -324,28 +345,12 @@ void team_options_unregister(struct team *team,
324} 345}
325EXPORT_SYMBOL(team_options_unregister); 346EXPORT_SYMBOL(team_options_unregister);
326 347
327static int team_option_port_add(struct team *team, struct team_port *port)
328{
329 int err;
330
331 err = __team_option_inst_add_port(team, port);
332 if (err)
333 return err;
334 __team_options_change_check(team);
335 return 0;
336}
337
338static void team_option_port_del(struct team *team, struct team_port *port)
339{
340 __team_option_inst_mark_removed_port(team, port);
341 __team_options_change_check(team);
342 __team_option_inst_del_port(team, port);
343}
344
345static int team_option_get(struct team *team, 348static int team_option_get(struct team *team,
346 struct team_option_inst *opt_inst, 349 struct team_option_inst *opt_inst,
347 struct team_gsetter_ctx *ctx) 350 struct team_gsetter_ctx *ctx)
348{ 351{
352 if (!opt_inst->option->getter)
353 return -EOPNOTSUPP;
349 return opt_inst->option->getter(team, ctx); 354 return opt_inst->option->getter(team, ctx);
350} 355}
351 356
@@ -353,16 +358,26 @@ static int team_option_set(struct team *team,
353 struct team_option_inst *opt_inst, 358 struct team_option_inst *opt_inst,
354 struct team_gsetter_ctx *ctx) 359 struct team_gsetter_ctx *ctx)
355{ 360{
356 int err; 361 if (!opt_inst->option->setter)
362 return -EOPNOTSUPP;
363 return opt_inst->option->setter(team, ctx);
364}
357 365
358 err = opt_inst->option->setter(team, ctx); 366void team_option_inst_set_change(struct team_option_inst_info *opt_inst_info)
359 if (err) 367{
360 return err; 368 struct team_option_inst *opt_inst;
361 369
370 opt_inst = container_of(opt_inst_info, struct team_option_inst, info);
362 opt_inst->changed = true; 371 opt_inst->changed = true;
372}
373EXPORT_SYMBOL(team_option_inst_set_change);
374
375void team_options_change_check(struct team *team)
376{
363 __team_options_change_check(team); 377 __team_options_change_check(team);
364 return err;
365} 378}
379EXPORT_SYMBOL(team_options_change_check);
380
366 381
367/**************** 382/****************
368 * Mode handling 383 * Mode handling
@@ -371,13 +386,18 @@ static int team_option_set(struct team *team,
371static LIST_HEAD(mode_list); 386static LIST_HEAD(mode_list);
372static DEFINE_SPINLOCK(mode_list_lock); 387static DEFINE_SPINLOCK(mode_list_lock);
373 388
374static struct team_mode *__find_mode(const char *kind) 389struct team_mode_item {
390 struct list_head list;
391 const struct team_mode *mode;
392};
393
394static struct team_mode_item *__find_mode(const char *kind)
375{ 395{
376 struct team_mode *mode; 396 struct team_mode_item *mitem;
377 397
378 list_for_each_entry(mode, &mode_list, list) { 398 list_for_each_entry(mitem, &mode_list, list) {
379 if (strcmp(mode->kind, kind) == 0) 399 if (strcmp(mitem->mode->kind, kind) == 0)
380 return mode; 400 return mitem;
381 } 401 }
382 return NULL; 402 return NULL;
383} 403}
@@ -392,49 +412,65 @@ static bool is_good_mode_name(const char *name)
392 return true; 412 return true;
393} 413}
394 414
395int team_mode_register(struct team_mode *mode) 415int team_mode_register(const struct team_mode *mode)
396{ 416{
397 int err = 0; 417 int err = 0;
418 struct team_mode_item *mitem;
398 419
399 if (!is_good_mode_name(mode->kind) || 420 if (!is_good_mode_name(mode->kind) ||
400 mode->priv_size > TEAM_MODE_PRIV_SIZE) 421 mode->priv_size > TEAM_MODE_PRIV_SIZE)
401 return -EINVAL; 422 return -EINVAL;
423
424 mitem = kmalloc(sizeof(*mitem), GFP_KERNEL);
425 if (!mitem)
426 return -ENOMEM;
427
402 spin_lock(&mode_list_lock); 428 spin_lock(&mode_list_lock);
403 if (__find_mode(mode->kind)) { 429 if (__find_mode(mode->kind)) {
404 err = -EEXIST; 430 err = -EEXIST;
431 kfree(mitem);
405 goto unlock; 432 goto unlock;
406 } 433 }
407 list_add_tail(&mode->list, &mode_list); 434 mitem->mode = mode;
435 list_add_tail(&mitem->list, &mode_list);
408unlock: 436unlock:
409 spin_unlock(&mode_list_lock); 437 spin_unlock(&mode_list_lock);
410 return err; 438 return err;
411} 439}
412EXPORT_SYMBOL(team_mode_register); 440EXPORT_SYMBOL(team_mode_register);
413 441
414int team_mode_unregister(struct team_mode *mode) 442void team_mode_unregister(const struct team_mode *mode)
415{ 443{
444 struct team_mode_item *mitem;
445
416 spin_lock(&mode_list_lock); 446 spin_lock(&mode_list_lock);
417 list_del_init(&mode->list); 447 mitem = __find_mode(mode->kind);
448 if (mitem) {
449 list_del_init(&mitem->list);
450 kfree(mitem);
451 }
418 spin_unlock(&mode_list_lock); 452 spin_unlock(&mode_list_lock);
419 return 0;
420} 453}
421EXPORT_SYMBOL(team_mode_unregister); 454EXPORT_SYMBOL(team_mode_unregister);
422 455
423static struct team_mode *team_mode_get(const char *kind) 456static const struct team_mode *team_mode_get(const char *kind)
424{ 457{
425 struct team_mode *mode; 458 struct team_mode_item *mitem;
459 const struct team_mode *mode = NULL;
426 460
427 spin_lock(&mode_list_lock); 461 spin_lock(&mode_list_lock);
428 mode = __find_mode(kind); 462 mitem = __find_mode(kind);
429 if (!mode) { 463 if (!mitem) {
430 spin_unlock(&mode_list_lock); 464 spin_unlock(&mode_list_lock);
431 request_module("team-mode-%s", kind); 465 request_module("team-mode-%s", kind);
432 spin_lock(&mode_list_lock); 466 spin_lock(&mode_list_lock);
433 mode = __find_mode(kind); 467 mitem = __find_mode(kind);
434 } 468 }
435 if (mode) 469 if (mitem) {
470 mode = mitem->mode;
436 if (!try_module_get(mode->owner)) 471 if (!try_module_get(mode->owner))
437 mode = NULL; 472 mode = NULL;
473 }
438 474
439 spin_unlock(&mode_list_lock); 475 spin_unlock(&mode_list_lock);
440 return mode; 476 return mode;
@@ -458,26 +494,45 @@ rx_handler_result_t team_dummy_receive(struct team *team,
458 return RX_HANDLER_ANOTHER; 494 return RX_HANDLER_ANOTHER;
459} 495}
460 496
461static void team_adjust_ops(struct team *team) 497static const struct team_mode __team_no_mode = {
498 .kind = "*NOMODE*",
499};
500
501static bool team_is_mode_set(struct team *team)
502{
503 return team->mode != &__team_no_mode;
504}
505
506static void team_set_no_mode(struct team *team)
507{
508 team->mode = &__team_no_mode;
509}
510
511static void __team_adjust_ops(struct team *team, int en_port_count)
462{ 512{
463 /* 513 /*
464 * To avoid checks in rx/tx skb paths, ensure here that non-null and 514 * To avoid checks in rx/tx skb paths, ensure here that non-null and
465 * correct ops are always set. 515 * correct ops are always set.
466 */ 516 */
467 517
468 if (list_empty(&team->port_list) || 518 if (!en_port_count || !team_is_mode_set(team) ||
469 !team->mode || !team->mode->ops->transmit) 519 !team->mode->ops->transmit)
470 team->ops.transmit = team_dummy_transmit; 520 team->ops.transmit = team_dummy_transmit;
471 else 521 else
472 team->ops.transmit = team->mode->ops->transmit; 522 team->ops.transmit = team->mode->ops->transmit;
473 523
474 if (list_empty(&team->port_list) || 524 if (!en_port_count || !team_is_mode_set(team) ||
475 !team->mode || !team->mode->ops->receive) 525 !team->mode->ops->receive)
476 team->ops.receive = team_dummy_receive; 526 team->ops.receive = team_dummy_receive;
477 else 527 else
478 team->ops.receive = team->mode->ops->receive; 528 team->ops.receive = team->mode->ops->receive;
479} 529}
480 530
531static void team_adjust_ops(struct team *team)
532{
533 __team_adjust_ops(team, team->en_port_count);
534}
535
481/* 536/*
482 * We can benefit from the fact that it's ensured no port is present 537 * We can benefit from the fact that it's ensured no port is present
483 * at the time of mode change. Therefore no packets are in fly so there's no 538 * at the time of mode change. Therefore no packets are in fly so there's no
@@ -487,7 +542,7 @@ static int __team_change_mode(struct team *team,
487 const struct team_mode *new_mode) 542 const struct team_mode *new_mode)
488{ 543{
489 /* Check if mode was previously set and do cleanup if so */ 544 /* Check if mode was previously set and do cleanup if so */
490 if (team->mode) { 545 if (team_is_mode_set(team)) {
491 void (*exit_op)(struct team *team) = team->ops.exit; 546 void (*exit_op)(struct team *team) = team->ops.exit;
492 547
493 /* Clear ops area so no callback is called any longer */ 548 /* Clear ops area so no callback is called any longer */
@@ -497,7 +552,7 @@ static int __team_change_mode(struct team *team,
497 if (exit_op) 552 if (exit_op)
498 exit_op(team); 553 exit_op(team);
499 team_mode_put(team->mode); 554 team_mode_put(team->mode);
500 team->mode = NULL; 555 team_set_no_mode(team);
501 /* zero private data area */ 556 /* zero private data area */
502 memset(&team->mode_priv, 0, 557 memset(&team->mode_priv, 0,
503 sizeof(struct team) - offsetof(struct team, mode_priv)); 558 sizeof(struct team) - offsetof(struct team, mode_priv));
@@ -523,7 +578,7 @@ static int __team_change_mode(struct team *team,
523 578
524static int team_change_mode(struct team *team, const char *kind) 579static int team_change_mode(struct team *team, const char *kind)
525{ 580{
526 struct team_mode *new_mode; 581 const struct team_mode *new_mode;
527 struct net_device *dev = team->dev; 582 struct net_device *dev = team->dev;
528 int err; 583 int err;
529 584
@@ -532,7 +587,7 @@ static int team_change_mode(struct team *team, const char *kind)
532 return -EBUSY; 587 return -EBUSY;
533 } 588 }
534 589
535 if (team->mode && strcmp(team->mode->kind, kind) == 0) { 590 if (team_is_mode_set(team) && strcmp(team->mode->kind, kind) == 0) {
536 netdev_err(dev, "Unable to change to the same mode the team is in\n"); 591 netdev_err(dev, "Unable to change to the same mode the team is in\n");
537 return -EINVAL; 592 return -EINVAL;
538 } 593 }
@@ -559,8 +614,6 @@ static int team_change_mode(struct team *team, const char *kind)
559 * Rx path frame handler 614 * Rx path frame handler
560 ************************/ 615 ************************/
561 616
562static bool team_port_enabled(struct team_port *port);
563
564/* note: already called with rcu_read_lock */ 617/* note: already called with rcu_read_lock */
565static rx_handler_result_t team_handle_frame(struct sk_buff **pskb) 618static rx_handler_result_t team_handle_frame(struct sk_buff **pskb)
566{ 619{
@@ -618,10 +671,11 @@ static bool team_port_find(const struct team *team,
618 return false; 671 return false;
619} 672}
620 673
621static bool team_port_enabled(struct team_port *port) 674bool team_port_enabled(struct team_port *port)
622{ 675{
623 return port->index != -1; 676 return port->index != -1;
624} 677}
678EXPORT_SYMBOL(team_port_enabled);
625 679
626/* 680/*
627 * Enable/disable port by adding to enabled port hashlist and setting 681 * Enable/disable port by adding to enabled port hashlist and setting
@@ -637,6 +691,9 @@ static void team_port_enable(struct team *team,
637 port->index = team->en_port_count++; 691 port->index = team->en_port_count++;
638 hlist_add_head_rcu(&port->hlist, 692 hlist_add_head_rcu(&port->hlist,
639 team_port_index_hash(team, port->index)); 693 team_port_index_hash(team, port->index));
694 team_adjust_ops(team);
695 if (team->ops.port_enabled)
696 team->ops.port_enabled(team, port);
640} 697}
641 698
642static void __reconstruct_port_hlist(struct team *team, int rm_index) 699static void __reconstruct_port_hlist(struct team *team, int rm_index)
@@ -656,14 +713,20 @@ static void __reconstruct_port_hlist(struct team *team, int rm_index)
656static void team_port_disable(struct team *team, 713static void team_port_disable(struct team *team,
657 struct team_port *port) 714 struct team_port *port)
658{ 715{
659 int rm_index = port->index;
660
661 if (!team_port_enabled(port)) 716 if (!team_port_enabled(port))
662 return; 717 return;
718 if (team->ops.port_disabled)
719 team->ops.port_disabled(team, port);
663 hlist_del_rcu(&port->hlist); 720 hlist_del_rcu(&port->hlist);
664 __reconstruct_port_hlist(team, rm_index); 721 __reconstruct_port_hlist(team, port->index);
665 team->en_port_count--;
666 port->index = -1; 722 port->index = -1;
723 __team_adjust_ops(team, team->en_port_count - 1);
724 /*
725 * Wait until readers see adjusted ops. This ensures that
726 * readers never see team->en_port_count == 0
727 */
728 synchronize_rcu();
729 team->en_port_count--;
667} 730}
668 731
669#define TEAM_VLAN_FEATURES (NETIF_F_ALL_CSUM | NETIF_F_SG | \ 732#define TEAM_VLAN_FEATURES (NETIF_F_ALL_CSUM | NETIF_F_SG | \
@@ -758,7 +821,8 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
758 return -EBUSY; 821 return -EBUSY;
759 } 822 }
760 823
761 port = kzalloc(sizeof(struct team_port), GFP_KERNEL); 824 port = kzalloc(sizeof(struct team_port) + team->mode->port_priv_size,
825 GFP_KERNEL);
762 if (!port) 826 if (!port)
763 return -ENOMEM; 827 return -ENOMEM;
764 828
@@ -809,7 +873,7 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
809 goto err_handler_register; 873 goto err_handler_register;
810 } 874 }
811 875
812 err = team_option_port_add(team, port); 876 err = __team_option_inst_add_port(team, port);
813 if (err) { 877 if (err) {
814 netdev_err(dev, "Device %s failed to add per-port options\n", 878 netdev_err(dev, "Device %s failed to add per-port options\n",
815 portname); 879 portname);
@@ -819,9 +883,9 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
819 port->index = -1; 883 port->index = -1;
820 team_port_enable(team, port); 884 team_port_enable(team, port);
821 list_add_tail_rcu(&port->list, &team->port_list); 885 list_add_tail_rcu(&port->list, &team->port_list);
822 team_adjust_ops(team);
823 __team_compute_features(team); 886 __team_compute_features(team);
824 __team_port_change_check(port, !!netif_carrier_ok(port_dev)); 887 __team_port_change_check(port, !!netif_carrier_ok(port_dev));
888 __team_options_change_check(team);
825 889
826 netdev_info(dev, "Port device %s added\n", portname); 890 netdev_info(dev, "Port device %s added\n", portname);
827 891
@@ -865,12 +929,13 @@ static int team_port_del(struct team *team, struct net_device *port_dev)
865 return -ENOENT; 929 return -ENOENT;
866 } 930 }
867 931
932 __team_option_inst_mark_removed_port(team, port);
933 __team_options_change_check(team);
934 __team_option_inst_del_port(team, port);
868 port->removed = true; 935 port->removed = true;
869 __team_port_change_check(port, false); 936 __team_port_change_check(port, false);
870 team_port_disable(team, port); 937 team_port_disable(team, port);
871 list_del_rcu(&port->list); 938 list_del_rcu(&port->list);
872 team_adjust_ops(team);
873 team_option_port_del(team, port);
874 netdev_rx_handler_unregister(port_dev); 939 netdev_rx_handler_unregister(port_dev);
875 netdev_set_master(port_dev, NULL); 940 netdev_set_master(port_dev, NULL);
876 vlan_vids_del_by_dev(port_dev, dev); 941 vlan_vids_del_by_dev(port_dev, dev);
@@ -891,11 +956,9 @@ static int team_port_del(struct team *team, struct net_device *port_dev)
891 * Net device ops 956 * Net device ops
892 *****************/ 957 *****************/
893 958
894static const char team_no_mode_kind[] = "*NOMODE*";
895
896static int team_mode_option_get(struct team *team, struct team_gsetter_ctx *ctx) 959static int team_mode_option_get(struct team *team, struct team_gsetter_ctx *ctx)
897{ 960{
898 ctx->data.str_val = team->mode ? team->mode->kind : team_no_mode_kind; 961 ctx->data.str_val = team->mode->kind;
899 return 0; 962 return 0;
900} 963}
901 964
@@ -907,39 +970,47 @@ static int team_mode_option_set(struct team *team, struct team_gsetter_ctx *ctx)
907static int team_port_en_option_get(struct team *team, 970static int team_port_en_option_get(struct team *team,
908 struct team_gsetter_ctx *ctx) 971 struct team_gsetter_ctx *ctx)
909{ 972{
910 ctx->data.bool_val = team_port_enabled(ctx->port); 973 struct team_port *port = ctx->info->port;
974
975 ctx->data.bool_val = team_port_enabled(port);
911 return 0; 976 return 0;
912} 977}
913 978
914static int team_port_en_option_set(struct team *team, 979static int team_port_en_option_set(struct team *team,
915 struct team_gsetter_ctx *ctx) 980 struct team_gsetter_ctx *ctx)
916{ 981{
982 struct team_port *port = ctx->info->port;
983
917 if (ctx->data.bool_val) 984 if (ctx->data.bool_val)
918 team_port_enable(team, ctx->port); 985 team_port_enable(team, port);
919 else 986 else
920 team_port_disable(team, ctx->port); 987 team_port_disable(team, port);
921 return 0; 988 return 0;
922} 989}
923 990
924static int team_user_linkup_option_get(struct team *team, 991static int team_user_linkup_option_get(struct team *team,
925 struct team_gsetter_ctx *ctx) 992 struct team_gsetter_ctx *ctx)
926{ 993{
927 ctx->data.bool_val = ctx->port->user.linkup; 994 struct team_port *port = ctx->info->port;
995
996 ctx->data.bool_val = port->user.linkup;
928 return 0; 997 return 0;
929} 998}
930 999
931static int team_user_linkup_option_set(struct team *team, 1000static int team_user_linkup_option_set(struct team *team,
932 struct team_gsetter_ctx *ctx) 1001 struct team_gsetter_ctx *ctx)
933{ 1002{
934 ctx->port->user.linkup = ctx->data.bool_val; 1003 struct team_port *port = ctx->info->port;
935 team_refresh_port_linkup(ctx->port); 1004
1005 port->user.linkup = ctx->data.bool_val;
1006 team_refresh_port_linkup(port);
936 return 0; 1007 return 0;
937} 1008}
938 1009
939static int team_user_linkup_en_option_get(struct team *team, 1010static int team_user_linkup_en_option_get(struct team *team,
940 struct team_gsetter_ctx *ctx) 1011 struct team_gsetter_ctx *ctx)
941{ 1012{
942 struct team_port *port = ctx->port; 1013 struct team_port *port = ctx->info->port;
943 1014
944 ctx->data.bool_val = port->user.linkup_enabled; 1015 ctx->data.bool_val = port->user.linkup_enabled;
945 return 0; 1016 return 0;
@@ -948,10 +1019,10 @@ static int team_user_linkup_en_option_get(struct team *team,
948static int team_user_linkup_en_option_set(struct team *team, 1019static int team_user_linkup_en_option_set(struct team *team,
949 struct team_gsetter_ctx *ctx) 1020 struct team_gsetter_ctx *ctx)
950{ 1021{
951 struct team_port *port = ctx->port; 1022 struct team_port *port = ctx->info->port;
952 1023
953 port->user.linkup_enabled = ctx->data.bool_val; 1024 port->user.linkup_enabled = ctx->data.bool_val;
954 team_refresh_port_linkup(ctx->port); 1025 team_refresh_port_linkup(port);
955 return 0; 1026 return 0;
956} 1027}
957 1028
@@ -993,6 +1064,7 @@ static int team_init(struct net_device *dev)
993 1064
994 team->dev = dev; 1065 team->dev = dev;
995 mutex_init(&team->lock); 1066 mutex_init(&team->lock);
1067 team_set_no_mode(team);
996 1068
997 team->pcpu_stats = alloc_percpu(struct team_pcpu_stats); 1069 team->pcpu_stats = alloc_percpu(struct team_pcpu_stats);
998 if (!team->pcpu_stats) 1070 if (!team->pcpu_stats)
@@ -1116,10 +1188,11 @@ static int team_set_mac_address(struct net_device *dev, void *p)
1116{ 1188{
1117 struct team *team = netdev_priv(dev); 1189 struct team *team = netdev_priv(dev);
1118 struct team_port *port; 1190 struct team_port *port;
1119 struct sockaddr *addr = p; 1191 int err;
1120 1192
1121 dev->addr_assign_type &= ~NET_ADDR_RANDOM; 1193 err = eth_mac_addr(dev, p);
1122 memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); 1194 if (err)
1195 return err;
1123 rcu_read_lock(); 1196 rcu_read_lock();
1124 list_for_each_entry_rcu(port, &team->port_list, list) 1197 list_for_each_entry_rcu(port, &team->port_list, list)
1125 if (team->ops.port_change_mac) 1198 if (team->ops.port_change_mac)
@@ -1321,7 +1394,7 @@ static void team_setup(struct net_device *dev)
1321 * bring us to promisc mode in case a unicast addr is added. 1394 * bring us to promisc mode in case a unicast addr is added.
1322 * Let this up to underlay drivers. 1395 * Let this up to underlay drivers.
1323 */ 1396 */
1324 dev->priv_flags |= IFF_UNICAST_FLT; 1397 dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE;
1325 1398
1326 dev->features |= NETIF_F_LLTX; 1399 dev->features |= NETIF_F_LLTX;
1327 dev->features |= NETIF_F_GRO; 1400 dev->features |= NETIF_F_GRO;
@@ -1404,7 +1477,7 @@ static int team_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info)
1404 void *hdr; 1477 void *hdr;
1405 int err; 1478 int err;
1406 1479
1407 msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); 1480 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1408 if (!msg) 1481 if (!msg)
1409 return -ENOMEM; 1482 return -ENOMEM;
1410 1483
@@ -1466,7 +1539,7 @@ static int team_nl_send_generic(struct genl_info *info, struct team *team,
1466 struct sk_buff *skb; 1539 struct sk_buff *skb;
1467 int err; 1540 int err;
1468 1541
1469 skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); 1542 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1470 if (!skb) 1543 if (!skb)
1471 return -ENOMEM; 1544 return -ENOMEM;
1472 1545
@@ -1482,16 +1555,128 @@ err_fill:
1482 return err; 1555 return err;
1483} 1556}
1484 1557
1485static int team_nl_fill_options_get(struct sk_buff *skb, 1558typedef int team_nl_send_func_t(struct sk_buff *skb,
1486 u32 pid, u32 seq, int flags, 1559 struct team *team, u32 pid);
1487 struct team *team, bool fillall) 1560
1561static int team_nl_send_unicast(struct sk_buff *skb, struct team *team, u32 pid)
1562{
1563 return genlmsg_unicast(dev_net(team->dev), skb, pid);
1564}
1565
1566static int team_nl_fill_one_option_get(struct sk_buff *skb, struct team *team,
1567 struct team_option_inst *opt_inst)
1568{
1569 struct nlattr *option_item;
1570 struct team_option *option = opt_inst->option;
1571 struct team_option_inst_info *opt_inst_info = &opt_inst->info;
1572 struct team_gsetter_ctx ctx;
1573 int err;
1574
1575 ctx.info = opt_inst_info;
1576 err = team_option_get(team, opt_inst, &ctx);
1577 if (err)
1578 return err;
1579
1580 option_item = nla_nest_start(skb, TEAM_ATTR_ITEM_OPTION);
1581 if (!option_item)
1582 return -EMSGSIZE;
1583
1584 if (nla_put_string(skb, TEAM_ATTR_OPTION_NAME, option->name))
1585 goto nest_cancel;
1586 if (opt_inst_info->port &&
1587 nla_put_u32(skb, TEAM_ATTR_OPTION_PORT_IFINDEX,
1588 opt_inst_info->port->dev->ifindex))
1589 goto nest_cancel;
1590 if (opt_inst->option->array_size &&
1591 nla_put_u32(skb, TEAM_ATTR_OPTION_ARRAY_INDEX,
1592 opt_inst_info->array_index))
1593 goto nest_cancel;
1594
1595 switch (option->type) {
1596 case TEAM_OPTION_TYPE_U32:
1597 if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_U32))
1598 goto nest_cancel;
1599 if (nla_put_u32(skb, TEAM_ATTR_OPTION_DATA, ctx.data.u32_val))
1600 goto nest_cancel;
1601 break;
1602 case TEAM_OPTION_TYPE_STRING:
1603 if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_STRING))
1604 goto nest_cancel;
1605 if (nla_put_string(skb, TEAM_ATTR_OPTION_DATA,
1606 ctx.data.str_val))
1607 goto nest_cancel;
1608 break;
1609 case TEAM_OPTION_TYPE_BINARY:
1610 if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_BINARY))
1611 goto nest_cancel;
1612 if (nla_put(skb, TEAM_ATTR_OPTION_DATA, ctx.data.bin_val.len,
1613 ctx.data.bin_val.ptr))
1614 goto nest_cancel;
1615 break;
1616 case TEAM_OPTION_TYPE_BOOL:
1617 if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_FLAG))
1618 goto nest_cancel;
1619 if (ctx.data.bool_val &&
1620 nla_put_flag(skb, TEAM_ATTR_OPTION_DATA))
1621 goto nest_cancel;
1622 break;
1623 default:
1624 BUG();
1625 }
1626 if (opt_inst->removed && nla_put_flag(skb, TEAM_ATTR_OPTION_REMOVED))
1627 goto nest_cancel;
1628 if (opt_inst->changed) {
1629 if (nla_put_flag(skb, TEAM_ATTR_OPTION_CHANGED))
1630 goto nest_cancel;
1631 opt_inst->changed = false;
1632 }
1633 nla_nest_end(skb, option_item);
1634 return 0;
1635
1636nest_cancel:
1637 nla_nest_cancel(skb, option_item);
1638 return -EMSGSIZE;
1639}
1640
1641static int __send_and_alloc_skb(struct sk_buff **pskb,
1642 struct team *team, u32 pid,
1643 team_nl_send_func_t *send_func)
1644{
1645 int err;
1646
1647 if (*pskb) {
1648 err = send_func(*pskb, team, pid);
1649 if (err)
1650 return err;
1651 }
1652 *pskb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
1653 if (!*pskb)
1654 return -ENOMEM;
1655 return 0;
1656}
1657
1658static int team_nl_send_options_get(struct team *team, u32 pid, u32 seq,
1659 int flags, team_nl_send_func_t *send_func,
1660 struct list_head *sel_opt_inst_list)
1488{ 1661{
1489 struct nlattr *option_list; 1662 struct nlattr *option_list;
1663 struct nlmsghdr *nlh;
1490 void *hdr; 1664 void *hdr;
1491 struct team_option_inst *opt_inst; 1665 struct team_option_inst *opt_inst;
1492 int err; 1666 int err;
1667 struct sk_buff *skb = NULL;
1668 bool incomplete;
1669 int i;
1493 1670
1494 hdr = genlmsg_put(skb, pid, seq, &team_nl_family, flags, 1671 opt_inst = list_first_entry(sel_opt_inst_list,
1672 struct team_option_inst, tmp_list);
1673
1674start_again:
1675 err = __send_and_alloc_skb(&skb, team, pid, send_func);
1676 if (err)
1677 return err;
1678
1679 hdr = genlmsg_put(skb, pid, seq, &team_nl_family, flags | NLM_F_MULTI,
1495 TEAM_CMD_OPTIONS_GET); 1680 TEAM_CMD_OPTIONS_GET);
1496 if (IS_ERR(hdr)) 1681 if (IS_ERR(hdr))
1497 return PTR_ERR(hdr); 1682 return PTR_ERR(hdr);
@@ -1500,122 +1685,80 @@ static int team_nl_fill_options_get(struct sk_buff *skb,
1500 goto nla_put_failure; 1685 goto nla_put_failure;
1501 option_list = nla_nest_start(skb, TEAM_ATTR_LIST_OPTION); 1686 option_list = nla_nest_start(skb, TEAM_ATTR_LIST_OPTION);
1502 if (!option_list) 1687 if (!option_list)
1503 return -EMSGSIZE; 1688 goto nla_put_failure;
1504
1505 list_for_each_entry(opt_inst, &team->option_inst_list, list) {
1506 struct nlattr *option_item;
1507 struct team_option *option = opt_inst->option;
1508 struct team_gsetter_ctx ctx;
1509 1689
1510 /* Include only changed options if fill all mode is not on */ 1690 i = 0;
1511 if (!fillall && !opt_inst->changed) 1691 incomplete = false;
1512 continue; 1692 list_for_each_entry_from(opt_inst, sel_opt_inst_list, tmp_list) {
1513 option_item = nla_nest_start(skb, TEAM_ATTR_ITEM_OPTION); 1693 err = team_nl_fill_one_option_get(skb, team, opt_inst);
1514 if (!option_item) 1694 if (err) {
1515 goto nla_put_failure; 1695 if (err == -EMSGSIZE) {
1516 if (nla_put_string(skb, TEAM_ATTR_OPTION_NAME, option->name)) 1696 if (!i)
1517 goto nla_put_failure; 1697 goto errout;
1518 if (opt_inst->changed) { 1698 incomplete = true;
1519 if (nla_put_flag(skb, TEAM_ATTR_OPTION_CHANGED)) 1699 break;
1520 goto nla_put_failure; 1700 }
1521 opt_inst->changed = false; 1701 goto errout;
1522 }
1523 if (opt_inst->removed &&
1524 nla_put_flag(skb, TEAM_ATTR_OPTION_REMOVED))
1525 goto nla_put_failure;
1526 if (opt_inst->port &&
1527 nla_put_u32(skb, TEAM_ATTR_OPTION_PORT_IFINDEX,
1528 opt_inst->port->dev->ifindex))
1529 goto nla_put_failure;
1530 ctx.port = opt_inst->port;
1531 switch (option->type) {
1532 case TEAM_OPTION_TYPE_U32:
1533 if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_U32))
1534 goto nla_put_failure;
1535 err = team_option_get(team, opt_inst, &ctx);
1536 if (err)
1537 goto errout;
1538 if (nla_put_u32(skb, TEAM_ATTR_OPTION_DATA,
1539 ctx.data.u32_val))
1540 goto nla_put_failure;
1541 break;
1542 case TEAM_OPTION_TYPE_STRING:
1543 if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_STRING))
1544 goto nla_put_failure;
1545 err = team_option_get(team, opt_inst, &ctx);
1546 if (err)
1547 goto errout;
1548 if (nla_put_string(skb, TEAM_ATTR_OPTION_DATA,
1549 ctx.data.str_val))
1550 goto nla_put_failure;
1551 break;
1552 case TEAM_OPTION_TYPE_BINARY:
1553 if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_BINARY))
1554 goto nla_put_failure;
1555 err = team_option_get(team, opt_inst, &ctx);
1556 if (err)
1557 goto errout;
1558 if (nla_put(skb, TEAM_ATTR_OPTION_DATA,
1559 ctx.data.bin_val.len, ctx.data.bin_val.ptr))
1560 goto nla_put_failure;
1561 break;
1562 case TEAM_OPTION_TYPE_BOOL:
1563 if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_FLAG))
1564 goto nla_put_failure;
1565 err = team_option_get(team, opt_inst, &ctx);
1566 if (err)
1567 goto errout;
1568 if (ctx.data.bool_val &&
1569 nla_put_flag(skb, TEAM_ATTR_OPTION_DATA))
1570 goto nla_put_failure;
1571 break;
1572 default:
1573 BUG();
1574 } 1702 }
1575 nla_nest_end(skb, option_item); 1703 i++;
1576 } 1704 }
1577 1705
1578 nla_nest_end(skb, option_list); 1706 nla_nest_end(skb, option_list);
1579 return genlmsg_end(skb, hdr); 1707 genlmsg_end(skb, hdr);
1708 if (incomplete)
1709 goto start_again;
1710
1711send_done:
1712 nlh = nlmsg_put(skb, pid, seq, NLMSG_DONE, 0, flags | NLM_F_MULTI);
1713 if (!nlh) {
1714 err = __send_and_alloc_skb(&skb, team, pid, send_func);
1715 if (err)
1716 goto errout;
1717 goto send_done;
1718 }
1719
1720 return send_func(skb, team, pid);
1580 1721
1581nla_put_failure: 1722nla_put_failure:
1582 err = -EMSGSIZE; 1723 err = -EMSGSIZE;
1583errout: 1724errout:
1584 genlmsg_cancel(skb, hdr); 1725 genlmsg_cancel(skb, hdr);
1726 nlmsg_free(skb);
1585 return err; 1727 return err;
1586} 1728}
1587 1729
1588static int team_nl_fill_options_get_all(struct sk_buff *skb,
1589 struct genl_info *info, int flags,
1590 struct team *team)
1591{
1592 return team_nl_fill_options_get(skb, info->snd_pid,
1593 info->snd_seq, NLM_F_ACK,
1594 team, true);
1595}
1596
1597static int team_nl_cmd_options_get(struct sk_buff *skb, struct genl_info *info) 1730static int team_nl_cmd_options_get(struct sk_buff *skb, struct genl_info *info)
1598{ 1731{
1599 struct team *team; 1732 struct team *team;
1733 struct team_option_inst *opt_inst;
1600 int err; 1734 int err;
1735 LIST_HEAD(sel_opt_inst_list);
1601 1736
1602 team = team_nl_team_get(info); 1737 team = team_nl_team_get(info);
1603 if (!team) 1738 if (!team)
1604 return -EINVAL; 1739 return -EINVAL;
1605 1740
1606 err = team_nl_send_generic(info, team, team_nl_fill_options_get_all); 1741 list_for_each_entry(opt_inst, &team->option_inst_list, list)
1742 list_add_tail(&opt_inst->tmp_list, &sel_opt_inst_list);
1743 err = team_nl_send_options_get(team, info->snd_pid, info->snd_seq,
1744 NLM_F_ACK, team_nl_send_unicast,
1745 &sel_opt_inst_list);
1607 1746
1608 team_nl_team_put(team); 1747 team_nl_team_put(team);
1609 1748
1610 return err; 1749 return err;
1611} 1750}
1612 1751
1752static int team_nl_send_event_options_get(struct team *team,
1753 struct list_head *sel_opt_inst_list);
1754
1613static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info) 1755static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
1614{ 1756{
1615 struct team *team; 1757 struct team *team;
1616 int err = 0; 1758 int err = 0;
1617 int i; 1759 int i;
1618 struct nlattr *nl_option; 1760 struct nlattr *nl_option;
1761 LIST_HEAD(opt_inst_list);
1619 1762
1620 team = team_nl_team_get(info); 1763 team = team_nl_team_get(info);
1621 if (!team) 1764 if (!team)
@@ -1629,10 +1772,12 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
1629 1772
1630 nla_for_each_nested(nl_option, info->attrs[TEAM_ATTR_LIST_OPTION], i) { 1773 nla_for_each_nested(nl_option, info->attrs[TEAM_ATTR_LIST_OPTION], i) {
1631 struct nlattr *opt_attrs[TEAM_ATTR_OPTION_MAX + 1]; 1774 struct nlattr *opt_attrs[TEAM_ATTR_OPTION_MAX + 1];
1632 struct nlattr *attr_port_ifindex; 1775 struct nlattr *attr;
1633 struct nlattr *attr_data; 1776 struct nlattr *attr_data;
1634 enum team_option_type opt_type; 1777 enum team_option_type opt_type;
1635 int opt_port_ifindex = 0; /* != 0 for per-port options */ 1778 int opt_port_ifindex = 0; /* != 0 for per-port options */
1779 u32 opt_array_index = 0;
1780 bool opt_is_array = false;
1636 struct team_option_inst *opt_inst; 1781 struct team_option_inst *opt_inst;
1637 char *opt_name; 1782 char *opt_name;
1638 bool opt_found = false; 1783 bool opt_found = false;
@@ -1674,23 +1819,33 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
1674 } 1819 }
1675 1820
1676 opt_name = nla_data(opt_attrs[TEAM_ATTR_OPTION_NAME]); 1821 opt_name = nla_data(opt_attrs[TEAM_ATTR_OPTION_NAME]);
1677 attr_port_ifindex = opt_attrs[TEAM_ATTR_OPTION_PORT_IFINDEX]; 1822 attr = opt_attrs[TEAM_ATTR_OPTION_PORT_IFINDEX];
1678 if (attr_port_ifindex) 1823 if (attr)
1679 opt_port_ifindex = nla_get_u32(attr_port_ifindex); 1824 opt_port_ifindex = nla_get_u32(attr);
1825
1826 attr = opt_attrs[TEAM_ATTR_OPTION_ARRAY_INDEX];
1827 if (attr) {
1828 opt_is_array = true;
1829 opt_array_index = nla_get_u32(attr);
1830 }
1680 1831
1681 list_for_each_entry(opt_inst, &team->option_inst_list, list) { 1832 list_for_each_entry(opt_inst, &team->option_inst_list, list) {
1682 struct team_option *option = opt_inst->option; 1833 struct team_option *option = opt_inst->option;
1683 struct team_gsetter_ctx ctx; 1834 struct team_gsetter_ctx ctx;
1835 struct team_option_inst_info *opt_inst_info;
1684 int tmp_ifindex; 1836 int tmp_ifindex;
1685 1837
1686 tmp_ifindex = opt_inst->port ? 1838 opt_inst_info = &opt_inst->info;
1687 opt_inst->port->dev->ifindex : 0; 1839 tmp_ifindex = opt_inst_info->port ?
1840 opt_inst_info->port->dev->ifindex : 0;
1688 if (option->type != opt_type || 1841 if (option->type != opt_type ||
1689 strcmp(option->name, opt_name) || 1842 strcmp(option->name, opt_name) ||
1690 tmp_ifindex != opt_port_ifindex) 1843 tmp_ifindex != opt_port_ifindex ||
1844 (option->array_size && !opt_is_array) ||
1845 opt_inst_info->array_index != opt_array_index)
1691 continue; 1846 continue;
1692 opt_found = true; 1847 opt_found = true;
1693 ctx.port = opt_inst->port; 1848 ctx.info = opt_inst_info;
1694 switch (opt_type) { 1849 switch (opt_type) {
1695 case TEAM_OPTION_TYPE_U32: 1850 case TEAM_OPTION_TYPE_U32:
1696 ctx.data.u32_val = nla_get_u32(attr_data); 1851 ctx.data.u32_val = nla_get_u32(attr_data);
@@ -1715,6 +1870,8 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
1715 err = team_option_set(team, opt_inst, &ctx); 1870 err = team_option_set(team, opt_inst, &ctx);
1716 if (err) 1871 if (err)
1717 goto team_put; 1872 goto team_put;
1873 opt_inst->changed = true;
1874 list_add(&opt_inst->tmp_list, &opt_inst_list);
1718 } 1875 }
1719 if (!opt_found) { 1876 if (!opt_found) {
1720 err = -ENOENT; 1877 err = -ENOENT;
@@ -1722,6 +1879,8 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
1722 } 1879 }
1723 } 1880 }
1724 1881
1882 err = team_nl_send_event_options_get(team, &opt_inst_list);
1883
1725team_put: 1884team_put:
1726 team_nl_team_put(team); 1885 team_nl_team_put(team);
1727 1886
@@ -1746,7 +1905,7 @@ static int team_nl_fill_port_list_get(struct sk_buff *skb,
1746 goto nla_put_failure; 1905 goto nla_put_failure;
1747 port_list = nla_nest_start(skb, TEAM_ATTR_LIST_PORT); 1906 port_list = nla_nest_start(skb, TEAM_ATTR_LIST_PORT);
1748 if (!port_list) 1907 if (!port_list)
1749 return -EMSGSIZE; 1908 goto nla_put_failure;
1750 1909
1751 list_for_each_entry(port, &team->port_list, list) { 1910 list_for_each_entry(port, &team->port_list, list) {
1752 struct nlattr *port_item; 1911 struct nlattr *port_item;
@@ -1838,27 +1997,18 @@ static struct genl_multicast_group team_change_event_mcgrp = {
1838 .name = TEAM_GENL_CHANGE_EVENT_MC_GRP_NAME, 1997 .name = TEAM_GENL_CHANGE_EVENT_MC_GRP_NAME,
1839}; 1998};
1840 1999
1841static int team_nl_send_event_options_get(struct team *team) 2000static int team_nl_send_multicast(struct sk_buff *skb,
2001 struct team *team, u32 pid)
1842{ 2002{
1843 struct sk_buff *skb; 2003 return genlmsg_multicast_netns(dev_net(team->dev), skb, 0,
1844 int err; 2004 team_change_event_mcgrp.id, GFP_KERNEL);
1845 struct net *net = dev_net(team->dev); 2005}
1846
1847 skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
1848 if (!skb)
1849 return -ENOMEM;
1850
1851 err = team_nl_fill_options_get(skb, 0, 0, 0, team, false);
1852 if (err < 0)
1853 goto err_fill;
1854
1855 err = genlmsg_multicast_netns(net, skb, 0, team_change_event_mcgrp.id,
1856 GFP_KERNEL);
1857 return err;
1858 2006
1859err_fill: 2007static int team_nl_send_event_options_get(struct team *team,
1860 nlmsg_free(skb); 2008 struct list_head *sel_opt_inst_list)
1861 return err; 2009{
2010 return team_nl_send_options_get(team, 0, 0, 0, team_nl_send_multicast,
2011 sel_opt_inst_list);
1862} 2012}
1863 2013
1864static int team_nl_send_event_port_list_get(struct team *team) 2014static int team_nl_send_event_port_list_get(struct team *team)
@@ -1867,7 +2017,7 @@ static int team_nl_send_event_port_list_get(struct team *team)
1867 int err; 2017 int err;
1868 struct net *net = dev_net(team->dev); 2018 struct net *net = dev_net(team->dev);
1869 2019
1870 skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); 2020 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1871 if (!skb) 2021 if (!skb)
1872 return -ENOMEM; 2022 return -ENOMEM;
1873 2023
@@ -1918,10 +2068,17 @@ static void team_nl_fini(void)
1918static void __team_options_change_check(struct team *team) 2068static void __team_options_change_check(struct team *team)
1919{ 2069{
1920 int err; 2070 int err;
2071 struct team_option_inst *opt_inst;
2072 LIST_HEAD(sel_opt_inst_list);
1921 2073
1922 err = team_nl_send_event_options_get(team); 2074 list_for_each_entry(opt_inst, &team->option_inst_list, list) {
2075 if (opt_inst->changed)
2076 list_add_tail(&opt_inst->tmp_list, &sel_opt_inst_list);
2077 }
2078 err = team_nl_send_event_options_get(team, &sel_opt_inst_list);
1923 if (err) 2079 if (err)
1924 netdev_warn(team->dev, "Failed to send options change via netlink\n"); 2080 netdev_warn(team->dev, "Failed to send options change via netlink (err %d)\n",
2081 err);
1925} 2082}
1926 2083
1927/* rtnl lock is held */ 2084/* rtnl lock is held */
@@ -1965,6 +2122,7 @@ static void team_port_change_check(struct team_port *port, bool linkup)
1965 mutex_unlock(&team->lock); 2122 mutex_unlock(&team->lock);
1966} 2123}
1967 2124
2125
1968/************************************ 2126/************************************
1969 * Net device notifier event handler 2127 * Net device notifier event handler
1970 ************************************/ 2128 ************************************/
diff --git a/drivers/net/team/team_mode_activebackup.c b/drivers/net/team/team_mode_activebackup.c
index fd6bd03aaa8..253b8a5f342 100644
--- a/drivers/net/team/team_mode_activebackup.c
+++ b/drivers/net/team/team_mode_activebackup.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * net/drivers/team/team_mode_activebackup.c - Active-backup mode for team 2 * drivers/net/team/team_mode_activebackup.c - Active-backup mode for team
3 * Copyright (c) 2011 Jiri Pirko <jpirko@redhat.com> 3 * Copyright (c) 2011 Jiri Pirko <jpirko@redhat.com>
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
@@ -40,7 +40,7 @@ static bool ab_transmit(struct team *team, struct sk_buff *skb)
40{ 40{
41 struct team_port *active_port; 41 struct team_port *active_port;
42 42
43 active_port = rcu_dereference(ab_priv(team)->active_port); 43 active_port = rcu_dereference_bh(ab_priv(team)->active_port);
44 if (unlikely(!active_port)) 44 if (unlikely(!active_port))
45 goto drop; 45 goto drop;
46 skb->dev = active_port->dev; 46 skb->dev = active_port->dev;
@@ -61,8 +61,12 @@ static void ab_port_leave(struct team *team, struct team_port *port)
61 61
62static int ab_active_port_get(struct team *team, struct team_gsetter_ctx *ctx) 62static int ab_active_port_get(struct team *team, struct team_gsetter_ctx *ctx)
63{ 63{
64 if (ab_priv(team)->active_port) 64 struct team_port *active_port;
65 ctx->data.u32_val = ab_priv(team)->active_port->dev->ifindex; 65
66 active_port = rcu_dereference_protected(ab_priv(team)->active_port,
67 lockdep_is_held(&team->lock));
68 if (active_port)
69 ctx->data.u32_val = active_port->dev->ifindex;
66 else 70 else
67 ctx->data.u32_val = 0; 71 ctx->data.u32_val = 0;
68 return 0; 72 return 0;
@@ -108,7 +112,7 @@ static const struct team_mode_ops ab_mode_ops = {
108 .port_leave = ab_port_leave, 112 .port_leave = ab_port_leave,
109}; 113};
110 114
111static struct team_mode ab_mode = { 115static const struct team_mode ab_mode = {
112 .kind = "activebackup", 116 .kind = "activebackup",
113 .owner = THIS_MODULE, 117 .owner = THIS_MODULE,
114 .priv_size = sizeof(struct ab_priv), 118 .priv_size = sizeof(struct ab_priv),
diff --git a/drivers/net/team/team_mode_loadbalance.c b/drivers/net/team/team_mode_loadbalance.c
index 86e8183c8e3..51a4b199c75 100644
--- a/drivers/net/team/team_mode_loadbalance.c
+++ b/drivers/net/team/team_mode_loadbalance.c
@@ -17,34 +17,210 @@
17#include <linux/filter.h> 17#include <linux/filter.h>
18#include <linux/if_team.h> 18#include <linux/if_team.h>
19 19
20struct lb_priv;
21
22typedef struct team_port *lb_select_tx_port_func_t(struct team *,
23 struct lb_priv *,
24 struct sk_buff *,
25 unsigned char);
26
27#define LB_TX_HASHTABLE_SIZE 256 /* hash is a char */
28
29struct lb_stats {
30 u64 tx_bytes;
31};
32
33struct lb_pcpu_stats {
34 struct lb_stats hash_stats[LB_TX_HASHTABLE_SIZE];
35 struct u64_stats_sync syncp;
36};
37
38struct lb_stats_info {
39 struct lb_stats stats;
40 struct lb_stats last_stats;
41 struct team_option_inst_info *opt_inst_info;
42};
43
44struct lb_port_mapping {
45 struct team_port __rcu *port;
46 struct team_option_inst_info *opt_inst_info;
47};
48
49struct lb_priv_ex {
50 struct team *team;
51 struct lb_port_mapping tx_hash_to_port_mapping[LB_TX_HASHTABLE_SIZE];
52 struct sock_fprog *orig_fprog;
53 struct {
54 unsigned int refresh_interval; /* in tenths of second */
55 struct delayed_work refresh_dw;
56 struct lb_stats_info info[LB_TX_HASHTABLE_SIZE];
57 } stats;
58};
59
20struct lb_priv { 60struct lb_priv {
21 struct sk_filter __rcu *fp; 61 struct sk_filter __rcu *fp;
22 struct sock_fprog *orig_fprog; 62 lb_select_tx_port_func_t __rcu *select_tx_port_func;
63 struct lb_pcpu_stats __percpu *pcpu_stats;
64 struct lb_priv_ex *ex; /* priv extension */
23}; 65};
24 66
25static struct lb_priv *lb_priv(struct team *team) 67static struct lb_priv *get_lb_priv(struct team *team)
26{ 68{
27 return (struct lb_priv *) &team->mode_priv; 69 return (struct lb_priv *) &team->mode_priv;
28} 70}
29 71
30static bool lb_transmit(struct team *team, struct sk_buff *skb) 72struct lb_port_priv {
73 struct lb_stats __percpu *pcpu_stats;
74 struct lb_stats_info stats_info;
75};
76
77static struct lb_port_priv *get_lb_port_priv(struct team_port *port)
78{
79 return (struct lb_port_priv *) &port->mode_priv;
80}
81
82#define LB_HTPM_PORT_BY_HASH(lp_priv, hash) \
83 (lb_priv)->ex->tx_hash_to_port_mapping[hash].port
84
85#define LB_HTPM_OPT_INST_INFO_BY_HASH(lp_priv, hash) \
86 (lb_priv)->ex->tx_hash_to_port_mapping[hash].opt_inst_info
87
88static void lb_tx_hash_to_port_mapping_null_port(struct team *team,
89 struct team_port *port)
90{
91 struct lb_priv *lb_priv = get_lb_priv(team);
92 bool changed = false;
93 int i;
94
95 for (i = 0; i < LB_TX_HASHTABLE_SIZE; i++) {
96 struct lb_port_mapping *pm;
97
98 pm = &lb_priv->ex->tx_hash_to_port_mapping[i];
99 if (rcu_access_pointer(pm->port) == port) {
100 RCU_INIT_POINTER(pm->port, NULL);
101 team_option_inst_set_change(pm->opt_inst_info);
102 changed = true;
103 }
104 }
105 if (changed)
106 team_options_change_check(team);
107}
108
109/* Basic tx selection based solely by hash */
110static struct team_port *lb_hash_select_tx_port(struct team *team,
111 struct lb_priv *lb_priv,
112 struct sk_buff *skb,
113 unsigned char hash)
31{ 114{
32 struct sk_filter *fp;
33 struct team_port *port;
34 unsigned int hash;
35 int port_index; 115 int port_index;
36 116
37 fp = rcu_dereference(lb_priv(team)->fp);
38 if (unlikely(!fp))
39 goto drop;
40 hash = SK_RUN_FILTER(fp, skb);
41 port_index = hash % team->en_port_count; 117 port_index = hash % team->en_port_count;
42 port = team_get_port_by_index_rcu(team, port_index); 118 return team_get_port_by_index_rcu(team, port_index);
119}
120
121/* Hash to port mapping select tx port */
122static struct team_port *lb_htpm_select_tx_port(struct team *team,
123 struct lb_priv *lb_priv,
124 struct sk_buff *skb,
125 unsigned char hash)
126{
127 return rcu_dereference_bh(LB_HTPM_PORT_BY_HASH(lb_priv, hash));
128}
129
130struct lb_select_tx_port {
131 char *name;
132 lb_select_tx_port_func_t *func;
133};
134
135static const struct lb_select_tx_port lb_select_tx_port_list[] = {
136 {
137 .name = "hash",
138 .func = lb_hash_select_tx_port,
139 },
140 {
141 .name = "hash_to_port_mapping",
142 .func = lb_htpm_select_tx_port,
143 },
144};
145#define LB_SELECT_TX_PORT_LIST_COUNT ARRAY_SIZE(lb_select_tx_port_list)
146
147static char *lb_select_tx_port_get_name(lb_select_tx_port_func_t *func)
148{
149 int i;
150
151 for (i = 0; i < LB_SELECT_TX_PORT_LIST_COUNT; i++) {
152 const struct lb_select_tx_port *item;
153
154 item = &lb_select_tx_port_list[i];
155 if (item->func == func)
156 return item->name;
157 }
158 return NULL;
159}
160
161static lb_select_tx_port_func_t *lb_select_tx_port_get_func(const char *name)
162{
163 int i;
164
165 for (i = 0; i < LB_SELECT_TX_PORT_LIST_COUNT; i++) {
166 const struct lb_select_tx_port *item;
167
168 item = &lb_select_tx_port_list[i];
169 if (!strcmp(item->name, name))
170 return item->func;
171 }
172 return NULL;
173}
174
175static unsigned int lb_get_skb_hash(struct lb_priv *lb_priv,
176 struct sk_buff *skb)
177{
178 struct sk_filter *fp;
179 uint32_t lhash;
180 unsigned char *c;
181
182 fp = rcu_dereference_bh(lb_priv->fp);
183 if (unlikely(!fp))
184 return 0;
185 lhash = SK_RUN_FILTER(fp, skb);
186 c = (char *) &lhash;
187 return c[0] ^ c[1] ^ c[2] ^ c[3];
188}
189
190static void lb_update_tx_stats(unsigned int tx_bytes, struct lb_priv *lb_priv,
191 struct lb_port_priv *lb_port_priv,
192 unsigned char hash)
193{
194 struct lb_pcpu_stats *pcpu_stats;
195 struct lb_stats *port_stats;
196 struct lb_stats *hash_stats;
197
198 pcpu_stats = this_cpu_ptr(lb_priv->pcpu_stats);
199 port_stats = this_cpu_ptr(lb_port_priv->pcpu_stats);
200 hash_stats = &pcpu_stats->hash_stats[hash];
201 u64_stats_update_begin(&pcpu_stats->syncp);
202 port_stats->tx_bytes += tx_bytes;
203 hash_stats->tx_bytes += tx_bytes;
204 u64_stats_update_end(&pcpu_stats->syncp);
205}
206
207static bool lb_transmit(struct team *team, struct sk_buff *skb)
208{
209 struct lb_priv *lb_priv = get_lb_priv(team);
210 lb_select_tx_port_func_t *select_tx_port_func;
211 struct team_port *port;
212 unsigned char hash;
213 unsigned int tx_bytes = skb->len;
214
215 hash = lb_get_skb_hash(lb_priv, skb);
216 select_tx_port_func = rcu_dereference_bh(lb_priv->select_tx_port_func);
217 port = select_tx_port_func(team, lb_priv, skb, hash);
43 if (unlikely(!port)) 218 if (unlikely(!port))
44 goto drop; 219 goto drop;
45 skb->dev = port->dev; 220 skb->dev = port->dev;
46 if (dev_queue_xmit(skb)) 221 if (dev_queue_xmit(skb))
47 return false; 222 return false;
223 lb_update_tx_stats(tx_bytes, lb_priv, get_lb_port_priv(port), hash);
48 return true; 224 return true;
49 225
50drop: 226drop:
@@ -54,14 +230,16 @@ drop:
54 230
55static int lb_bpf_func_get(struct team *team, struct team_gsetter_ctx *ctx) 231static int lb_bpf_func_get(struct team *team, struct team_gsetter_ctx *ctx)
56{ 232{
57 if (!lb_priv(team)->orig_fprog) { 233 struct lb_priv *lb_priv = get_lb_priv(team);
234
235 if (!lb_priv->ex->orig_fprog) {
58 ctx->data.bin_val.len = 0; 236 ctx->data.bin_val.len = 0;
59 ctx->data.bin_val.ptr = NULL; 237 ctx->data.bin_val.ptr = NULL;
60 return 0; 238 return 0;
61 } 239 }
62 ctx->data.bin_val.len = lb_priv(team)->orig_fprog->len * 240 ctx->data.bin_val.len = lb_priv->ex->orig_fprog->len *
63 sizeof(struct sock_filter); 241 sizeof(struct sock_filter);
64 ctx->data.bin_val.ptr = lb_priv(team)->orig_fprog->filter; 242 ctx->data.bin_val.ptr = lb_priv->ex->orig_fprog->filter;
65 return 0; 243 return 0;
66} 244}
67 245
@@ -94,7 +272,9 @@ static void __fprog_destroy(struct sock_fprog *fprog)
94 272
95static int lb_bpf_func_set(struct team *team, struct team_gsetter_ctx *ctx) 273static int lb_bpf_func_set(struct team *team, struct team_gsetter_ctx *ctx)
96{ 274{
275 struct lb_priv *lb_priv = get_lb_priv(team);
97 struct sk_filter *fp = NULL; 276 struct sk_filter *fp = NULL;
277 struct sk_filter *orig_fp;
98 struct sock_fprog *fprog = NULL; 278 struct sock_fprog *fprog = NULL;
99 int err; 279 int err;
100 280
@@ -110,14 +290,238 @@ static int lb_bpf_func_set(struct team *team, struct team_gsetter_ctx *ctx)
110 } 290 }
111 } 291 }
112 292
113 if (lb_priv(team)->orig_fprog) { 293 if (lb_priv->ex->orig_fprog) {
114 /* Clear old filter data */ 294 /* Clear old filter data */
115 __fprog_destroy(lb_priv(team)->orig_fprog); 295 __fprog_destroy(lb_priv->ex->orig_fprog);
116 sk_unattached_filter_destroy(lb_priv(team)->fp); 296 orig_fp = rcu_dereference_protected(lb_priv->fp,
297 lockdep_is_held(&team->lock));
298 sk_unattached_filter_destroy(orig_fp);
117 } 299 }
118 300
119 rcu_assign_pointer(lb_priv(team)->fp, fp); 301 rcu_assign_pointer(lb_priv->fp, fp);
120 lb_priv(team)->orig_fprog = fprog; 302 lb_priv->ex->orig_fprog = fprog;
303 return 0;
304}
305
306static int lb_tx_method_get(struct team *team, struct team_gsetter_ctx *ctx)
307{
308 struct lb_priv *lb_priv = get_lb_priv(team);
309 lb_select_tx_port_func_t *func;
310 char *name;
311
312 func = rcu_dereference_protected(lb_priv->select_tx_port_func,
313 lockdep_is_held(&team->lock));
314 name = lb_select_tx_port_get_name(func);
315 BUG_ON(!name);
316 ctx->data.str_val = name;
317 return 0;
318}
319
320static int lb_tx_method_set(struct team *team, struct team_gsetter_ctx *ctx)
321{
322 struct lb_priv *lb_priv = get_lb_priv(team);
323 lb_select_tx_port_func_t *func;
324
325 func = lb_select_tx_port_get_func(ctx->data.str_val);
326 if (!func)
327 return -EINVAL;
328 rcu_assign_pointer(lb_priv->select_tx_port_func, func);
329 return 0;
330}
331
332static int lb_tx_hash_to_port_mapping_init(struct team *team,
333 struct team_option_inst_info *info)
334{
335 struct lb_priv *lb_priv = get_lb_priv(team);
336 unsigned char hash = info->array_index;
337
338 LB_HTPM_OPT_INST_INFO_BY_HASH(lb_priv, hash) = info;
339 return 0;
340}
341
342static int lb_tx_hash_to_port_mapping_get(struct team *team,
343 struct team_gsetter_ctx *ctx)
344{
345 struct lb_priv *lb_priv = get_lb_priv(team);
346 struct team_port *port;
347 unsigned char hash = ctx->info->array_index;
348
349 port = LB_HTPM_PORT_BY_HASH(lb_priv, hash);
350 ctx->data.u32_val = port ? port->dev->ifindex : 0;
351 return 0;
352}
353
354static int lb_tx_hash_to_port_mapping_set(struct team *team,
355 struct team_gsetter_ctx *ctx)
356{
357 struct lb_priv *lb_priv = get_lb_priv(team);
358 struct team_port *port;
359 unsigned char hash = ctx->info->array_index;
360
361 list_for_each_entry(port, &team->port_list, list) {
362 if (ctx->data.u32_val == port->dev->ifindex &&
363 team_port_enabled(port)) {
364 rcu_assign_pointer(LB_HTPM_PORT_BY_HASH(lb_priv, hash),
365 port);
366 return 0;
367 }
368 }
369 return -ENODEV;
370}
371
372static int lb_hash_stats_init(struct team *team,
373 struct team_option_inst_info *info)
374{
375 struct lb_priv *lb_priv = get_lb_priv(team);
376 unsigned char hash = info->array_index;
377
378 lb_priv->ex->stats.info[hash].opt_inst_info = info;
379 return 0;
380}
381
382static int lb_hash_stats_get(struct team *team, struct team_gsetter_ctx *ctx)
383{
384 struct lb_priv *lb_priv = get_lb_priv(team);
385 unsigned char hash = ctx->info->array_index;
386
387 ctx->data.bin_val.ptr = &lb_priv->ex->stats.info[hash].stats;
388 ctx->data.bin_val.len = sizeof(struct lb_stats);
389 return 0;
390}
391
392static int lb_port_stats_init(struct team *team,
393 struct team_option_inst_info *info)
394{
395 struct team_port *port = info->port;
396 struct lb_port_priv *lb_port_priv = get_lb_port_priv(port);
397
398 lb_port_priv->stats_info.opt_inst_info = info;
399 return 0;
400}
401
402static int lb_port_stats_get(struct team *team, struct team_gsetter_ctx *ctx)
403{
404 struct team_port *port = ctx->info->port;
405 struct lb_port_priv *lb_port_priv = get_lb_port_priv(port);
406
407 ctx->data.bin_val.ptr = &lb_port_priv->stats_info.stats;
408 ctx->data.bin_val.len = sizeof(struct lb_stats);
409 return 0;
410}
411
412static void __lb_stats_info_refresh_prepare(struct lb_stats_info *s_info)
413{
414 memcpy(&s_info->last_stats, &s_info->stats, sizeof(struct lb_stats));
415 memset(&s_info->stats, 0, sizeof(struct lb_stats));
416}
417
418static bool __lb_stats_info_refresh_check(struct lb_stats_info *s_info,
419 struct team *team)
420{
421 if (memcmp(&s_info->last_stats, &s_info->stats,
422 sizeof(struct lb_stats))) {
423 team_option_inst_set_change(s_info->opt_inst_info);
424 return true;
425 }
426 return false;
427}
428
429static void __lb_one_cpu_stats_add(struct lb_stats *acc_stats,
430 struct lb_stats *cpu_stats,
431 struct u64_stats_sync *syncp)
432{
433 unsigned int start;
434 struct lb_stats tmp;
435
436 do {
437 start = u64_stats_fetch_begin_bh(syncp);
438 tmp.tx_bytes = cpu_stats->tx_bytes;
439 } while (u64_stats_fetch_retry_bh(syncp, start));
440 acc_stats->tx_bytes += tmp.tx_bytes;
441}
442
443static void lb_stats_refresh(struct work_struct *work)
444{
445 struct team *team;
446 struct lb_priv *lb_priv;
447 struct lb_priv_ex *lb_priv_ex;
448 struct lb_pcpu_stats *pcpu_stats;
449 struct lb_stats *stats;
450 struct lb_stats_info *s_info;
451 struct team_port *port;
452 bool changed = false;
453 int i;
454 int j;
455
456 lb_priv_ex = container_of(work, struct lb_priv_ex,
457 stats.refresh_dw.work);
458
459 team = lb_priv_ex->team;
460 lb_priv = get_lb_priv(team);
461
462 if (!mutex_trylock(&team->lock)) {
463 schedule_delayed_work(&lb_priv_ex->stats.refresh_dw, 0);
464 return;
465 }
466
467 for (j = 0; j < LB_TX_HASHTABLE_SIZE; j++) {
468 s_info = &lb_priv->ex->stats.info[j];
469 __lb_stats_info_refresh_prepare(s_info);
470 for_each_possible_cpu(i) {
471 pcpu_stats = per_cpu_ptr(lb_priv->pcpu_stats, i);
472 stats = &pcpu_stats->hash_stats[j];
473 __lb_one_cpu_stats_add(&s_info->stats, stats,
474 &pcpu_stats->syncp);
475 }
476 changed |= __lb_stats_info_refresh_check(s_info, team);
477 }
478
479 list_for_each_entry(port, &team->port_list, list) {
480 struct lb_port_priv *lb_port_priv = get_lb_port_priv(port);
481
482 s_info = &lb_port_priv->stats_info;
483 __lb_stats_info_refresh_prepare(s_info);
484 for_each_possible_cpu(i) {
485 pcpu_stats = per_cpu_ptr(lb_priv->pcpu_stats, i);
486 stats = per_cpu_ptr(lb_port_priv->pcpu_stats, i);
487 __lb_one_cpu_stats_add(&s_info->stats, stats,
488 &pcpu_stats->syncp);
489 }
490 changed |= __lb_stats_info_refresh_check(s_info, team);
491 }
492
493 if (changed)
494 team_options_change_check(team);
495
496 schedule_delayed_work(&lb_priv_ex->stats.refresh_dw,
497 (lb_priv_ex->stats.refresh_interval * HZ) / 10);
498
499 mutex_unlock(&team->lock);
500}
501
502static int lb_stats_refresh_interval_get(struct team *team,
503 struct team_gsetter_ctx *ctx)
504{
505 struct lb_priv *lb_priv = get_lb_priv(team);
506
507 ctx->data.u32_val = lb_priv->ex->stats.refresh_interval;
508 return 0;
509}
510
511static int lb_stats_refresh_interval_set(struct team *team,
512 struct team_gsetter_ctx *ctx)
513{
514 struct lb_priv *lb_priv = get_lb_priv(team);
515 unsigned int interval;
516
517 interval = ctx->data.u32_val;
518 if (lb_priv->ex->stats.refresh_interval == interval)
519 return 0;
520 lb_priv->ex->stats.refresh_interval = interval;
521 if (interval)
522 schedule_delayed_work(&lb_priv->ex->stats.refresh_dw, 0);
523 else
524 cancel_delayed_work(&lb_priv->ex->stats.refresh_dw);
121 return 0; 525 return 0;
122} 526}
123 527
@@ -128,30 +532,125 @@ static const struct team_option lb_options[] = {
128 .getter = lb_bpf_func_get, 532 .getter = lb_bpf_func_get,
129 .setter = lb_bpf_func_set, 533 .setter = lb_bpf_func_set,
130 }, 534 },
535 {
536 .name = "lb_tx_method",
537 .type = TEAM_OPTION_TYPE_STRING,
538 .getter = lb_tx_method_get,
539 .setter = lb_tx_method_set,
540 },
541 {
542 .name = "lb_tx_hash_to_port_mapping",
543 .array_size = LB_TX_HASHTABLE_SIZE,
544 .type = TEAM_OPTION_TYPE_U32,
545 .init = lb_tx_hash_to_port_mapping_init,
546 .getter = lb_tx_hash_to_port_mapping_get,
547 .setter = lb_tx_hash_to_port_mapping_set,
548 },
549 {
550 .name = "lb_hash_stats",
551 .array_size = LB_TX_HASHTABLE_SIZE,
552 .type = TEAM_OPTION_TYPE_BINARY,
553 .init = lb_hash_stats_init,
554 .getter = lb_hash_stats_get,
555 },
556 {
557 .name = "lb_port_stats",
558 .per_port = true,
559 .type = TEAM_OPTION_TYPE_BINARY,
560 .init = lb_port_stats_init,
561 .getter = lb_port_stats_get,
562 },
563 {
564 .name = "lb_stats_refresh_interval",
565 .type = TEAM_OPTION_TYPE_U32,
566 .getter = lb_stats_refresh_interval_get,
567 .setter = lb_stats_refresh_interval_set,
568 },
131}; 569};
132 570
133static int lb_init(struct team *team) 571static int lb_init(struct team *team)
134{ 572{
135 return team_options_register(team, lb_options, 573 struct lb_priv *lb_priv = get_lb_priv(team);
136 ARRAY_SIZE(lb_options)); 574 lb_select_tx_port_func_t *func;
575 int err;
576
577 /* set default tx port selector */
578 func = lb_select_tx_port_get_func("hash");
579 BUG_ON(!func);
580 rcu_assign_pointer(lb_priv->select_tx_port_func, func);
581
582 lb_priv->ex = kzalloc(sizeof(*lb_priv->ex), GFP_KERNEL);
583 if (!lb_priv->ex)
584 return -ENOMEM;
585 lb_priv->ex->team = team;
586
587 lb_priv->pcpu_stats = alloc_percpu(struct lb_pcpu_stats);
588 if (!lb_priv->pcpu_stats) {
589 err = -ENOMEM;
590 goto err_alloc_pcpu_stats;
591 }
592
593 INIT_DELAYED_WORK(&lb_priv->ex->stats.refresh_dw, lb_stats_refresh);
594
595 err = team_options_register(team, lb_options, ARRAY_SIZE(lb_options));
596 if (err)
597 goto err_options_register;
598 return 0;
599
600err_options_register:
601 free_percpu(lb_priv->pcpu_stats);
602err_alloc_pcpu_stats:
603 kfree(lb_priv->ex);
604 return err;
137} 605}
138 606
139static void lb_exit(struct team *team) 607static void lb_exit(struct team *team)
140{ 608{
609 struct lb_priv *lb_priv = get_lb_priv(team);
610
141 team_options_unregister(team, lb_options, 611 team_options_unregister(team, lb_options,
142 ARRAY_SIZE(lb_options)); 612 ARRAY_SIZE(lb_options));
613 cancel_delayed_work_sync(&lb_priv->ex->stats.refresh_dw);
614 free_percpu(lb_priv->pcpu_stats);
615 kfree(lb_priv->ex);
616}
617
618static int lb_port_enter(struct team *team, struct team_port *port)
619{
620 struct lb_port_priv *lb_port_priv = get_lb_port_priv(port);
621
622 lb_port_priv->pcpu_stats = alloc_percpu(struct lb_stats);
623 if (!lb_port_priv->pcpu_stats)
624 return -ENOMEM;
625 return 0;
626}
627
628static void lb_port_leave(struct team *team, struct team_port *port)
629{
630 struct lb_port_priv *lb_port_priv = get_lb_port_priv(port);
631
632 free_percpu(lb_port_priv->pcpu_stats);
633}
634
635static void lb_port_disabled(struct team *team, struct team_port *port)
636{
637 lb_tx_hash_to_port_mapping_null_port(team, port);
143} 638}
144 639
145static const struct team_mode_ops lb_mode_ops = { 640static const struct team_mode_ops lb_mode_ops = {
146 .init = lb_init, 641 .init = lb_init,
147 .exit = lb_exit, 642 .exit = lb_exit,
643 .port_enter = lb_port_enter,
644 .port_leave = lb_port_leave,
645 .port_disabled = lb_port_disabled,
148 .transmit = lb_transmit, 646 .transmit = lb_transmit,
149}; 647};
150 648
151static struct team_mode lb_mode = { 649static const struct team_mode lb_mode = {
152 .kind = "loadbalance", 650 .kind = "loadbalance",
153 .owner = THIS_MODULE, 651 .owner = THIS_MODULE,
154 .priv_size = sizeof(struct lb_priv), 652 .priv_size = sizeof(struct lb_priv),
653 .port_priv_size = sizeof(struct lb_port_priv),
155 .ops = &lb_mode_ops, 654 .ops = &lb_mode_ops,
156}; 655};
157 656
diff --git a/drivers/net/team/team_mode_roundrobin.c b/drivers/net/team/team_mode_roundrobin.c
index 6abfbdc96be..52dd0ec9cd1 100644
--- a/drivers/net/team/team_mode_roundrobin.c
+++ b/drivers/net/team/team_mode_roundrobin.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * net/drivers/team/team_mode_roundrobin.c - Round-robin mode for team 2 * drivers/net/team/team_mode_roundrobin.c - Round-robin mode for team
3 * Copyright (c) 2011 Jiri Pirko <jpirko@redhat.com> 3 * Copyright (c) 2011 Jiri Pirko <jpirko@redhat.com>
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
@@ -81,7 +81,7 @@ static const struct team_mode_ops rr_mode_ops = {
81 .port_change_mac = rr_port_change_mac, 81 .port_change_mac = rr_port_change_mac,
82}; 82};
83 83
84static struct team_mode rr_mode = { 84static const struct team_mode rr_mode = {
85 .kind = "roundrobin", 85 .kind = "roundrobin",
86 .owner = THIS_MODULE, 86 .owner = THIS_MODULE,
87 .priv_size = sizeof(struct rr_priv), 87 .priv_size = sizeof(struct rr_priv),
diff --git a/drivers/net/usb/asix.c b/drivers/net/usb/asix.c
index 3ae80eccd0e..6564c32d3af 100644
--- a/drivers/net/usb/asix.c
+++ b/drivers/net/usb/asix.c
@@ -358,14 +358,30 @@ static struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
358 358
359 padlen = ((skb->len + 4) & (dev->maxpacket - 1)) ? 0 : 4; 359 padlen = ((skb->len + 4) & (dev->maxpacket - 1)) ? 0 : 4;
360 360
361 if ((!skb_cloned(skb)) && 361 /* We need to push 4 bytes in front of frame (packet_len)
362 ((headroom + tailroom) >= (4 + padlen))) { 362 * and maybe add 4 bytes after the end (if padlen is 4)
363 if ((headroom < 4) || (tailroom < padlen)) { 363 *
364 * Avoid skb_copy_expand() expensive call, using following rules :
365 * - We are allowed to push 4 bytes in headroom if skb_header_cloned()
366 * is false (and if we have 4 bytes of headroom)
367 * - We are allowed to put 4 bytes at tail if skb_cloned()
368 * is false (and if we have 4 bytes of tailroom)
369 *
370 * TCP packets for example are cloned, but skb_header_release()
371 * was called in tcp stack, allowing us to use headroom for our needs.
372 */
373 if (!skb_header_cloned(skb) &&
374 !(padlen && skb_cloned(skb)) &&
375 headroom + tailroom >= 4 + padlen) {
376 /* following should not happen, but better be safe */
377 if (headroom < 4 ||
378 tailroom < padlen) {
364 skb->data = memmove(skb->head + 4, skb->data, skb->len); 379 skb->data = memmove(skb->head + 4, skb->data, skb->len);
365 skb_set_tail_pointer(skb, skb->len); 380 skb_set_tail_pointer(skb, skb->len);
366 } 381 }
367 } else { 382 } else {
368 struct sk_buff *skb2; 383 struct sk_buff *skb2;
384
369 skb2 = skb_copy_expand(skb, 4, padlen, flags); 385 skb2 = skb_copy_expand(skb, 4, padlen, flags);
370 dev_kfree_skb_any(skb); 386 dev_kfree_skb_any(skb);
371 skb = skb2; 387 skb = skb2;
@@ -373,8 +389,8 @@ static struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
373 return NULL; 389 return NULL;
374 } 390 }
375 391
392 packet_len = ((skb->len ^ 0x0000ffff) << 16) + skb->len;
376 skb_push(skb, 4); 393 skb_push(skb, 4);
377 packet_len = (((skb->len - 4) ^ 0x0000ffff) << 16) + (skb->len - 4);
378 cpu_to_le32s(&packet_len); 394 cpu_to_le32s(&packet_len);
379 skb_copy_to_linear_data(skb, &packet_len, sizeof(packet_len)); 395 skb_copy_to_linear_data(skb, &packet_len, sizeof(packet_len));
380 396
@@ -880,6 +896,8 @@ static int ax88172_bind(struct usbnet *dev, struct usb_interface *intf)
880 896
881 dev->net->netdev_ops = &ax88172_netdev_ops; 897 dev->net->netdev_ops = &ax88172_netdev_ops;
882 dev->net->ethtool_ops = &ax88172_ethtool_ops; 898 dev->net->ethtool_ops = &ax88172_ethtool_ops;
899 dev->net->needed_headroom = 4; /* cf asix_tx_fixup() */
900 dev->net->needed_tailroom = 4; /* cf asix_tx_fixup() */
883 901
884 asix_mdio_write(dev->net, dev->mii.phy_id, MII_BMCR, BMCR_RESET); 902 asix_mdio_write(dev->net, dev->mii.phy_id, MII_BMCR, BMCR_RESET);
885 asix_mdio_write(dev->net, dev->mii.phy_id, MII_ADVERTISE, 903 asix_mdio_write(dev->net, dev->mii.phy_id, MII_ADVERTISE,
@@ -1075,6 +1093,8 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
1075 1093
1076 dev->net->netdev_ops = &ax88772_netdev_ops; 1094 dev->net->netdev_ops = &ax88772_netdev_ops;
1077 dev->net->ethtool_ops = &ax88772_ethtool_ops; 1095 dev->net->ethtool_ops = &ax88772_ethtool_ops;
1096 dev->net->needed_headroom = 4; /* cf asix_tx_fixup() */
1097 dev->net->needed_tailroom = 4; /* cf asix_tx_fixup() */
1078 1098
1079 embd_phy = ((dev->mii.phy_id & 0x1f) == 0x10 ? 1 : 0); 1099 embd_phy = ((dev->mii.phy_id & 0x1f) == 0x10 ? 1 : 0);
1080 1100
diff --git a/drivers/net/usb/cdc-phonet.c b/drivers/net/usb/cdc-phonet.c
index d848d4dd575..187c144c5e5 100644
--- a/drivers/net/usb/cdc-phonet.c
+++ b/drivers/net/usb/cdc-phonet.c
@@ -394,7 +394,7 @@ int usbpn_probe(struct usb_interface *intf, const struct usb_device_id *id)
394 SET_NETDEV_DEV(dev, &intf->dev); 394 SET_NETDEV_DEV(dev, &intf->dev);
395 395
396 pnd->dev = dev; 396 pnd->dev = dev;
397 pnd->usb = usb_get_dev(usbdev); 397 pnd->usb = usbdev;
398 pnd->intf = intf; 398 pnd->intf = intf;
399 pnd->data_intf = data_intf; 399 pnd->data_intf = data_intf;
400 spin_lock_init(&pnd->tx_lock); 400 spin_lock_init(&pnd->tx_lock);
@@ -440,7 +440,6 @@ out:
440static void usbpn_disconnect(struct usb_interface *intf) 440static void usbpn_disconnect(struct usb_interface *intf)
441{ 441{
442 struct usbpn_dev *pnd = usb_get_intfdata(intf); 442 struct usbpn_dev *pnd = usb_get_intfdata(intf);
443 struct usb_device *usb = pnd->usb;
444 443
445 if (pnd->disconnected) 444 if (pnd->disconnected)
446 return; 445 return;
@@ -449,7 +448,6 @@ static void usbpn_disconnect(struct usb_interface *intf)
449 usb_driver_release_interface(&usbpn_driver, 448 usb_driver_release_interface(&usbpn_driver,
450 (pnd->intf == intf) ? pnd->data_intf : pnd->intf); 449 (pnd->intf == intf) ? pnd->data_intf : pnd->intf);
451 unregister_netdev(pnd->dev); 450 unregister_netdev(pnd->dev);
452 usb_put_dev(usb);
453} 451}
454 452
455static struct usb_driver usbpn_driver = { 453static struct usb_driver usbpn_driver = {
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
index 7023220456c..a0b5807b30d 100644
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -1329,8 +1329,6 @@ static int pegasus_probe(struct usb_interface *intf,
1329 } 1329 }
1330 pegasus_count++; 1330 pegasus_count++;
1331 1331
1332 usb_get_dev(dev);
1333
1334 net = alloc_etherdev(sizeof(struct pegasus)); 1332 net = alloc_etherdev(sizeof(struct pegasus));
1335 if (!net) 1333 if (!net)
1336 goto out; 1334 goto out;
@@ -1407,7 +1405,6 @@ out2:
1407out1: 1405out1:
1408 free_netdev(net); 1406 free_netdev(net);
1409out: 1407out:
1410 usb_put_dev(dev);
1411 pegasus_dec_workqueue(); 1408 pegasus_dec_workqueue();
1412 return res; 1409 return res;
1413} 1410}
@@ -1425,7 +1422,6 @@ static void pegasus_disconnect(struct usb_interface *intf)
1425 pegasus->flags |= PEGASUS_UNPLUG; 1422 pegasus->flags |= PEGASUS_UNPLUG;
1426 cancel_delayed_work(&pegasus->carrier_check); 1423 cancel_delayed_work(&pegasus->carrier_check);
1427 unregister_netdev(pegasus->net); 1424 unregister_netdev(pegasus->net);
1428 usb_put_dev(interface_to_usbdev(intf));
1429 unlink_all_urbs(pegasus); 1425 unlink_all_urbs(pegasus);
1430 free_all_urbs(pegasus); 1426 free_all_urbs(pegasus);
1431 free_skb_pool(pegasus); 1427 free_skb_pool(pegasus);
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index a051cedd64b..85c983d5252 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1,6 +1,10 @@
1/* 1/*
2 * Copyright (c) 2012 Bjørn Mork <bjorn@mork.no> 2 * Copyright (c) 2012 Bjørn Mork <bjorn@mork.no>
3 * 3 *
4 * The probing code is heavily inspired by cdc_ether, which is:
5 * Copyright (C) 2003-2005 by David Brownell
6 * Copyright (C) 2006 by Ole Andre Vadla Ravnas (ActiveSync)
7 *
4 * This program is free software; you can redistribute it and/or 8 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License 9 * modify it under the terms of the GNU General Public License
6 * version 2 as published by the Free Software Foundation. 10 * version 2 as published by the Free Software Foundation.
@@ -15,11 +19,7 @@
15#include <linux/usb/usbnet.h> 19#include <linux/usb/usbnet.h>
16#include <linux/usb/cdc-wdm.h> 20#include <linux/usb/cdc-wdm.h>
17 21
18/* The name of the CDC Device Management driver */ 22/* This driver supports wwan (3G/LTE/?) devices using a vendor
19#define DM_DRIVER "cdc_wdm"
20
21/*
22 * This driver supports wwan (3G/LTE/?) devices using a vendor
23 * specific management protocol called Qualcomm MSM Interface (QMI) - 23 * specific management protocol called Qualcomm MSM Interface (QMI) -
24 * in addition to the more common AT commands over serial interface 24 * in addition to the more common AT commands over serial interface
25 * management 25 * management
@@ -31,59 +31,117 @@
31 * management protocol is used in place of the standard CDC 31 * management protocol is used in place of the standard CDC
32 * notifications NOTIFY_NETWORK_CONNECTION and NOTIFY_SPEED_CHANGE 32 * notifications NOTIFY_NETWORK_CONNECTION and NOTIFY_SPEED_CHANGE
33 * 33 *
34 * Alternatively, control and data functions can be combined in a
35 * single USB interface.
36 *
34 * Handling a protocol like QMI is out of the scope for any driver. 37 * Handling a protocol like QMI is out of the scope for any driver.
35 * It can be exported as a character device using the cdc-wdm driver, 38 * It is exported as a character device using the cdc-wdm driver as
36 * which will enable userspace applications ("modem managers") to 39 * a subdriver, enabling userspace applications ("modem managers") to
37 * handle it. This may be required to use the network interface 40 * handle it.
38 * provided by the driver.
39 * 41 *
40 * These devices may alternatively/additionally be configured using AT 42 * These devices may alternatively/additionally be configured using AT
41 * commands on any of the serial interfaces driven by the option driver 43 * commands on a serial interface
42 *
43 * This driver binds only to the data ("slave") interface to enable
44 * the cdc-wdm driver to bind to the control interface. It still
45 * parses the CDC functional descriptors on the control interface to
46 * a) verify that this is indeed a handled interface (CDC Union
47 * header lists it as slave)
48 * b) get MAC address and other ethernet config from the CDC Ethernet
49 * header
50 * c) enable user bind requests against the control interface, which
51 * is the common way to bind to CDC Ethernet Control Model type
52 * interfaces
53 * d) provide a hint to the user about which interface is the
54 * corresponding management interface
55 */ 44 */
56 45
46/* driver specific data */
47struct qmi_wwan_state {
48 struct usb_driver *subdriver;
49 atomic_t pmcount;
50 unsigned long unused;
51 struct usb_interface *control;
52 struct usb_interface *data;
53};
54
55/* using a counter to merge subdriver requests with our own into a combined state */
56static int qmi_wwan_manage_power(struct usbnet *dev, int on)
57{
58 struct qmi_wwan_state *info = (void *)&dev->data;
59 int rv = 0;
60
61 dev_dbg(&dev->intf->dev, "%s() pmcount=%d, on=%d\n", __func__, atomic_read(&info->pmcount), on);
62
63 if ((on && atomic_add_return(1, &info->pmcount) == 1) || (!on && atomic_dec_and_test(&info->pmcount))) {
64 /* need autopm_get/put here to ensure the usbcore sees the new value */
65 rv = usb_autopm_get_interface(dev->intf);
66 if (rv < 0)
67 goto err;
68 dev->intf->needs_remote_wakeup = on;
69 usb_autopm_put_interface(dev->intf);
70 }
71err:
72 return rv;
73}
74
75static int qmi_wwan_cdc_wdm_manage_power(struct usb_interface *intf, int on)
76{
77 struct usbnet *dev = usb_get_intfdata(intf);
78
79 /* can be called while disconnecting */
80 if (!dev)
81 return 0;
82 return qmi_wwan_manage_power(dev, on);
83}
84
85/* collect all three endpoints and register subdriver */
86static int qmi_wwan_register_subdriver(struct usbnet *dev)
87{
88 int rv;
89 struct usb_driver *subdriver = NULL;
90 struct qmi_wwan_state *info = (void *)&dev->data;
91
92 /* collect bulk endpoints */
93 rv = usbnet_get_endpoints(dev, info->data);
94 if (rv < 0)
95 goto err;
96
97 /* update status endpoint if separate control interface */
98 if (info->control != info->data)
99 dev->status = &info->control->cur_altsetting->endpoint[0];
100
101 /* require interrupt endpoint for subdriver */
102 if (!dev->status) {
103 rv = -EINVAL;
104 goto err;
105 }
106
107 /* for subdriver power management */
108 atomic_set(&info->pmcount, 0);
109
110 /* register subdriver */
111 subdriver = usb_cdc_wdm_register(info->control, &dev->status->desc, 512, &qmi_wwan_cdc_wdm_manage_power);
112 if (IS_ERR(subdriver)) {
113 dev_err(&info->control->dev, "subdriver registration failed\n");
114 rv = PTR_ERR(subdriver);
115 goto err;
116 }
117
118 /* prevent usbnet from using status endpoint */
119 dev->status = NULL;
120
121 /* save subdriver struct for suspend/resume wrappers */
122 info->subdriver = subdriver;
123
124err:
125 return rv;
126}
127
57static int qmi_wwan_bind(struct usbnet *dev, struct usb_interface *intf) 128static int qmi_wwan_bind(struct usbnet *dev, struct usb_interface *intf)
58{ 129{
59 int status = -1; 130 int status = -1;
60 struct usb_interface *control = NULL;
61 u8 *buf = intf->cur_altsetting->extra; 131 u8 *buf = intf->cur_altsetting->extra;
62 int len = intf->cur_altsetting->extralen; 132 int len = intf->cur_altsetting->extralen;
63 struct usb_interface_descriptor *desc = &intf->cur_altsetting->desc; 133 struct usb_interface_descriptor *desc = &intf->cur_altsetting->desc;
64 struct usb_cdc_union_desc *cdc_union = NULL; 134 struct usb_cdc_union_desc *cdc_union = NULL;
65 struct usb_cdc_ether_desc *cdc_ether = NULL; 135 struct usb_cdc_ether_desc *cdc_ether = NULL;
66 u32 required = 1 << USB_CDC_HEADER_TYPE | 1 << USB_CDC_UNION_TYPE;
67 u32 found = 0; 136 u32 found = 0;
68 atomic_t *pmcount = (void *)&dev->data[1]; 137 struct usb_driver *driver = driver_of(intf);
69 138 struct qmi_wwan_state *info = (void *)&dev->data;
70 atomic_set(pmcount, 0);
71 139
72 /* 140 BUILD_BUG_ON((sizeof(((struct usbnet *)0)->data) < sizeof(struct qmi_wwan_state)));
73 * assume a data interface has no additional descriptors and
74 * that the control and data interface are numbered
75 * consecutively - this holds for the Huawei device at least
76 */
77 if (len == 0 && desc->bInterfaceNumber > 0) {
78 control = usb_ifnum_to_if(dev->udev, desc->bInterfaceNumber - 1);
79 if (!control)
80 goto err;
81 141
82 buf = control->cur_altsetting->extra; 142 /* require a single interrupt status endpoint for subdriver */
83 len = control->cur_altsetting->extralen; 143 if (intf->cur_altsetting->desc.bNumEndpoints != 1)
84 dev_dbg(&intf->dev, "guessing \"control\" => %s, \"data\" => this\n", 144 goto err;
85 dev_name(&control->dev));
86 }
87 145
88 while (len > 3) { 146 while (len > 3) {
89 struct usb_descriptor_header *h = (void *)buf; 147 struct usb_descriptor_header *h = (void *)buf;
@@ -142,15 +200,23 @@ next_desc:
142 } 200 }
143 201
144 /* did we find all the required ones? */ 202 /* did we find all the required ones? */
145 if ((found & required) != required) { 203 if (!(found & (1 << USB_CDC_HEADER_TYPE)) ||
204 !(found & (1 << USB_CDC_UNION_TYPE))) {
146 dev_err(&intf->dev, "CDC functional descriptors missing\n"); 205 dev_err(&intf->dev, "CDC functional descriptors missing\n");
147 goto err; 206 goto err;
148 } 207 }
149 208
150 /* give the user a helpful hint if trying to bind to the wrong interface */ 209 /* verify CDC Union */
151 if (cdc_union && desc->bInterfaceNumber == cdc_union->bMasterInterface0) { 210 if (desc->bInterfaceNumber != cdc_union->bMasterInterface0) {
152 dev_err(&intf->dev, "leaving \"control\" interface for " DM_DRIVER " - try binding to %s instead!\n", 211 dev_err(&intf->dev, "bogus CDC Union: master=%u\n", cdc_union->bMasterInterface0);
153 dev_name(&usb_ifnum_to_if(dev->udev, cdc_union->bSlaveInterface0)->dev)); 212 goto err;
213 }
214
215 /* need to save these for unbind */
216 info->control = intf;
217 info->data = usb_ifnum_to_if(dev->udev, cdc_union->bSlaveInterface0);
218 if (!info->data) {
219 dev_err(&intf->dev, "bogus CDC Union: slave=%u\n", cdc_union->bSlaveInterface0);
154 goto err; 220 goto err;
155 } 221 }
156 222
@@ -160,63 +226,29 @@ next_desc:
160 usbnet_get_ethernet_addr(dev, cdc_ether->iMACAddress); 226 usbnet_get_ethernet_addr(dev, cdc_ether->iMACAddress);
161 } 227 }
162 228
163 /* success! point the user to the management interface */ 229 /* claim data interface and set it up */
164 if (control) 230 status = usb_driver_claim_interface(driver, info->data, dev);
165 dev_info(&intf->dev, "Use \"" DM_DRIVER "\" for QMI interface %s\n", 231 if (status < 0)
166 dev_name(&control->dev)); 232 goto err;
167
168 /* XXX: add a sysfs symlink somewhere to help management applications find it? */
169 233
170 /* collect bulk endpoints now that we know intf == "data" interface */ 234 status = qmi_wwan_register_subdriver(dev);
171 status = usbnet_get_endpoints(dev, intf); 235 if (status < 0) {
236 usb_set_intfdata(info->data, NULL);
237 usb_driver_release_interface(driver, info->data);
238 }
172 239
173err: 240err:
174 return status; 241 return status;
175} 242}
176 243
177/* using a counter to merge subdriver requests with our own into a combined state */
178static int qmi_wwan_manage_power(struct usbnet *dev, int on)
179{
180 atomic_t *pmcount = (void *)&dev->data[1];
181 int rv = 0;
182
183 dev_dbg(&dev->intf->dev, "%s() pmcount=%d, on=%d\n", __func__, atomic_read(pmcount), on);
184
185 if ((on && atomic_add_return(1, pmcount) == 1) || (!on && atomic_dec_and_test(pmcount))) {
186 /* need autopm_get/put here to ensure the usbcore sees the new value */
187 rv = usb_autopm_get_interface(dev->intf);
188 if (rv < 0)
189 goto err;
190 dev->intf->needs_remote_wakeup = on;
191 usb_autopm_put_interface(dev->intf);
192 }
193err:
194 return rv;
195}
196
197static int qmi_wwan_cdc_wdm_manage_power(struct usb_interface *intf, int on)
198{
199 struct usbnet *dev = usb_get_intfdata(intf);
200
201 /* can be called while disconnecting */
202 if (!dev)
203 return 0;
204 return qmi_wwan_manage_power(dev, on);
205}
206
207/* Some devices combine the "control" and "data" functions into a 244/* Some devices combine the "control" and "data" functions into a
208 * single interface with all three endpoints: interrupt + bulk in and 245 * single interface with all three endpoints: interrupt + bulk in and
209 * out 246 * out
210 * 247 */
211 * Setting up cdc-wdm as a subdriver owning the interrupt endpoint
212 * will let it provide userspace access to the encapsulated QMI
213 * protocol without interfering with the usbnet operations.
214 */
215static int qmi_wwan_bind_shared(struct usbnet *dev, struct usb_interface *intf) 248static int qmi_wwan_bind_shared(struct usbnet *dev, struct usb_interface *intf)
216{ 249{
217 int rv; 250 int rv;
218 struct usb_driver *subdriver = NULL; 251 struct qmi_wwan_state *info = (void *)&dev->data;
219 atomic_t *pmcount = (void *)&dev->data[1];
220 252
221 /* ZTE makes devices where the interface descriptors and endpoint 253 /* ZTE makes devices where the interface descriptors and endpoint
222 * configurations of two or more interfaces are identical, even 254 * configurations of two or more interfaces are identical, even
@@ -232,43 +264,39 @@ static int qmi_wwan_bind_shared(struct usbnet *dev, struct usb_interface *intf)
232 goto err; 264 goto err;
233 } 265 }
234 266
235 atomic_set(pmcount, 0); 267 /* control and data is shared */
236 268 info->control = intf;
237 /* collect all three endpoints */ 269 info->data = intf;
238 rv = usbnet_get_endpoints(dev, intf); 270 rv = qmi_wwan_register_subdriver(dev);
239 if (rv < 0)
240 goto err;
241
242 /* require interrupt endpoint for subdriver */
243 if (!dev->status) {
244 rv = -EINVAL;
245 goto err;
246 }
247
248 subdriver = usb_cdc_wdm_register(intf, &dev->status->desc, 512, &qmi_wwan_cdc_wdm_manage_power);
249 if (IS_ERR(subdriver)) {
250 rv = PTR_ERR(subdriver);
251 goto err;
252 }
253
254 /* can't let usbnet use the interrupt endpoint */
255 dev->status = NULL;
256
257 /* save subdriver struct for suspend/resume wrappers */
258 dev->data[0] = (unsigned long)subdriver;
259 271
260err: 272err:
261 return rv; 273 return rv;
262} 274}
263 275
264static void qmi_wwan_unbind_shared(struct usbnet *dev, struct usb_interface *intf) 276static void qmi_wwan_unbind(struct usbnet *dev, struct usb_interface *intf)
265{ 277{
266 struct usb_driver *subdriver = (void *)dev->data[0]; 278 struct qmi_wwan_state *info = (void *)&dev->data;
267 279 struct usb_driver *driver = driver_of(intf);
268 if (subdriver && subdriver->disconnect) 280 struct usb_interface *other;
269 subdriver->disconnect(intf); 281
282 if (info->subdriver && info->subdriver->disconnect)
283 info->subdriver->disconnect(info->control);
284
285 /* allow user to unbind using either control or data */
286 if (intf == info->control)
287 other = info->data;
288 else
289 other = info->control;
290
291 /* only if not shared */
292 if (other && intf != other) {
293 usb_set_intfdata(other, NULL);
294 usb_driver_release_interface(driver, other);
295 }
270 296
271 dev->data[0] = (unsigned long)NULL; 297 info->subdriver = NULL;
298 info->data = NULL;
299 info->control = NULL;
272} 300}
273 301
274/* suspend/resume wrappers calling both usbnet and the cdc-wdm 302/* suspend/resume wrappers calling both usbnet and the cdc-wdm
@@ -280,15 +308,15 @@ static void qmi_wwan_unbind_shared(struct usbnet *dev, struct usb_interface *int
280static int qmi_wwan_suspend(struct usb_interface *intf, pm_message_t message) 308static int qmi_wwan_suspend(struct usb_interface *intf, pm_message_t message)
281{ 309{
282 struct usbnet *dev = usb_get_intfdata(intf); 310 struct usbnet *dev = usb_get_intfdata(intf);
283 struct usb_driver *subdriver = (void *)dev->data[0]; 311 struct qmi_wwan_state *info = (void *)&dev->data;
284 int ret; 312 int ret;
285 313
286 ret = usbnet_suspend(intf, message); 314 ret = usbnet_suspend(intf, message);
287 if (ret < 0) 315 if (ret < 0)
288 goto err; 316 goto err;
289 317
290 if (subdriver && subdriver->suspend) 318 if (info->subdriver && info->subdriver->suspend)
291 ret = subdriver->suspend(intf, message); 319 ret = info->subdriver->suspend(intf, message);
292 if (ret < 0) 320 if (ret < 0)
293 usbnet_resume(intf); 321 usbnet_resume(intf);
294err: 322err:
@@ -298,33 +326,33 @@ err:
298static int qmi_wwan_resume(struct usb_interface *intf) 326static int qmi_wwan_resume(struct usb_interface *intf)
299{ 327{
300 struct usbnet *dev = usb_get_intfdata(intf); 328 struct usbnet *dev = usb_get_intfdata(intf);
301 struct usb_driver *subdriver = (void *)dev->data[0]; 329 struct qmi_wwan_state *info = (void *)&dev->data;
302 int ret = 0; 330 int ret = 0;
303 331
304 if (subdriver && subdriver->resume) 332 if (info->subdriver && info->subdriver->resume)
305 ret = subdriver->resume(intf); 333 ret = info->subdriver->resume(intf);
306 if (ret < 0) 334 if (ret < 0)
307 goto err; 335 goto err;
308 ret = usbnet_resume(intf); 336 ret = usbnet_resume(intf);
309 if (ret < 0 && subdriver && subdriver->resume && subdriver->suspend) 337 if (ret < 0 && info->subdriver && info->subdriver->resume && info->subdriver->suspend)
310 subdriver->suspend(intf, PMSG_SUSPEND); 338 info->subdriver->suspend(intf, PMSG_SUSPEND);
311err: 339err:
312 return ret; 340 return ret;
313} 341}
314 342
315
316static const struct driver_info qmi_wwan_info = { 343static const struct driver_info qmi_wwan_info = {
317 .description = "QMI speaking wwan device", 344 .description = "WWAN/QMI device",
318 .flags = FLAG_WWAN, 345 .flags = FLAG_WWAN,
319 .bind = qmi_wwan_bind, 346 .bind = qmi_wwan_bind,
347 .unbind = qmi_wwan_unbind,
320 .manage_power = qmi_wwan_manage_power, 348 .manage_power = qmi_wwan_manage_power,
321}; 349};
322 350
323static const struct driver_info qmi_wwan_shared = { 351static const struct driver_info qmi_wwan_shared = {
324 .description = "QMI speaking wwan device with combined interface", 352 .description = "WWAN/QMI device",
325 .flags = FLAG_WWAN, 353 .flags = FLAG_WWAN,
326 .bind = qmi_wwan_bind_shared, 354 .bind = qmi_wwan_bind_shared,
327 .unbind = qmi_wwan_unbind_shared, 355 .unbind = qmi_wwan_unbind,
328 .manage_power = qmi_wwan_manage_power, 356 .manage_power = qmi_wwan_manage_power,
329}; 357};
330 358
@@ -332,7 +360,7 @@ static const struct driver_info qmi_wwan_force_int0 = {
332 .description = "Qualcomm WWAN/QMI device", 360 .description = "Qualcomm WWAN/QMI device",
333 .flags = FLAG_WWAN, 361 .flags = FLAG_WWAN,
334 .bind = qmi_wwan_bind_shared, 362 .bind = qmi_wwan_bind_shared,
335 .unbind = qmi_wwan_unbind_shared, 363 .unbind = qmi_wwan_unbind,
336 .manage_power = qmi_wwan_manage_power, 364 .manage_power = qmi_wwan_manage_power,
337 .data = BIT(0), /* interface whitelist bitmap */ 365 .data = BIT(0), /* interface whitelist bitmap */
338}; 366};
@@ -341,7 +369,7 @@ static const struct driver_info qmi_wwan_force_int1 = {
341 .description = "Qualcomm WWAN/QMI device", 369 .description = "Qualcomm WWAN/QMI device",
342 .flags = FLAG_WWAN, 370 .flags = FLAG_WWAN,
343 .bind = qmi_wwan_bind_shared, 371 .bind = qmi_wwan_bind_shared,
344 .unbind = qmi_wwan_unbind_shared, 372 .unbind = qmi_wwan_unbind,
345 .manage_power = qmi_wwan_manage_power, 373 .manage_power = qmi_wwan_manage_power,
346 .data = BIT(1), /* interface whitelist bitmap */ 374 .data = BIT(1), /* interface whitelist bitmap */
347}; 375};
@@ -350,7 +378,7 @@ static const struct driver_info qmi_wwan_force_int2 = {
350 .description = "Qualcomm WWAN/QMI device", 378 .description = "Qualcomm WWAN/QMI device",
351 .flags = FLAG_WWAN, 379 .flags = FLAG_WWAN,
352 .bind = qmi_wwan_bind_shared, 380 .bind = qmi_wwan_bind_shared,
353 .unbind = qmi_wwan_unbind_shared, 381 .unbind = qmi_wwan_unbind,
354 .manage_power = qmi_wwan_manage_power, 382 .manage_power = qmi_wwan_manage_power,
355 .data = BIT(2), /* interface whitelist bitmap */ 383 .data = BIT(2), /* interface whitelist bitmap */
356}; 384};
@@ -359,7 +387,7 @@ static const struct driver_info qmi_wwan_force_int3 = {
359 .description = "Qualcomm WWAN/QMI device", 387 .description = "Qualcomm WWAN/QMI device",
360 .flags = FLAG_WWAN, 388 .flags = FLAG_WWAN,
361 .bind = qmi_wwan_bind_shared, 389 .bind = qmi_wwan_bind_shared,
362 .unbind = qmi_wwan_unbind_shared, 390 .unbind = qmi_wwan_unbind,
363 .manage_power = qmi_wwan_manage_power, 391 .manage_power = qmi_wwan_manage_power,
364 .data = BIT(3), /* interface whitelist bitmap */ 392 .data = BIT(3), /* interface whitelist bitmap */
365}; 393};
@@ -368,7 +396,7 @@ static const struct driver_info qmi_wwan_force_int4 = {
368 .description = "Qualcomm WWAN/QMI device", 396 .description = "Qualcomm WWAN/QMI device",
369 .flags = FLAG_WWAN, 397 .flags = FLAG_WWAN,
370 .bind = qmi_wwan_bind_shared, 398 .bind = qmi_wwan_bind_shared,
371 .unbind = qmi_wwan_unbind_shared, 399 .unbind = qmi_wwan_unbind,
372 .manage_power = qmi_wwan_manage_power, 400 .manage_power = qmi_wwan_manage_power,
373 .data = BIT(4), /* interface whitelist bitmap */ 401 .data = BIT(4), /* interface whitelist bitmap */
374}; 402};
@@ -390,7 +418,7 @@ static const struct driver_info qmi_wwan_sierra = {
390 .description = "Sierra Wireless wwan/QMI device", 418 .description = "Sierra Wireless wwan/QMI device",
391 .flags = FLAG_WWAN, 419 .flags = FLAG_WWAN,
392 .bind = qmi_wwan_bind_shared, 420 .bind = qmi_wwan_bind_shared,
393 .unbind = qmi_wwan_unbind_shared, 421 .unbind = qmi_wwan_unbind,
394 .manage_power = qmi_wwan_manage_power, 422 .manage_power = qmi_wwan_manage_power,
395 .data = BIT(8) | BIT(19), /* interface whitelist bitmap */ 423 .data = BIT(8) | BIT(19), /* interface whitelist bitmap */
396}; 424};
@@ -413,7 +441,7 @@ static const struct usb_device_id products[] = {
413 .idVendor = HUAWEI_VENDOR_ID, 441 .idVendor = HUAWEI_VENDOR_ID,
414 .bInterfaceClass = USB_CLASS_VENDOR_SPEC, 442 .bInterfaceClass = USB_CLASS_VENDOR_SPEC,
415 .bInterfaceSubClass = 1, 443 .bInterfaceSubClass = 1,
416 .bInterfaceProtocol = 8, /* NOTE: This is the *slave* interface of the CDC Union! */ 444 .bInterfaceProtocol = 9, /* CDC Ethernet *control* interface */
417 .driver_info = (unsigned long)&qmi_wwan_info, 445 .driver_info = (unsigned long)&qmi_wwan_info,
418 }, 446 },
419 { /* Vodafone/Huawei K5005 (12d1:14c8) and similar modems */ 447 { /* Vodafone/Huawei K5005 (12d1:14c8) and similar modems */
@@ -421,7 +449,7 @@ static const struct usb_device_id products[] = {
421 .idVendor = HUAWEI_VENDOR_ID, 449 .idVendor = HUAWEI_VENDOR_ID,
422 .bInterfaceClass = USB_CLASS_VENDOR_SPEC, 450 .bInterfaceClass = USB_CLASS_VENDOR_SPEC,
423 .bInterfaceSubClass = 1, 451 .bInterfaceSubClass = 1,
424 .bInterfaceProtocol = 56, /* NOTE: This is the *slave* interface of the CDC Union! */ 452 .bInterfaceProtocol = 57, /* CDC Ethernet *control* interface */
425 .driver_info = (unsigned long)&qmi_wwan_info, 453 .driver_info = (unsigned long)&qmi_wwan_info,
426 }, 454 },
427 { /* Huawei E392, E398 and possibly others in "Windows mode" 455 { /* Huawei E392, E398 and possibly others in "Windows mode"
@@ -584,17 +612,7 @@ static struct usb_driver qmi_wwan_driver = {
584 .disable_hub_initiated_lpm = 1, 612 .disable_hub_initiated_lpm = 1,
585}; 613};
586 614
587static int __init qmi_wwan_init(void) 615module_usb_driver(qmi_wwan_driver);
588{
589 return usb_register(&qmi_wwan_driver);
590}
591module_init(qmi_wwan_init);
592
593static void __exit qmi_wwan_exit(void)
594{
595 usb_deregister(&qmi_wwan_driver);
596}
597module_exit(qmi_wwan_exit);
598 616
599MODULE_AUTHOR("Bjørn Mork <bjorn@mork.no>"); 617MODULE_AUTHOR("Bjørn Mork <bjorn@mork.no>");
600MODULE_DESCRIPTION("Qualcomm MSM Interface (QMI) WWAN driver"); 618MODULE_DESCRIPTION("Qualcomm MSM Interface (QMI) WWAN driver");
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index b1112e75385..05ecf14d659 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -578,6 +578,35 @@ static int smsc95xx_ethtool_set_eeprom(struct net_device *netdev,
578 return smsc95xx_write_eeprom(dev, ee->offset, ee->len, data); 578 return smsc95xx_write_eeprom(dev, ee->offset, ee->len, data);
579} 579}
580 580
581static int smsc95xx_ethtool_getregslen(struct net_device *netdev)
582{
583 /* all smsc95xx registers */
584 return COE_CR - ID_REV + 1;
585}
586
587static void
588smsc95xx_ethtool_getregs(struct net_device *netdev, struct ethtool_regs *regs,
589 void *buf)
590{
591 struct usbnet *dev = netdev_priv(netdev);
592 unsigned int i, j, retval;
593 u32 *data = buf;
594
595 retval = smsc95xx_read_reg(dev, ID_REV, &regs->version);
596 if (retval < 0) {
597 netdev_warn(netdev, "REGS: cannot read ID_REV\n");
598 return;
599 }
600
601 for (i = ID_REV, j = 0; i <= COE_CR; i += (sizeof(u32)), j++) {
602 retval = smsc95xx_read_reg(dev, i, &data[j]);
603 if (retval < 0) {
604 netdev_warn(netdev, "REGS: cannot read reg[%x]\n", i);
605 return;
606 }
607 }
608}
609
581static const struct ethtool_ops smsc95xx_ethtool_ops = { 610static const struct ethtool_ops smsc95xx_ethtool_ops = {
582 .get_link = usbnet_get_link, 611 .get_link = usbnet_get_link,
583 .nway_reset = usbnet_nway_reset, 612 .nway_reset = usbnet_nway_reset,
@@ -589,6 +618,8 @@ static const struct ethtool_ops smsc95xx_ethtool_ops = {
589 .get_eeprom_len = smsc95xx_ethtool_get_eeprom_len, 618 .get_eeprom_len = smsc95xx_ethtool_get_eeprom_len,
590 .get_eeprom = smsc95xx_ethtool_get_eeprom, 619 .get_eeprom = smsc95xx_ethtool_get_eeprom,
591 .set_eeprom = smsc95xx_ethtool_set_eeprom, 620 .set_eeprom = smsc95xx_ethtool_set_eeprom,
621 .get_regs_len = smsc95xx_ethtool_getregslen,
622 .get_regs = smsc95xx_ethtool_getregs,
592}; 623};
593 624
594static int smsc95xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd) 625static int smsc95xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index aba769d7745..e92c057f794 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -180,7 +180,40 @@ int usbnet_get_ethernet_addr(struct usbnet *dev, int iMACAddress)
180} 180}
181EXPORT_SYMBOL_GPL(usbnet_get_ethernet_addr); 181EXPORT_SYMBOL_GPL(usbnet_get_ethernet_addr);
182 182
183static void intr_complete (struct urb *urb); 183static void intr_complete (struct urb *urb)
184{
185 struct usbnet *dev = urb->context;
186 int status = urb->status;
187
188 switch (status) {
189 /* success */
190 case 0:
191 dev->driver_info->status(dev, urb);
192 break;
193
194 /* software-driven interface shutdown */
195 case -ENOENT: /* urb killed */
196 case -ESHUTDOWN: /* hardware gone */
197 netif_dbg(dev, ifdown, dev->net,
198 "intr shutdown, code %d\n", status);
199 return;
200
201 /* NOTE: not throttling like RX/TX, since this endpoint
202 * already polls infrequently
203 */
204 default:
205 netdev_dbg(dev->net, "intr status %d\n", status);
206 break;
207 }
208
209 if (!netif_running (dev->net))
210 return;
211
212 status = usb_submit_urb (urb, GFP_ATOMIC);
213 if (status != 0)
214 netif_err(dev, timer, dev->net,
215 "intr resubmit --> %d\n", status);
216}
184 217
185static int init_status (struct usbnet *dev, struct usb_interface *intf) 218static int init_status (struct usbnet *dev, struct usb_interface *intf)
186{ 219{
@@ -519,42 +552,6 @@ block:
519 netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n"); 552 netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n");
520} 553}
521 554
522static void intr_complete (struct urb *urb)
523{
524 struct usbnet *dev = urb->context;
525 int status = urb->status;
526
527 switch (status) {
528 /* success */
529 case 0:
530 dev->driver_info->status(dev, urb);
531 break;
532
533 /* software-driven interface shutdown */
534 case -ENOENT: /* urb killed */
535 case -ESHUTDOWN: /* hardware gone */
536 netif_dbg(dev, ifdown, dev->net,
537 "intr shutdown, code %d\n", status);
538 return;
539
540 /* NOTE: not throttling like RX/TX, since this endpoint
541 * already polls infrequently
542 */
543 default:
544 netdev_dbg(dev->net, "intr status %d\n", status);
545 break;
546 }
547
548 if (!netif_running (dev->net))
549 return;
550
551 memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
552 status = usb_submit_urb (urb, GFP_ATOMIC);
553 if (status != 0)
554 netif_err(dev, timer, dev->net,
555 "intr resubmit --> %d\n", status);
556}
557
558/*-------------------------------------------------------------------------*/ 555/*-------------------------------------------------------------------------*/
559void usbnet_pause_rx(struct usbnet *dev) 556void usbnet_pause_rx(struct usbnet *dev)
560{ 557{
@@ -1312,7 +1309,6 @@ void usbnet_disconnect (struct usb_interface *intf)
1312 usb_free_urb(dev->interrupt); 1309 usb_free_urb(dev->interrupt);
1313 1310
1314 free_netdev(net); 1311 free_netdev(net);
1315 usb_put_dev (xdev);
1316} 1312}
1317EXPORT_SYMBOL_GPL(usbnet_disconnect); 1313EXPORT_SYMBOL_GPL(usbnet_disconnect);
1318 1314
@@ -1368,8 +1364,6 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
1368 xdev = interface_to_usbdev (udev); 1364 xdev = interface_to_usbdev (udev);
1369 interface = udev->cur_altsetting; 1365 interface = udev->cur_altsetting;
1370 1366
1371 usb_get_dev (xdev);
1372
1373 status = -ENOMEM; 1367 status = -ENOMEM;
1374 1368
1375 // set up our own records 1369 // set up our own records
@@ -1498,7 +1492,6 @@ out3:
1498out1: 1492out1:
1499 free_netdev(net); 1493 free_netdev(net);
1500out: 1494out:
1501 usb_put_dev(xdev);
1502 return status; 1495 return status;
1503} 1496}
1504EXPORT_SYMBOL_GPL(usbnet_probe); 1497EXPORT_SYMBOL_GPL(usbnet_probe);
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index f18149ae258..1db445b2ecc 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1062,7 +1062,7 @@ static int virtnet_probe(struct virtio_device *vdev)
1062 return -ENOMEM; 1062 return -ENOMEM;
1063 1063
1064 /* Set up network device as normal. */ 1064 /* Set up network device as normal. */
1065 dev->priv_flags |= IFF_UNICAST_FLT; 1065 dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE;
1066 dev->netdev_ops = &virtnet_netdev; 1066 dev->netdev_ops = &virtnet_netdev;
1067 dev->features = NETIF_F_HIGHDMA; 1067 dev->features = NETIF_F_HIGHDMA;
1068 1068
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 3f04ba0a545..93e0cfb739b 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -1037,7 +1037,7 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
1037#endif 1037#endif
1038 dev_dbg(&adapter->netdev->dev, 1038 dev_dbg(&adapter->netdev->dev,
1039 "txd[%u]: SOP 0x%Lx 0x%x 0x%x\n", 1039 "txd[%u]: SOP 0x%Lx 0x%x 0x%x\n",
1040 (u32)((union Vmxnet3_GenericDesc *)ctx.sop_txd - 1040 (u32)(ctx.sop_txd -
1041 tq->tx_ring.base), le64_to_cpu(gdesc->txd.addr), 1041 tq->tx_ring.base), le64_to_cpu(gdesc->txd.addr),
1042 le32_to_cpu(gdesc->dword[2]), le32_to_cpu(gdesc->dword[3])); 1042 le32_to_cpu(gdesc->dword[2]), le32_to_cpu(gdesc->dword[3]));
1043 1043
diff --git a/drivers/net/wan/x25_asy.c b/drivers/net/wan/x25_asy.c
index d7a65e141d1..44db8b75a53 100644
--- a/drivers/net/wan/x25_asy.c
+++ b/drivers/net/wan/x25_asy.c
@@ -231,7 +231,7 @@ static void x25_asy_encaps(struct x25_asy *sl, unsigned char *icp, int len)
231 } 231 }
232 232
233 p = icp; 233 p = icp;
234 count = x25_asy_esc(p, (unsigned char *) sl->xbuff, len); 234 count = x25_asy_esc(p, sl->xbuff, len);
235 235
236 /* Order of next two lines is *very* important. 236 /* Order of next two lines is *very* important.
237 * When we are sending a little amount of data, 237 * When we are sending a little amount of data,
diff --git a/drivers/net/wimax/i2400m/fw.c b/drivers/net/wimax/i2400m/fw.c
index 7cbd7d231e1..d09e44970e6 100644
--- a/drivers/net/wimax/i2400m/fw.c
+++ b/drivers/net/wimax/i2400m/fw.c
@@ -1268,7 +1268,7 @@ int i2400m_fw_check(struct i2400m *i2400m, const void *bcf, size_t bcf_size)
1268 size_t leftover, offset, header_len, size; 1268 size_t leftover, offset, header_len, size;
1269 1269
1270 leftover = top - itr; 1270 leftover = top - itr;
1271 offset = itr - (const void *) bcf; 1271 offset = itr - bcf;
1272 if (leftover <= sizeof(*bcf_hdr)) { 1272 if (leftover <= sizeof(*bcf_hdr)) {
1273 dev_err(dev, "firmware %s: %zu B left at @%zx, " 1273 dev_err(dev, "firmware %s: %zu B left at @%zx, "
1274 "not enough for BCF header\n", 1274 "not enough for BCF header\n",
diff --git a/drivers/net/wireless/adm8211.c b/drivers/net/wireless/adm8211.c
index 0ac09a2bd14..97afcec2475 100644
--- a/drivers/net/wireless/adm8211.c
+++ b/drivers/net/wireless/adm8211.c
@@ -1738,8 +1738,7 @@ static int adm8211_alloc_rings(struct ieee80211_hw *dev)
1738 return -ENOMEM; 1738 return -ENOMEM;
1739 } 1739 }
1740 1740
1741 priv->tx_ring = (struct adm8211_desc *)(priv->rx_ring + 1741 priv->tx_ring = priv->rx_ring + priv->rx_ring_size;
1742 priv->rx_ring_size);
1743 priv->tx_ring_dma = priv->rx_ring_dma + 1742 priv->tx_ring_dma = priv->rx_ring_dma +
1744 sizeof(struct adm8211_desc) * priv->rx_ring_size; 1743 sizeof(struct adm8211_desc) * priv->rx_ring_size;
1745 1744
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index a747c632597..f9f15bb3f03 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -1997,7 +1997,7 @@ static int mpi_send_packet (struct net_device *dev)
1997 * ------------------------------------------------ 1997 * ------------------------------------------------
1998 */ 1998 */
1999 1999
2000 memcpy((char *)ai->txfids[0].virtual_host_addr, 2000 memcpy(ai->txfids[0].virtual_host_addr,
2001 (char *)&wifictlhdr8023, sizeof(wifictlhdr8023)); 2001 (char *)&wifictlhdr8023, sizeof(wifictlhdr8023));
2002 2002
2003 payloadLen = (__le16 *)(ai->txfids[0].virtual_host_addr + 2003 payloadLen = (__le16 *)(ai->txfids[0].virtual_host_addr +
@@ -4212,7 +4212,7 @@ static int PC4500_writerid(struct airo_info *ai, u16 rid,
4212 airo_print_err(ai->dev->name, "%s: len=%d", __func__, len); 4212 airo_print_err(ai->dev->name, "%s: len=%d", __func__, len);
4213 rc = -1; 4213 rc = -1;
4214 } else { 4214 } else {
4215 memcpy((char *)ai->config_desc.virtual_host_addr, 4215 memcpy(ai->config_desc.virtual_host_addr,
4216 pBuf, len); 4216 pBuf, len);
4217 4217
4218 rc = issuecommand(ai, &cmd, &rsp); 4218 rc = issuecommand(ai, &cmd, &rsp);
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c
index b869a358ce4..fd7dbd4609d 100644
--- a/drivers/net/wireless/ath/ath6kl/cfg80211.c
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c
@@ -53,6 +53,11 @@
53 53
54#define DEFAULT_BG_SCAN_PERIOD 60 54#define DEFAULT_BG_SCAN_PERIOD 60
55 55
56struct ath6kl_cfg80211_match_probe_ssid {
57 struct cfg80211_ssid ssid;
58 u8 flag;
59};
60
56static struct ieee80211_rate ath6kl_rates[] = { 61static struct ieee80211_rate ath6kl_rates[] = {
57 RATETAB_ENT(10, 0x1, 0), 62 RATETAB_ENT(10, 0x1, 0),
58 RATETAB_ENT(20, 0x2, 0), 63 RATETAB_ENT(20, 0x2, 0),
@@ -576,6 +581,9 @@ static int ath6kl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
576 581
577 vif->nw_type = vif->next_mode; 582 vif->nw_type = vif->next_mode;
578 583
584 /* enable enhanced bmiss detection if applicable */
585 ath6kl_cfg80211_sta_bmiss_enhance(vif, true);
586
579 if (vif->wdev.iftype == NL80211_IFTYPE_P2P_CLIENT) 587 if (vif->wdev.iftype == NL80211_IFTYPE_P2P_CLIENT)
580 nw_subtype = SUBTYPE_P2PCLIENT; 588 nw_subtype = SUBTYPE_P2PCLIENT;
581 589
@@ -852,20 +860,6 @@ void ath6kl_cfg80211_disconnect_event(struct ath6kl_vif *vif, u8 reason,
852 } 860 }
853 } 861 }
854 862
855 /*
856 * Send a disconnect command to target when a disconnect event is
857 * received with reason code other than 3 (DISCONNECT_CMD - disconnect
858 * request from host) to make the firmware stop trying to connect even
859 * after giving disconnect event. There will be one more disconnect
860 * event for this disconnect command with reason code DISCONNECT_CMD
861 * which will be notified to cfg80211.
862 */
863
864 if (reason != DISCONNECT_CMD) {
865 ath6kl_wmi_disconnect_cmd(ar->wmi, vif->fw_vif_idx);
866 return;
867 }
868
869 clear_bit(CONNECT_PEND, &vif->flags); 863 clear_bit(CONNECT_PEND, &vif->flags);
870 864
871 if (vif->sme_state == SME_CONNECTING) { 865 if (vif->sme_state == SME_CONNECTING) {
@@ -875,32 +869,96 @@ void ath6kl_cfg80211_disconnect_event(struct ath6kl_vif *vif, u8 reason,
875 WLAN_STATUS_UNSPECIFIED_FAILURE, 869 WLAN_STATUS_UNSPECIFIED_FAILURE,
876 GFP_KERNEL); 870 GFP_KERNEL);
877 } else if (vif->sme_state == SME_CONNECTED) { 871 } else if (vif->sme_state == SME_CONNECTED) {
878 cfg80211_disconnected(vif->ndev, reason, 872 cfg80211_disconnected(vif->ndev, proto_reason,
879 NULL, 0, GFP_KERNEL); 873 NULL, 0, GFP_KERNEL);
880 } 874 }
881 875
882 vif->sme_state = SME_DISCONNECTED; 876 vif->sme_state = SME_DISCONNECTED;
877
878 /*
879 * Send a disconnect command to target when a disconnect event is
880 * received with reason code other than 3 (DISCONNECT_CMD - disconnect
881 * request from host) to make the firmware stop trying to connect even
882 * after giving disconnect event. There will be one more disconnect
883 * event for this disconnect command with reason code DISCONNECT_CMD
884 * which won't be notified to cfg80211.
885 */
886 if (reason != DISCONNECT_CMD)
887 ath6kl_wmi_disconnect_cmd(ar->wmi, vif->fw_vif_idx);
883} 888}
884 889
885static int ath6kl_set_probed_ssids(struct ath6kl *ar, 890static int ath6kl_set_probed_ssids(struct ath6kl *ar,
886 struct ath6kl_vif *vif, 891 struct ath6kl_vif *vif,
887 struct cfg80211_ssid *ssids, int n_ssids) 892 struct cfg80211_ssid *ssids, int n_ssids,
893 struct cfg80211_match_set *match_set,
894 int n_match_ssid)
888{ 895{
889 u8 i; 896 u8 i, j, index_to_add, ssid_found = false;
897 struct ath6kl_cfg80211_match_probe_ssid ssid_list[MAX_PROBED_SSIDS];
890 898
891 if (n_ssids > MAX_PROBED_SSID_INDEX) 899 memset(ssid_list, 0, sizeof(ssid_list));
900
901 if (n_ssids > MAX_PROBED_SSIDS ||
902 n_match_ssid > MAX_PROBED_SSIDS)
892 return -EINVAL; 903 return -EINVAL;
893 904
894 for (i = 0; i < n_ssids; i++) { 905 for (i = 0; i < n_ssids; i++) {
906 memcpy(ssid_list[i].ssid.ssid,
907 ssids[i].ssid,
908 ssids[i].ssid_len);
909 ssid_list[i].ssid.ssid_len = ssids[i].ssid_len;
910
911 if (ssids[i].ssid_len)
912 ssid_list[i].flag = SPECIFIC_SSID_FLAG;
913 else
914 ssid_list[i].flag = ANY_SSID_FLAG;
915
916 if (n_match_ssid == 0)
917 ssid_list[i].flag |= MATCH_SSID_FLAG;
918 }
919
920 index_to_add = i;
921
922 for (i = 0; i < n_match_ssid; i++) {
923 ssid_found = false;
924
925 for (j = 0; j < n_ssids; j++) {
926 if ((match_set[i].ssid.ssid_len ==
927 ssid_list[j].ssid.ssid_len) &&
928 (!memcmp(ssid_list[j].ssid.ssid,
929 match_set[i].ssid.ssid,
930 match_set[i].ssid.ssid_len))) {
931 ssid_list[j].flag |= MATCH_SSID_FLAG;
932 ssid_found = true;
933 break;
934 }
935 }
936
937 if (ssid_found)
938 continue;
939
940 if (index_to_add >= MAX_PROBED_SSIDS)
941 continue;
942
943 ssid_list[index_to_add].ssid.ssid_len =
944 match_set[i].ssid.ssid_len;
945 memcpy(ssid_list[index_to_add].ssid.ssid,
946 match_set[i].ssid.ssid,
947 match_set[i].ssid.ssid_len);
948 ssid_list[index_to_add].flag |= MATCH_SSID_FLAG;
949 index_to_add++;
950 }
951
952 for (i = 0; i < index_to_add; i++) {
895 ath6kl_wmi_probedssid_cmd(ar->wmi, vif->fw_vif_idx, i, 953 ath6kl_wmi_probedssid_cmd(ar->wmi, vif->fw_vif_idx, i,
896 ssids[i].ssid_len ? 954 ssid_list[i].flag,
897 SPECIFIC_SSID_FLAG : ANY_SSID_FLAG, 955 ssid_list[i].ssid.ssid_len,
898 ssids[i].ssid_len, 956 ssid_list[i].ssid.ssid);
899 ssids[i].ssid); 957
900 } 958 }
901 959
902 /* Make sure no old entries are left behind */ 960 /* Make sure no old entries are left behind */
903 for (i = n_ssids; i < MAX_PROBED_SSID_INDEX; i++) { 961 for (i = index_to_add; i < MAX_PROBED_SSIDS; i++) {
904 ath6kl_wmi_probedssid_cmd(ar->wmi, vif->fw_vif_idx, i, 962 ath6kl_wmi_probedssid_cmd(ar->wmi, vif->fw_vif_idx, i,
905 DISABLE_SSID_FLAG, 0, NULL); 963 DISABLE_SSID_FLAG, 0, NULL);
906 } 964 }
@@ -934,7 +992,7 @@ static int ath6kl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
934 } 992 }
935 993
936 ret = ath6kl_set_probed_ssids(ar, vif, request->ssids, 994 ret = ath6kl_set_probed_ssids(ar, vif, request->ssids,
937 request->n_ssids); 995 request->n_ssids, NULL, 0);
938 if (ret < 0) 996 if (ret < 0)
939 return ret; 997 return ret;
940 998
@@ -943,7 +1001,7 @@ static int ath6kl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
943 WMI_FRAME_PROBE_REQ, 1001 WMI_FRAME_PROBE_REQ,
944 request->ie, request->ie_len); 1002 request->ie, request->ie_len);
945 if (ret) { 1003 if (ret) {
946 ath6kl_err("failed to set Probe Request appie for scan"); 1004 ath6kl_err("failed to set Probe Request appie for scan\n");
947 return ret; 1005 return ret;
948 } 1006 }
949 1007
@@ -1512,6 +1570,9 @@ static int ath6kl_cfg80211_change_iface(struct wiphy *wiphy,
1512 } 1570 }
1513 } 1571 }
1514 1572
1573 /* need to clean up enhanced bmiss detection fw state */
1574 ath6kl_cfg80211_sta_bmiss_enhance(vif, false);
1575
1515set_iface_type: 1576set_iface_type:
1516 switch (type) { 1577 switch (type) {
1517 case NL80211_IFTYPE_STATION: 1578 case NL80211_IFTYPE_STATION:
@@ -2074,7 +2135,9 @@ static int ath6kl_wow_suspend(struct ath6kl *ar, struct cfg80211_wowlan *wow)
2074 if (wow && (wow->n_patterns > WOW_MAX_FILTERS_PER_LIST)) 2135 if (wow && (wow->n_patterns > WOW_MAX_FILTERS_PER_LIST))
2075 return -EINVAL; 2136 return -EINVAL;
2076 2137
2077 if (!test_bit(NETDEV_MCAST_ALL_ON, &vif->flags)) { 2138 if (!test_bit(NETDEV_MCAST_ALL_ON, &vif->flags) &&
2139 test_bit(ATH6KL_FW_CAPABILITY_WOW_MULTICAST_FILTER,
2140 ar->fw_capabilities)) {
2078 ret = ath6kl_wmi_mcast_filter_cmd(vif->ar->wmi, 2141 ret = ath6kl_wmi_mcast_filter_cmd(vif->ar->wmi,
2079 vif->fw_vif_idx, false); 2142 vif->fw_vif_idx, false);
2080 if (ret) 2143 if (ret)
@@ -2209,7 +2272,9 @@ static int ath6kl_wow_resume(struct ath6kl *ar)
2209 2272
2210 ar->state = ATH6KL_STATE_ON; 2273 ar->state = ATH6KL_STATE_ON;
2211 2274
2212 if (!test_bit(NETDEV_MCAST_ALL_OFF, &vif->flags)) { 2275 if (!test_bit(NETDEV_MCAST_ALL_OFF, &vif->flags) &&
2276 test_bit(ATH6KL_FW_CAPABILITY_WOW_MULTICAST_FILTER,
2277 ar->fw_capabilities)) {
2213 ret = ath6kl_wmi_mcast_filter_cmd(vif->ar->wmi, 2278 ret = ath6kl_wmi_mcast_filter_cmd(vif->ar->wmi,
2214 vif->fw_vif_idx, true); 2279 vif->fw_vif_idx, true);
2215 if (ret) 2280 if (ret)
@@ -2475,7 +2540,7 @@ void ath6kl_check_wow_status(struct ath6kl *ar)
2475static int ath6kl_set_htcap(struct ath6kl_vif *vif, enum ieee80211_band band, 2540static int ath6kl_set_htcap(struct ath6kl_vif *vif, enum ieee80211_band band,
2476 bool ht_enable) 2541 bool ht_enable)
2477{ 2542{
2478 struct ath6kl_htcap *htcap = &vif->htcap; 2543 struct ath6kl_htcap *htcap = &vif->htcap[band];
2479 2544
2480 if (htcap->ht_enable == ht_enable) 2545 if (htcap->ht_enable == ht_enable)
2481 return 0; 2546 return 0;
@@ -2585,33 +2650,28 @@ static int ath6kl_set_ies(struct ath6kl_vif *vif,
2585 return 0; 2650 return 0;
2586} 2651}
2587 2652
2588static int ath6kl_set_channel(struct wiphy *wiphy, struct net_device *dev, 2653void ath6kl_cfg80211_sta_bmiss_enhance(struct ath6kl_vif *vif, bool enable)
2589 struct ieee80211_channel *chan,
2590 enum nl80211_channel_type channel_type)
2591{ 2654{
2592 struct ath6kl_vif *vif; 2655 int err;
2593 2656
2594 /* 2657 if (WARN_ON(!test_bit(WMI_READY, &vif->ar->flag)))
2595 * 'dev' could be NULL if a channel change is required for the hardware 2658 return;
2596 * device itself, instead of a particular VIF.
2597 *
2598 * FIXME: To be handled properly when monitor mode is supported.
2599 */
2600 if (!dev)
2601 return -EBUSY;
2602 2659
2603 vif = netdev_priv(dev); 2660 if (vif->nw_type != INFRA_NETWORK)
2661 return;
2604 2662
2605 if (!ath6kl_cfg80211_ready(vif)) 2663 if (!test_bit(ATH6KL_FW_CAPABILITY_BMISS_ENHANCE,
2606 return -EIO; 2664 vif->ar->fw_capabilities))
2665 return;
2607 2666
2608 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: center_freq=%u hw_value=%u\n", 2667 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s fw bmiss enhance\n",
2609 __func__, chan->center_freq, chan->hw_value); 2668 enable ? "enable" : "disable");
2610 vif->next_chan = chan->center_freq;
2611 vif->next_ch_type = channel_type;
2612 vif->next_ch_band = chan->band;
2613 2669
2614 return 0; 2670 err = ath6kl_wmi_sta_bmiss_enhance_cmd(vif->ar->wmi,
2671 vif->fw_vif_idx, enable);
2672 if (err)
2673 ath6kl_err("failed to %s enhanced bmiss detection: %d\n",
2674 enable ? "enable" : "disable", err);
2615} 2675}
2616 2676
2617static int ath6kl_get_rsn_capab(struct cfg80211_beacon_data *beacon, 2677static int ath6kl_get_rsn_capab(struct cfg80211_beacon_data *beacon,
@@ -2694,9 +2754,15 @@ static int ath6kl_start_ap(struct wiphy *wiphy, struct net_device *dev,
2694 2754
2695 /* TODO: 2755 /* TODO:
2696 * info->interval 2756 * info->interval
2697 * info->dtim_period
2698 */ 2757 */
2699 2758
2759 ret = ath6kl_wmi_ap_set_dtim_cmd(ar->wmi, vif->fw_vif_idx,
2760 info->dtim_period);
2761
2762 /* ignore error, just print a warning and continue normally */
2763 if (ret)
2764 ath6kl_warn("Failed to set dtim_period in beacon: %d\n", ret);
2765
2700 if (info->beacon.head == NULL) 2766 if (info->beacon.head == NULL)
2701 return -EINVAL; 2767 return -EINVAL;
2702 mgmt = (struct ieee80211_mgmt *) info->beacon.head; 2768 mgmt = (struct ieee80211_mgmt *) info->beacon.head;
@@ -2791,7 +2857,7 @@ static int ath6kl_start_ap(struct wiphy *wiphy, struct net_device *dev,
2791 p.ssid_len = vif->ssid_len; 2857 p.ssid_len = vif->ssid_len;
2792 memcpy(p.ssid, vif->ssid, vif->ssid_len); 2858 memcpy(p.ssid, vif->ssid, vif->ssid_len);
2793 p.dot11_auth_mode = vif->dot11_auth_mode; 2859 p.dot11_auth_mode = vif->dot11_auth_mode;
2794 p.ch = cpu_to_le16(vif->next_chan); 2860 p.ch = cpu_to_le16(info->channel->center_freq);
2795 2861
2796 /* Enable uAPSD support by default */ 2862 /* Enable uAPSD support by default */
2797 res = ath6kl_wmi_ap_set_apsd(ar->wmi, vif->fw_vif_idx, true); 2863 res = ath6kl_wmi_ap_set_apsd(ar->wmi, vif->fw_vif_idx, true);
@@ -2815,8 +2881,8 @@ static int ath6kl_start_ap(struct wiphy *wiphy, struct net_device *dev,
2815 return res; 2881 return res;
2816 } 2882 }
2817 2883
2818 if (ath6kl_set_htcap(vif, vif->next_ch_band, 2884 if (ath6kl_set_htcap(vif, info->channel->band,
2819 vif->next_ch_type != NL80211_CHAN_NO_HT)) 2885 info->channel_type != NL80211_CHAN_NO_HT))
2820 return -EIO; 2886 return -EIO;
2821 2887
2822 /* 2888 /*
@@ -3160,10 +3226,24 @@ static int ath6kl_cfg80211_sscan_start(struct wiphy *wiphy,
3160 ath6kl_cfg80211_scan_complete_event(vif, true); 3226 ath6kl_cfg80211_scan_complete_event(vif, true);
3161 3227
3162 ret = ath6kl_set_probed_ssids(ar, vif, request->ssids, 3228 ret = ath6kl_set_probed_ssids(ar, vif, request->ssids,
3163 request->n_ssids); 3229 request->n_ssids,
3230 request->match_sets,
3231 request->n_match_sets);
3164 if (ret < 0) 3232 if (ret < 0)
3165 return ret; 3233 return ret;
3166 3234
3235 if (!request->n_match_sets) {
3236 ret = ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx,
3237 ALL_BSS_FILTER, 0);
3238 if (ret < 0)
3239 return ret;
3240 } else {
3241 ret = ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx,
3242 MATCHED_SSID_FILTER, 0);
3243 if (ret < 0)
3244 return ret;
3245 }
3246
3167 /* fw uses seconds, also make sure that it's >0 */ 3247 /* fw uses seconds, also make sure that it's >0 */
3168 interval = max_t(u16, 1, request->interval / 1000); 3248 interval = max_t(u16, 1, request->interval / 1000);
3169 3249
@@ -3185,7 +3265,7 @@ static int ath6kl_cfg80211_sscan_start(struct wiphy *wiphy,
3185 WMI_FRAME_PROBE_REQ, 3265 WMI_FRAME_PROBE_REQ,
3186 request->ie, request->ie_len); 3266 request->ie, request->ie_len);
3187 if (ret) { 3267 if (ret) {
3188 ath6kl_warn("Failed to set probe request IE for scheduled scan: %d", 3268 ath6kl_warn("Failed to set probe request IE for scheduled scan: %d\n",
3189 ret); 3269 ret);
3190 return ret; 3270 return ret;
3191 } 3271 }
@@ -3217,6 +3297,18 @@ static int ath6kl_cfg80211_sscan_stop(struct wiphy *wiphy,
3217 return 0; 3297 return 0;
3218} 3298}
3219 3299
3300static int ath6kl_cfg80211_set_bitrate(struct wiphy *wiphy,
3301 struct net_device *dev,
3302 const u8 *addr,
3303 const struct cfg80211_bitrate_mask *mask)
3304{
3305 struct ath6kl *ar = ath6kl_priv(dev);
3306 struct ath6kl_vif *vif = netdev_priv(dev);
3307
3308 return ath6kl_wmi_set_bitrate_mask(ar->wmi, vif->fw_vif_idx,
3309 mask);
3310}
3311
3220static const struct ieee80211_txrx_stypes 3312static const struct ieee80211_txrx_stypes
3221ath6kl_mgmt_stypes[NUM_NL80211_IFTYPES] = { 3313ath6kl_mgmt_stypes[NUM_NL80211_IFTYPES] = {
3222 [NL80211_IFTYPE_STATION] = { 3314 [NL80211_IFTYPE_STATION] = {
@@ -3271,7 +3363,6 @@ static struct cfg80211_ops ath6kl_cfg80211_ops = {
3271 .suspend = __ath6kl_cfg80211_suspend, 3363 .suspend = __ath6kl_cfg80211_suspend,
3272 .resume = __ath6kl_cfg80211_resume, 3364 .resume = __ath6kl_cfg80211_resume,
3273#endif 3365#endif
3274 .set_channel = ath6kl_set_channel,
3275 .start_ap = ath6kl_start_ap, 3366 .start_ap = ath6kl_start_ap,
3276 .change_beacon = ath6kl_change_beacon, 3367 .change_beacon = ath6kl_change_beacon,
3277 .stop_ap = ath6kl_stop_ap, 3368 .stop_ap = ath6kl_stop_ap,
@@ -3283,6 +3374,7 @@ static struct cfg80211_ops ath6kl_cfg80211_ops = {
3283 .mgmt_frame_register = ath6kl_mgmt_frame_register, 3374 .mgmt_frame_register = ath6kl_mgmt_frame_register,
3284 .sched_scan_start = ath6kl_cfg80211_sscan_start, 3375 .sched_scan_start = ath6kl_cfg80211_sscan_start,
3285 .sched_scan_stop = ath6kl_cfg80211_sscan_stop, 3376 .sched_scan_stop = ath6kl_cfg80211_sscan_stop,
3377 .set_bitrate_mask = ath6kl_cfg80211_set_bitrate,
3286}; 3378};
3287 3379
3288void ath6kl_cfg80211_stop(struct ath6kl_vif *vif) 3380void ath6kl_cfg80211_stop(struct ath6kl_vif *vif)
@@ -3410,7 +3502,8 @@ struct net_device *ath6kl_interface_add(struct ath6kl *ar, char *name,
3410 vif->listen_intvl_t = ATH6KL_DEFAULT_LISTEN_INTVAL; 3502 vif->listen_intvl_t = ATH6KL_DEFAULT_LISTEN_INTVAL;
3411 vif->bmiss_time_t = ATH6KL_DEFAULT_BMISS_TIME; 3503 vif->bmiss_time_t = ATH6KL_DEFAULT_BMISS_TIME;
3412 vif->bg_scan_period = 0; 3504 vif->bg_scan_period = 0;
3413 vif->htcap.ht_enable = true; 3505 vif->htcap[IEEE80211_BAND_2GHZ].ht_enable = true;
3506 vif->htcap[IEEE80211_BAND_5GHZ].ht_enable = true;
3414 3507
3415 memcpy(ndev->dev_addr, ar->mac_addr, ETH_ALEN); 3508 memcpy(ndev->dev_addr, ar->mac_addr, ETH_ALEN);
3416 if (fw_vif_idx != 0) 3509 if (fw_vif_idx != 0)
@@ -3470,7 +3563,13 @@ int ath6kl_cfg80211_init(struct ath6kl *ar)
3470 } 3563 }
3471 3564
3472 /* max num of ssids that can be probed during scanning */ 3565 /* max num of ssids that can be probed during scanning */
3473 wiphy->max_scan_ssids = MAX_PROBED_SSID_INDEX; 3566 wiphy->max_scan_ssids = MAX_PROBED_SSIDS;
3567
3568 /* max num of ssids that can be matched after scan */
3569 if (test_bit(ATH6KL_FW_CAPABILITY_SCHED_SCAN_MATCH_LIST,
3570 ar->fw_capabilities))
3571 wiphy->max_match_sets = MAX_PROBED_SSIDS;
3572
3474 wiphy->max_scan_ie_len = 1000; /* FIX: what is correct limit? */ 3573 wiphy->max_scan_ie_len = 1000; /* FIX: what is correct limit? */
3475 switch (ar->hw.cap) { 3574 switch (ar->hw.cap) {
3476 case WMI_11AN_CAP: 3575 case WMI_11AN_CAP:
@@ -3507,6 +3606,17 @@ int ath6kl_cfg80211_init(struct ath6kl *ar)
3507 ath6kl_band_5ghz.ht_cap.cap = 0; 3606 ath6kl_band_5ghz.ht_cap.cap = 0;
3508 ath6kl_band_5ghz.ht_cap.ht_supported = false; 3607 ath6kl_band_5ghz.ht_cap.ht_supported = false;
3509 } 3608 }
3609
3610 if (ar->hw.flags & ATH6KL_HW_FLAG_64BIT_RATES) {
3611 ath6kl_band_2ghz.ht_cap.mcs.rx_mask[0] = 0xff;
3612 ath6kl_band_5ghz.ht_cap.mcs.rx_mask[0] = 0xff;
3613 ath6kl_band_2ghz.ht_cap.mcs.rx_mask[1] = 0xff;
3614 ath6kl_band_5ghz.ht_cap.mcs.rx_mask[1] = 0xff;
3615 } else {
3616 ath6kl_band_2ghz.ht_cap.mcs.rx_mask[0] = 0xff;
3617 ath6kl_band_5ghz.ht_cap.mcs.rx_mask[0] = 0xff;
3618 }
3619
3510 if (band_2gig) 3620 if (band_2gig)
3511 wiphy->bands[IEEE80211_BAND_2GHZ] = &ath6kl_band_2ghz; 3621 wiphy->bands[IEEE80211_BAND_2GHZ] = &ath6kl_band_2ghz;
3512 if (band_5gig) 3622 if (band_5gig)
@@ -3527,7 +3637,7 @@ int ath6kl_cfg80211_init(struct ath6kl *ar)
3527 wiphy->wowlan.pattern_min_len = 1; 3637 wiphy->wowlan.pattern_min_len = 1;
3528 wiphy->wowlan.pattern_max_len = WOW_PATTERN_SIZE; 3638 wiphy->wowlan.pattern_max_len = WOW_PATTERN_SIZE;
3529 3639
3530 wiphy->max_sched_scan_ssids = MAX_PROBED_SSID_INDEX; 3640 wiphy->max_sched_scan_ssids = MAX_PROBED_SSIDS;
3531 3641
3532 ar->wiphy->flags |= WIPHY_FLAG_SUPPORTS_FW_ROAM | 3642 ar->wiphy->flags |= WIPHY_FLAG_SUPPORTS_FW_ROAM |
3533 WIPHY_FLAG_HAVE_AP_SME | 3643 WIPHY_FLAG_HAVE_AP_SME |
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.h b/drivers/net/wireless/ath/ath6kl/cfg80211.h
index 5ea8cbb79f4..b992046a1b0 100644
--- a/drivers/net/wireless/ath/ath6kl/cfg80211.h
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.h
@@ -62,5 +62,7 @@ void ath6kl_cfg80211_cleanup(struct ath6kl *ar);
62 62
63struct ath6kl *ath6kl_cfg80211_create(void); 63struct ath6kl *ath6kl_cfg80211_create(void);
64void ath6kl_cfg80211_destroy(struct ath6kl *ar); 64void ath6kl_cfg80211_destroy(struct ath6kl *ar);
65/* TODO: remove this once ath6kl_vif_cleanup() is moved to cfg80211.c */
66void ath6kl_cfg80211_sta_bmiss_enhance(struct ath6kl_vif *vif, bool enable);
65 67
66#endif /* ATH6KL_CFG80211_H */ 68#endif /* ATH6KL_CFG80211_H */
diff --git a/drivers/net/wireless/ath/ath6kl/core.h b/drivers/net/wireless/ath/ath6kl/core.h
index 4d9c6f14269..d38a31de344 100644
--- a/drivers/net/wireless/ath/ath6kl/core.h
+++ b/drivers/net/wireless/ath/ath6kl/core.h
@@ -100,6 +100,21 @@ enum ath6kl_fw_capability {
100 /* Firmware has support to override rsn cap of rsn ie */ 100 /* Firmware has support to override rsn cap of rsn ie */
101 ATH6KL_FW_CAPABILITY_RSN_CAP_OVERRIDE, 101 ATH6KL_FW_CAPABILITY_RSN_CAP_OVERRIDE,
102 102
103 /*
104 * Multicast support in WOW and host awake mode.
105 * Allow all multicast in host awake mode.
106 * Apply multicast filter in WOW mode.
107 */
108 ATH6KL_FW_CAPABILITY_WOW_MULTICAST_FILTER,
109
110 /* Firmware supports enhanced bmiss detection */
111 ATH6KL_FW_CAPABILITY_BMISS_ENHANCE,
112
113 /*
114 * FW supports matching of ssid in schedule scan
115 */
116 ATH6KL_FW_CAPABILITY_SCHED_SCAN_MATCH_LIST,
117
103 /* this needs to be last */ 118 /* this needs to be last */
104 ATH6KL_FW_CAPABILITY_MAX, 119 ATH6KL_FW_CAPABILITY_MAX,
105}; 120};
@@ -112,6 +127,10 @@ struct ath6kl_fw_ie {
112 u8 data[0]; 127 u8 data[0];
113}; 128};
114 129
130enum ath6kl_hw_flags {
131 ATH6KL_HW_FLAG_64BIT_RATES = BIT(0),
132};
133
115#define ATH6KL_FW_API2_FILE "fw-2.bin" 134#define ATH6KL_FW_API2_FILE "fw-2.bin"
116#define ATH6KL_FW_API3_FILE "fw-3.bin" 135#define ATH6KL_FW_API3_FILE "fw-3.bin"
117 136
@@ -196,7 +215,7 @@ struct ath6kl_fw_ie {
196 215
197#define AGGR_NUM_OF_FREE_NETBUFS 16 216#define AGGR_NUM_OF_FREE_NETBUFS 16
198 217
199#define AGGR_RX_TIMEOUT 400 /* in ms */ 218#define AGGR_RX_TIMEOUT 100 /* in ms */
200 219
201#define WMI_TIMEOUT (2 * HZ) 220#define WMI_TIMEOUT (2 * HZ)
202 221
@@ -245,7 +264,6 @@ struct skb_hold_q {
245 264
246struct rxtid { 265struct rxtid {
247 bool aggr; 266 bool aggr;
248 bool progress;
249 bool timer_mon; 267 bool timer_mon;
250 u16 win_sz; 268 u16 win_sz;
251 u16 seq_next; 269 u16 seq_next;
@@ -254,9 +272,15 @@ struct rxtid {
254 struct sk_buff_head q; 272 struct sk_buff_head q;
255 273
256 /* 274 /*
257 * FIXME: No clue what this should protect. Apparently it should 275 * lock mainly protects seq_next and hold_q. Movement of seq_next
258 * protect some of the fields above but they are also accessed 276 * needs to be protected between aggr_timeout() and
259 * without taking the lock. 277 * aggr_process_recv_frm(). hold_q will be holding the pending
278 * reorder frames and it's access should also be protected.
279 * Some of the other fields like hold_q_sz, win_sz and aggr are
280 * initialized/reset when receiving addba/delba req, also while
281 * deleting aggr state all the pending buffers are flushed before
282 * resetting these fields, so there should not be any race in accessing
283 * these fields.
260 */ 284 */
261 spinlock_t lock; 285 spinlock_t lock;
262}; 286};
@@ -541,7 +565,7 @@ struct ath6kl_vif {
541 struct ath6kl_wep_key wep_key_list[WMI_MAX_KEY_INDEX + 1]; 565 struct ath6kl_wep_key wep_key_list[WMI_MAX_KEY_INDEX + 1];
542 struct ath6kl_key keys[WMI_MAX_KEY_INDEX + 1]; 566 struct ath6kl_key keys[WMI_MAX_KEY_INDEX + 1];
543 struct aggr_info *aggr_cntxt; 567 struct aggr_info *aggr_cntxt;
544 struct ath6kl_htcap htcap; 568 struct ath6kl_htcap htcap[IEEE80211_NUM_BANDS];
545 569
546 struct timer_list disconnect_timer; 570 struct timer_list disconnect_timer;
547 struct timer_list sched_scan_timer; 571 struct timer_list sched_scan_timer;
@@ -553,9 +577,6 @@ struct ath6kl_vif {
553 u32 last_cancel_roc_id; 577 u32 last_cancel_roc_id;
554 u32 send_action_id; 578 u32 send_action_id;
555 bool probe_req_report; 579 bool probe_req_report;
556 u16 next_chan;
557 enum nl80211_channel_type next_ch_type;
558 enum ieee80211_band next_ch_band;
559 u16 assoc_bss_beacon_int; 580 u16 assoc_bss_beacon_int;
560 u16 listen_intvl_t; 581 u16 listen_intvl_t;
561 u16 bmiss_time_t; 582 u16 bmiss_time_t;
@@ -687,6 +708,8 @@ struct ath6kl {
687 u32 testscript_addr; 708 u32 testscript_addr;
688 enum wmi_phy_cap cap; 709 enum wmi_phy_cap cap;
689 710
711 u32 flags;
712
690 struct ath6kl_hw_fw { 713 struct ath6kl_hw_fw {
691 const char *dir; 714 const char *dir;
692 const char *otp; 715 const char *otp;
diff --git a/drivers/net/wireless/ath/ath6kl/htc_mbox.c b/drivers/net/wireless/ath/ath6kl/htc_mbox.c
index 2798624d3a9..cd0e1ba410d 100644
--- a/drivers/net/wireless/ath/ath6kl/htc_mbox.c
+++ b/drivers/net/wireless/ath/ath6kl/htc_mbox.c
@@ -1309,7 +1309,7 @@ static int ath6kl_htc_rx_packet(struct htc_target *target,
1309 } 1309 }
1310 1310
1311 ath6kl_dbg(ATH6KL_DBG_HTC, 1311 ath6kl_dbg(ATH6KL_DBG_HTC,
1312 "htc rx 0x%p hdr x%x len %d mbox 0x%x\n", 1312 "htc rx 0x%p hdr 0x%x len %d mbox 0x%x\n",
1313 packet, packet->info.rx.exp_hdr, 1313 packet, packet->info.rx.exp_hdr,
1314 padded_len, dev->ar->mbox_info.htc_addr); 1314 padded_len, dev->ar->mbox_info.htc_addr);
1315 1315
diff --git a/drivers/net/wireless/ath/ath6kl/init.c b/drivers/net/wireless/ath/ath6kl/init.c
index 7eb0515f458..f90b5db741c 100644
--- a/drivers/net/wireless/ath/ath6kl/init.c
+++ b/drivers/net/wireless/ath/ath6kl/init.c
@@ -42,6 +42,7 @@ static const struct ath6kl_hw hw_list[] = {
42 .reserved_ram_size = 6912, 42 .reserved_ram_size = 6912,
43 .refclk_hz = 26000000, 43 .refclk_hz = 26000000,
44 .uarttx_pin = 8, 44 .uarttx_pin = 8,
45 .flags = 0,
45 46
46 /* hw2.0 needs override address hardcoded */ 47 /* hw2.0 needs override address hardcoded */
47 .app_start_override_addr = 0x944C00, 48 .app_start_override_addr = 0x944C00,
@@ -67,6 +68,7 @@ static const struct ath6kl_hw hw_list[] = {
67 .refclk_hz = 26000000, 68 .refclk_hz = 26000000,
68 .uarttx_pin = 8, 69 .uarttx_pin = 8,
69 .testscript_addr = 0x57ef74, 70 .testscript_addr = 0x57ef74,
71 .flags = 0,
70 72
71 .fw = { 73 .fw = {
72 .dir = AR6003_HW_2_1_1_FW_DIR, 74 .dir = AR6003_HW_2_1_1_FW_DIR,
@@ -91,6 +93,7 @@ static const struct ath6kl_hw hw_list[] = {
91 .board_addr = 0x433900, 93 .board_addr = 0x433900,
92 .refclk_hz = 26000000, 94 .refclk_hz = 26000000,
93 .uarttx_pin = 11, 95 .uarttx_pin = 11,
96 .flags = ATH6KL_HW_FLAG_64BIT_RATES,
94 97
95 .fw = { 98 .fw = {
96 .dir = AR6004_HW_1_0_FW_DIR, 99 .dir = AR6004_HW_1_0_FW_DIR,
@@ -110,6 +113,7 @@ static const struct ath6kl_hw hw_list[] = {
110 .board_addr = 0x43d400, 113 .board_addr = 0x43d400,
111 .refclk_hz = 40000000, 114 .refclk_hz = 40000000,
112 .uarttx_pin = 11, 115 .uarttx_pin = 11,
116 .flags = ATH6KL_HW_FLAG_64BIT_RATES,
113 117
114 .fw = { 118 .fw = {
115 .dir = AR6004_HW_1_1_FW_DIR, 119 .dir = AR6004_HW_1_1_FW_DIR,
@@ -129,6 +133,7 @@ static const struct ath6kl_hw hw_list[] = {
129 .board_addr = 0x435c00, 133 .board_addr = 0x435c00,
130 .refclk_hz = 40000000, 134 .refclk_hz = 40000000,
131 .uarttx_pin = 11, 135 .uarttx_pin = 11,
136 .flags = ATH6KL_HW_FLAG_64BIT_RATES,
132 137
133 .fw = { 138 .fw = {
134 .dir = AR6004_HW_1_2_FW_DIR, 139 .dir = AR6004_HW_1_2_FW_DIR,
@@ -938,6 +943,14 @@ static int ath6kl_fetch_fw_apin(struct ath6kl *ar, const char *name)
938 } 943 }
939 944
940 switch (ie_id) { 945 switch (ie_id) {
946 case ATH6KL_FW_IE_FW_VERSION:
947 strlcpy(ar->wiphy->fw_version, data,
948 sizeof(ar->wiphy->fw_version));
949
950 ath6kl_dbg(ATH6KL_DBG_BOOT,
951 "found fw version %s\n",
952 ar->wiphy->fw_version);
953 break;
941 case ATH6KL_FW_IE_OTP_IMAGE: 954 case ATH6KL_FW_IE_OTP_IMAGE:
942 ath6kl_dbg(ATH6KL_DBG_BOOT, "found otp image ie (%zd B)\n", 955 ath6kl_dbg(ATH6KL_DBG_BOOT, "found otp image ie (%zd B)\n",
943 ie_len); 956 ie_len);
@@ -991,9 +1004,6 @@ static int ath6kl_fetch_fw_apin(struct ath6kl *ar, const char *name)
991 ar->hw.reserved_ram_size); 1004 ar->hw.reserved_ram_size);
992 break; 1005 break;
993 case ATH6KL_FW_IE_CAPABILITIES: 1006 case ATH6KL_FW_IE_CAPABILITIES:
994 if (ie_len < DIV_ROUND_UP(ATH6KL_FW_CAPABILITY_MAX, 8))
995 break;
996
997 ath6kl_dbg(ATH6KL_DBG_BOOT, 1007 ath6kl_dbg(ATH6KL_DBG_BOOT,
998 "found firmware capabilities ie (%zd B)\n", 1008 "found firmware capabilities ie (%zd B)\n",
999 ie_len); 1009 ie_len);
@@ -1002,6 +1012,9 @@ static int ath6kl_fetch_fw_apin(struct ath6kl *ar, const char *name)
1002 index = i / 8; 1012 index = i / 8;
1003 bit = i % 8; 1013 bit = i % 8;
1004 1014
1015 if (index == ie_len)
1016 break;
1017
1005 if (data[index] & (1 << bit)) 1018 if (data[index] & (1 << bit))
1006 __set_bit(i, ar->fw_capabilities); 1019 __set_bit(i, ar->fw_capabilities);
1007 } 1020 }
@@ -1392,6 +1405,12 @@ static int ath6kl_init_upload(struct ath6kl *ar)
1392 ar->version.target_ver == AR6003_HW_2_1_1_VERSION) { 1405 ar->version.target_ver == AR6003_HW_2_1_1_VERSION) {
1393 ath6kl_err("temporary war to avoid sdio crc error\n"); 1406 ath6kl_err("temporary war to avoid sdio crc error\n");
1394 1407
1408 param = 0x28;
1409 address = GPIO_BASE_ADDRESS + GPIO_PIN9_ADDRESS;
1410 status = ath6kl_bmi_reg_write(ar, address, param);
1411 if (status)
1412 return status;
1413
1395 param = 0x20; 1414 param = 0x20;
1396 1415
1397 address = GPIO_BASE_ADDRESS + GPIO_PIN10_ADDRESS; 1416 address = GPIO_BASE_ADDRESS + GPIO_PIN10_ADDRESS;
@@ -1659,6 +1678,9 @@ void ath6kl_cleanup_vif(struct ath6kl_vif *vif, bool wmi_ready)
1659 cfg80211_scan_done(vif->scan_req, true); 1678 cfg80211_scan_done(vif->scan_req, true);
1660 vif->scan_req = NULL; 1679 vif->scan_req = NULL;
1661 } 1680 }
1681
1682 /* need to clean up enhanced bmiss detection fw state */
1683 ath6kl_cfg80211_sta_bmiss_enhance(vif, false);
1662} 1684}
1663 1685
1664void ath6kl_stop_txrx(struct ath6kl *ar) 1686void ath6kl_stop_txrx(struct ath6kl *ar)
diff --git a/drivers/net/wireless/ath/ath6kl/main.c b/drivers/net/wireless/ath/ath6kl/main.c
index e5524470529..c189e28e86a 100644
--- a/drivers/net/wireless/ath/ath6kl/main.c
+++ b/drivers/net/wireless/ath/ath6kl/main.c
@@ -554,20 +554,24 @@ void ath6kl_ready_event(void *devt, u8 *datap, u32 sw_ver, u32 abi_ver,
554 struct ath6kl *ar = devt; 554 struct ath6kl *ar = devt;
555 555
556 memcpy(ar->mac_addr, datap, ETH_ALEN); 556 memcpy(ar->mac_addr, datap, ETH_ALEN);
557 ath6kl_dbg(ATH6KL_DBG_TRC, "%s: mac addr = %pM\n", 557
558 __func__, ar->mac_addr); 558 ath6kl_dbg(ATH6KL_DBG_BOOT,
559 "ready event mac addr %pM sw_ver 0x%x abi_ver 0x%x cap 0x%x\n",
560 ar->mac_addr, sw_ver, abi_ver, cap);
559 561
560 ar->version.wlan_ver = sw_ver; 562 ar->version.wlan_ver = sw_ver;
561 ar->version.abi_ver = abi_ver; 563 ar->version.abi_ver = abi_ver;
562 ar->hw.cap = cap; 564 ar->hw.cap = cap;
563 565
564 snprintf(ar->wiphy->fw_version, 566 if (strlen(ar->wiphy->fw_version) == 0) {
565 sizeof(ar->wiphy->fw_version), 567 snprintf(ar->wiphy->fw_version,
566 "%u.%u.%u.%u", 568 sizeof(ar->wiphy->fw_version),
567 (ar->version.wlan_ver & 0xf0000000) >> 28, 569 "%u.%u.%u.%u",
568 (ar->version.wlan_ver & 0x0f000000) >> 24, 570 (ar->version.wlan_ver & 0xf0000000) >> 28,
569 (ar->version.wlan_ver & 0x00ff0000) >> 16, 571 (ar->version.wlan_ver & 0x0f000000) >> 24,
570 (ar->version.wlan_ver & 0x0000ffff)); 572 (ar->version.wlan_ver & 0x00ff0000) >> 16,
573 (ar->version.wlan_ver & 0x0000ffff));
574 }
571 575
572 /* indicate to the waiting thread that the ready event was received */ 576 /* indicate to the waiting thread that the ready event was received */
573 set_bit(WMI_READY, &ar->flag); 577 set_bit(WMI_READY, &ar->flag);
@@ -598,7 +602,6 @@ static int ath6kl_commit_ch_switch(struct ath6kl_vif *vif, u16 channel)
598 602
599 struct ath6kl *ar = vif->ar; 603 struct ath6kl *ar = vif->ar;
600 604
601 vif->next_chan = channel;
602 vif->profile.ch = cpu_to_le16(channel); 605 vif->profile.ch = cpu_to_le16(channel);
603 606
604 switch (vif->nw_type) { 607 switch (vif->nw_type) {
@@ -1167,7 +1170,10 @@ static void ath6kl_set_multicast_list(struct net_device *ndev)
1167 else 1170 else
1168 clear_bit(NETDEV_MCAST_ALL_ON, &vif->flags); 1171 clear_bit(NETDEV_MCAST_ALL_ON, &vif->flags);
1169 1172
1170 mc_all_on = mc_all_on || (vif->ar->state == ATH6KL_STATE_ON); 1173 if (test_bit(ATH6KL_FW_CAPABILITY_WOW_MULTICAST_FILTER,
1174 vif->ar->fw_capabilities)) {
1175 mc_all_on = mc_all_on || (vif->ar->state == ATH6KL_STATE_ON);
1176 }
1171 1177
1172 if (!(ndev->flags & IFF_MULTICAST)) { 1178 if (!(ndev->flags & IFF_MULTICAST)) {
1173 mc_all_on = false; 1179 mc_all_on = false;
diff --git a/drivers/net/wireless/ath/ath6kl/target.h b/drivers/net/wireless/ath/ath6kl/target.h
index 78e0ef4567a..a98c12ba70c 100644
--- a/drivers/net/wireless/ath/ath6kl/target.h
+++ b/drivers/net/wireless/ath/ath6kl/target.h
@@ -45,6 +45,7 @@
45#define LPO_CAL_ENABLE_S 20 45#define LPO_CAL_ENABLE_S 20
46#define LPO_CAL_ENABLE 0x00100000 46#define LPO_CAL_ENABLE 0x00100000
47 47
48#define GPIO_PIN9_ADDRESS 0x0000004c
48#define GPIO_PIN10_ADDRESS 0x00000050 49#define GPIO_PIN10_ADDRESS 0x00000050
49#define GPIO_PIN11_ADDRESS 0x00000054 50#define GPIO_PIN11_ADDRESS 0x00000054
50#define GPIO_PIN12_ADDRESS 0x00000058 51#define GPIO_PIN12_ADDRESS 0x00000058
diff --git a/drivers/net/wireless/ath/ath6kl/txrx.c b/drivers/net/wireless/ath/ath6kl/txrx.c
index 67206aedea6..7dfa0fd86d7 100644
--- a/drivers/net/wireless/ath/ath6kl/txrx.c
+++ b/drivers/net/wireless/ath/ath6kl/txrx.c
@@ -1036,6 +1036,7 @@ static void aggr_deque_frms(struct aggr_info_conn *agg_conn, u8 tid,
1036 rxtid = &agg_conn->rx_tid[tid]; 1036 rxtid = &agg_conn->rx_tid[tid];
1037 stats = &agg_conn->stat[tid]; 1037 stats = &agg_conn->stat[tid];
1038 1038
1039 spin_lock_bh(&rxtid->lock);
1039 idx = AGGR_WIN_IDX(rxtid->seq_next, rxtid->hold_q_sz); 1040 idx = AGGR_WIN_IDX(rxtid->seq_next, rxtid->hold_q_sz);
1040 1041
1041 /* 1042 /*
@@ -1054,8 +1055,6 @@ static void aggr_deque_frms(struct aggr_info_conn *agg_conn, u8 tid,
1054 seq_end = seq_no ? seq_no : rxtid->seq_next; 1055 seq_end = seq_no ? seq_no : rxtid->seq_next;
1055 idx_end = AGGR_WIN_IDX(seq_end, rxtid->hold_q_sz); 1056 idx_end = AGGR_WIN_IDX(seq_end, rxtid->hold_q_sz);
1056 1057
1057 spin_lock_bh(&rxtid->lock);
1058
1059 do { 1058 do {
1060 node = &rxtid->hold_q[idx]; 1059 node = &rxtid->hold_q[idx];
1061 if ((order == 1) && (!node->skb)) 1060 if ((order == 1) && (!node->skb))
@@ -1127,11 +1126,13 @@ static bool aggr_process_recv_frm(struct aggr_info_conn *agg_conn, u8 tid,
1127 ((end > extended_end) && (cur > extended_end) && 1126 ((end > extended_end) && (cur > extended_end) &&
1128 (cur < end))) { 1127 (cur < end))) {
1129 aggr_deque_frms(agg_conn, tid, 0, 0); 1128 aggr_deque_frms(agg_conn, tid, 0, 0);
1129 spin_lock_bh(&rxtid->lock);
1130 if (cur >= rxtid->hold_q_sz - 1) 1130 if (cur >= rxtid->hold_q_sz - 1)
1131 rxtid->seq_next = cur - (rxtid->hold_q_sz - 1); 1131 rxtid->seq_next = cur - (rxtid->hold_q_sz - 1);
1132 else 1132 else
1133 rxtid->seq_next = ATH6KL_MAX_SEQ_NO - 1133 rxtid->seq_next = ATH6KL_MAX_SEQ_NO -
1134 (rxtid->hold_q_sz - 2 - cur); 1134 (rxtid->hold_q_sz - 2 - cur);
1135 spin_unlock_bh(&rxtid->lock);
1135 } else { 1136 } else {
1136 /* 1137 /*
1137 * Dequeue only those frames that are outside the 1138 * Dequeue only those frames that are outside the
@@ -1185,25 +1186,25 @@ static bool aggr_process_recv_frm(struct aggr_info_conn *agg_conn, u8 tid,
1185 aggr_deque_frms(agg_conn, tid, 0, 1); 1186 aggr_deque_frms(agg_conn, tid, 0, 1);
1186 1187
1187 if (agg_conn->timer_scheduled) 1188 if (agg_conn->timer_scheduled)
1188 rxtid->progress = true; 1189 return is_queued;
1189 else 1190
1190 for (idx = 0 ; idx < rxtid->hold_q_sz; idx++) { 1191 spin_lock_bh(&rxtid->lock);
1191 if (rxtid->hold_q[idx].skb) { 1192 for (idx = 0 ; idx < rxtid->hold_q_sz; idx++) {
1192 /* 1193 if (rxtid->hold_q[idx].skb) {
1193 * There is a frame in the queue and no 1194 /*
1194 * timer so start a timer to ensure that 1195 * There is a frame in the queue and no
1195 * the frame doesn't remain stuck 1196 * timer so start a timer to ensure that
1196 * forever. 1197 * the frame doesn't remain stuck
1197 */ 1198 * forever.
1198 agg_conn->timer_scheduled = true; 1199 */
1199 mod_timer(&agg_conn->timer, 1200 agg_conn->timer_scheduled = true;
1200 (jiffies + 1201 mod_timer(&agg_conn->timer,
1201 HZ * (AGGR_RX_TIMEOUT) / 1000)); 1202 (jiffies + (HZ * AGGR_RX_TIMEOUT) / 1000));
1202 rxtid->progress = false; 1203 rxtid->timer_mon = true;
1203 rxtid->timer_mon = true; 1204 break;
1204 break;
1205 }
1206 } 1205 }
1206 }
1207 spin_unlock_bh(&rxtid->lock);
1207 1208
1208 return is_queued; 1209 return is_queued;
1209} 1210}
@@ -1608,7 +1609,7 @@ static void aggr_timeout(unsigned long arg)
1608 rxtid = &aggr_conn->rx_tid[i]; 1609 rxtid = &aggr_conn->rx_tid[i];
1609 stats = &aggr_conn->stat[i]; 1610 stats = &aggr_conn->stat[i];
1610 1611
1611 if (!rxtid->aggr || !rxtid->timer_mon || rxtid->progress) 1612 if (!rxtid->aggr || !rxtid->timer_mon)
1612 continue; 1613 continue;
1613 1614
1614 stats->num_timeouts++; 1615 stats->num_timeouts++;
@@ -1626,14 +1627,15 @@ static void aggr_timeout(unsigned long arg)
1626 rxtid = &aggr_conn->rx_tid[i]; 1627 rxtid = &aggr_conn->rx_tid[i];
1627 1628
1628 if (rxtid->aggr && rxtid->hold_q) { 1629 if (rxtid->aggr && rxtid->hold_q) {
1630 spin_lock_bh(&rxtid->lock);
1629 for (j = 0; j < rxtid->hold_q_sz; j++) { 1631 for (j = 0; j < rxtid->hold_q_sz; j++) {
1630 if (rxtid->hold_q[j].skb) { 1632 if (rxtid->hold_q[j].skb) {
1631 aggr_conn->timer_scheduled = true; 1633 aggr_conn->timer_scheduled = true;
1632 rxtid->timer_mon = true; 1634 rxtid->timer_mon = true;
1633 rxtid->progress = false;
1634 break; 1635 break;
1635 } 1636 }
1636 } 1637 }
1638 spin_unlock_bh(&rxtid->lock);
1637 1639
1638 if (j >= rxtid->hold_q_sz) 1640 if (j >= rxtid->hold_q_sz)
1639 rxtid->timer_mon = false; 1641 rxtid->timer_mon = false;
@@ -1660,7 +1662,6 @@ static void aggr_delete_tid_state(struct aggr_info_conn *aggr_conn, u8 tid)
1660 aggr_deque_frms(aggr_conn, tid, 0, 0); 1662 aggr_deque_frms(aggr_conn, tid, 0, 0);
1661 1663
1662 rxtid->aggr = false; 1664 rxtid->aggr = false;
1663 rxtid->progress = false;
1664 rxtid->timer_mon = false; 1665 rxtid->timer_mon = false;
1665 rxtid->win_sz = 0; 1666 rxtid->win_sz = 0;
1666 rxtid->seq_next = 0; 1667 rxtid->seq_next = 0;
@@ -1739,7 +1740,6 @@ void aggr_conn_init(struct ath6kl_vif *vif, struct aggr_info *aggr_info,
1739 for (i = 0; i < NUM_OF_TIDS; i++) { 1740 for (i = 0; i < NUM_OF_TIDS; i++) {
1740 rxtid = &aggr_conn->rx_tid[i]; 1741 rxtid = &aggr_conn->rx_tid[i];
1741 rxtid->aggr = false; 1742 rxtid->aggr = false;
1742 rxtid->progress = false;
1743 rxtid->timer_mon = false; 1743 rxtid->timer_mon = false;
1744 skb_queue_head_init(&rxtid->q); 1744 skb_queue_head_init(&rxtid->q);
1745 spin_lock_init(&rxtid->lock); 1745 spin_lock_init(&rxtid->lock);
diff --git a/drivers/net/wireless/ath/ath6kl/wmi.c b/drivers/net/wireless/ath/ath6kl/wmi.c
index ee8ec2394c2..a6caa673e8a 100644
--- a/drivers/net/wireless/ath/ath6kl/wmi.c
+++ b/drivers/net/wireless/ath/ath6kl/wmi.c
@@ -743,7 +743,6 @@ int ath6kl_wmi_force_roam_cmd(struct wmi *wmi, const u8 *bssid)
743 return -ENOMEM; 743 return -ENOMEM;
744 744
745 cmd = (struct roam_ctrl_cmd *) skb->data; 745 cmd = (struct roam_ctrl_cmd *) skb->data;
746 memset(cmd, 0, sizeof(*cmd));
747 746
748 memcpy(cmd->info.bssid, bssid, ETH_ALEN); 747 memcpy(cmd->info.bssid, bssid, ETH_ALEN);
749 cmd->roam_ctrl = WMI_FORCE_ROAM; 748 cmd->roam_ctrl = WMI_FORCE_ROAM;
@@ -753,6 +752,22 @@ int ath6kl_wmi_force_roam_cmd(struct wmi *wmi, const u8 *bssid)
753 NO_SYNC_WMIFLAG); 752 NO_SYNC_WMIFLAG);
754} 753}
755 754
755int ath6kl_wmi_ap_set_dtim_cmd(struct wmi *wmi, u8 if_idx, u32 dtim_period)
756{
757 struct sk_buff *skb;
758 struct set_dtim_cmd *cmd;
759
760 skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
761 if (!skb)
762 return -ENOMEM;
763
764 cmd = (struct set_dtim_cmd *) skb->data;
765
766 cmd->dtim_period = cpu_to_le32(dtim_period);
767 return ath6kl_wmi_cmd_send(wmi, if_idx, skb,
768 WMI_AP_SET_DTIM_CMDID, NO_SYNC_WMIFLAG);
769}
770
756int ath6kl_wmi_set_roam_mode_cmd(struct wmi *wmi, enum wmi_roam_mode mode) 771int ath6kl_wmi_set_roam_mode_cmd(struct wmi *wmi, enum wmi_roam_mode mode)
757{ 772{
758 struct sk_buff *skb; 773 struct sk_buff *skb;
@@ -763,7 +778,6 @@ int ath6kl_wmi_set_roam_mode_cmd(struct wmi *wmi, enum wmi_roam_mode mode)
763 return -ENOMEM; 778 return -ENOMEM;
764 779
765 cmd = (struct roam_ctrl_cmd *) skb->data; 780 cmd = (struct roam_ctrl_cmd *) skb->data;
766 memset(cmd, 0, sizeof(*cmd));
767 781
768 cmd->info.roam_mode = mode; 782 cmd->info.roam_mode = mode;
769 cmd->roam_ctrl = WMI_SET_ROAM_MODE; 783 cmd->roam_ctrl = WMI_SET_ROAM_MODE;
@@ -1995,7 +2009,7 @@ int ath6kl_wmi_probedssid_cmd(struct wmi *wmi, u8 if_idx, u8 index, u8 flag,
1995 struct wmi_probed_ssid_cmd *cmd; 2009 struct wmi_probed_ssid_cmd *cmd;
1996 int ret; 2010 int ret;
1997 2011
1998 if (index > MAX_PROBED_SSID_INDEX) 2012 if (index >= MAX_PROBED_SSIDS)
1999 return -EINVAL; 2013 return -EINVAL;
2000 2014
2001 if (ssid_len > sizeof(cmd->ssid)) 2015 if (ssid_len > sizeof(cmd->ssid))
@@ -2599,6 +2613,115 @@ static void ath6kl_wmi_relinquish_implicit_pstream_credits(struct wmi *wmi)
2599 spin_unlock_bh(&wmi->lock); 2613 spin_unlock_bh(&wmi->lock);
2600} 2614}
2601 2615
2616static int ath6kl_set_bitrate_mask64(struct wmi *wmi, u8 if_idx,
2617 const struct cfg80211_bitrate_mask *mask)
2618{
2619 struct sk_buff *skb;
2620 int ret, mode, band;
2621 u64 mcsrate, ratemask[IEEE80211_NUM_BANDS];
2622 struct wmi_set_tx_select_rates64_cmd *cmd;
2623
2624 memset(&ratemask, 0, sizeof(ratemask));
2625 for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
2626 /* copy legacy rate mask */
2627 ratemask[band] = mask->control[band].legacy;
2628 if (band == IEEE80211_BAND_5GHZ)
2629 ratemask[band] =
2630 mask->control[band].legacy << 4;
2631
2632 /* copy mcs rate mask */
2633 mcsrate = mask->control[band].mcs[1];
2634 mcsrate <<= 8;
2635 mcsrate |= mask->control[band].mcs[0];
2636 ratemask[band] |= mcsrate << 12;
2637 ratemask[band] |= mcsrate << 28;
2638 }
2639
2640 ath6kl_dbg(ATH6KL_DBG_WMI,
2641 "Ratemask 64 bit: 2.4:%llx 5:%llx\n",
2642 ratemask[0], ratemask[1]);
2643
2644 skb = ath6kl_wmi_get_new_buf(sizeof(*cmd) * WMI_RATES_MODE_MAX);
2645 if (!skb)
2646 return -ENOMEM;
2647
2648 cmd = (struct wmi_set_tx_select_rates64_cmd *) skb->data;
2649 for (mode = 0; mode < WMI_RATES_MODE_MAX; mode++) {
2650 /* A mode operate in 5GHZ band */
2651 if (mode == WMI_RATES_MODE_11A ||
2652 mode == WMI_RATES_MODE_11A_HT20 ||
2653 mode == WMI_RATES_MODE_11A_HT40)
2654 band = IEEE80211_BAND_5GHZ;
2655 else
2656 band = IEEE80211_BAND_2GHZ;
2657 cmd->ratemask[mode] = cpu_to_le64(ratemask[band]);
2658 }
2659
2660 ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb,
2661 WMI_SET_TX_SELECT_RATES_CMDID,
2662 NO_SYNC_WMIFLAG);
2663 return ret;
2664}
2665
2666static int ath6kl_set_bitrate_mask32(struct wmi *wmi, u8 if_idx,
2667 const struct cfg80211_bitrate_mask *mask)
2668{
2669 struct sk_buff *skb;
2670 int ret, mode, band;
2671 u32 mcsrate, ratemask[IEEE80211_NUM_BANDS];
2672 struct wmi_set_tx_select_rates32_cmd *cmd;
2673
2674 memset(&ratemask, 0, sizeof(ratemask));
2675 for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
2676 /* copy legacy rate mask */
2677 ratemask[band] = mask->control[band].legacy;
2678 if (band == IEEE80211_BAND_5GHZ)
2679 ratemask[band] =
2680 mask->control[band].legacy << 4;
2681
2682 /* copy mcs rate mask */
2683 mcsrate = mask->control[band].mcs[0];
2684 ratemask[band] |= mcsrate << 12;
2685 ratemask[band] |= mcsrate << 20;
2686 }
2687
2688 ath6kl_dbg(ATH6KL_DBG_WMI,
2689 "Ratemask 32 bit: 2.4:%x 5:%x\n",
2690 ratemask[0], ratemask[1]);
2691
2692 skb = ath6kl_wmi_get_new_buf(sizeof(*cmd) * WMI_RATES_MODE_MAX);
2693 if (!skb)
2694 return -ENOMEM;
2695
2696 cmd = (struct wmi_set_tx_select_rates32_cmd *) skb->data;
2697 for (mode = 0; mode < WMI_RATES_MODE_MAX; mode++) {
2698 /* A mode operate in 5GHZ band */
2699 if (mode == WMI_RATES_MODE_11A ||
2700 mode == WMI_RATES_MODE_11A_HT20 ||
2701 mode == WMI_RATES_MODE_11A_HT40)
2702 band = IEEE80211_BAND_5GHZ;
2703 else
2704 band = IEEE80211_BAND_2GHZ;
2705 cmd->ratemask[mode] = cpu_to_le32(ratemask[band]);
2706 }
2707
2708 ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb,
2709 WMI_SET_TX_SELECT_RATES_CMDID,
2710 NO_SYNC_WMIFLAG);
2711 return ret;
2712}
2713
2714int ath6kl_wmi_set_bitrate_mask(struct wmi *wmi, u8 if_idx,
2715 const struct cfg80211_bitrate_mask *mask)
2716{
2717 struct ath6kl *ar = wmi->parent_dev;
2718
2719 if (ar->hw.flags & ATH6KL_HW_FLAG_64BIT_RATES)
2720 return ath6kl_set_bitrate_mask64(wmi, if_idx, mask);
2721 else
2722 return ath6kl_set_bitrate_mask32(wmi, if_idx, mask);
2723}
2724
2602int ath6kl_wmi_set_host_sleep_mode_cmd(struct wmi *wmi, u8 if_idx, 2725int ath6kl_wmi_set_host_sleep_mode_cmd(struct wmi *wmi, u8 if_idx,
2603 enum ath6kl_host_mode host_mode) 2726 enum ath6kl_host_mode host_mode)
2604{ 2727{
@@ -2997,6 +3120,25 @@ int ath6kl_wmi_add_del_mcast_filter_cmd(struct wmi *wmi, u8 if_idx,
2997 return ret; 3120 return ret;
2998} 3121}
2999 3122
3123int ath6kl_wmi_sta_bmiss_enhance_cmd(struct wmi *wmi, u8 if_idx, bool enhance)
3124{
3125 struct sk_buff *skb;
3126 struct wmi_sta_bmiss_enhance_cmd *cmd;
3127 int ret;
3128
3129 skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
3130 if (!skb)
3131 return -ENOMEM;
3132
3133 cmd = (struct wmi_sta_bmiss_enhance_cmd *) skb->data;
3134 cmd->enable = enhance ? 1 : 0;
3135
3136 ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb,
3137 WMI_STA_BMISS_ENHANCE_CMDID,
3138 NO_SYNC_WMIFLAG);
3139 return ret;
3140}
3141
3000s32 ath6kl_wmi_get_rate(s8 rate_index) 3142s32 ath6kl_wmi_get_rate(s8 rate_index)
3001{ 3143{
3002 if (rate_index == RATE_AUTO) 3144 if (rate_index == RATE_AUTO)
diff --git a/drivers/net/wireless/ath/ath6kl/wmi.h b/drivers/net/wireless/ath/ath6kl/wmi.h
index 9076bec3a2b..43339aca585 100644
--- a/drivers/net/wireless/ath/ath6kl/wmi.h
+++ b/drivers/net/wireless/ath/ath6kl/wmi.h
@@ -624,6 +624,10 @@ enum wmi_cmd_id {
624 WMI_SEND_MGMT_CMDID, 624 WMI_SEND_MGMT_CMDID,
625 WMI_BEGIN_SCAN_CMDID, 625 WMI_BEGIN_SCAN_CMDID,
626 626
627 WMI_SET_BLACK_LIST,
628 WMI_SET_MCASTRATE,
629
630 WMI_STA_BMISS_ENHANCE_CMDID,
627}; 631};
628 632
629enum wmi_mgmt_frame_type { 633enum wmi_mgmt_frame_type {
@@ -960,6 +964,9 @@ enum wmi_bss_filter {
960 /* beacons matching probed ssid */ 964 /* beacons matching probed ssid */
961 PROBED_SSID_FILTER, 965 PROBED_SSID_FILTER,
962 966
967 /* beacons matching matched ssid */
968 MATCHED_SSID_FILTER,
969
963 /* marker only */ 970 /* marker only */
964 LAST_BSS_FILTER, 971 LAST_BSS_FILTER,
965}; 972};
@@ -978,7 +985,7 @@ struct wmi_bss_filter_cmd {
978} __packed; 985} __packed;
979 986
980/* WMI_SET_PROBED_SSID_CMDID */ 987/* WMI_SET_PROBED_SSID_CMDID */
981#define MAX_PROBED_SSID_INDEX 9 988#define MAX_PROBED_SSIDS 16
982 989
983enum wmi_ssid_flag { 990enum wmi_ssid_flag {
984 /* disables entry */ 991 /* disables entry */
@@ -989,10 +996,13 @@ enum wmi_ssid_flag {
989 996
990 /* probes for any ssid */ 997 /* probes for any ssid */
991 ANY_SSID_FLAG = 0x02, 998 ANY_SSID_FLAG = 0x02,
999
1000 /* match for ssid */
1001 MATCH_SSID_FLAG = 0x08,
992}; 1002};
993 1003
994struct wmi_probed_ssid_cmd { 1004struct wmi_probed_ssid_cmd {
995 /* 0 to MAX_PROBED_SSID_INDEX */ 1005 /* 0 to MAX_PROBED_SSIDS - 1 */
996 u8 entry_index; 1006 u8 entry_index;
997 1007
998 /* see, enum wmi_ssid_flg */ 1008 /* see, enum wmi_ssid_flg */
@@ -1017,6 +1027,11 @@ struct wmi_bmiss_time_cmd {
1017 __le16 num_beacons; 1027 __le16 num_beacons;
1018}; 1028};
1019 1029
1030/* WMI_STA_ENHANCE_BMISS_CMDID */
1031struct wmi_sta_bmiss_enhance_cmd {
1032 u8 enable;
1033} __packed;
1034
1020/* WMI_SET_POWER_MODE_CMDID */ 1035/* WMI_SET_POWER_MODE_CMDID */
1021enum wmi_power_mode { 1036enum wmi_power_mode {
1022 REC_POWER = 0x01, 1037 REC_POWER = 0x01,
@@ -1048,6 +1063,36 @@ struct wmi_power_params_cmd {
1048 __le16 ps_fail_event_policy; 1063 __le16 ps_fail_event_policy;
1049} __packed; 1064} __packed;
1050 1065
1066/*
1067 * Ratemask for below modes should be passed
1068 * to WMI_SET_TX_SELECT_RATES_CMDID.
1069 * AR6003 has 32 bit mask for each modes.
1070 * First 12 bits for legacy rates, 13 to 20
1071 * bits for HT 20 rates and 21 to 28 bits for
1072 * HT 40 rates
1073 */
1074enum wmi_mode_phy {
1075 WMI_RATES_MODE_11A = 0,
1076 WMI_RATES_MODE_11G,
1077 WMI_RATES_MODE_11B,
1078 WMI_RATES_MODE_11GONLY,
1079 WMI_RATES_MODE_11A_HT20,
1080 WMI_RATES_MODE_11G_HT20,
1081 WMI_RATES_MODE_11A_HT40,
1082 WMI_RATES_MODE_11G_HT40,
1083 WMI_RATES_MODE_MAX
1084};
1085
1086/* WMI_SET_TX_SELECT_RATES_CMDID */
1087struct wmi_set_tx_select_rates32_cmd {
1088 __le32 ratemask[WMI_RATES_MODE_MAX];
1089} __packed;
1090
1091/* WMI_SET_TX_SELECT_RATES_CMDID */
1092struct wmi_set_tx_select_rates64_cmd {
1093 __le64 ratemask[WMI_RATES_MODE_MAX];
1094} __packed;
1095
1051/* WMI_SET_DISC_TIMEOUT_CMDID */ 1096/* WMI_SET_DISC_TIMEOUT_CMDID */
1052struct wmi_disc_timeout_cmd { 1097struct wmi_disc_timeout_cmd {
1053 /* seconds */ 1098 /* seconds */
@@ -1572,6 +1617,10 @@ struct roam_ctrl_cmd {
1572 u8 roam_ctrl; 1617 u8 roam_ctrl;
1573} __packed; 1618} __packed;
1574 1619
1620struct set_dtim_cmd {
1621 __le32 dtim_period;
1622} __packed;
1623
1575/* BSS INFO HDR version 2.0 */ 1624/* BSS INFO HDR version 2.0 */
1576struct wmi_bss_info_hdr2 { 1625struct wmi_bss_info_hdr2 {
1577 __le16 ch; /* frequency in MHz */ 1626 __le16 ch; /* frequency in MHz */
@@ -2532,6 +2581,8 @@ int ath6kl_wmi_set_ip_cmd(struct wmi *wmi, u8 if_idx,
2532 __be32 ips0, __be32 ips1); 2581 __be32 ips0, __be32 ips1);
2533int ath6kl_wmi_set_host_sleep_mode_cmd(struct wmi *wmi, u8 if_idx, 2582int ath6kl_wmi_set_host_sleep_mode_cmd(struct wmi *wmi, u8 if_idx,
2534 enum ath6kl_host_mode host_mode); 2583 enum ath6kl_host_mode host_mode);
2584int ath6kl_wmi_set_bitrate_mask(struct wmi *wmi, u8 if_idx,
2585 const struct cfg80211_bitrate_mask *mask);
2535int ath6kl_wmi_set_wow_mode_cmd(struct wmi *wmi, u8 if_idx, 2586int ath6kl_wmi_set_wow_mode_cmd(struct wmi *wmi, u8 if_idx,
2536 enum ath6kl_wow_mode wow_mode, 2587 enum ath6kl_wow_mode wow_mode,
2537 u32 filter, u16 host_req_delay); 2588 u32 filter, u16 host_req_delay);
@@ -2542,11 +2593,14 @@ int ath6kl_wmi_add_wow_pattern_cmd(struct wmi *wmi, u8 if_idx,
2542int ath6kl_wmi_del_wow_pattern_cmd(struct wmi *wmi, u8 if_idx, 2593int ath6kl_wmi_del_wow_pattern_cmd(struct wmi *wmi, u8 if_idx,
2543 u16 list_id, u16 filter_id); 2594 u16 list_id, u16 filter_id);
2544int ath6kl_wmi_set_roam_lrssi_cmd(struct wmi *wmi, u8 lrssi); 2595int ath6kl_wmi_set_roam_lrssi_cmd(struct wmi *wmi, u8 lrssi);
2596int ath6kl_wmi_ap_set_dtim_cmd(struct wmi *wmi, u8 if_idx, u32 dtim_period);
2545int ath6kl_wmi_force_roam_cmd(struct wmi *wmi, const u8 *bssid); 2597int ath6kl_wmi_force_roam_cmd(struct wmi *wmi, const u8 *bssid);
2546int ath6kl_wmi_set_roam_mode_cmd(struct wmi *wmi, enum wmi_roam_mode mode); 2598int ath6kl_wmi_set_roam_mode_cmd(struct wmi *wmi, enum wmi_roam_mode mode);
2547int ath6kl_wmi_mcast_filter_cmd(struct wmi *wmi, u8 if_idx, bool mc_all_on); 2599int ath6kl_wmi_mcast_filter_cmd(struct wmi *wmi, u8 if_idx, bool mc_all_on);
2548int ath6kl_wmi_add_del_mcast_filter_cmd(struct wmi *wmi, u8 if_idx, 2600int ath6kl_wmi_add_del_mcast_filter_cmd(struct wmi *wmi, u8 if_idx,
2549 u8 *filter, bool add_filter); 2601 u8 *filter, bool add_filter);
2602int ath6kl_wmi_sta_bmiss_enhance_cmd(struct wmi *wmi, u8 if_idx, bool enable);
2603
2550/* AP mode uAPSD */ 2604/* AP mode uAPSD */
2551int ath6kl_wmi_ap_set_apsd(struct wmi *wmi, u8 if_idx, u8 enable); 2605int ath6kl_wmi_ap_set_apsd(struct wmi *wmi, u8 if_idx, u8 enable);
2552 2606
diff --git a/drivers/net/wireless/ath/ath9k/Makefile b/drivers/net/wireless/ath/ath9k/Makefile
index 3f0b8472378..9c41232b0cd 100644
--- a/drivers/net/wireless/ath/ath9k/Makefile
+++ b/drivers/net/wireless/ath/ath9k/Makefile
@@ -3,7 +3,9 @@ ath9k-y += beacon.o \
3 init.o \ 3 init.o \
4 main.o \ 4 main.o \
5 recv.o \ 5 recv.o \
6 xmit.o 6 xmit.o \
7 link.o \
8 antenna.o
7 9
8ath9k-$(CONFIG_ATH9K_BTCOEX_SUPPORT) += mci.o 10ath9k-$(CONFIG_ATH9K_BTCOEX_SUPPORT) += mci.o
9ath9k-$(CONFIG_ATH9K_RATE_CONTROL) += rc.o 11ath9k-$(CONFIG_ATH9K_RATE_CONTROL) += rc.o
diff --git a/drivers/net/wireless/ath/ath9k/ahb.c b/drivers/net/wireless/ath/ath9k/ahb.c
index 5e47ca6d16a..4a4e8a2b9d2 100644
--- a/drivers/net/wireless/ath/ath9k/ahb.c
+++ b/drivers/net/wireless/ath/ath9k/ahb.c
@@ -126,7 +126,7 @@ static int ath_ahb_probe(struct platform_device *pdev)
126 sc->irq = irq; 126 sc->irq = irq;
127 127
128 /* Will be cleared in ath9k_start() */ 128 /* Will be cleared in ath9k_start() */
129 sc->sc_flags |= SC_OP_INVALID; 129 set_bit(SC_OP_INVALID, &sc->sc_flags);
130 130
131 ret = request_irq(irq, ath_isr, IRQF_SHARED, "ath9k", sc); 131 ret = request_irq(irq, ath_isr, IRQF_SHARED, "ath9k", sc);
132 if (ret) { 132 if (ret) {
diff --git a/drivers/net/wireless/ath/ath9k/ani.c b/drivers/net/wireless/ath/ath9k/ani.c
index b4c77f9d747..ff007f500fe 100644
--- a/drivers/net/wireless/ath/ath9k/ani.c
+++ b/drivers/net/wireless/ath/ath9k/ani.c
@@ -104,11 +104,6 @@ static const struct ani_cck_level_entry cck_level_table[] = {
104#define ATH9K_ANI_CCK_DEF_LEVEL \ 104#define ATH9K_ANI_CCK_DEF_LEVEL \
105 2 /* default level - matches the INI settings */ 105 2 /* default level - matches the INI settings */
106 106
107static bool use_new_ani(struct ath_hw *ah)
108{
109 return AR_SREV_9300_20_OR_LATER(ah) || modparam_force_new_ani;
110}
111
112static void ath9k_hw_update_mibstats(struct ath_hw *ah, 107static void ath9k_hw_update_mibstats(struct ath_hw *ah,
113 struct ath9k_mib_stats *stats) 108 struct ath9k_mib_stats *stats)
114{ 109{
@@ -122,8 +117,6 @@ static void ath9k_hw_update_mibstats(struct ath_hw *ah,
122static void ath9k_ani_restart(struct ath_hw *ah) 117static void ath9k_ani_restart(struct ath_hw *ah)
123{ 118{
124 struct ar5416AniState *aniState; 119 struct ar5416AniState *aniState;
125 struct ath_common *common = ath9k_hw_common(ah);
126 u32 ofdm_base = 0, cck_base = 0;
127 120
128 if (!DO_ANI(ah)) 121 if (!DO_ANI(ah))
129 return; 122 return;
@@ -131,18 +124,10 @@ static void ath9k_ani_restart(struct ath_hw *ah)
131 aniState = &ah->curchan->ani; 124 aniState = &ah->curchan->ani;
132 aniState->listenTime = 0; 125 aniState->listenTime = 0;
133 126
134 if (!use_new_ani(ah)) {
135 ofdm_base = AR_PHY_COUNTMAX - ah->config.ofdm_trig_high;
136 cck_base = AR_PHY_COUNTMAX - ah->config.cck_trig_high;
137 }
138
139 ath_dbg(common, ANI, "Writing ofdmbase=%u cckbase=%u\n",
140 ofdm_base, cck_base);
141
142 ENABLE_REGWRITE_BUFFER(ah); 127 ENABLE_REGWRITE_BUFFER(ah);
143 128
144 REG_WRITE(ah, AR_PHY_ERR_1, ofdm_base); 129 REG_WRITE(ah, AR_PHY_ERR_1, 0);
145 REG_WRITE(ah, AR_PHY_ERR_2, cck_base); 130 REG_WRITE(ah, AR_PHY_ERR_2, 0);
146 REG_WRITE(ah, AR_PHY_ERR_MASK_1, AR_PHY_ERR_OFDM_TIMING); 131 REG_WRITE(ah, AR_PHY_ERR_MASK_1, AR_PHY_ERR_OFDM_TIMING);
147 REG_WRITE(ah, AR_PHY_ERR_MASK_2, AR_PHY_ERR_CCK_TIMING); 132 REG_WRITE(ah, AR_PHY_ERR_MASK_2, AR_PHY_ERR_CCK_TIMING);
148 133
@@ -154,129 +139,23 @@ static void ath9k_ani_restart(struct ath_hw *ah)
154 aniState->cckPhyErrCount = 0; 139 aniState->cckPhyErrCount = 0;
155} 140}
156 141
157static void ath9k_hw_ani_ofdm_err_trigger_old(struct ath_hw *ah)
158{
159 struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf;
160 struct ar5416AniState *aniState;
161 int32_t rssi;
162
163 aniState = &ah->curchan->ani;
164
165 if (aniState->noiseImmunityLevel < HAL_NOISE_IMMUNE_MAX) {
166 if (ath9k_hw_ani_control(ah, ATH9K_ANI_NOISE_IMMUNITY_LEVEL,
167 aniState->noiseImmunityLevel + 1)) {
168 return;
169 }
170 }
171
172 if (aniState->spurImmunityLevel < HAL_SPUR_IMMUNE_MAX) {
173 if (ath9k_hw_ani_control(ah, ATH9K_ANI_SPUR_IMMUNITY_LEVEL,
174 aniState->spurImmunityLevel + 1)) {
175 return;
176 }
177 }
178
179 if (ah->opmode == NL80211_IFTYPE_AP) {
180 if (aniState->firstepLevel < HAL_FIRST_STEP_MAX) {
181 ath9k_hw_ani_control(ah, ATH9K_ANI_FIRSTEP_LEVEL,
182 aniState->firstepLevel + 1);
183 }
184 return;
185 }
186 rssi = BEACON_RSSI(ah);
187 if (rssi > aniState->rssiThrHigh) {
188 if (!aniState->ofdmWeakSigDetectOff) {
189 if (ath9k_hw_ani_control(ah,
190 ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION,
191 false)) {
192 ath9k_hw_ani_control(ah,
193 ATH9K_ANI_SPUR_IMMUNITY_LEVEL, 0);
194 return;
195 }
196 }
197 if (aniState->firstepLevel < HAL_FIRST_STEP_MAX) {
198 ath9k_hw_ani_control(ah, ATH9K_ANI_FIRSTEP_LEVEL,
199 aniState->firstepLevel + 1);
200 return;
201 }
202 } else if (rssi > aniState->rssiThrLow) {
203 if (aniState->ofdmWeakSigDetectOff)
204 ath9k_hw_ani_control(ah,
205 ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION,
206 true);
207 if (aniState->firstepLevel < HAL_FIRST_STEP_MAX)
208 ath9k_hw_ani_control(ah, ATH9K_ANI_FIRSTEP_LEVEL,
209 aniState->firstepLevel + 1);
210 return;
211 } else {
212 if ((conf->channel->band == IEEE80211_BAND_2GHZ) &&
213 !conf_is_ht(conf)) {
214 if (!aniState->ofdmWeakSigDetectOff)
215 ath9k_hw_ani_control(ah,
216 ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION,
217 false);
218 if (aniState->firstepLevel > 0)
219 ath9k_hw_ani_control(ah,
220 ATH9K_ANI_FIRSTEP_LEVEL, 0);
221 return;
222 }
223 }
224}
225
226static void ath9k_hw_ani_cck_err_trigger_old(struct ath_hw *ah)
227{
228 struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf;
229 struct ar5416AniState *aniState;
230 int32_t rssi;
231
232 aniState = &ah->curchan->ani;
233 if (aniState->noiseImmunityLevel < HAL_NOISE_IMMUNE_MAX) {
234 if (ath9k_hw_ani_control(ah, ATH9K_ANI_NOISE_IMMUNITY_LEVEL,
235 aniState->noiseImmunityLevel + 1)) {
236 return;
237 }
238 }
239 if (ah->opmode == NL80211_IFTYPE_AP) {
240 if (aniState->firstepLevel < HAL_FIRST_STEP_MAX) {
241 ath9k_hw_ani_control(ah, ATH9K_ANI_FIRSTEP_LEVEL,
242 aniState->firstepLevel + 1);
243 }
244 return;
245 }
246 rssi = BEACON_RSSI(ah);
247 if (rssi > aniState->rssiThrLow) {
248 if (aniState->firstepLevel < HAL_FIRST_STEP_MAX)
249 ath9k_hw_ani_control(ah, ATH9K_ANI_FIRSTEP_LEVEL,
250 aniState->firstepLevel + 1);
251 } else {
252 if ((conf->channel->band == IEEE80211_BAND_2GHZ) &&
253 !conf_is_ht(conf)) {
254 if (aniState->firstepLevel > 0)
255 ath9k_hw_ani_control(ah,
256 ATH9K_ANI_FIRSTEP_LEVEL, 0);
257 }
258 }
259}
260
261/* Adjust the OFDM Noise Immunity Level */ 142/* Adjust the OFDM Noise Immunity Level */
262static void ath9k_hw_set_ofdm_nil(struct ath_hw *ah, u8 immunityLevel) 143static void ath9k_hw_set_ofdm_nil(struct ath_hw *ah, u8 immunityLevel,
144 bool scan)
263{ 145{
264 struct ar5416AniState *aniState = &ah->curchan->ani; 146 struct ar5416AniState *aniState = &ah->curchan->ani;
265 struct ath_common *common = ath9k_hw_common(ah); 147 struct ath_common *common = ath9k_hw_common(ah);
266 const struct ani_ofdm_level_entry *entry_ofdm; 148 const struct ani_ofdm_level_entry *entry_ofdm;
267 const struct ani_cck_level_entry *entry_cck; 149 const struct ani_cck_level_entry *entry_cck;
268 150 bool weak_sig;
269 aniState->noiseFloor = BEACON_RSSI(ah);
270 151
271 ath_dbg(common, ANI, "**** ofdmlevel %d=>%d, rssi=%d[lo=%d hi=%d]\n", 152 ath_dbg(common, ANI, "**** ofdmlevel %d=>%d, rssi=%d[lo=%d hi=%d]\n",
272 aniState->ofdmNoiseImmunityLevel, 153 aniState->ofdmNoiseImmunityLevel,
273 immunityLevel, aniState->noiseFloor, 154 immunityLevel, BEACON_RSSI(ah),
274 aniState->rssiThrLow, aniState->rssiThrHigh); 155 aniState->rssiThrLow, aniState->rssiThrHigh);
275 156
276 if (aniState->update_ani) 157 if (!scan)
277 aniState->ofdmNoiseImmunityLevel = 158 aniState->ofdmNoiseImmunityLevel = immunityLevel;
278 (immunityLevel > ATH9K_ANI_OFDM_DEF_LEVEL) ?
279 immunityLevel : ATH9K_ANI_OFDM_DEF_LEVEL;
280 159
281 entry_ofdm = &ofdm_level_table[aniState->ofdmNoiseImmunityLevel]; 160 entry_ofdm = &ofdm_level_table[aniState->ofdmNoiseImmunityLevel];
282 entry_cck = &cck_level_table[aniState->cckNoiseImmunityLevel]; 161 entry_cck = &cck_level_table[aniState->cckNoiseImmunityLevel];
@@ -292,12 +171,22 @@ static void ath9k_hw_set_ofdm_nil(struct ath_hw *ah, u8 immunityLevel)
292 ATH9K_ANI_FIRSTEP_LEVEL, 171 ATH9K_ANI_FIRSTEP_LEVEL,
293 entry_ofdm->fir_step_level); 172 entry_ofdm->fir_step_level);
294 173
295 if ((aniState->noiseFloor >= aniState->rssiThrHigh) && 174 weak_sig = entry_ofdm->ofdm_weak_signal_on;
296 (!aniState->ofdmWeakSigDetectOff != 175 if (ah->opmode == NL80211_IFTYPE_STATION &&
297 entry_ofdm->ofdm_weak_signal_on)) { 176 BEACON_RSSI(ah) <= aniState->rssiThrHigh)
177 weak_sig = true;
178
179 if (aniState->ofdmWeakSigDetect != weak_sig)
298 ath9k_hw_ani_control(ah, 180 ath9k_hw_ani_control(ah,
299 ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION, 181 ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION,
300 entry_ofdm->ofdm_weak_signal_on); 182 entry_ofdm->ofdm_weak_signal_on);
183
184 if (aniState->ofdmNoiseImmunityLevel >= ATH9K_ANI_OFDM_DEF_LEVEL) {
185 ah->config.ofdm_trig_high = ATH9K_ANI_OFDM_TRIG_HIGH;
186 ah->config.ofdm_trig_low = ATH9K_ANI_OFDM_TRIG_LOW_ABOVE_INI;
187 } else {
188 ah->config.ofdm_trig_high = ATH9K_ANI_OFDM_TRIG_HIGH_BELOW_INI;
189 ah->config.ofdm_trig_low = ATH9K_ANI_OFDM_TRIG_LOW;
301 } 190 }
302} 191}
303 192
@@ -308,43 +197,35 @@ static void ath9k_hw_ani_ofdm_err_trigger(struct ath_hw *ah)
308 if (!DO_ANI(ah)) 197 if (!DO_ANI(ah))
309 return; 198 return;
310 199
311 if (!use_new_ani(ah)) {
312 ath9k_hw_ani_ofdm_err_trigger_old(ah);
313 return;
314 }
315
316 aniState = &ah->curchan->ani; 200 aniState = &ah->curchan->ani;
317 201
318 if (aniState->ofdmNoiseImmunityLevel < ATH9K_ANI_OFDM_MAX_LEVEL) 202 if (aniState->ofdmNoiseImmunityLevel < ATH9K_ANI_OFDM_MAX_LEVEL)
319 ath9k_hw_set_ofdm_nil(ah, aniState->ofdmNoiseImmunityLevel + 1); 203 ath9k_hw_set_ofdm_nil(ah, aniState->ofdmNoiseImmunityLevel + 1, false);
320} 204}
321 205
322/* 206/*
323 * Set the ANI settings to match an CCK level. 207 * Set the ANI settings to match an CCK level.
324 */ 208 */
325static void ath9k_hw_set_cck_nil(struct ath_hw *ah, u_int8_t immunityLevel) 209static void ath9k_hw_set_cck_nil(struct ath_hw *ah, u_int8_t immunityLevel,
210 bool scan)
326{ 211{
327 struct ar5416AniState *aniState = &ah->curchan->ani; 212 struct ar5416AniState *aniState = &ah->curchan->ani;
328 struct ath_common *common = ath9k_hw_common(ah); 213 struct ath_common *common = ath9k_hw_common(ah);
329 const struct ani_ofdm_level_entry *entry_ofdm; 214 const struct ani_ofdm_level_entry *entry_ofdm;
330 const struct ani_cck_level_entry *entry_cck; 215 const struct ani_cck_level_entry *entry_cck;
331 216
332 aniState->noiseFloor = BEACON_RSSI(ah);
333 ath_dbg(common, ANI, "**** ccklevel %d=>%d, rssi=%d[lo=%d hi=%d]\n", 217 ath_dbg(common, ANI, "**** ccklevel %d=>%d, rssi=%d[lo=%d hi=%d]\n",
334 aniState->cckNoiseImmunityLevel, immunityLevel, 218 aniState->cckNoiseImmunityLevel, immunityLevel,
335 aniState->noiseFloor, aniState->rssiThrLow, 219 BEACON_RSSI(ah), aniState->rssiThrLow,
336 aniState->rssiThrHigh); 220 aniState->rssiThrHigh);
337 221
338 if ((ah->opmode == NL80211_IFTYPE_STATION || 222 if (ah->opmode == NL80211_IFTYPE_STATION &&
339 ah->opmode == NL80211_IFTYPE_ADHOC) && 223 BEACON_RSSI(ah) <= aniState->rssiThrLow &&
340 aniState->noiseFloor <= aniState->rssiThrLow &&
341 immunityLevel > ATH9K_ANI_CCK_MAX_LEVEL_LOW_RSSI) 224 immunityLevel > ATH9K_ANI_CCK_MAX_LEVEL_LOW_RSSI)
342 immunityLevel = ATH9K_ANI_CCK_MAX_LEVEL_LOW_RSSI; 225 immunityLevel = ATH9K_ANI_CCK_MAX_LEVEL_LOW_RSSI;
343 226
344 if (aniState->update_ani) 227 if (!scan)
345 aniState->cckNoiseImmunityLevel = 228 aniState->cckNoiseImmunityLevel = immunityLevel;
346 (immunityLevel > ATH9K_ANI_CCK_DEF_LEVEL) ?
347 immunityLevel : ATH9K_ANI_CCK_DEF_LEVEL;
348 229
349 entry_ofdm = &ofdm_level_table[aniState->ofdmNoiseImmunityLevel]; 230 entry_ofdm = &ofdm_level_table[aniState->ofdmNoiseImmunityLevel];
350 entry_cck = &cck_level_table[aniState->cckNoiseImmunityLevel]; 231 entry_cck = &cck_level_table[aniState->cckNoiseImmunityLevel];
@@ -359,7 +240,7 @@ static void ath9k_hw_set_cck_nil(struct ath_hw *ah, u_int8_t immunityLevel)
359 if (!AR_SREV_9300_20_OR_LATER(ah) || AR_SREV_9485(ah)) 240 if (!AR_SREV_9300_20_OR_LATER(ah) || AR_SREV_9485(ah))
360 return; 241 return;
361 242
362 if (aniState->mrcCCKOff == entry_cck->mrc_cck_on) 243 if (aniState->mrcCCK != entry_cck->mrc_cck_on)
363 ath9k_hw_ani_control(ah, 244 ath9k_hw_ani_control(ah,
364 ATH9K_ANI_MRC_CCK, 245 ATH9K_ANI_MRC_CCK,
365 entry_cck->mrc_cck_on); 246 entry_cck->mrc_cck_on);
@@ -372,68 +253,11 @@ static void ath9k_hw_ani_cck_err_trigger(struct ath_hw *ah)
372 if (!DO_ANI(ah)) 253 if (!DO_ANI(ah))
373 return; 254 return;
374 255
375 if (!use_new_ani(ah)) {
376 ath9k_hw_ani_cck_err_trigger_old(ah);
377 return;
378 }
379
380 aniState = &ah->curchan->ani; 256 aniState = &ah->curchan->ani;
381 257
382 if (aniState->cckNoiseImmunityLevel < ATH9K_ANI_CCK_MAX_LEVEL) 258 if (aniState->cckNoiseImmunityLevel < ATH9K_ANI_CCK_MAX_LEVEL)
383 ath9k_hw_set_cck_nil(ah, aniState->cckNoiseImmunityLevel + 1); 259 ath9k_hw_set_cck_nil(ah, aniState->cckNoiseImmunityLevel + 1,
384} 260 false);
385
386static void ath9k_hw_ani_lower_immunity_old(struct ath_hw *ah)
387{
388 struct ar5416AniState *aniState;
389 int32_t rssi;
390
391 aniState = &ah->curchan->ani;
392
393 if (ah->opmode == NL80211_IFTYPE_AP) {
394 if (aniState->firstepLevel > 0) {
395 if (ath9k_hw_ani_control(ah, ATH9K_ANI_FIRSTEP_LEVEL,
396 aniState->firstepLevel - 1))
397 return;
398 }
399 } else {
400 rssi = BEACON_RSSI(ah);
401 if (rssi > aniState->rssiThrHigh) {
402 /* XXX: Handle me */
403 } else if (rssi > aniState->rssiThrLow) {
404 if (aniState->ofdmWeakSigDetectOff) {
405 if (ath9k_hw_ani_control(ah,
406 ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION,
407 true))
408 return;
409 }
410 if (aniState->firstepLevel > 0) {
411 if (ath9k_hw_ani_control(ah,
412 ATH9K_ANI_FIRSTEP_LEVEL,
413 aniState->firstepLevel - 1))
414 return;
415 }
416 } else {
417 if (aniState->firstepLevel > 0) {
418 if (ath9k_hw_ani_control(ah,
419 ATH9K_ANI_FIRSTEP_LEVEL,
420 aniState->firstepLevel - 1))
421 return;
422 }
423 }
424 }
425
426 if (aniState->spurImmunityLevel > 0) {
427 if (ath9k_hw_ani_control(ah, ATH9K_ANI_SPUR_IMMUNITY_LEVEL,
428 aniState->spurImmunityLevel - 1))
429 return;
430 }
431
432 if (aniState->noiseImmunityLevel > 0) {
433 ath9k_hw_ani_control(ah, ATH9K_ANI_NOISE_IMMUNITY_LEVEL,
434 aniState->noiseImmunityLevel - 1);
435 return;
436 }
437} 261}
438 262
439/* 263/*
@@ -446,87 +270,18 @@ static void ath9k_hw_ani_lower_immunity(struct ath_hw *ah)
446 270
447 aniState = &ah->curchan->ani; 271 aniState = &ah->curchan->ani;
448 272
449 if (!use_new_ani(ah)) {
450 ath9k_hw_ani_lower_immunity_old(ah);
451 return;
452 }
453
454 /* lower OFDM noise immunity */ 273 /* lower OFDM noise immunity */
455 if (aniState->ofdmNoiseImmunityLevel > 0 && 274 if (aniState->ofdmNoiseImmunityLevel > 0 &&
456 (aniState->ofdmsTurn || aniState->cckNoiseImmunityLevel == 0)) { 275 (aniState->ofdmsTurn || aniState->cckNoiseImmunityLevel == 0)) {
457 ath9k_hw_set_ofdm_nil(ah, aniState->ofdmNoiseImmunityLevel - 1); 276 ath9k_hw_set_ofdm_nil(ah, aniState->ofdmNoiseImmunityLevel - 1,
277 false);
458 return; 278 return;
459 } 279 }
460 280
461 /* lower CCK noise immunity */ 281 /* lower CCK noise immunity */
462 if (aniState->cckNoiseImmunityLevel > 0) 282 if (aniState->cckNoiseImmunityLevel > 0)
463 ath9k_hw_set_cck_nil(ah, aniState->cckNoiseImmunityLevel - 1); 283 ath9k_hw_set_cck_nil(ah, aniState->cckNoiseImmunityLevel - 1,
464} 284 false);
465
466static void ath9k_ani_reset_old(struct ath_hw *ah, bool is_scanning)
467{
468 struct ar5416AniState *aniState;
469 struct ath9k_channel *chan = ah->curchan;
470 struct ath_common *common = ath9k_hw_common(ah);
471
472 if (!DO_ANI(ah))
473 return;
474
475 aniState = &ah->curchan->ani;
476
477 if (ah->opmode != NL80211_IFTYPE_STATION
478 && ah->opmode != NL80211_IFTYPE_ADHOC) {
479 ath_dbg(common, ANI, "Reset ANI state opmode %u\n", ah->opmode);
480 ah->stats.ast_ani_reset++;
481
482 if (ah->opmode == NL80211_IFTYPE_AP) {
483 /*
484 * ath9k_hw_ani_control() will only process items set on
485 * ah->ani_function
486 */
487 if (IS_CHAN_2GHZ(chan))
488 ah->ani_function = (ATH9K_ANI_SPUR_IMMUNITY_LEVEL |
489 ATH9K_ANI_FIRSTEP_LEVEL);
490 else
491 ah->ani_function = 0;
492 }
493
494 ath9k_hw_ani_control(ah, ATH9K_ANI_NOISE_IMMUNITY_LEVEL, 0);
495 ath9k_hw_ani_control(ah, ATH9K_ANI_SPUR_IMMUNITY_LEVEL, 0);
496 ath9k_hw_ani_control(ah, ATH9K_ANI_FIRSTEP_LEVEL, 0);
497 ath9k_hw_ani_control(ah, ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION,
498 !ATH9K_ANI_USE_OFDM_WEAK_SIG);
499 ath9k_hw_ani_control(ah, ATH9K_ANI_CCK_WEAK_SIGNAL_THR,
500 ATH9K_ANI_CCK_WEAK_SIG_THR);
501
502 ath9k_ani_restart(ah);
503 return;
504 }
505
506 if (aniState->noiseImmunityLevel != 0)
507 ath9k_hw_ani_control(ah, ATH9K_ANI_NOISE_IMMUNITY_LEVEL,
508 aniState->noiseImmunityLevel);
509 if (aniState->spurImmunityLevel != 0)
510 ath9k_hw_ani_control(ah, ATH9K_ANI_SPUR_IMMUNITY_LEVEL,
511 aniState->spurImmunityLevel);
512 if (aniState->ofdmWeakSigDetectOff)
513 ath9k_hw_ani_control(ah, ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION,
514 !aniState->ofdmWeakSigDetectOff);
515 if (aniState->cckWeakSigThreshold)
516 ath9k_hw_ani_control(ah, ATH9K_ANI_CCK_WEAK_SIGNAL_THR,
517 aniState->cckWeakSigThreshold);
518 if (aniState->firstepLevel != 0)
519 ath9k_hw_ani_control(ah, ATH9K_ANI_FIRSTEP_LEVEL,
520 aniState->firstepLevel);
521
522 ath9k_ani_restart(ah);
523
524 ENABLE_REGWRITE_BUFFER(ah);
525
526 REG_WRITE(ah, AR_PHY_ERR_MASK_1, AR_PHY_ERR_OFDM_TIMING);
527 REG_WRITE(ah, AR_PHY_ERR_MASK_2, AR_PHY_ERR_CCK_TIMING);
528
529 REGWRITE_BUFFER_FLUSH(ah);
530} 285}
531 286
532/* 287/*
@@ -539,13 +294,11 @@ void ath9k_ani_reset(struct ath_hw *ah, bool is_scanning)
539 struct ar5416AniState *aniState = &ah->curchan->ani; 294 struct ar5416AniState *aniState = &ah->curchan->ani;
540 struct ath9k_channel *chan = ah->curchan; 295 struct ath9k_channel *chan = ah->curchan;
541 struct ath_common *common = ath9k_hw_common(ah); 296 struct ath_common *common = ath9k_hw_common(ah);
297 int ofdm_nil, cck_nil;
542 298
543 if (!DO_ANI(ah)) 299 if (!DO_ANI(ah))
544 return; 300 return;
545 301
546 if (!use_new_ani(ah))
547 return ath9k_ani_reset_old(ah, is_scanning);
548
549 BUG_ON(aniState == NULL); 302 BUG_ON(aniState == NULL);
550 ah->stats.ast_ani_reset++; 303 ah->stats.ast_ani_reset++;
551 304
@@ -563,6 +316,11 @@ void ath9k_ani_reset(struct ath_hw *ah, bool is_scanning)
563 /* always allow mode (on/off) to be controlled */ 316 /* always allow mode (on/off) to be controlled */
564 ah->ani_function |= ATH9K_ANI_MODE; 317 ah->ani_function |= ATH9K_ANI_MODE;
565 318
319 ofdm_nil = max_t(int, ATH9K_ANI_OFDM_DEF_LEVEL,
320 aniState->ofdmNoiseImmunityLevel);
321 cck_nil = max_t(int, ATH9K_ANI_CCK_DEF_LEVEL,
322 aniState->cckNoiseImmunityLevel);
323
566 if (is_scanning || 324 if (is_scanning ||
567 (ah->opmode != NL80211_IFTYPE_STATION && 325 (ah->opmode != NL80211_IFTYPE_STATION &&
568 ah->opmode != NL80211_IFTYPE_ADHOC)) { 326 ah->opmode != NL80211_IFTYPE_ADHOC)) {
@@ -585,9 +343,8 @@ void ath9k_ani_reset(struct ath_hw *ah, bool is_scanning)
585 aniState->ofdmNoiseImmunityLevel, 343 aniState->ofdmNoiseImmunityLevel,
586 aniState->cckNoiseImmunityLevel); 344 aniState->cckNoiseImmunityLevel);
587 345
588 aniState->update_ani = false; 346 ofdm_nil = ATH9K_ANI_OFDM_DEF_LEVEL;
589 ath9k_hw_set_ofdm_nil(ah, ATH9K_ANI_OFDM_DEF_LEVEL); 347 cck_nil = ATH9K_ANI_CCK_DEF_LEVEL;
590 ath9k_hw_set_cck_nil(ah, ATH9K_ANI_CCK_DEF_LEVEL);
591 } 348 }
592 } else { 349 } else {
593 /* 350 /*
@@ -601,13 +358,9 @@ void ath9k_ani_reset(struct ath_hw *ah, bool is_scanning)
601 is_scanning, 358 is_scanning,
602 aniState->ofdmNoiseImmunityLevel, 359 aniState->ofdmNoiseImmunityLevel,
603 aniState->cckNoiseImmunityLevel); 360 aniState->cckNoiseImmunityLevel);
604
605 aniState->update_ani = true;
606 ath9k_hw_set_ofdm_nil(ah,
607 aniState->ofdmNoiseImmunityLevel);
608 ath9k_hw_set_cck_nil(ah,
609 aniState->cckNoiseImmunityLevel);
610 } 361 }
362 ath9k_hw_set_ofdm_nil(ah, ofdm_nil, is_scanning);
363 ath9k_hw_set_cck_nil(ah, cck_nil, is_scanning);
611 364
612 /* 365 /*
613 * enable phy counters if hw supports or if not, enable phy 366 * enable phy counters if hw supports or if not, enable phy
@@ -627,9 +380,6 @@ static bool ath9k_hw_ani_read_counters(struct ath_hw *ah)
627{ 380{
628 struct ath_common *common = ath9k_hw_common(ah); 381 struct ath_common *common = ath9k_hw_common(ah);
629 struct ar5416AniState *aniState = &ah->curchan->ani; 382 struct ar5416AniState *aniState = &ah->curchan->ani;
630 u32 ofdm_base = 0;
631 u32 cck_base = 0;
632 u32 ofdmPhyErrCnt, cckPhyErrCnt;
633 u32 phyCnt1, phyCnt2; 383 u32 phyCnt1, phyCnt2;
634 int32_t listenTime; 384 int32_t listenTime;
635 385
@@ -642,11 +392,6 @@ static bool ath9k_hw_ani_read_counters(struct ath_hw *ah)
642 return false; 392 return false;
643 } 393 }
644 394
645 if (!use_new_ani(ah)) {
646 ofdm_base = AR_PHY_COUNTMAX - ah->config.ofdm_trig_high;
647 cck_base = AR_PHY_COUNTMAX - ah->config.cck_trig_high;
648 }
649
650 aniState->listenTime += listenTime; 395 aniState->listenTime += listenTime;
651 396
652 ath9k_hw_update_mibstats(ah, &ah->ah_mibStats); 397 ath9k_hw_update_mibstats(ah, &ah->ah_mibStats);
@@ -654,35 +399,12 @@ static bool ath9k_hw_ani_read_counters(struct ath_hw *ah)
654 phyCnt1 = REG_READ(ah, AR_PHY_ERR_1); 399 phyCnt1 = REG_READ(ah, AR_PHY_ERR_1);
655 phyCnt2 = REG_READ(ah, AR_PHY_ERR_2); 400 phyCnt2 = REG_READ(ah, AR_PHY_ERR_2);
656 401
657 if (!use_new_ani(ah) && (phyCnt1 < ofdm_base || phyCnt2 < cck_base)) { 402 ah->stats.ast_ani_ofdmerrs += phyCnt1 - aniState->ofdmPhyErrCount;
658 if (phyCnt1 < ofdm_base) { 403 aniState->ofdmPhyErrCount = phyCnt1;
659 ath_dbg(common, ANI,
660 "phyCnt1 0x%x, resetting counter value to 0x%x\n",
661 phyCnt1, ofdm_base);
662 REG_WRITE(ah, AR_PHY_ERR_1, ofdm_base);
663 REG_WRITE(ah, AR_PHY_ERR_MASK_1,
664 AR_PHY_ERR_OFDM_TIMING);
665 }
666 if (phyCnt2 < cck_base) {
667 ath_dbg(common, ANI,
668 "phyCnt2 0x%x, resetting counter value to 0x%x\n",
669 phyCnt2, cck_base);
670 REG_WRITE(ah, AR_PHY_ERR_2, cck_base);
671 REG_WRITE(ah, AR_PHY_ERR_MASK_2,
672 AR_PHY_ERR_CCK_TIMING);
673 }
674 return false;
675 }
676 404
677 ofdmPhyErrCnt = phyCnt1 - ofdm_base; 405 ah->stats.ast_ani_cckerrs += phyCnt2 - aniState->cckPhyErrCount;
678 ah->stats.ast_ani_ofdmerrs += 406 aniState->cckPhyErrCount = phyCnt2;
679 ofdmPhyErrCnt - aniState->ofdmPhyErrCount;
680 aniState->ofdmPhyErrCount = ofdmPhyErrCnt;
681 407
682 cckPhyErrCnt = phyCnt2 - cck_base;
683 ah->stats.ast_ani_cckerrs +=
684 cckPhyErrCnt - aniState->cckPhyErrCount;
685 aniState->cckPhyErrCount = cckPhyErrCnt;
686 return true; 408 return true;
687} 409}
688 410
@@ -716,21 +438,10 @@ void ath9k_hw_ani_monitor(struct ath_hw *ah, struct ath9k_channel *chan)
716 438
717 if (aniState->listenTime > ah->aniperiod) { 439 if (aniState->listenTime > ah->aniperiod) {
718 if (cckPhyErrRate < ah->config.cck_trig_low && 440 if (cckPhyErrRate < ah->config.cck_trig_low &&
719 ((ofdmPhyErrRate < ah->config.ofdm_trig_low && 441 ofdmPhyErrRate < ah->config.ofdm_trig_low) {
720 aniState->ofdmNoiseImmunityLevel <
721 ATH9K_ANI_OFDM_DEF_LEVEL) ||
722 (ofdmPhyErrRate < ATH9K_ANI_OFDM_TRIG_LOW_ABOVE_INI &&
723 aniState->ofdmNoiseImmunityLevel >=
724 ATH9K_ANI_OFDM_DEF_LEVEL))) {
725 ath9k_hw_ani_lower_immunity(ah); 442 ath9k_hw_ani_lower_immunity(ah);
726 aniState->ofdmsTurn = !aniState->ofdmsTurn; 443 aniState->ofdmsTurn = !aniState->ofdmsTurn;
727 } else if ((ofdmPhyErrRate > ah->config.ofdm_trig_high && 444 } else if (ofdmPhyErrRate > ah->config.ofdm_trig_high) {
728 aniState->ofdmNoiseImmunityLevel >=
729 ATH9K_ANI_OFDM_DEF_LEVEL) ||
730 (ofdmPhyErrRate >
731 ATH9K_ANI_OFDM_TRIG_HIGH_BELOW_INI &&
732 aniState->ofdmNoiseImmunityLevel <
733 ATH9K_ANI_OFDM_DEF_LEVEL)) {
734 ath9k_hw_ani_ofdm_err_trigger(ah); 445 ath9k_hw_ani_ofdm_err_trigger(ah);
735 aniState->ofdmsTurn = false; 446 aniState->ofdmsTurn = false;
736 } else if (cckPhyErrRate > ah->config.cck_trig_high) { 447 } else if (cckPhyErrRate > ah->config.cck_trig_high) {
@@ -778,49 +489,6 @@ void ath9k_hw_disable_mib_counters(struct ath_hw *ah)
778} 489}
779EXPORT_SYMBOL(ath9k_hw_disable_mib_counters); 490EXPORT_SYMBOL(ath9k_hw_disable_mib_counters);
780 491
781/*
782 * Process a MIB interrupt. We may potentially be invoked because
783 * any of the MIB counters overflow/trigger so don't assume we're
784 * here because a PHY error counter triggered.
785 */
786void ath9k_hw_proc_mib_event(struct ath_hw *ah)
787{
788 u32 phyCnt1, phyCnt2;
789
790 /* Reset these counters regardless */
791 REG_WRITE(ah, AR_FILT_OFDM, 0);
792 REG_WRITE(ah, AR_FILT_CCK, 0);
793 if (!(REG_READ(ah, AR_SLP_MIB_CTRL) & AR_SLP_MIB_PENDING))
794 REG_WRITE(ah, AR_SLP_MIB_CTRL, AR_SLP_MIB_CLEAR);
795
796 /* Clear the mib counters and save them in the stats */
797 ath9k_hw_update_mibstats(ah, &ah->ah_mibStats);
798
799 if (!DO_ANI(ah)) {
800 /*
801 * We must always clear the interrupt cause by
802 * resetting the phy error regs.
803 */
804 REG_WRITE(ah, AR_PHY_ERR_1, 0);
805 REG_WRITE(ah, AR_PHY_ERR_2, 0);
806 return;
807 }
808
809 /* NB: these are not reset-on-read */
810 phyCnt1 = REG_READ(ah, AR_PHY_ERR_1);
811 phyCnt2 = REG_READ(ah, AR_PHY_ERR_2);
812 if (((phyCnt1 & AR_MIBCNT_INTRMASK) == AR_MIBCNT_INTRMASK) ||
813 ((phyCnt2 & AR_MIBCNT_INTRMASK) == AR_MIBCNT_INTRMASK)) {
814
815 if (!use_new_ani(ah))
816 ath9k_hw_ani_read_counters(ah);
817
818 /* NB: always restart to insure the h/w counters are reset */
819 ath9k_ani_restart(ah);
820 }
821}
822EXPORT_SYMBOL(ath9k_hw_proc_mib_event);
823
824void ath9k_hw_ani_setup(struct ath_hw *ah) 492void ath9k_hw_ani_setup(struct ath_hw *ah)
825{ 493{
826 int i; 494 int i;
@@ -845,66 +513,37 @@ void ath9k_hw_ani_init(struct ath_hw *ah)
845 513
846 ath_dbg(common, ANI, "Initialize ANI\n"); 514 ath_dbg(common, ANI, "Initialize ANI\n");
847 515
848 if (use_new_ani(ah)) { 516 ah->config.ofdm_trig_high = ATH9K_ANI_OFDM_TRIG_HIGH;
849 ah->config.ofdm_trig_high = ATH9K_ANI_OFDM_TRIG_HIGH_NEW; 517 ah->config.ofdm_trig_low = ATH9K_ANI_OFDM_TRIG_LOW;
850 ah->config.ofdm_trig_low = ATH9K_ANI_OFDM_TRIG_LOW_NEW;
851 518
852 ah->config.cck_trig_high = ATH9K_ANI_CCK_TRIG_HIGH_NEW; 519 ah->config.cck_trig_high = ATH9K_ANI_CCK_TRIG_HIGH;
853 ah->config.cck_trig_low = ATH9K_ANI_CCK_TRIG_LOW_NEW; 520 ah->config.cck_trig_low = ATH9K_ANI_CCK_TRIG_LOW;
854 } else {
855 ah->config.ofdm_trig_high = ATH9K_ANI_OFDM_TRIG_HIGH_OLD;
856 ah->config.ofdm_trig_low = ATH9K_ANI_OFDM_TRIG_LOW_OLD;
857
858 ah->config.cck_trig_high = ATH9K_ANI_CCK_TRIG_HIGH_OLD;
859 ah->config.cck_trig_low = ATH9K_ANI_CCK_TRIG_LOW_OLD;
860 }
861 521
862 for (i = 0; i < ARRAY_SIZE(ah->channels); i++) { 522 for (i = 0; i < ARRAY_SIZE(ah->channels); i++) {
863 struct ath9k_channel *chan = &ah->channels[i]; 523 struct ath9k_channel *chan = &ah->channels[i];
864 struct ar5416AniState *ani = &chan->ani; 524 struct ar5416AniState *ani = &chan->ani;
865 525
866 if (use_new_ani(ah)) { 526 ani->spurImmunityLevel = ATH9K_ANI_SPUR_IMMUNE_LVL;
867 ani->spurImmunityLevel =
868 ATH9K_ANI_SPUR_IMMUNE_LVL_NEW;
869 527
870 ani->firstepLevel = ATH9K_ANI_FIRSTEP_LVL_NEW; 528 ani->firstepLevel = ATH9K_ANI_FIRSTEP_LVL;
871 529
872 if (AR_SREV_9300_20_OR_LATER(ah)) 530 ani->mrcCCK = AR_SREV_9300_20_OR_LATER(ah) ? true : false;
873 ani->mrcCCKOff = 531
874 !ATH9K_ANI_ENABLE_MRC_CCK; 532 ani->ofdmsTurn = true;
875 else
876 ani->mrcCCKOff = true;
877
878 ani->ofdmsTurn = true;
879 } else {
880 ani->spurImmunityLevel =
881 ATH9K_ANI_SPUR_IMMUNE_LVL_OLD;
882 ani->firstepLevel = ATH9K_ANI_FIRSTEP_LVL_OLD;
883
884 ani->cckWeakSigThreshold =
885 ATH9K_ANI_CCK_WEAK_SIG_THR;
886 }
887 533
888 ani->rssiThrHigh = ATH9K_ANI_RSSI_THR_HIGH; 534 ani->rssiThrHigh = ATH9K_ANI_RSSI_THR_HIGH;
889 ani->rssiThrLow = ATH9K_ANI_RSSI_THR_LOW; 535 ani->rssiThrLow = ATH9K_ANI_RSSI_THR_LOW;
890 ani->ofdmWeakSigDetectOff = 536 ani->ofdmWeakSigDetect = ATH9K_ANI_USE_OFDM_WEAK_SIG;
891 !ATH9K_ANI_USE_OFDM_WEAK_SIG;
892 ani->cckNoiseImmunityLevel = ATH9K_ANI_CCK_DEF_LEVEL; 537 ani->cckNoiseImmunityLevel = ATH9K_ANI_CCK_DEF_LEVEL;
893 ani->ofdmNoiseImmunityLevel = ATH9K_ANI_OFDM_DEF_LEVEL; 538 ani->ofdmNoiseImmunityLevel = ATH9K_ANI_OFDM_DEF_LEVEL;
894 ani->update_ani = false;
895 } 539 }
896 540
897 /* 541 /*
898 * since we expect some ongoing maintenance on the tables, let's sanity 542 * since we expect some ongoing maintenance on the tables, let's sanity
899 * check here default level should not modify INI setting. 543 * check here default level should not modify INI setting.
900 */ 544 */
901 if (use_new_ani(ah)) { 545 ah->aniperiod = ATH9K_ANI_PERIOD;
902 ah->aniperiod = ATH9K_ANI_PERIOD_NEW; 546 ah->config.ani_poll_interval = ATH9K_ANI_POLLINTERVAL;
903 ah->config.ani_poll_interval = ATH9K_ANI_POLLINTERVAL_NEW;
904 } else {
905 ah->aniperiod = ATH9K_ANI_PERIOD_OLD;
906 ah->config.ani_poll_interval = ATH9K_ANI_POLLINTERVAL_OLD;
907 }
908 547
909 if (ah->config.enable_ani) 548 if (ah->config.enable_ani)
910 ah->proc_phyerr |= HAL_PROCESS_ANI; 549 ah->proc_phyerr |= HAL_PROCESS_ANI;
diff --git a/drivers/net/wireless/ath/ath9k/ani.h b/drivers/net/wireless/ath/ath9k/ani.h
index 72e2b874e17..1485bf5e351 100644
--- a/drivers/net/wireless/ath/ath9k/ani.h
+++ b/drivers/net/wireless/ath/ath9k/ani.h
@@ -24,42 +24,34 @@
24#define BEACON_RSSI(ahp) (ahp->stats.avgbrssi) 24#define BEACON_RSSI(ahp) (ahp->stats.avgbrssi)
25 25
26/* units are errors per second */ 26/* units are errors per second */
27#define ATH9K_ANI_OFDM_TRIG_HIGH_OLD 500 27#define ATH9K_ANI_OFDM_TRIG_HIGH 3500
28#define ATH9K_ANI_OFDM_TRIG_HIGH_NEW 3500
29#define ATH9K_ANI_OFDM_TRIG_HIGH_BELOW_INI 1000 28#define ATH9K_ANI_OFDM_TRIG_HIGH_BELOW_INI 1000
30 29
31/* units are errors per second */ 30/* units are errors per second */
32#define ATH9K_ANI_OFDM_TRIG_LOW_OLD 200 31#define ATH9K_ANI_OFDM_TRIG_LOW 400
33#define ATH9K_ANI_OFDM_TRIG_LOW_NEW 400
34#define ATH9K_ANI_OFDM_TRIG_LOW_ABOVE_INI 900 32#define ATH9K_ANI_OFDM_TRIG_LOW_ABOVE_INI 900
35 33
36/* units are errors per second */ 34/* units are errors per second */
37#define ATH9K_ANI_CCK_TRIG_HIGH_OLD 200 35#define ATH9K_ANI_CCK_TRIG_HIGH 600
38#define ATH9K_ANI_CCK_TRIG_HIGH_NEW 600
39 36
40/* units are errors per second */ 37/* units are errors per second */
41#define ATH9K_ANI_CCK_TRIG_LOW_OLD 100 38#define ATH9K_ANI_CCK_TRIG_LOW 300
42#define ATH9K_ANI_CCK_TRIG_LOW_NEW 300
43 39
44#define ATH9K_ANI_NOISE_IMMUNE_LVL 4 40#define ATH9K_ANI_NOISE_IMMUNE_LVL 4
45#define ATH9K_ANI_USE_OFDM_WEAK_SIG true 41#define ATH9K_ANI_USE_OFDM_WEAK_SIG true
46#define ATH9K_ANI_CCK_WEAK_SIG_THR false 42#define ATH9K_ANI_CCK_WEAK_SIG_THR false
47 43
48#define ATH9K_ANI_SPUR_IMMUNE_LVL_OLD 7 44#define ATH9K_ANI_SPUR_IMMUNE_LVL 3
49#define ATH9K_ANI_SPUR_IMMUNE_LVL_NEW 3
50 45
51#define ATH9K_ANI_FIRSTEP_LVL_OLD 0 46#define ATH9K_ANI_FIRSTEP_LVL 2
52#define ATH9K_ANI_FIRSTEP_LVL_NEW 2
53 47
54#define ATH9K_ANI_RSSI_THR_HIGH 40 48#define ATH9K_ANI_RSSI_THR_HIGH 40
55#define ATH9K_ANI_RSSI_THR_LOW 7 49#define ATH9K_ANI_RSSI_THR_LOW 7
56 50
57#define ATH9K_ANI_PERIOD_OLD 100 51#define ATH9K_ANI_PERIOD 300
58#define ATH9K_ANI_PERIOD_NEW 300
59 52
60/* in ms */ 53/* in ms */
61#define ATH9K_ANI_POLLINTERVAL_OLD 100 54#define ATH9K_ANI_POLLINTERVAL 1000
62#define ATH9K_ANI_POLLINTERVAL_NEW 1000
63 55
64#define HAL_NOISE_IMMUNE_MAX 4 56#define HAL_NOISE_IMMUNE_MAX 4
65#define HAL_SPUR_IMMUNE_MAX 7 57#define HAL_SPUR_IMMUNE_MAX 7
@@ -70,8 +62,6 @@
70#define ATH9K_SIG_SPUR_IMM_SETTING_MIN 0 62#define ATH9K_SIG_SPUR_IMM_SETTING_MIN 0
71#define ATH9K_SIG_SPUR_IMM_SETTING_MAX 22 63#define ATH9K_SIG_SPUR_IMM_SETTING_MAX 22
72 64
73#define ATH9K_ANI_ENABLE_MRC_CCK true
74
75/* values here are relative to the INI */ 65/* values here are relative to the INI */
76 66
77enum ath9k_ani_cmd { 67enum ath9k_ani_cmd {
@@ -119,16 +109,14 @@ struct ar5416AniState {
119 u8 ofdmNoiseImmunityLevel; 109 u8 ofdmNoiseImmunityLevel;
120 u8 cckNoiseImmunityLevel; 110 u8 cckNoiseImmunityLevel;
121 bool ofdmsTurn; 111 bool ofdmsTurn;
122 u8 mrcCCKOff; 112 u8 mrcCCK;
123 u8 spurImmunityLevel; 113 u8 spurImmunityLevel;
124 u8 firstepLevel; 114 u8 firstepLevel;
125 u8 ofdmWeakSigDetectOff; 115 u8 ofdmWeakSigDetect;
126 u8 cckWeakSigThreshold; 116 u8 cckWeakSigThreshold;
127 bool update_ani;
128 u32 listenTime; 117 u32 listenTime;
129 int32_t rssiThrLow; 118 int32_t rssiThrLow;
130 int32_t rssiThrHigh; 119 int32_t rssiThrHigh;
131 u32 noiseFloor;
132 u32 ofdmPhyErrCount; 120 u32 ofdmPhyErrCount;
133 u32 cckPhyErrCount; 121 u32 cckPhyErrCount;
134 int16_t pktRssi[2]; 122 int16_t pktRssi[2];
diff --git a/drivers/net/wireless/ath/ath9k/antenna.c b/drivers/net/wireless/ath/ath9k/antenna.c
new file mode 100644
index 00000000000..bbcfeb3b2a6
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/antenna.c
@@ -0,0 +1,776 @@
1/*
2 * Copyright (c) 2012 Qualcomm Atheros, Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include "ath9k.h"
18
19static inline bool ath_is_alt_ant_ratio_better(int alt_ratio, int maxdelta,
20 int mindelta, int main_rssi_avg,
21 int alt_rssi_avg, int pkt_count)
22{
23 return (((alt_ratio >= ATH_ANT_DIV_COMB_ALT_ANT_RATIO2) &&
24 (alt_rssi_avg > main_rssi_avg + maxdelta)) ||
25 (alt_rssi_avg > main_rssi_avg + mindelta)) && (pkt_count > 50);
26}
27
28static inline bool ath_ant_div_comb_alt_check(u8 div_group, int alt_ratio,
29 int curr_main_set, int curr_alt_set,
30 int alt_rssi_avg, int main_rssi_avg)
31{
32 bool result = false;
33 switch (div_group) {
34 case 0:
35 if (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO)
36 result = true;
37 break;
38 case 1:
39 case 2:
40 if ((((curr_main_set == ATH_ANT_DIV_COMB_LNA2) &&
41 (curr_alt_set == ATH_ANT_DIV_COMB_LNA1) &&
42 (alt_rssi_avg >= (main_rssi_avg - 5))) ||
43 ((curr_main_set == ATH_ANT_DIV_COMB_LNA1) &&
44 (curr_alt_set == ATH_ANT_DIV_COMB_LNA2) &&
45 (alt_rssi_avg >= (main_rssi_avg - 2)))) &&
46 (alt_rssi_avg >= 4))
47 result = true;
48 else
49 result = false;
50 break;
51 }
52
53 return result;
54}
55
56static void ath_lnaconf_alt_good_scan(struct ath_ant_comb *antcomb,
57 struct ath_hw_antcomb_conf ant_conf,
58 int main_rssi_avg)
59{
60 antcomb->quick_scan_cnt = 0;
61
62 if (ant_conf.main_lna_conf == ATH_ANT_DIV_COMB_LNA2)
63 antcomb->rssi_lna2 = main_rssi_avg;
64 else if (ant_conf.main_lna_conf == ATH_ANT_DIV_COMB_LNA1)
65 antcomb->rssi_lna1 = main_rssi_avg;
66
67 switch ((ant_conf.main_lna_conf << 4) | ant_conf.alt_lna_conf) {
68 case 0x10: /* LNA2 A-B */
69 antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
70 antcomb->first_quick_scan_conf =
71 ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
72 antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA1;
73 break;
74 case 0x20: /* LNA1 A-B */
75 antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
76 antcomb->first_quick_scan_conf =
77 ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
78 antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA2;
79 break;
80 case 0x21: /* LNA1 LNA2 */
81 antcomb->main_conf = ATH_ANT_DIV_COMB_LNA2;
82 antcomb->first_quick_scan_conf =
83 ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
84 antcomb->second_quick_scan_conf =
85 ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
86 break;
87 case 0x12: /* LNA2 LNA1 */
88 antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1;
89 antcomb->first_quick_scan_conf =
90 ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
91 antcomb->second_quick_scan_conf =
92 ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
93 break;
94 case 0x13: /* LNA2 A+B */
95 antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
96 antcomb->first_quick_scan_conf =
97 ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
98 antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA1;
99 break;
100 case 0x23: /* LNA1 A+B */
101 antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
102 antcomb->first_quick_scan_conf =
103 ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
104 antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA2;
105 break;
106 default:
107 break;
108 }
109}
110
111static void ath_select_ant_div_from_quick_scan(struct ath_ant_comb *antcomb,
112 struct ath_hw_antcomb_conf *div_ant_conf,
113 int main_rssi_avg, int alt_rssi_avg,
114 int alt_ratio)
115{
116 /* alt_good */
117 switch (antcomb->quick_scan_cnt) {
118 case 0:
119 /* set alt to main, and alt to first conf */
120 div_ant_conf->main_lna_conf = antcomb->main_conf;
121 div_ant_conf->alt_lna_conf = antcomb->first_quick_scan_conf;
122 break;
123 case 1:
124 /* set alt to main, and alt to first conf */
125 div_ant_conf->main_lna_conf = antcomb->main_conf;
126 div_ant_conf->alt_lna_conf = antcomb->second_quick_scan_conf;
127 antcomb->rssi_first = main_rssi_avg;
128 antcomb->rssi_second = alt_rssi_avg;
129
130 if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) {
131 /* main is LNA1 */
132 if (ath_is_alt_ant_ratio_better(alt_ratio,
133 ATH_ANT_DIV_COMB_LNA1_DELTA_HI,
134 ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
135 main_rssi_avg, alt_rssi_avg,
136 antcomb->total_pkt_count))
137 antcomb->first_ratio = true;
138 else
139 antcomb->first_ratio = false;
140 } else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2) {
141 if (ath_is_alt_ant_ratio_better(alt_ratio,
142 ATH_ANT_DIV_COMB_LNA1_DELTA_MID,
143 ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
144 main_rssi_avg, alt_rssi_avg,
145 antcomb->total_pkt_count))
146 antcomb->first_ratio = true;
147 else
148 antcomb->first_ratio = false;
149 } else {
150 if ((((alt_ratio >= ATH_ANT_DIV_COMB_ALT_ANT_RATIO2) &&
151 (alt_rssi_avg > main_rssi_avg +
152 ATH_ANT_DIV_COMB_LNA1_DELTA_HI)) ||
153 (alt_rssi_avg > main_rssi_avg)) &&
154 (antcomb->total_pkt_count > 50))
155 antcomb->first_ratio = true;
156 else
157 antcomb->first_ratio = false;
158 }
159 break;
160 case 2:
161 antcomb->alt_good = false;
162 antcomb->scan_not_start = false;
163 antcomb->scan = false;
164 antcomb->rssi_first = main_rssi_avg;
165 antcomb->rssi_third = alt_rssi_avg;
166
167 if (antcomb->second_quick_scan_conf == ATH_ANT_DIV_COMB_LNA1)
168 antcomb->rssi_lna1 = alt_rssi_avg;
169 else if (antcomb->second_quick_scan_conf ==
170 ATH_ANT_DIV_COMB_LNA2)
171 antcomb->rssi_lna2 = alt_rssi_avg;
172 else if (antcomb->second_quick_scan_conf ==
173 ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2) {
174 if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2)
175 antcomb->rssi_lna2 = main_rssi_avg;
176 else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1)
177 antcomb->rssi_lna1 = main_rssi_avg;
178 }
179
180 if (antcomb->rssi_lna2 > antcomb->rssi_lna1 +
181 ATH_ANT_DIV_COMB_LNA1_LNA2_SWITCH_DELTA)
182 div_ant_conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA2;
183 else
184 div_ant_conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA1;
185
186 if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) {
187 if (ath_is_alt_ant_ratio_better(alt_ratio,
188 ATH_ANT_DIV_COMB_LNA1_DELTA_HI,
189 ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
190 main_rssi_avg, alt_rssi_avg,
191 antcomb->total_pkt_count))
192 antcomb->second_ratio = true;
193 else
194 antcomb->second_ratio = false;
195 } else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2) {
196 if (ath_is_alt_ant_ratio_better(alt_ratio,
197 ATH_ANT_DIV_COMB_LNA1_DELTA_MID,
198 ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
199 main_rssi_avg, alt_rssi_avg,
200 antcomb->total_pkt_count))
201 antcomb->second_ratio = true;
202 else
203 antcomb->second_ratio = false;
204 } else {
205 if ((((alt_ratio >= ATH_ANT_DIV_COMB_ALT_ANT_RATIO2) &&
206 (alt_rssi_avg > main_rssi_avg +
207 ATH_ANT_DIV_COMB_LNA1_DELTA_HI)) ||
208 (alt_rssi_avg > main_rssi_avg)) &&
209 (antcomb->total_pkt_count > 50))
210 antcomb->second_ratio = true;
211 else
212 antcomb->second_ratio = false;
213 }
214
215 /* set alt to the conf with maximun ratio */
216 if (antcomb->first_ratio && antcomb->second_ratio) {
217 if (antcomb->rssi_second > antcomb->rssi_third) {
218 /* first alt*/
219 if ((antcomb->first_quick_scan_conf ==
220 ATH_ANT_DIV_COMB_LNA1) ||
221 (antcomb->first_quick_scan_conf ==
222 ATH_ANT_DIV_COMB_LNA2))
223 /* Set alt LNA1 or LNA2*/
224 if (div_ant_conf->main_lna_conf ==
225 ATH_ANT_DIV_COMB_LNA2)
226 div_ant_conf->alt_lna_conf =
227 ATH_ANT_DIV_COMB_LNA1;
228 else
229 div_ant_conf->alt_lna_conf =
230 ATH_ANT_DIV_COMB_LNA2;
231 else
232 /* Set alt to A+B or A-B */
233 div_ant_conf->alt_lna_conf =
234 antcomb->first_quick_scan_conf;
235 } else if ((antcomb->second_quick_scan_conf ==
236 ATH_ANT_DIV_COMB_LNA1) ||
237 (antcomb->second_quick_scan_conf ==
238 ATH_ANT_DIV_COMB_LNA2)) {
239 /* Set alt LNA1 or LNA2 */
240 if (div_ant_conf->main_lna_conf ==
241 ATH_ANT_DIV_COMB_LNA2)
242 div_ant_conf->alt_lna_conf =
243 ATH_ANT_DIV_COMB_LNA1;
244 else
245 div_ant_conf->alt_lna_conf =
246 ATH_ANT_DIV_COMB_LNA2;
247 } else {
248 /* Set alt to A+B or A-B */
249 div_ant_conf->alt_lna_conf =
250 antcomb->second_quick_scan_conf;
251 }
252 } else if (antcomb->first_ratio) {
253 /* first alt */
254 if ((antcomb->first_quick_scan_conf ==
255 ATH_ANT_DIV_COMB_LNA1) ||
256 (antcomb->first_quick_scan_conf ==
257 ATH_ANT_DIV_COMB_LNA2))
258 /* Set alt LNA1 or LNA2 */
259 if (div_ant_conf->main_lna_conf ==
260 ATH_ANT_DIV_COMB_LNA2)
261 div_ant_conf->alt_lna_conf =
262 ATH_ANT_DIV_COMB_LNA1;
263 else
264 div_ant_conf->alt_lna_conf =
265 ATH_ANT_DIV_COMB_LNA2;
266 else
267 /* Set alt to A+B or A-B */
268 div_ant_conf->alt_lna_conf =
269 antcomb->first_quick_scan_conf;
270 } else if (antcomb->second_ratio) {
271 /* second alt */
272 if ((antcomb->second_quick_scan_conf ==
273 ATH_ANT_DIV_COMB_LNA1) ||
274 (antcomb->second_quick_scan_conf ==
275 ATH_ANT_DIV_COMB_LNA2))
276 /* Set alt LNA1 or LNA2 */
277 if (div_ant_conf->main_lna_conf ==
278 ATH_ANT_DIV_COMB_LNA2)
279 div_ant_conf->alt_lna_conf =
280 ATH_ANT_DIV_COMB_LNA1;
281 else
282 div_ant_conf->alt_lna_conf =
283 ATH_ANT_DIV_COMB_LNA2;
284 else
285 /* Set alt to A+B or A-B */
286 div_ant_conf->alt_lna_conf =
287 antcomb->second_quick_scan_conf;
288 } else {
289 /* main is largest */
290 if ((antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) ||
291 (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2))
292 /* Set alt LNA1 or LNA2 */
293 if (div_ant_conf->main_lna_conf ==
294 ATH_ANT_DIV_COMB_LNA2)
295 div_ant_conf->alt_lna_conf =
296 ATH_ANT_DIV_COMB_LNA1;
297 else
298 div_ant_conf->alt_lna_conf =
299 ATH_ANT_DIV_COMB_LNA2;
300 else
301 /* Set alt to A+B or A-B */
302 div_ant_conf->alt_lna_conf = antcomb->main_conf;
303 }
304 break;
305 default:
306 break;
307 }
308}
309
310static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf,
311 struct ath_ant_comb *antcomb,
312 int alt_ratio)
313{
314 if (ant_conf->div_group == 0) {
315 /* Adjust the fast_div_bias based on main and alt lna conf */
316 switch ((ant_conf->main_lna_conf << 4) |
317 ant_conf->alt_lna_conf) {
318 case 0x01: /* A-B LNA2 */
319 ant_conf->fast_div_bias = 0x3b;
320 break;
321 case 0x02: /* A-B LNA1 */
322 ant_conf->fast_div_bias = 0x3d;
323 break;
324 case 0x03: /* A-B A+B */
325 ant_conf->fast_div_bias = 0x1;
326 break;
327 case 0x10: /* LNA2 A-B */
328 ant_conf->fast_div_bias = 0x7;
329 break;
330 case 0x12: /* LNA2 LNA1 */
331 ant_conf->fast_div_bias = 0x2;
332 break;
333 case 0x13: /* LNA2 A+B */
334 ant_conf->fast_div_bias = 0x7;
335 break;
336 case 0x20: /* LNA1 A-B */
337 ant_conf->fast_div_bias = 0x6;
338 break;
339 case 0x21: /* LNA1 LNA2 */
340 ant_conf->fast_div_bias = 0x0;
341 break;
342 case 0x23: /* LNA1 A+B */
343 ant_conf->fast_div_bias = 0x6;
344 break;
345 case 0x30: /* A+B A-B */
346 ant_conf->fast_div_bias = 0x1;
347 break;
348 case 0x31: /* A+B LNA2 */
349 ant_conf->fast_div_bias = 0x3b;
350 break;
351 case 0x32: /* A+B LNA1 */
352 ant_conf->fast_div_bias = 0x3d;
353 break;
354 default:
355 break;
356 }
357 } else if (ant_conf->div_group == 1) {
358 /* Adjust the fast_div_bias based on main and alt_lna_conf */
359 switch ((ant_conf->main_lna_conf << 4) |
360 ant_conf->alt_lna_conf) {
361 case 0x01: /* A-B LNA2 */
362 ant_conf->fast_div_bias = 0x1;
363 ant_conf->main_gaintb = 0;
364 ant_conf->alt_gaintb = 0;
365 break;
366 case 0x02: /* A-B LNA1 */
367 ant_conf->fast_div_bias = 0x1;
368 ant_conf->main_gaintb = 0;
369 ant_conf->alt_gaintb = 0;
370 break;
371 case 0x03: /* A-B A+B */
372 ant_conf->fast_div_bias = 0x1;
373 ant_conf->main_gaintb = 0;
374 ant_conf->alt_gaintb = 0;
375 break;
376 case 0x10: /* LNA2 A-B */
377 if (!(antcomb->scan) &&
378 (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
379 ant_conf->fast_div_bias = 0x3f;
380 else
381 ant_conf->fast_div_bias = 0x1;
382 ant_conf->main_gaintb = 0;
383 ant_conf->alt_gaintb = 0;
384 break;
385 case 0x12: /* LNA2 LNA1 */
386 ant_conf->fast_div_bias = 0x1;
387 ant_conf->main_gaintb = 0;
388 ant_conf->alt_gaintb = 0;
389 break;
390 case 0x13: /* LNA2 A+B */
391 if (!(antcomb->scan) &&
392 (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
393 ant_conf->fast_div_bias = 0x3f;
394 else
395 ant_conf->fast_div_bias = 0x1;
396 ant_conf->main_gaintb = 0;
397 ant_conf->alt_gaintb = 0;
398 break;
399 case 0x20: /* LNA1 A-B */
400 if (!(antcomb->scan) &&
401 (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
402 ant_conf->fast_div_bias = 0x3f;
403 else
404 ant_conf->fast_div_bias = 0x1;
405 ant_conf->main_gaintb = 0;
406 ant_conf->alt_gaintb = 0;
407 break;
408 case 0x21: /* LNA1 LNA2 */
409 ant_conf->fast_div_bias = 0x1;
410 ant_conf->main_gaintb = 0;
411 ant_conf->alt_gaintb = 0;
412 break;
413 case 0x23: /* LNA1 A+B */
414 if (!(antcomb->scan) &&
415 (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
416 ant_conf->fast_div_bias = 0x3f;
417 else
418 ant_conf->fast_div_bias = 0x1;
419 ant_conf->main_gaintb = 0;
420 ant_conf->alt_gaintb = 0;
421 break;
422 case 0x30: /* A+B A-B */
423 ant_conf->fast_div_bias = 0x1;
424 ant_conf->main_gaintb = 0;
425 ant_conf->alt_gaintb = 0;
426 break;
427 case 0x31: /* A+B LNA2 */
428 ant_conf->fast_div_bias = 0x1;
429 ant_conf->main_gaintb = 0;
430 ant_conf->alt_gaintb = 0;
431 break;
432 case 0x32: /* A+B LNA1 */
433 ant_conf->fast_div_bias = 0x1;
434 ant_conf->main_gaintb = 0;
435 ant_conf->alt_gaintb = 0;
436 break;
437 default:
438 break;
439 }
440 } else if (ant_conf->div_group == 2) {
441 /* Adjust the fast_div_bias based on main and alt_lna_conf */
442 switch ((ant_conf->main_lna_conf << 4) |
443 ant_conf->alt_lna_conf) {
444 case 0x01: /* A-B LNA2 */
445 ant_conf->fast_div_bias = 0x1;
446 ant_conf->main_gaintb = 0;
447 ant_conf->alt_gaintb = 0;
448 break;
449 case 0x02: /* A-B LNA1 */
450 ant_conf->fast_div_bias = 0x1;
451 ant_conf->main_gaintb = 0;
452 ant_conf->alt_gaintb = 0;
453 break;
454 case 0x03: /* A-B A+B */
455 ant_conf->fast_div_bias = 0x1;
456 ant_conf->main_gaintb = 0;
457 ant_conf->alt_gaintb = 0;
458 break;
459 case 0x10: /* LNA2 A-B */
460 if (!(antcomb->scan) &&
461 (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
462 ant_conf->fast_div_bias = 0x1;
463 else
464 ant_conf->fast_div_bias = 0x2;
465 ant_conf->main_gaintb = 0;
466 ant_conf->alt_gaintb = 0;
467 break;
468 case 0x12: /* LNA2 LNA1 */
469 ant_conf->fast_div_bias = 0x1;
470 ant_conf->main_gaintb = 0;
471 ant_conf->alt_gaintb = 0;
472 break;
473 case 0x13: /* LNA2 A+B */
474 if (!(antcomb->scan) &&
475 (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
476 ant_conf->fast_div_bias = 0x1;
477 else
478 ant_conf->fast_div_bias = 0x2;
479 ant_conf->main_gaintb = 0;
480 ant_conf->alt_gaintb = 0;
481 break;
482 case 0x20: /* LNA1 A-B */
483 if (!(antcomb->scan) &&
484 (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
485 ant_conf->fast_div_bias = 0x1;
486 else
487 ant_conf->fast_div_bias = 0x2;
488 ant_conf->main_gaintb = 0;
489 ant_conf->alt_gaintb = 0;
490 break;
491 case 0x21: /* LNA1 LNA2 */
492 ant_conf->fast_div_bias = 0x1;
493 ant_conf->main_gaintb = 0;
494 ant_conf->alt_gaintb = 0;
495 break;
496 case 0x23: /* LNA1 A+B */
497 if (!(antcomb->scan) &&
498 (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
499 ant_conf->fast_div_bias = 0x1;
500 else
501 ant_conf->fast_div_bias = 0x2;
502 ant_conf->main_gaintb = 0;
503 ant_conf->alt_gaintb = 0;
504 break;
505 case 0x30: /* A+B A-B */
506 ant_conf->fast_div_bias = 0x1;
507 ant_conf->main_gaintb = 0;
508 ant_conf->alt_gaintb = 0;
509 break;
510 case 0x31: /* A+B LNA2 */
511 ant_conf->fast_div_bias = 0x1;
512 ant_conf->main_gaintb = 0;
513 ant_conf->alt_gaintb = 0;
514 break;
515 case 0x32: /* A+B LNA1 */
516 ant_conf->fast_div_bias = 0x1;
517 ant_conf->main_gaintb = 0;
518 ant_conf->alt_gaintb = 0;
519 break;
520 default:
521 break;
522 }
523 }
524}
525
526void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs)
527{
528 struct ath_hw_antcomb_conf div_ant_conf;
529 struct ath_ant_comb *antcomb = &sc->ant_comb;
530 int alt_ratio = 0, alt_rssi_avg = 0, main_rssi_avg = 0, curr_alt_set;
531 int curr_main_set;
532 int main_rssi = rs->rs_rssi_ctl0;
533 int alt_rssi = rs->rs_rssi_ctl1;
534 int rx_ant_conf, main_ant_conf;
535 bool short_scan = false;
536
537 rx_ant_conf = (rs->rs_rssi_ctl2 >> ATH_ANT_RX_CURRENT_SHIFT) &
538 ATH_ANT_RX_MASK;
539 main_ant_conf = (rs->rs_rssi_ctl2 >> ATH_ANT_RX_MAIN_SHIFT) &
540 ATH_ANT_RX_MASK;
541
542 /* Record packet only when both main_rssi and alt_rssi is positive */
543 if (main_rssi > 0 && alt_rssi > 0) {
544 antcomb->total_pkt_count++;
545 antcomb->main_total_rssi += main_rssi;
546 antcomb->alt_total_rssi += alt_rssi;
547 if (main_ant_conf == rx_ant_conf)
548 antcomb->main_recv_cnt++;
549 else
550 antcomb->alt_recv_cnt++;
551 }
552
553 /* Short scan check */
554 if (antcomb->scan && antcomb->alt_good) {
555 if (time_after(jiffies, antcomb->scan_start_time +
556 msecs_to_jiffies(ATH_ANT_DIV_COMB_SHORT_SCAN_INTR)))
557 short_scan = true;
558 else
559 if (antcomb->total_pkt_count ==
560 ATH_ANT_DIV_COMB_SHORT_SCAN_PKTCOUNT) {
561 alt_ratio = ((antcomb->alt_recv_cnt * 100) /
562 antcomb->total_pkt_count);
563 if (alt_ratio < ATH_ANT_DIV_COMB_ALT_ANT_RATIO)
564 short_scan = true;
565 }
566 }
567
568 if (((antcomb->total_pkt_count < ATH_ANT_DIV_COMB_MAX_PKTCOUNT) ||
569 rs->rs_moreaggr) && !short_scan)
570 return;
571
572 if (antcomb->total_pkt_count) {
573 alt_ratio = ((antcomb->alt_recv_cnt * 100) /
574 antcomb->total_pkt_count);
575 main_rssi_avg = (antcomb->main_total_rssi /
576 antcomb->total_pkt_count);
577 alt_rssi_avg = (antcomb->alt_total_rssi /
578 antcomb->total_pkt_count);
579 }
580
581
582 ath9k_hw_antdiv_comb_conf_get(sc->sc_ah, &div_ant_conf);
583 curr_alt_set = div_ant_conf.alt_lna_conf;
584 curr_main_set = div_ant_conf.main_lna_conf;
585
586 antcomb->count++;
587
588 if (antcomb->count == ATH_ANT_DIV_COMB_MAX_COUNT) {
589 if (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO) {
590 ath_lnaconf_alt_good_scan(antcomb, div_ant_conf,
591 main_rssi_avg);
592 antcomb->alt_good = true;
593 } else {
594 antcomb->alt_good = false;
595 }
596
597 antcomb->count = 0;
598 antcomb->scan = true;
599 antcomb->scan_not_start = true;
600 }
601
602 if (!antcomb->scan) {
603 if (ath_ant_div_comb_alt_check(div_ant_conf.div_group,
604 alt_ratio, curr_main_set, curr_alt_set,
605 alt_rssi_avg, main_rssi_avg)) {
606 if (curr_alt_set == ATH_ANT_DIV_COMB_LNA2) {
607 /* Switch main and alt LNA */
608 div_ant_conf.main_lna_conf =
609 ATH_ANT_DIV_COMB_LNA2;
610 div_ant_conf.alt_lna_conf =
611 ATH_ANT_DIV_COMB_LNA1;
612 } else if (curr_alt_set == ATH_ANT_DIV_COMB_LNA1) {
613 div_ant_conf.main_lna_conf =
614 ATH_ANT_DIV_COMB_LNA1;
615 div_ant_conf.alt_lna_conf =
616 ATH_ANT_DIV_COMB_LNA2;
617 }
618
619 goto div_comb_done;
620 } else if ((curr_alt_set != ATH_ANT_DIV_COMB_LNA1) &&
621 (curr_alt_set != ATH_ANT_DIV_COMB_LNA2)) {
622 /* Set alt to another LNA */
623 if (curr_main_set == ATH_ANT_DIV_COMB_LNA2)
624 div_ant_conf.alt_lna_conf =
625 ATH_ANT_DIV_COMB_LNA1;
626 else if (curr_main_set == ATH_ANT_DIV_COMB_LNA1)
627 div_ant_conf.alt_lna_conf =
628 ATH_ANT_DIV_COMB_LNA2;
629
630 goto div_comb_done;
631 }
632
633 if ((alt_rssi_avg < (main_rssi_avg +
634 div_ant_conf.lna1_lna2_delta)))
635 goto div_comb_done;
636 }
637
638 if (!antcomb->scan_not_start) {
639 switch (curr_alt_set) {
640 case ATH_ANT_DIV_COMB_LNA2:
641 antcomb->rssi_lna2 = alt_rssi_avg;
642 antcomb->rssi_lna1 = main_rssi_avg;
643 antcomb->scan = true;
644 /* set to A+B */
645 div_ant_conf.main_lna_conf =
646 ATH_ANT_DIV_COMB_LNA1;
647 div_ant_conf.alt_lna_conf =
648 ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
649 break;
650 case ATH_ANT_DIV_COMB_LNA1:
651 antcomb->rssi_lna1 = alt_rssi_avg;
652 antcomb->rssi_lna2 = main_rssi_avg;
653 antcomb->scan = true;
654 /* set to A+B */
655 div_ant_conf.main_lna_conf = ATH_ANT_DIV_COMB_LNA2;
656 div_ant_conf.alt_lna_conf =
657 ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
658 break;
659 case ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2:
660 antcomb->rssi_add = alt_rssi_avg;
661 antcomb->scan = true;
662 /* set to A-B */
663 div_ant_conf.alt_lna_conf =
664 ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
665 break;
666 case ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2:
667 antcomb->rssi_sub = alt_rssi_avg;
668 antcomb->scan = false;
669 if (antcomb->rssi_lna2 >
670 (antcomb->rssi_lna1 +
671 ATH_ANT_DIV_COMB_LNA1_LNA2_SWITCH_DELTA)) {
672 /* use LNA2 as main LNA */
673 if ((antcomb->rssi_add > antcomb->rssi_lna1) &&
674 (antcomb->rssi_add > antcomb->rssi_sub)) {
675 /* set to A+B */
676 div_ant_conf.main_lna_conf =
677 ATH_ANT_DIV_COMB_LNA2;
678 div_ant_conf.alt_lna_conf =
679 ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
680 } else if (antcomb->rssi_sub >
681 antcomb->rssi_lna1) {
682 /* set to A-B */
683 div_ant_conf.main_lna_conf =
684 ATH_ANT_DIV_COMB_LNA2;
685 div_ant_conf.alt_lna_conf =
686 ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
687 } else {
688 /* set to LNA1 */
689 div_ant_conf.main_lna_conf =
690 ATH_ANT_DIV_COMB_LNA2;
691 div_ant_conf.alt_lna_conf =
692 ATH_ANT_DIV_COMB_LNA1;
693 }
694 } else {
695 /* use LNA1 as main LNA */
696 if ((antcomb->rssi_add > antcomb->rssi_lna2) &&
697 (antcomb->rssi_add > antcomb->rssi_sub)) {
698 /* set to A+B */
699 div_ant_conf.main_lna_conf =
700 ATH_ANT_DIV_COMB_LNA1;
701 div_ant_conf.alt_lna_conf =
702 ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
703 } else if (antcomb->rssi_sub >
704 antcomb->rssi_lna1) {
705 /* set to A-B */
706 div_ant_conf.main_lna_conf =
707 ATH_ANT_DIV_COMB_LNA1;
708 div_ant_conf.alt_lna_conf =
709 ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
710 } else {
711 /* set to LNA2 */
712 div_ant_conf.main_lna_conf =
713 ATH_ANT_DIV_COMB_LNA1;
714 div_ant_conf.alt_lna_conf =
715 ATH_ANT_DIV_COMB_LNA2;
716 }
717 }
718 break;
719 default:
720 break;
721 }
722 } else {
723 if (!antcomb->alt_good) {
724 antcomb->scan_not_start = false;
725 /* Set alt to another LNA */
726 if (curr_main_set == ATH_ANT_DIV_COMB_LNA2) {
727 div_ant_conf.main_lna_conf =
728 ATH_ANT_DIV_COMB_LNA2;
729 div_ant_conf.alt_lna_conf =
730 ATH_ANT_DIV_COMB_LNA1;
731 } else if (curr_main_set == ATH_ANT_DIV_COMB_LNA1) {
732 div_ant_conf.main_lna_conf =
733 ATH_ANT_DIV_COMB_LNA1;
734 div_ant_conf.alt_lna_conf =
735 ATH_ANT_DIV_COMB_LNA2;
736 }
737 goto div_comb_done;
738 }
739 }
740
741 ath_select_ant_div_from_quick_scan(antcomb, &div_ant_conf,
742 main_rssi_avg, alt_rssi_avg,
743 alt_ratio);
744
745 antcomb->quick_scan_cnt++;
746
747div_comb_done:
748 ath_ant_div_conf_fast_divbias(&div_ant_conf, antcomb, alt_ratio);
749 ath9k_hw_antdiv_comb_conf_set(sc->sc_ah, &div_ant_conf);
750
751 antcomb->scan_start_time = jiffies;
752 antcomb->total_pkt_count = 0;
753 antcomb->main_total_rssi = 0;
754 antcomb->alt_total_rssi = 0;
755 antcomb->main_recv_cnt = 0;
756 antcomb->alt_recv_cnt = 0;
757}
758
759void ath_ant_comb_update(struct ath_softc *sc)
760{
761 struct ath_hw *ah = sc->sc_ah;
762 struct ath_hw_antcomb_conf div_ant_conf;
763 u8 lna_conf;
764
765 ath9k_hw_antdiv_comb_conf_get(ah, &div_ant_conf);
766
767 if (sc->ant_rx == 1)
768 lna_conf = ATH_ANT_DIV_COMB_LNA1;
769 else
770 lna_conf = ATH_ANT_DIV_COMB_LNA2;
771
772 div_ant_conf.main_lna_conf = lna_conf;
773 div_ant_conf.alt_lna_conf = lna_conf;
774
775 ath9k_hw_antdiv_comb_conf_set(ah, &div_ant_conf);
776}
diff --git a/drivers/net/wireless/ath/ath9k/ar5008_phy.c b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
index c7492c6a251..874186bfda4 100644
--- a/drivers/net/wireless/ath/ath9k/ar5008_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
@@ -995,141 +995,6 @@ static u32 ar5008_hw_compute_pll_control(struct ath_hw *ah,
995 return pll; 995 return pll;
996} 996}
997 997
998static bool ar5008_hw_ani_control_old(struct ath_hw *ah,
999 enum ath9k_ani_cmd cmd,
1000 int param)
1001{
1002 struct ar5416AniState *aniState = &ah->curchan->ani;
1003 struct ath_common *common = ath9k_hw_common(ah);
1004
1005 switch (cmd & ah->ani_function) {
1006 case ATH9K_ANI_NOISE_IMMUNITY_LEVEL:{
1007 u32 level = param;
1008
1009 if (level >= ARRAY_SIZE(ah->totalSizeDesired)) {
1010 ath_dbg(common, ANI, "level out of range (%u > %zu)\n",
1011 level, ARRAY_SIZE(ah->totalSizeDesired));
1012 return false;
1013 }
1014
1015 REG_RMW_FIELD(ah, AR_PHY_DESIRED_SZ,
1016 AR_PHY_DESIRED_SZ_TOT_DES,
1017 ah->totalSizeDesired[level]);
1018 REG_RMW_FIELD(ah, AR_PHY_AGC_CTL1,
1019 AR_PHY_AGC_CTL1_COARSE_LOW,
1020 ah->coarse_low[level]);
1021 REG_RMW_FIELD(ah, AR_PHY_AGC_CTL1,
1022 AR_PHY_AGC_CTL1_COARSE_HIGH,
1023 ah->coarse_high[level]);
1024 REG_RMW_FIELD(ah, AR_PHY_FIND_SIG,
1025 AR_PHY_FIND_SIG_FIRPWR,
1026 ah->firpwr[level]);
1027
1028 if (level > aniState->noiseImmunityLevel)
1029 ah->stats.ast_ani_niup++;
1030 else if (level < aniState->noiseImmunityLevel)
1031 ah->stats.ast_ani_nidown++;
1032 aniState->noiseImmunityLevel = level;
1033 break;
1034 }
1035 case ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION:{
1036 u32 on = param ? 1 : 0;
1037
1038 if (on)
1039 REG_SET_BIT(ah, AR_PHY_SFCORR_LOW,
1040 AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW);
1041 else
1042 REG_CLR_BIT(ah, AR_PHY_SFCORR_LOW,
1043 AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW);
1044
1045 if (!on != aniState->ofdmWeakSigDetectOff) {
1046 if (on)
1047 ah->stats.ast_ani_ofdmon++;
1048 else
1049 ah->stats.ast_ani_ofdmoff++;
1050 aniState->ofdmWeakSigDetectOff = !on;
1051 }
1052 break;
1053 }
1054 case ATH9K_ANI_CCK_WEAK_SIGNAL_THR:{
1055 static const int weakSigThrCck[] = { 8, 6 };
1056 u32 high = param ? 1 : 0;
1057
1058 REG_RMW_FIELD(ah, AR_PHY_CCK_DETECT,
1059 AR_PHY_CCK_DETECT_WEAK_SIG_THR_CCK,
1060 weakSigThrCck[high]);
1061 if (high != aniState->cckWeakSigThreshold) {
1062 if (high)
1063 ah->stats.ast_ani_cckhigh++;
1064 else
1065 ah->stats.ast_ani_ccklow++;
1066 aniState->cckWeakSigThreshold = high;
1067 }
1068 break;
1069 }
1070 case ATH9K_ANI_FIRSTEP_LEVEL:{
1071 static const int firstep[] = { 0, 4, 8 };
1072 u32 level = param;
1073
1074 if (level >= ARRAY_SIZE(firstep)) {
1075 ath_dbg(common, ANI, "level out of range (%u > %zu)\n",
1076 level, ARRAY_SIZE(firstep));
1077 return false;
1078 }
1079 REG_RMW_FIELD(ah, AR_PHY_FIND_SIG,
1080 AR_PHY_FIND_SIG_FIRSTEP,
1081 firstep[level]);
1082 if (level > aniState->firstepLevel)
1083 ah->stats.ast_ani_stepup++;
1084 else if (level < aniState->firstepLevel)
1085 ah->stats.ast_ani_stepdown++;
1086 aniState->firstepLevel = level;
1087 break;
1088 }
1089 case ATH9K_ANI_SPUR_IMMUNITY_LEVEL:{
1090 static const int cycpwrThr1[] = { 2, 4, 6, 8, 10, 12, 14, 16 };
1091 u32 level = param;
1092
1093 if (level >= ARRAY_SIZE(cycpwrThr1)) {
1094 ath_dbg(common, ANI, "level out of range (%u > %zu)\n",
1095 level, ARRAY_SIZE(cycpwrThr1));
1096 return false;
1097 }
1098 REG_RMW_FIELD(ah, AR_PHY_TIMING5,
1099 AR_PHY_TIMING5_CYCPWR_THR1,
1100 cycpwrThr1[level]);
1101 if (level > aniState->spurImmunityLevel)
1102 ah->stats.ast_ani_spurup++;
1103 else if (level < aniState->spurImmunityLevel)
1104 ah->stats.ast_ani_spurdown++;
1105 aniState->spurImmunityLevel = level;
1106 break;
1107 }
1108 case ATH9K_ANI_PRESENT:
1109 break;
1110 default:
1111 ath_dbg(common, ANI, "invalid cmd %u\n", cmd);
1112 return false;
1113 }
1114
1115 ath_dbg(common, ANI, "ANI parameters:\n");
1116 ath_dbg(common, ANI,
1117 "noiseImmunityLevel=%d, spurImmunityLevel=%d, ofdmWeakSigDetectOff=%d\n",
1118 aniState->noiseImmunityLevel,
1119 aniState->spurImmunityLevel,
1120 !aniState->ofdmWeakSigDetectOff);
1121 ath_dbg(common, ANI,
1122 "cckWeakSigThreshold=%d, firstepLevel=%d, listenTime=%d\n",
1123 aniState->cckWeakSigThreshold,
1124 aniState->firstepLevel,
1125 aniState->listenTime);
1126 ath_dbg(common, ANI, "ofdmPhyErrCount=%d, cckPhyErrCount=%d\n\n",
1127 aniState->ofdmPhyErrCount,
1128 aniState->cckPhyErrCount);
1129
1130 return true;
1131}
1132
1133static bool ar5008_hw_ani_control_new(struct ath_hw *ah, 998static bool ar5008_hw_ani_control_new(struct ath_hw *ah,
1134 enum ath9k_ani_cmd cmd, 999 enum ath9k_ani_cmd cmd,
1135 int param) 1000 int param)
@@ -1206,18 +1071,18 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah,
1206 REG_CLR_BIT(ah, AR_PHY_SFCORR_LOW, 1071 REG_CLR_BIT(ah, AR_PHY_SFCORR_LOW,
1207 AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW); 1072 AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW);
1208 1073
1209 if (!on != aniState->ofdmWeakSigDetectOff) { 1074 if (on != aniState->ofdmWeakSigDetect) {
1210 ath_dbg(common, ANI, 1075 ath_dbg(common, ANI,
1211 "** ch %d: ofdm weak signal: %s=>%s\n", 1076 "** ch %d: ofdm weak signal: %s=>%s\n",
1212 chan->channel, 1077 chan->channel,
1213 !aniState->ofdmWeakSigDetectOff ? 1078 aniState->ofdmWeakSigDetect ?
1214 "on" : "off", 1079 "on" : "off",
1215 on ? "on" : "off"); 1080 on ? "on" : "off");
1216 if (on) 1081 if (on)
1217 ah->stats.ast_ani_ofdmon++; 1082 ah->stats.ast_ani_ofdmon++;
1218 else 1083 else
1219 ah->stats.ast_ani_ofdmoff++; 1084 ah->stats.ast_ani_ofdmoff++;
1220 aniState->ofdmWeakSigDetectOff = !on; 1085 aniState->ofdmWeakSigDetect = on;
1221 } 1086 }
1222 break; 1087 break;
1223 } 1088 }
@@ -1236,7 +1101,7 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah,
1236 * from INI file & cap value 1101 * from INI file & cap value
1237 */ 1102 */
1238 value = firstep_table[level] - 1103 value = firstep_table[level] -
1239 firstep_table[ATH9K_ANI_FIRSTEP_LVL_NEW] + 1104 firstep_table[ATH9K_ANI_FIRSTEP_LVL] +
1240 aniState->iniDef.firstep; 1105 aniState->iniDef.firstep;
1241 if (value < ATH9K_SIG_FIRSTEP_SETTING_MIN) 1106 if (value < ATH9K_SIG_FIRSTEP_SETTING_MIN)
1242 value = ATH9K_SIG_FIRSTEP_SETTING_MIN; 1107 value = ATH9K_SIG_FIRSTEP_SETTING_MIN;
@@ -1251,7 +1116,7 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah,
1251 * from INI file & cap value 1116 * from INI file & cap value
1252 */ 1117 */
1253 value2 = firstep_table[level] - 1118 value2 = firstep_table[level] -
1254 firstep_table[ATH9K_ANI_FIRSTEP_LVL_NEW] + 1119 firstep_table[ATH9K_ANI_FIRSTEP_LVL] +
1255 aniState->iniDef.firstepLow; 1120 aniState->iniDef.firstepLow;
1256 if (value2 < ATH9K_SIG_FIRSTEP_SETTING_MIN) 1121 if (value2 < ATH9K_SIG_FIRSTEP_SETTING_MIN)
1257 value2 = ATH9K_SIG_FIRSTEP_SETTING_MIN; 1122 value2 = ATH9K_SIG_FIRSTEP_SETTING_MIN;
@@ -1267,7 +1132,7 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah,
1267 chan->channel, 1132 chan->channel,
1268 aniState->firstepLevel, 1133 aniState->firstepLevel,
1269 level, 1134 level,
1270 ATH9K_ANI_FIRSTEP_LVL_NEW, 1135 ATH9K_ANI_FIRSTEP_LVL,
1271 value, 1136 value,
1272 aniState->iniDef.firstep); 1137 aniState->iniDef.firstep);
1273 ath_dbg(common, ANI, 1138 ath_dbg(common, ANI,
@@ -1275,7 +1140,7 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah,
1275 chan->channel, 1140 chan->channel,
1276 aniState->firstepLevel, 1141 aniState->firstepLevel,
1277 level, 1142 level,
1278 ATH9K_ANI_FIRSTEP_LVL_NEW, 1143 ATH9K_ANI_FIRSTEP_LVL,
1279 value2, 1144 value2,
1280 aniState->iniDef.firstepLow); 1145 aniState->iniDef.firstepLow);
1281 if (level > aniState->firstepLevel) 1146 if (level > aniState->firstepLevel)
@@ -1300,7 +1165,7 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah,
1300 * from INI file & cap value 1165 * from INI file & cap value
1301 */ 1166 */
1302 value = cycpwrThr1_table[level] - 1167 value = cycpwrThr1_table[level] -
1303 cycpwrThr1_table[ATH9K_ANI_SPUR_IMMUNE_LVL_NEW] + 1168 cycpwrThr1_table[ATH9K_ANI_SPUR_IMMUNE_LVL] +
1304 aniState->iniDef.cycpwrThr1; 1169 aniState->iniDef.cycpwrThr1;
1305 if (value < ATH9K_SIG_SPUR_IMM_SETTING_MIN) 1170 if (value < ATH9K_SIG_SPUR_IMM_SETTING_MIN)
1306 value = ATH9K_SIG_SPUR_IMM_SETTING_MIN; 1171 value = ATH9K_SIG_SPUR_IMM_SETTING_MIN;
@@ -1316,7 +1181,7 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah,
1316 * from INI file & cap value 1181 * from INI file & cap value
1317 */ 1182 */
1318 value2 = cycpwrThr1_table[level] - 1183 value2 = cycpwrThr1_table[level] -
1319 cycpwrThr1_table[ATH9K_ANI_SPUR_IMMUNE_LVL_NEW] + 1184 cycpwrThr1_table[ATH9K_ANI_SPUR_IMMUNE_LVL] +
1320 aniState->iniDef.cycpwrThr1Ext; 1185 aniState->iniDef.cycpwrThr1Ext;
1321 if (value2 < ATH9K_SIG_SPUR_IMM_SETTING_MIN) 1186 if (value2 < ATH9K_SIG_SPUR_IMM_SETTING_MIN)
1322 value2 = ATH9K_SIG_SPUR_IMM_SETTING_MIN; 1187 value2 = ATH9K_SIG_SPUR_IMM_SETTING_MIN;
@@ -1331,7 +1196,7 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah,
1331 chan->channel, 1196 chan->channel,
1332 aniState->spurImmunityLevel, 1197 aniState->spurImmunityLevel,
1333 level, 1198 level,
1334 ATH9K_ANI_SPUR_IMMUNE_LVL_NEW, 1199 ATH9K_ANI_SPUR_IMMUNE_LVL,
1335 value, 1200 value,
1336 aniState->iniDef.cycpwrThr1); 1201 aniState->iniDef.cycpwrThr1);
1337 ath_dbg(common, ANI, 1202 ath_dbg(common, ANI,
@@ -1339,7 +1204,7 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah,
1339 chan->channel, 1204 chan->channel,
1340 aniState->spurImmunityLevel, 1205 aniState->spurImmunityLevel,
1341 level, 1206 level,
1342 ATH9K_ANI_SPUR_IMMUNE_LVL_NEW, 1207 ATH9K_ANI_SPUR_IMMUNE_LVL,
1343 value2, 1208 value2,
1344 aniState->iniDef.cycpwrThr1Ext); 1209 aniState->iniDef.cycpwrThr1Ext);
1345 if (level > aniState->spurImmunityLevel) 1210 if (level > aniState->spurImmunityLevel)
@@ -1367,9 +1232,9 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah,
1367 ath_dbg(common, ANI, 1232 ath_dbg(common, ANI,
1368 "ANI parameters: SI=%d, ofdmWS=%s FS=%d MRCcck=%s listenTime=%d ofdmErrs=%d cckErrs=%d\n", 1233 "ANI parameters: SI=%d, ofdmWS=%s FS=%d MRCcck=%s listenTime=%d ofdmErrs=%d cckErrs=%d\n",
1369 aniState->spurImmunityLevel, 1234 aniState->spurImmunityLevel,
1370 !aniState->ofdmWeakSigDetectOff ? "on" : "off", 1235 aniState->ofdmWeakSigDetect ? "on" : "off",
1371 aniState->firstepLevel, 1236 aniState->firstepLevel,
1372 !aniState->mrcCCKOff ? "on" : "off", 1237 aniState->mrcCCK ? "on" : "off",
1373 aniState->listenTime, 1238 aniState->listenTime,
1374 aniState->ofdmPhyErrCount, 1239 aniState->ofdmPhyErrCount,
1375 aniState->cckPhyErrCount); 1240 aniState->cckPhyErrCount);
@@ -1454,10 +1319,10 @@ static void ar5008_hw_ani_cache_ini_regs(struct ath_hw *ah)
1454 AR_PHY_EXT_TIMING5_CYCPWR_THR1); 1319 AR_PHY_EXT_TIMING5_CYCPWR_THR1);
1455 1320
1456 /* these levels just got reset to defaults by the INI */ 1321 /* these levels just got reset to defaults by the INI */
1457 aniState->spurImmunityLevel = ATH9K_ANI_SPUR_IMMUNE_LVL_NEW; 1322 aniState->spurImmunityLevel = ATH9K_ANI_SPUR_IMMUNE_LVL;
1458 aniState->firstepLevel = ATH9K_ANI_FIRSTEP_LVL_NEW; 1323 aniState->firstepLevel = ATH9K_ANI_FIRSTEP_LVL;
1459 aniState->ofdmWeakSigDetectOff = !ATH9K_ANI_USE_OFDM_WEAK_SIG; 1324 aniState->ofdmWeakSigDetect = ATH9K_ANI_USE_OFDM_WEAK_SIG;
1460 aniState->mrcCCKOff = true; /* not available on pre AR9003 */ 1325 aniState->mrcCCK = false; /* not available on pre AR9003 */
1461} 1326}
1462 1327
1463static void ar5008_hw_set_nf_limits(struct ath_hw *ah) 1328static void ar5008_hw_set_nf_limits(struct ath_hw *ah)
@@ -1545,11 +1410,8 @@ void ar5008_hw_attach_phy_ops(struct ath_hw *ah)
1545 priv_ops->do_getnf = ar5008_hw_do_getnf; 1410 priv_ops->do_getnf = ar5008_hw_do_getnf;
1546 priv_ops->set_radar_params = ar5008_hw_set_radar_params; 1411 priv_ops->set_radar_params = ar5008_hw_set_radar_params;
1547 1412
1548 if (modparam_force_new_ani) { 1413 priv_ops->ani_control = ar5008_hw_ani_control_new;
1549 priv_ops->ani_control = ar5008_hw_ani_control_new; 1414 priv_ops->ani_cache_ini_regs = ar5008_hw_ani_cache_ini_regs;
1550 priv_ops->ani_cache_ini_regs = ar5008_hw_ani_cache_ini_regs;
1551 } else
1552 priv_ops->ani_control = ar5008_hw_ani_control_old;
1553 1415
1554 if (AR_SREV_9100(ah) || AR_SREV_9160_10_OR_LATER(ah)) 1416 if (AR_SREV_9100(ah) || AR_SREV_9160_10_OR_LATER(ah))
1555 priv_ops->compute_pll_control = ar9160_hw_compute_pll_control; 1417 priv_ops->compute_pll_control = ar9160_hw_compute_pll_control;
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_hw.c b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
index d9a69fc470c..edf21ea4fe9 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
@@ -21,10 +21,6 @@
21#include "ar9002_initvals.h" 21#include "ar9002_initvals.h"
22#include "ar9002_phy.h" 22#include "ar9002_phy.h"
23 23
24int modparam_force_new_ani;
25module_param_named(force_new_ani, modparam_force_new_ani, int, 0444);
26MODULE_PARM_DESC(force_new_ani, "Force new ANI for AR5008, AR9001, AR9002");
27
28/* General hardware code for the A5008/AR9001/AR9002 hadware families */ 24/* General hardware code for the A5008/AR9001/AR9002 hadware families */
29 25
30static void ar9002_hw_init_mode_regs(struct ath_hw *ah) 26static void ar9002_hw_init_mode_regs(struct ath_hw *ah)
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
index 952cb2b4656..89bf94d4d8a 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
@@ -1,5 +1,6 @@
1/* 1/*
2 * Copyright (c) 2010-2011 Atheros Communications Inc. 2 * Copyright (c) 2010-2011 Atheros Communications Inc.
3 * Copyright (c) 2011-2012 Qualcomm Atheros Inc.
3 * 4 *
4 * Permission to use, copy, modify, and/or distribute this software for any 5 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 6 * purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
index 9fdd70fcaf5..d7deb8c9f29 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
@@ -653,7 +653,6 @@ static void ar9003_hw_detect_outlier(int *mp_coeff, int nmeasurement,
653} 653}
654 654
655static void ar9003_hw_tx_iqcal_load_avg_2_passes(struct ath_hw *ah, 655static void ar9003_hw_tx_iqcal_load_avg_2_passes(struct ath_hw *ah,
656 u8 num_chains,
657 struct coeff *coeff, 656 struct coeff *coeff,
658 bool is_reusable) 657 bool is_reusable)
659{ 658{
@@ -677,7 +676,9 @@ static void ar9003_hw_tx_iqcal_load_avg_2_passes(struct ath_hw *ah,
677 } 676 }
678 677
679 /* Load the average of 2 passes */ 678 /* Load the average of 2 passes */
680 for (i = 0; i < num_chains; i++) { 679 for (i = 0; i < AR9300_MAX_CHAINS; i++) {
680 if (!(ah->txchainmask & (1 << i)))
681 continue;
681 nmeasurement = REG_READ_FIELD(ah, 682 nmeasurement = REG_READ_FIELD(ah,
682 AR_PHY_TX_IQCAL_STATUS_B0, 683 AR_PHY_TX_IQCAL_STATUS_B0,
683 AR_PHY_CALIBRATED_GAINS_0); 684 AR_PHY_CALIBRATED_GAINS_0);
@@ -767,16 +768,13 @@ static void ar9003_hw_tx_iq_cal_post_proc(struct ath_hw *ah, bool is_reusable)
767 }; 768 };
768 struct coeff coeff; 769 struct coeff coeff;
769 s32 iq_res[6]; 770 s32 iq_res[6];
770 u8 num_chains = 0;
771 int i, im, j; 771 int i, im, j;
772 int nmeasurement; 772 int nmeasurement;
773 773
774 for (i = 0; i < AR9300_MAX_CHAINS; i++) { 774 for (i = 0; i < AR9300_MAX_CHAINS; i++) {
775 if (ah->txchainmask & (1 << i)) 775 if (!(ah->txchainmask & (1 << i)))
776 num_chains++; 776 continue;
777 }
778 777
779 for (i = 0; i < num_chains; i++) {
780 nmeasurement = REG_READ_FIELD(ah, 778 nmeasurement = REG_READ_FIELD(ah,
781 AR_PHY_TX_IQCAL_STATUS_B0, 779 AR_PHY_TX_IQCAL_STATUS_B0,
782 AR_PHY_CALIBRATED_GAINS_0); 780 AR_PHY_CALIBRATED_GAINS_0);
@@ -839,8 +837,7 @@ static void ar9003_hw_tx_iq_cal_post_proc(struct ath_hw *ah, bool is_reusable)
839 coeff.phs_coeff[i][im] -= 128; 837 coeff.phs_coeff[i][im] -= 128;
840 } 838 }
841 } 839 }
842 ar9003_hw_tx_iqcal_load_avg_2_passes(ah, num_chains, 840 ar9003_hw_tx_iqcal_load_avg_2_passes(ah, &coeff, is_reusable);
843 &coeff, is_reusable);
844 841
845 return; 842 return;
846 843
@@ -901,7 +898,6 @@ static bool ar9003_hw_init_cal(struct ath_hw *ah,
901 bool is_reusable = true, status = true; 898 bool is_reusable = true, status = true;
902 bool run_rtt_cal = false, run_agc_cal; 899 bool run_rtt_cal = false, run_agc_cal;
903 bool rtt = !!(ah->caps.hw_caps & ATH9K_HW_CAP_RTT); 900 bool rtt = !!(ah->caps.hw_caps & ATH9K_HW_CAP_RTT);
904 bool mci = !!(ah->caps.hw_caps & ATH9K_HW_CAP_MCI);
905 u32 agc_ctrl = 0, agc_supp_cals = AR_PHY_AGC_CONTROL_OFFSET_CAL | 901 u32 agc_ctrl = 0, agc_supp_cals = AR_PHY_AGC_CONTROL_OFFSET_CAL |
906 AR_PHY_AGC_CONTROL_FLTR_CAL | 902 AR_PHY_AGC_CONTROL_FLTR_CAL |
907 AR_PHY_AGC_CONTROL_PKDET_CAL; 903 AR_PHY_AGC_CONTROL_PKDET_CAL;
@@ -970,7 +966,7 @@ static bool ar9003_hw_init_cal(struct ath_hw *ah,
970 } else if (caldata && !caldata->done_txiqcal_once) 966 } else if (caldata && !caldata->done_txiqcal_once)
971 run_agc_cal = true; 967 run_agc_cal = true;
972 968
973 if (mci && IS_CHAN_2GHZ(chan) && run_agc_cal) 969 if (ath9k_hw_mci_is_enabled(ah) && IS_CHAN_2GHZ(chan) && run_agc_cal)
974 ar9003_mci_init_cal_req(ah, &is_reusable); 970 ar9003_mci_init_cal_req(ah, &is_reusable);
975 971
976 if (!(IS_CHAN_HALF_RATE(chan) || IS_CHAN_QUARTER_RATE(chan))) { 972 if (!(IS_CHAN_HALF_RATE(chan) || IS_CHAN_QUARTER_RATE(chan))) {
@@ -993,7 +989,7 @@ skip_tx_iqcal:
993 0, AH_WAIT_TIMEOUT); 989 0, AH_WAIT_TIMEOUT);
994 } 990 }
995 991
996 if (mci && IS_CHAN_2GHZ(chan) && run_agc_cal) 992 if (ath9k_hw_mci_is_enabled(ah) && IS_CHAN_2GHZ(chan) && run_agc_cal)
997 ar9003_mci_init_cal_done(ah); 993 ar9003_mci_init_cal_done(ah);
998 994
999 if (rtt && !run_rtt_cal) { 995 if (rtt && !run_rtt_cal) {
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index dfb0441f406..b1e59236d24 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -3178,7 +3178,7 @@ static int ar9300_compress_decision(struct ath_hw *ah,
3178 mdata_size, length); 3178 mdata_size, length);
3179 return -1; 3179 return -1;
3180 } 3180 }
3181 memcpy(mptr, (u8 *) (word + COMP_HDR_LEN), length); 3181 memcpy(mptr, word + COMP_HDR_LEN, length);
3182 ath_dbg(common, EEPROM, 3182 ath_dbg(common, EEPROM,
3183 "restored eeprom %d: uncompressed, length %d\n", 3183 "restored eeprom %d: uncompressed, length %d\n",
3184 it, length); 3184 it, length);
@@ -3199,7 +3199,7 @@ static int ar9300_compress_decision(struct ath_hw *ah,
3199 "restore eeprom %d: block, reference %d, length %d\n", 3199 "restore eeprom %d: block, reference %d, length %d\n",
3200 it, reference, length); 3200 it, reference, length);
3201 ar9300_uncompress_block(ah, mptr, mdata_size, 3201 ar9300_uncompress_block(ah, mptr, mdata_size,
3202 (u8 *) (word + COMP_HDR_LEN), length); 3202 (word + COMP_HDR_LEN), length);
3203 break; 3203 break;
3204 default: 3204 default:
3205 ath_dbg(common, EEPROM, "unknown compression code %d\n", code); 3205 ath_dbg(common, EEPROM, "unknown compression code %d\n", code);
@@ -3412,11 +3412,11 @@ static u32 ath9k_hw_ar9003_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
3412 if (!dump_base_hdr) { 3412 if (!dump_base_hdr) {
3413 len += snprintf(buf + len, size - len, 3413 len += snprintf(buf + len, size - len,
3414 "%20s :\n", "2GHz modal Header"); 3414 "%20s :\n", "2GHz modal Header");
3415 len += ar9003_dump_modal_eeprom(buf, len, size, 3415 len = ar9003_dump_modal_eeprom(buf, len, size,
3416 &eep->modalHeader2G); 3416 &eep->modalHeader2G);
3417 len += snprintf(buf + len, size - len, 3417 len += snprintf(buf + len, size - len,
3418 "%20s :\n", "5GHz modal Header"); 3418 "%20s :\n", "5GHz modal Header");
3419 len += ar9003_dump_modal_eeprom(buf, len, size, 3419 len = ar9003_dump_modal_eeprom(buf, len, size,
3420 &eep->modalHeader5G); 3420 &eep->modalHeader5G);
3421 goto out; 3421 goto out;
3422 } 3422 }
@@ -3613,6 +3613,7 @@ static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz)
3613 value = ar9003_switch_com_spdt_get(ah, is2ghz); 3613 value = ar9003_switch_com_spdt_get(ah, is2ghz);
3614 REG_RMW_FIELD(ah, AR_PHY_GLB_CONTROL, 3614 REG_RMW_FIELD(ah, AR_PHY_GLB_CONTROL,
3615 AR_SWITCH_TABLE_COM_SPDT_ALL, value); 3615 AR_SWITCH_TABLE_COM_SPDT_ALL, value);
3616 REG_SET_BIT(ah, AR_PHY_GLB_CONTROL, AR_BTCOEX_CTRL_SPDT_ENABLE);
3616 } 3617 }
3617 3618
3618 value = ar9003_hw_ant_ctrl_common_2_get(ah, is2ghz); 3619 value = ar9003_hw_ant_ctrl_common_2_get(ah, is2ghz);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
index d9e0824af09..78816b8b217 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
@@ -181,11 +181,14 @@ static bool ar9003_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
181 u32 mask2 = 0; 181 u32 mask2 = 0;
182 struct ath9k_hw_capabilities *pCap = &ah->caps; 182 struct ath9k_hw_capabilities *pCap = &ah->caps;
183 struct ath_common *common = ath9k_hw_common(ah); 183 struct ath_common *common = ath9k_hw_common(ah);
184 u32 sync_cause = 0, async_cause; 184 u32 sync_cause = 0, async_cause, async_mask = AR_INTR_MAC_IRQ;
185
186 if (ath9k_hw_mci_is_enabled(ah))
187 async_mask |= AR_INTR_ASYNC_MASK_MCI;
185 188
186 async_cause = REG_READ(ah, AR_INTR_ASYNC_CAUSE); 189 async_cause = REG_READ(ah, AR_INTR_ASYNC_CAUSE);
187 190
188 if (async_cause & (AR_INTR_MAC_IRQ | AR_INTR_ASYNC_MASK_MCI)) { 191 if (async_cause & async_mask) {
189 if ((REG_READ(ah, AR_RTC_STATUS) & AR_RTC_STATUS_M) 192 if ((REG_READ(ah, AR_RTC_STATUS) & AR_RTC_STATUS_M)
190 == AR_RTC_STATUS_ON) 193 == AR_RTC_STATUS_ON)
191 isr = REG_READ(ah, AR_ISR); 194 isr = REG_READ(ah, AR_ISR);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mci.c b/drivers/net/wireless/ath/ath9k/ar9003_mci.c
index ffbb180f91e..61558375bfb 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_mci.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mci.c
@@ -35,31 +35,30 @@ static int ar9003_mci_wait_for_interrupt(struct ath_hw *ah, u32 address,
35 struct ath_common *common = ath9k_hw_common(ah); 35 struct ath_common *common = ath9k_hw_common(ah);
36 36
37 while (time_out) { 37 while (time_out) {
38 if (REG_READ(ah, address) & bit_position) { 38 if (!(REG_READ(ah, address) & bit_position)) {
39 REG_WRITE(ah, address, bit_position); 39 udelay(10);
40 40 time_out -= 10;
41 if (address == AR_MCI_INTERRUPT_RX_MSG_RAW) {
42 if (bit_position &
43 AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE)
44 ar9003_mci_reset_req_wakeup(ah);
45
46 if (bit_position &
47 (AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING |
48 AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING))
49 REG_WRITE(ah, AR_MCI_INTERRUPT_RAW,
50 AR_MCI_INTERRUPT_REMOTE_SLEEP_UPDATE);
51
52 REG_WRITE(ah, AR_MCI_INTERRUPT_RAW,
53 AR_MCI_INTERRUPT_RX_MSG);
54 }
55 break;
56 }
57 41
58 udelay(10); 42 if (time_out < 0)
59 time_out -= 10; 43 break;
44 else
45 continue;
46 }
47 REG_WRITE(ah, address, bit_position);
60 48
61 if (time_out < 0) 49 if (address != AR_MCI_INTERRUPT_RX_MSG_RAW)
62 break; 50 break;
51
52 if (bit_position & AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE)
53 ar9003_mci_reset_req_wakeup(ah);
54
55 if (bit_position & (AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING |
56 AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING))
57 REG_WRITE(ah, AR_MCI_INTERRUPT_RAW,
58 AR_MCI_INTERRUPT_REMOTE_SLEEP_UPDATE);
59
60 REG_WRITE(ah, AR_MCI_INTERRUPT_RAW, AR_MCI_INTERRUPT_RX_MSG);
61 break;
63 } 62 }
64 63
65 if (time_out <= 0) { 64 if (time_out <= 0) {
@@ -127,14 +126,13 @@ static void ar9003_mci_send_coex_version_query(struct ath_hw *ah,
127 struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci; 126 struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
128 u32 payload[4] = {0, 0, 0, 0}; 127 u32 payload[4] = {0, 0, 0, 0};
129 128
130 if (!mci->bt_version_known && 129 if (mci->bt_version_known ||
131 (mci->bt_state != MCI_BT_SLEEP)) { 130 (mci->bt_state == MCI_BT_SLEEP))
132 MCI_GPM_SET_TYPE_OPCODE(payload, 131 return;
133 MCI_GPM_COEX_AGENT, 132
134 MCI_GPM_COEX_VERSION_QUERY); 133 MCI_GPM_SET_TYPE_OPCODE(payload, MCI_GPM_COEX_AGENT,
135 ar9003_mci_send_message(ah, MCI_GPM, 0, payload, 16, 134 MCI_GPM_COEX_VERSION_QUERY);
136 wait_done, true); 135 ar9003_mci_send_message(ah, MCI_GPM, 0, payload, 16, wait_done, true);
137 }
138} 136}
139 137
140static void ar9003_mci_send_coex_version_response(struct ath_hw *ah, 138static void ar9003_mci_send_coex_version_response(struct ath_hw *ah,
@@ -158,15 +156,14 @@ static void ar9003_mci_send_coex_wlan_channels(struct ath_hw *ah,
158 struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci; 156 struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
159 u32 *payload = &mci->wlan_channels[0]; 157 u32 *payload = &mci->wlan_channels[0];
160 158
161 if ((mci->wlan_channels_update == true) && 159 if (!mci->wlan_channels_update ||
162 (mci->bt_state != MCI_BT_SLEEP)) { 160 (mci->bt_state == MCI_BT_SLEEP))
163 MCI_GPM_SET_TYPE_OPCODE(payload, 161 return;
164 MCI_GPM_COEX_AGENT, 162
165 MCI_GPM_COEX_WLAN_CHANNELS); 163 MCI_GPM_SET_TYPE_OPCODE(payload, MCI_GPM_COEX_AGENT,
166 ar9003_mci_send_message(ah, MCI_GPM, 0, payload, 16, 164 MCI_GPM_COEX_WLAN_CHANNELS);
167 wait_done, true); 165 ar9003_mci_send_message(ah, MCI_GPM, 0, payload, 16, wait_done, true);
168 MCI_GPM_SET_TYPE_OPCODE(payload, 0xff, 0xff); 166 MCI_GPM_SET_TYPE_OPCODE(payload, 0xff, 0xff);
169 }
170} 167}
171 168
172static void ar9003_mci_send_coex_bt_status_query(struct ath_hw *ah, 169static void ar9003_mci_send_coex_bt_status_query(struct ath_hw *ah,
@@ -174,29 +171,30 @@ static void ar9003_mci_send_coex_bt_status_query(struct ath_hw *ah,
174{ 171{
175 struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci; 172 struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
176 u32 payload[4] = {0, 0, 0, 0}; 173 u32 payload[4] = {0, 0, 0, 0};
177 bool query_btinfo = !!(query_type & (MCI_GPM_COEX_QUERY_BT_ALL_INFO | 174 bool query_btinfo;
178 MCI_GPM_COEX_QUERY_BT_TOPOLOGY));
179 175
180 if (mci->bt_state != MCI_BT_SLEEP) { 176 if (mci->bt_state == MCI_BT_SLEEP)
181 177 return;
182 MCI_GPM_SET_TYPE_OPCODE(payload, MCI_GPM_COEX_AGENT,
183 MCI_GPM_COEX_STATUS_QUERY);
184 178
185 *(((u8 *)payload) + MCI_GPM_COEX_B_BT_BITMAP) = query_type; 179 query_btinfo = !!(query_type & (MCI_GPM_COEX_QUERY_BT_ALL_INFO |
180 MCI_GPM_COEX_QUERY_BT_TOPOLOGY));
181 MCI_GPM_SET_TYPE_OPCODE(payload, MCI_GPM_COEX_AGENT,
182 MCI_GPM_COEX_STATUS_QUERY);
186 183
187 /* 184 *(((u8 *)payload) + MCI_GPM_COEX_B_BT_BITMAP) = query_type;
188 * If bt_status_query message is not sent successfully,
189 * then need_flush_btinfo should be set again.
190 */
191 if (!ar9003_mci_send_message(ah, MCI_GPM, 0, payload, 16,
192 wait_done, true)) {
193 if (query_btinfo)
194 mci->need_flush_btinfo = true;
195 }
196 185
186 /*
187 * If bt_status_query message is not sent successfully,
188 * then need_flush_btinfo should be set again.
189 */
190 if (!ar9003_mci_send_message(ah, MCI_GPM, 0, payload, 16,
191 wait_done, true)) {
197 if (query_btinfo) 192 if (query_btinfo)
198 mci->query_bt = false; 193 mci->need_flush_btinfo = true;
199 } 194 }
195
196 if (query_btinfo)
197 mci->query_bt = false;
200} 198}
201 199
202static void ar9003_mci_send_coex_halt_bt_gpm(struct ath_hw *ah, bool halt, 200static void ar9003_mci_send_coex_halt_bt_gpm(struct ath_hw *ah, bool halt,
@@ -241,73 +239,73 @@ static void ar9003_mci_prep_interface(struct ath_hw *ah)
241 ar9003_mci_remote_reset(ah, true); 239 ar9003_mci_remote_reset(ah, true);
242 ar9003_mci_send_req_wake(ah, true); 240 ar9003_mci_send_req_wake(ah, true);
243 241
244 if (ar9003_mci_wait_for_interrupt(ah, AR_MCI_INTERRUPT_RX_MSG_RAW, 242 if (!ar9003_mci_wait_for_interrupt(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
245 AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING, 500)) { 243 AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING, 500))
244 goto clear_redunt;
246 245
247 mci->bt_state = MCI_BT_AWAKE; 246 mci->bt_state = MCI_BT_AWAKE;
248 247
249 /* 248 /*
250 * we don't need to send more remote_reset at this moment. 249 * we don't need to send more remote_reset at this moment.
251 * If BT receive first remote_reset, then BT HW will 250 * If BT receive first remote_reset, then BT HW will
252 * be cleaned up and will be able to receive req_wake 251 * be cleaned up and will be able to receive req_wake
253 * and BT HW will respond sys_waking. 252 * and BT HW will respond sys_waking.
254 * In this case, WLAN will receive BT's HW sys_waking. 253 * In this case, WLAN will receive BT's HW sys_waking.
255 * Otherwise, if BT SW missed initial remote_reset, 254 * Otherwise, if BT SW missed initial remote_reset,
256 * that remote_reset will still clean up BT MCI RX, 255 * that remote_reset will still clean up BT MCI RX,
257 * and the req_wake will wake BT up, 256 * and the req_wake will wake BT up,
258 * and BT SW will respond this req_wake with a remote_reset and 257 * and BT SW will respond this req_wake with a remote_reset and
259 * sys_waking. In this case, WLAN will receive BT's SW 258 * sys_waking. In this case, WLAN will receive BT's SW
260 * sys_waking. In either case, BT's RX is cleaned up. So we 259 * sys_waking. In either case, BT's RX is cleaned up. So we
261 * don't need to reply BT's remote_reset now, if any. 260 * don't need to reply BT's remote_reset now, if any.
262 * Similarly, if in any case, WLAN can receive BT's sys_waking, 261 * Similarly, if in any case, WLAN can receive BT's sys_waking,
263 * that means WLAN's RX is also fine. 262 * that means WLAN's RX is also fine.
264 */ 263 */
265 ar9003_mci_send_sys_waking(ah, true); 264 ar9003_mci_send_sys_waking(ah, true);
266 udelay(10); 265 udelay(10);
267 266
268 /* 267 /*
269 * Set BT priority interrupt value to be 0xff to 268 * Set BT priority interrupt value to be 0xff to
270 * avoid having too many BT PRIORITY interrupts. 269 * avoid having too many BT PRIORITY interrupts.
271 */ 270 */
272 REG_WRITE(ah, AR_MCI_BT_PRI0, 0xFFFFFFFF); 271 REG_WRITE(ah, AR_MCI_BT_PRI0, 0xFFFFFFFF);
273 REG_WRITE(ah, AR_MCI_BT_PRI1, 0xFFFFFFFF); 272 REG_WRITE(ah, AR_MCI_BT_PRI1, 0xFFFFFFFF);
274 REG_WRITE(ah, AR_MCI_BT_PRI2, 0xFFFFFFFF); 273 REG_WRITE(ah, AR_MCI_BT_PRI2, 0xFFFFFFFF);
275 REG_WRITE(ah, AR_MCI_BT_PRI3, 0xFFFFFFFF); 274 REG_WRITE(ah, AR_MCI_BT_PRI3, 0xFFFFFFFF);
276 REG_WRITE(ah, AR_MCI_BT_PRI, 0X000000FF); 275 REG_WRITE(ah, AR_MCI_BT_PRI, 0X000000FF);
277 276
278 /* 277 /*
279 * A contention reset will be received after send out 278 * A contention reset will be received after send out
280 * sys_waking. Also BT priority interrupt bits will be set. 279 * sys_waking. Also BT priority interrupt bits will be set.
281 * Clear those bits before the next step. 280 * Clear those bits before the next step.
282 */ 281 */
283 282
284 REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW, 283 REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
285 AR_MCI_INTERRUPT_RX_MSG_CONT_RST); 284 AR_MCI_INTERRUPT_RX_MSG_CONT_RST);
286 REG_WRITE(ah, AR_MCI_INTERRUPT_RAW, 285 REG_WRITE(ah, AR_MCI_INTERRUPT_RAW, AR_MCI_INTERRUPT_BT_PRI);
287 AR_MCI_INTERRUPT_BT_PRI);
288 286
289 if (mci->is_2g) { 287 if (mci->is_2g) {
290 ar9003_mci_send_lna_transfer(ah, true); 288 ar9003_mci_send_lna_transfer(ah, true);
291 udelay(5); 289 udelay(5);
292 } 290 }
293 291
294 if ((mci->is_2g && !mci->update_2g5g)) { 292 if ((mci->is_2g && !mci->update_2g5g)) {
295 if (ar9003_mci_wait_for_interrupt(ah, 293 if (ar9003_mci_wait_for_interrupt(ah,
296 AR_MCI_INTERRUPT_RX_MSG_RAW, 294 AR_MCI_INTERRUPT_RX_MSG_RAW,
297 AR_MCI_INTERRUPT_RX_MSG_LNA_INFO, 295 AR_MCI_INTERRUPT_RX_MSG_LNA_INFO,
298 mci_timeout)) 296 mci_timeout))
299 ath_dbg(common, MCI, 297 ath_dbg(common, MCI,
300 "MCI WLAN has control over the LNA & BT obeys it\n"); 298 "MCI WLAN has control over the LNA & BT obeys it\n");
301 else 299 else
302 ath_dbg(common, MCI, 300 ath_dbg(common, MCI,
303 "MCI BT didn't respond to LNA_TRANS\n"); 301 "MCI BT didn't respond to LNA_TRANS\n");
304 }
305 } 302 }
306 303
304clear_redunt:
307 /* Clear the extra redundant SYS_WAKING from BT */ 305 /* Clear the extra redundant SYS_WAKING from BT */
308 if ((mci->bt_state == MCI_BT_AWAKE) && 306 if ((mci->bt_state == MCI_BT_AWAKE) &&
309 (REG_READ_FIELD(ah, AR_MCI_INTERRUPT_RX_MSG_RAW, 307 (REG_READ_FIELD(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
310 AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING)) && 308 AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING)) &&
311 (REG_READ_FIELD(ah, AR_MCI_INTERRUPT_RX_MSG_RAW, 309 (REG_READ_FIELD(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
312 AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING) == 0)) { 310 AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING) == 0)) {
313 REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW, 311 REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
@@ -323,14 +321,13 @@ void ar9003_mci_set_full_sleep(struct ath_hw *ah)
323{ 321{
324 struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci; 322 struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
325 323
326 if (ar9003_mci_state(ah, MCI_STATE_ENABLE, NULL) && 324 if (ar9003_mci_state(ah, MCI_STATE_ENABLE) &&
327 (mci->bt_state != MCI_BT_SLEEP) && 325 (mci->bt_state != MCI_BT_SLEEP) &&
328 !mci->halted_bt_gpm) { 326 !mci->halted_bt_gpm) {
329 ar9003_mci_send_coex_halt_bt_gpm(ah, true, true); 327 ar9003_mci_send_coex_halt_bt_gpm(ah, true, true);
330 } 328 }
331 329
332 mci->ready = false; 330 mci->ready = false;
333 REG_WRITE(ah, AR_RTC_KEEP_AWAKE, 0x2);
334} 331}
335 332
336static void ar9003_mci_disable_interrupt(struct ath_hw *ah) 333static void ar9003_mci_disable_interrupt(struct ath_hw *ah)
@@ -487,7 +484,7 @@ static void ar9003_mci_sync_bt_state(struct ath_hw *ah)
487 struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci; 484 struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
488 u32 cur_bt_state; 485 u32 cur_bt_state;
489 486
490 cur_bt_state = ar9003_mci_state(ah, MCI_STATE_REMOTE_SLEEP, NULL); 487 cur_bt_state = ar9003_mci_state(ah, MCI_STATE_REMOTE_SLEEP);
491 488
492 if (mci->bt_state != cur_bt_state) 489 if (mci->bt_state != cur_bt_state)
493 mci->bt_state = cur_bt_state; 490 mci->bt_state = cur_bt_state;
@@ -596,8 +593,7 @@ static u32 ar9003_mci_wait_for_gpm(struct ath_hw *ah, u8 gpm_type,
596 if (!time_out) 593 if (!time_out)
597 break; 594 break;
598 595
599 offset = ar9003_mci_state(ah, MCI_STATE_NEXT_GPM_OFFSET, 596 offset = ar9003_mci_get_next_gpm_offset(ah, false, &more_data);
600 &more_data);
601 597
602 if (offset == MCI_GPM_INVALID) 598 if (offset == MCI_GPM_INVALID)
603 continue; 599 continue;
@@ -615,9 +611,9 @@ static u32 ar9003_mci_wait_for_gpm(struct ath_hw *ah, u8 gpm_type,
615 } 611 }
616 break; 612 break;
617 } 613 }
618 } else if ((recv_type == gpm_type) && (recv_opcode == gpm_opcode)) { 614 } else if ((recv_type == gpm_type) &&
615 (recv_opcode == gpm_opcode))
619 break; 616 break;
620 }
621 617
622 /* 618 /*
623 * check if it's cal_grant 619 * check if it's cal_grant
@@ -661,8 +657,7 @@ static u32 ar9003_mci_wait_for_gpm(struct ath_hw *ah, u8 gpm_type,
661 time_out = 0; 657 time_out = 0;
662 658
663 while (more_data == MCI_GPM_MORE) { 659 while (more_data == MCI_GPM_MORE) {
664 offset = ar9003_mci_state(ah, MCI_STATE_NEXT_GPM_OFFSET, 660 offset = ar9003_mci_get_next_gpm_offset(ah, false, &more_data);
665 &more_data);
666 if (offset == MCI_GPM_INVALID) 661 if (offset == MCI_GPM_INVALID)
667 break; 662 break;
668 663
@@ -731,38 +726,38 @@ int ar9003_mci_end_reset(struct ath_hw *ah, struct ath9k_channel *chan,
731 if (!IS_CHAN_2GHZ(chan) || (mci_hw->bt_state != MCI_BT_SLEEP)) 726 if (!IS_CHAN_2GHZ(chan) || (mci_hw->bt_state != MCI_BT_SLEEP))
732 goto exit; 727 goto exit;
733 728
734 if (ar9003_mci_check_int(ah, AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET) || 729 if (!ar9003_mci_check_int(ah, AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET) &&
735 ar9003_mci_check_int(ah, AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE)) { 730 !ar9003_mci_check_int(ah, AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE))
731 goto exit;
736 732
737 /* 733 /*
738 * BT is sleeping. Check if BT wakes up during 734 * BT is sleeping. Check if BT wakes up during
739 * WLAN calibration. If BT wakes up during 735 * WLAN calibration. If BT wakes up during
740 * WLAN calibration, need to go through all 736 * WLAN calibration, need to go through all
741 * message exchanges again and recal. 737 * message exchanges again and recal.
742 */ 738 */
743 REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW, 739 REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
744 AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET | 740 (AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET |
745 AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE); 741 AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE));
746 742
747 ar9003_mci_remote_reset(ah, true); 743 ar9003_mci_remote_reset(ah, true);
748 ar9003_mci_send_sys_waking(ah, true); 744 ar9003_mci_send_sys_waking(ah, true);
749 udelay(1); 745 udelay(1);
750 746
751 if (IS_CHAN_2GHZ(chan)) 747 if (IS_CHAN_2GHZ(chan))
752 ar9003_mci_send_lna_transfer(ah, true); 748 ar9003_mci_send_lna_transfer(ah, true);
753 749
754 mci_hw->bt_state = MCI_BT_AWAKE; 750 mci_hw->bt_state = MCI_BT_AWAKE;
755 751
756 if (caldata) { 752 if (caldata) {
757 caldata->done_txiqcal_once = false; 753 caldata->done_txiqcal_once = false;
758 caldata->done_txclcal_once = false; 754 caldata->done_txclcal_once = false;
759 caldata->rtt_done = false; 755 caldata->rtt_done = false;
760 } 756 }
761 757
762 if (!ath9k_hw_init_cal(ah, chan)) 758 if (!ath9k_hw_init_cal(ah, chan))
763 return -EIO; 759 return -EIO;
764 760
765 }
766exit: 761exit:
767 ar9003_mci_enable_interrupt(ah); 762 ar9003_mci_enable_interrupt(ah);
768 return 0; 763 return 0;
@@ -772,10 +767,6 @@ static void ar9003_mci_mute_bt(struct ath_hw *ah)
772{ 767{
773 /* disable all MCI messages */ 768 /* disable all MCI messages */
774 REG_WRITE(ah, AR_MCI_MSG_ATTRIBUTES_TABLE, 0xffff0000); 769 REG_WRITE(ah, AR_MCI_MSG_ATTRIBUTES_TABLE, 0xffff0000);
775 REG_WRITE(ah, AR_BTCOEX_WL_WEIGHTS0, 0xffffffff);
776 REG_WRITE(ah, AR_BTCOEX_WL_WEIGHTS1, 0xffffffff);
777 REG_WRITE(ah, AR_BTCOEX_WL_WEIGHTS2, 0xffffffff);
778 REG_WRITE(ah, AR_BTCOEX_WL_WEIGHTS3, 0xffffffff);
779 REG_SET_BIT(ah, AR_MCI_TX_CTRL, AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE); 770 REG_SET_BIT(ah, AR_MCI_TX_CTRL, AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE);
780 771
781 /* wait pending HW messages to flush out */ 772 /* wait pending HW messages to flush out */
@@ -798,29 +789,27 @@ static void ar9003_mci_osla_setup(struct ath_hw *ah, bool enable)
798 struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci; 789 struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
799 u32 thresh; 790 u32 thresh;
800 791
801 if (enable) { 792 if (!enable) {
802 REG_RMW_FIELD(ah, AR_MCI_SCHD_TABLE_2,
803 AR_MCI_SCHD_TABLE_2_HW_BASED, 1);
804 REG_RMW_FIELD(ah, AR_MCI_SCHD_TABLE_2,
805 AR_MCI_SCHD_TABLE_2_MEM_BASED, 1);
806
807 if (!(mci->config & ATH_MCI_CONFIG_DISABLE_AGGR_THRESH)) {
808 thresh = MS(mci->config, ATH_MCI_CONFIG_AGGR_THRESH);
809 REG_RMW_FIELD(ah, AR_BTCOEX_CTRL,
810 AR_BTCOEX_CTRL_AGGR_THRESH, thresh);
811 REG_RMW_FIELD(ah, AR_BTCOEX_CTRL,
812 AR_BTCOEX_CTRL_TIME_TO_NEXT_BT_THRESH_EN, 1);
813 } else {
814 REG_RMW_FIELD(ah, AR_BTCOEX_CTRL,
815 AR_BTCOEX_CTRL_TIME_TO_NEXT_BT_THRESH_EN, 0);
816 }
817
818 REG_RMW_FIELD(ah, AR_BTCOEX_CTRL,
819 AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN, 1);
820 } else {
821 REG_CLR_BIT(ah, AR_BTCOEX_CTRL, 793 REG_CLR_BIT(ah, AR_BTCOEX_CTRL,
822 AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN); 794 AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN);
795 return;
823 } 796 }
797 REG_RMW_FIELD(ah, AR_MCI_SCHD_TABLE_2, AR_MCI_SCHD_TABLE_2_HW_BASED, 1);
798 REG_RMW_FIELD(ah, AR_MCI_SCHD_TABLE_2,
799 AR_MCI_SCHD_TABLE_2_MEM_BASED, 1);
800
801 if (!(mci->config & ATH_MCI_CONFIG_DISABLE_AGGR_THRESH)) {
802 thresh = MS(mci->config, ATH_MCI_CONFIG_AGGR_THRESH);
803 REG_RMW_FIELD(ah, AR_BTCOEX_CTRL,
804 AR_BTCOEX_CTRL_AGGR_THRESH, thresh);
805 REG_RMW_FIELD(ah, AR_BTCOEX_CTRL,
806 AR_BTCOEX_CTRL_TIME_TO_NEXT_BT_THRESH_EN, 1);
807 } else
808 REG_RMW_FIELD(ah, AR_BTCOEX_CTRL,
809 AR_BTCOEX_CTRL_TIME_TO_NEXT_BT_THRESH_EN, 0);
810
811 REG_RMW_FIELD(ah, AR_BTCOEX_CTRL,
812 AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN, 1);
824} 813}
825 814
826void ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g, 815void ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g,
@@ -898,13 +887,16 @@ void ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g,
898 udelay(100); 887 udelay(100);
899 } 888 }
900 889
890 /* Check pending GPM msg before MCI Reset Rx */
891 ar9003_mci_check_gpm_offset(ah);
892
901 regval |= SM(1, AR_MCI_COMMAND2_RESET_RX); 893 regval |= SM(1, AR_MCI_COMMAND2_RESET_RX);
902 REG_WRITE(ah, AR_MCI_COMMAND2, regval); 894 REG_WRITE(ah, AR_MCI_COMMAND2, regval);
903 udelay(1); 895 udelay(1);
904 regval &= ~SM(1, AR_MCI_COMMAND2_RESET_RX); 896 regval &= ~SM(1, AR_MCI_COMMAND2_RESET_RX);
905 REG_WRITE(ah, AR_MCI_COMMAND2, regval); 897 REG_WRITE(ah, AR_MCI_COMMAND2, regval);
906 898
907 ar9003_mci_state(ah, MCI_STATE_INIT_GPM_OFFSET, NULL); 899 ar9003_mci_get_next_gpm_offset(ah, true, NULL);
908 900
909 REG_WRITE(ah, AR_MCI_MSG_ATTRIBUTES_TABLE, 901 REG_WRITE(ah, AR_MCI_MSG_ATTRIBUTES_TABLE,
910 (SM(0xe801, AR_MCI_MSG_ATTRIBUTES_TABLE_INVALID_HDR) | 902 (SM(0xe801, AR_MCI_MSG_ATTRIBUTES_TABLE_INVALID_HDR) |
@@ -943,26 +935,27 @@ static void ar9003_mci_send_2g5g_status(struct ath_hw *ah, bool wait_done)
943 struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci; 935 struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
944 u32 new_flags, to_set, to_clear; 936 u32 new_flags, to_set, to_clear;
945 937
946 if (mci->update_2g5g && (mci->bt_state != MCI_BT_SLEEP)) { 938 if (!mci->update_2g5g || (mci->bt_state == MCI_BT_SLEEP))
947 if (mci->is_2g) { 939 return;
948 new_flags = MCI_2G_FLAGS; 940
949 to_clear = MCI_2G_FLAGS_CLEAR_MASK; 941 if (mci->is_2g) {
950 to_set = MCI_2G_FLAGS_SET_MASK; 942 new_flags = MCI_2G_FLAGS;
951 } else { 943 to_clear = MCI_2G_FLAGS_CLEAR_MASK;
952 new_flags = MCI_5G_FLAGS; 944 to_set = MCI_2G_FLAGS_SET_MASK;
953 to_clear = MCI_5G_FLAGS_CLEAR_MASK; 945 } else {
954 to_set = MCI_5G_FLAGS_SET_MASK; 946 new_flags = MCI_5G_FLAGS;
955 } 947 to_clear = MCI_5G_FLAGS_CLEAR_MASK;
948 to_set = MCI_5G_FLAGS_SET_MASK;
949 }
956 950
957 if (to_clear) 951 if (to_clear)
958 ar9003_mci_send_coex_bt_flags(ah, wait_done, 952 ar9003_mci_send_coex_bt_flags(ah, wait_done,
959 MCI_GPM_COEX_BT_FLAGS_CLEAR, 953 MCI_GPM_COEX_BT_FLAGS_CLEAR,
960 to_clear); 954 to_clear);
961 if (to_set) 955 if (to_set)
962 ar9003_mci_send_coex_bt_flags(ah, wait_done, 956 ar9003_mci_send_coex_bt_flags(ah, wait_done,
963 MCI_GPM_COEX_BT_FLAGS_SET, 957 MCI_GPM_COEX_BT_FLAGS_SET,
964 to_set); 958 to_set);
965 }
966} 959}
967 960
968static void ar9003_mci_queue_unsent_gpm(struct ath_hw *ah, u8 header, 961static void ar9003_mci_queue_unsent_gpm(struct ath_hw *ah, u8 header,
@@ -1014,38 +1007,42 @@ static void ar9003_mci_queue_unsent_gpm(struct ath_hw *ah, u8 header,
1014 } 1007 }
1015} 1008}
1016 1009
1017void ar9003_mci_2g5g_switch(struct ath_hw *ah, bool wait_done) 1010void ar9003_mci_2g5g_switch(struct ath_hw *ah, bool force)
1018{ 1011{
1019 struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci; 1012 struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
1020 1013
1021 if (mci->update_2g5g) { 1014 if (!mci->update_2g5g && !force)
1022 if (mci->is_2g) { 1015 return;
1016
1017 if (mci->is_2g) {
1018 if (!force) {
1023 ar9003_mci_send_2g5g_status(ah, true); 1019 ar9003_mci_send_2g5g_status(ah, true);
1020
1024 ar9003_mci_send_lna_transfer(ah, true); 1021 ar9003_mci_send_lna_transfer(ah, true);
1025 udelay(5); 1022 udelay(5);
1023 }
1026 1024
1027 REG_CLR_BIT(ah, AR_MCI_TX_CTRL, 1025 REG_CLR_BIT(ah, AR_MCI_TX_CTRL,
1028 AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE); 1026 AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE);
1029 REG_CLR_BIT(ah, AR_PHY_GLB_CONTROL, 1027 REG_CLR_BIT(ah, AR_PHY_GLB_CONTROL,
1030 AR_BTCOEX_CTRL_BT_OWN_SPDT_CTRL); 1028 AR_BTCOEX_CTRL_BT_OWN_SPDT_CTRL);
1031 1029
1032 if (!(mci->config & ATH_MCI_CONFIG_DISABLE_OSLA)) { 1030 if (!(mci->config & ATH_MCI_CONFIG_DISABLE_OSLA))
1033 REG_SET_BIT(ah, AR_BTCOEX_CTRL, 1031 ar9003_mci_osla_setup(ah, true);
1034 AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN); 1032 } else {
1035 } 1033 if (!force) {
1036 } else {
1037 ar9003_mci_send_lna_take(ah, true); 1034 ar9003_mci_send_lna_take(ah, true);
1038 udelay(5); 1035 udelay(5);
1036 }
1039 1037
1040 REG_SET_BIT(ah, AR_MCI_TX_CTRL, 1038 REG_SET_BIT(ah, AR_MCI_TX_CTRL,
1041 AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE); 1039 AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE);
1042 REG_SET_BIT(ah, AR_PHY_GLB_CONTROL, 1040 REG_SET_BIT(ah, AR_PHY_GLB_CONTROL,
1043 AR_BTCOEX_CTRL_BT_OWN_SPDT_CTRL); 1041 AR_BTCOEX_CTRL_BT_OWN_SPDT_CTRL);
1044 REG_CLR_BIT(ah, AR_BTCOEX_CTRL,
1045 AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN);
1046 1042
1043 ar9003_mci_osla_setup(ah, false);
1044 if (!force)
1047 ar9003_mci_send_2g5g_status(ah, true); 1045 ar9003_mci_send_2g5g_status(ah, true);
1048 }
1049 } 1046 }
1050} 1047}
1051 1048
@@ -1132,7 +1129,7 @@ void ar9003_mci_init_cal_req(struct ath_hw *ah, bool *is_reusable)
1132 if (ar9003_mci_wait_for_gpm(ah, MCI_GPM_BT_CAL_GRANT, 0, 50000)) { 1129 if (ar9003_mci_wait_for_gpm(ah, MCI_GPM_BT_CAL_GRANT, 0, 50000)) {
1133 ath_dbg(common, MCI, "MCI BT_CAL_GRANT received\n"); 1130 ath_dbg(common, MCI, "MCI BT_CAL_GRANT received\n");
1134 } else { 1131 } else {
1135 is_reusable = false; 1132 *is_reusable = false;
1136 ath_dbg(common, MCI, "MCI BT_CAL_GRANT not received\n"); 1133 ath_dbg(common, MCI, "MCI BT_CAL_GRANT not received\n");
1137 } 1134 }
1138} 1135}
@@ -1173,11 +1170,10 @@ void ar9003_mci_cleanup(struct ath_hw *ah)
1173} 1170}
1174EXPORT_SYMBOL(ar9003_mci_cleanup); 1171EXPORT_SYMBOL(ar9003_mci_cleanup);
1175 1172
1176u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type, u32 *p_data) 1173u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type)
1177{ 1174{
1178 struct ath_common *common = ath9k_hw_common(ah);
1179 struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci; 1175 struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
1180 u32 value = 0, more_gpm = 0, gpm_ptr; 1176 u32 value = 0;
1181 u8 query_type; 1177 u8 query_type;
1182 1178
1183 switch (state_type) { 1179 switch (state_type) {
@@ -1190,81 +1186,6 @@ u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type, u32 *p_data)
1190 } 1186 }
1191 value &= AR_BTCOEX_CTRL_MCI_MODE_EN; 1187 value &= AR_BTCOEX_CTRL_MCI_MODE_EN;
1192 break; 1188 break;
1193 case MCI_STATE_INIT_GPM_OFFSET:
1194 value = MS(REG_READ(ah, AR_MCI_GPM_1), AR_MCI_GPM_WRITE_PTR);
1195 mci->gpm_idx = value;
1196 break;
1197 case MCI_STATE_NEXT_GPM_OFFSET:
1198 case MCI_STATE_LAST_GPM_OFFSET:
1199 /*
1200 * This could be useful to avoid new GPM message interrupt which
1201 * may lead to spurious interrupt after power sleep, or multiple
1202 * entry of ath_mci_intr().
1203 * Adding empty GPM check by returning HAL_MCI_GPM_INVALID can
1204 * alleviate this effect, but clearing GPM RX interrupt bit is
1205 * safe, because whether this is called from hw or driver code
1206 * there must be an interrupt bit set/triggered initially
1207 */
1208 REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
1209 AR_MCI_INTERRUPT_RX_MSG_GPM);
1210
1211 gpm_ptr = MS(REG_READ(ah, AR_MCI_GPM_1), AR_MCI_GPM_WRITE_PTR);
1212 value = gpm_ptr;
1213
1214 if (value == 0)
1215 value = mci->gpm_len - 1;
1216 else if (value >= mci->gpm_len) {
1217 if (value != 0xFFFF)
1218 value = 0;
1219 } else {
1220 value--;
1221 }
1222
1223 if (value == 0xFFFF) {
1224 value = MCI_GPM_INVALID;
1225 more_gpm = MCI_GPM_NOMORE;
1226 } else if (state_type == MCI_STATE_NEXT_GPM_OFFSET) {
1227 if (gpm_ptr == mci->gpm_idx) {
1228 value = MCI_GPM_INVALID;
1229 more_gpm = MCI_GPM_NOMORE;
1230 } else {
1231 for (;;) {
1232 u32 temp_index;
1233
1234 /* skip reserved GPM if any */
1235
1236 if (value != mci->gpm_idx)
1237 more_gpm = MCI_GPM_MORE;
1238 else
1239 more_gpm = MCI_GPM_NOMORE;
1240
1241 temp_index = mci->gpm_idx;
1242 mci->gpm_idx++;
1243
1244 if (mci->gpm_idx >=
1245 mci->gpm_len)
1246 mci->gpm_idx = 0;
1247
1248 if (ar9003_mci_is_gpm_valid(ah,
1249 temp_index)) {
1250 value = temp_index;
1251 break;
1252 }
1253
1254 if (more_gpm == MCI_GPM_NOMORE) {
1255 value = MCI_GPM_INVALID;
1256 break;
1257 }
1258 }
1259 }
1260 if (p_data)
1261 *p_data = more_gpm;
1262 }
1263
1264 if (value != MCI_GPM_INVALID)
1265 value <<= 4;
1266
1267 break;
1268 case MCI_STATE_LAST_SCHD_MSG_OFFSET: 1189 case MCI_STATE_LAST_SCHD_MSG_OFFSET:
1269 value = MS(REG_READ(ah, AR_MCI_RX_STATUS), 1190 value = MS(REG_READ(ah, AR_MCI_RX_STATUS),
1270 AR_MCI_RX_LAST_SCHD_MSG_INDEX); 1191 AR_MCI_RX_LAST_SCHD_MSG_INDEX);
@@ -1276,21 +1197,6 @@ u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type, u32 *p_data)
1276 AR_MCI_RX_REMOTE_SLEEP) ? 1197 AR_MCI_RX_REMOTE_SLEEP) ?
1277 MCI_BT_SLEEP : MCI_BT_AWAKE; 1198 MCI_BT_SLEEP : MCI_BT_AWAKE;
1278 break; 1199 break;
1279 case MCI_STATE_CONT_RSSI_POWER:
1280 value = MS(mci->cont_status, AR_MCI_CONT_RSSI_POWER);
1281 break;
1282 case MCI_STATE_CONT_PRIORITY:
1283 value = MS(mci->cont_status, AR_MCI_CONT_RRIORITY);
1284 break;
1285 case MCI_STATE_CONT_TXRX:
1286 value = MS(mci->cont_status, AR_MCI_CONT_TXRX);
1287 break;
1288 case MCI_STATE_BT:
1289 value = mci->bt_state;
1290 break;
1291 case MCI_STATE_SET_BT_SLEEP:
1292 mci->bt_state = MCI_BT_SLEEP;
1293 break;
1294 case MCI_STATE_SET_BT_AWAKE: 1200 case MCI_STATE_SET_BT_AWAKE:
1295 mci->bt_state = MCI_BT_AWAKE; 1201 mci->bt_state = MCI_BT_AWAKE;
1296 ar9003_mci_send_coex_version_query(ah, true); 1202 ar9003_mci_send_coex_version_query(ah, true);
@@ -1299,7 +1205,7 @@ u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type, u32 *p_data)
1299 if (mci->unhalt_bt_gpm) 1205 if (mci->unhalt_bt_gpm)
1300 ar9003_mci_send_coex_halt_bt_gpm(ah, false, true); 1206 ar9003_mci_send_coex_halt_bt_gpm(ah, false, true);
1301 1207
1302 ar9003_mci_2g5g_switch(ah, true); 1208 ar9003_mci_2g5g_switch(ah, false);
1303 break; 1209 break;
1304 case MCI_STATE_SET_BT_CAL_START: 1210 case MCI_STATE_SET_BT_CAL_START:
1305 mci->bt_state = MCI_BT_CAL_START; 1211 mci->bt_state = MCI_BT_CAL_START;
@@ -1323,34 +1229,6 @@ u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type, u32 *p_data)
1323 case MCI_STATE_SEND_WLAN_COEX_VERSION: 1229 case MCI_STATE_SEND_WLAN_COEX_VERSION:
1324 ar9003_mci_send_coex_version_response(ah, true); 1230 ar9003_mci_send_coex_version_response(ah, true);
1325 break; 1231 break;
1326 case MCI_STATE_SET_BT_COEX_VERSION:
1327 if (!p_data)
1328 ath_dbg(common, MCI,
1329 "MCI Set BT Coex version with NULL data!!\n");
1330 else {
1331 mci->bt_ver_major = (*p_data >> 8) & 0xff;
1332 mci->bt_ver_minor = (*p_data) & 0xff;
1333 mci->bt_version_known = true;
1334 ath_dbg(common, MCI, "MCI BT version set: %d.%d\n",
1335 mci->bt_ver_major, mci->bt_ver_minor);
1336 }
1337 break;
1338 case MCI_STATE_SEND_WLAN_CHANNELS:
1339 if (p_data) {
1340 if (((mci->wlan_channels[1] & 0xffff0000) ==
1341 (*(p_data + 1) & 0xffff0000)) &&
1342 (mci->wlan_channels[2] == *(p_data + 2)) &&
1343 (mci->wlan_channels[3] == *(p_data + 3)))
1344 break;
1345
1346 mci->wlan_channels[0] = *p_data++;
1347 mci->wlan_channels[1] = *p_data++;
1348 mci->wlan_channels[2] = *p_data++;
1349 mci->wlan_channels[3] = *p_data++;
1350 }
1351 mci->wlan_channels_update = true;
1352 ar9003_mci_send_coex_wlan_channels(ah, true);
1353 break;
1354 case MCI_STATE_SEND_VERSION_QUERY: 1232 case MCI_STATE_SEND_VERSION_QUERY:
1355 ar9003_mci_send_coex_version_query(ah, true); 1233 ar9003_mci_send_coex_version_query(ah, true);
1356 break; 1234 break;
@@ -1358,38 +1236,16 @@ u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type, u32 *p_data)
1358 query_type = MCI_GPM_COEX_QUERY_BT_TOPOLOGY; 1236 query_type = MCI_GPM_COEX_QUERY_BT_TOPOLOGY;
1359 ar9003_mci_send_coex_bt_status_query(ah, true, query_type); 1237 ar9003_mci_send_coex_bt_status_query(ah, true, query_type);
1360 break; 1238 break;
1361 case MCI_STATE_NEED_FLUSH_BT_INFO:
1362 /*
1363 * btcoex_hw.mci.unhalt_bt_gpm means whether it's
1364 * needed to send UNHALT message. It's set whenever
1365 * there's a request to send HALT message.
1366 * mci_halted_bt_gpm means whether HALT message is sent
1367 * out successfully.
1368 *
1369 * Checking (mci_unhalt_bt_gpm == false) instead of
1370 * checking (ah->mci_halted_bt_gpm == false) will make
1371 * sure currently is in UNHALT-ed mode and BT can
1372 * respond to status query.
1373 */
1374 value = (!mci->unhalt_bt_gpm &&
1375 mci->need_flush_btinfo) ? 1 : 0;
1376 if (p_data)
1377 mci->need_flush_btinfo =
1378 (*p_data != 0) ? true : false;
1379 break;
1380 case MCI_STATE_RECOVER_RX: 1239 case MCI_STATE_RECOVER_RX:
1381 ar9003_mci_prep_interface(ah); 1240 ar9003_mci_prep_interface(ah);
1382 mci->query_bt = true; 1241 mci->query_bt = true;
1383 mci->need_flush_btinfo = true; 1242 mci->need_flush_btinfo = true;
1384 ar9003_mci_send_coex_wlan_channels(ah, true); 1243 ar9003_mci_send_coex_wlan_channels(ah, true);
1385 ar9003_mci_2g5g_switch(ah, true); 1244 ar9003_mci_2g5g_switch(ah, false);
1386 break; 1245 break;
1387 case MCI_STATE_NEED_FTP_STOMP: 1246 case MCI_STATE_NEED_FTP_STOMP:
1388 value = !(mci->config & ATH_MCI_CONFIG_DISABLE_FTP_STOMP); 1247 value = !(mci->config & ATH_MCI_CONFIG_DISABLE_FTP_STOMP);
1389 break; 1248 break;
1390 case MCI_STATE_NEED_TUNING:
1391 value = !(mci->config & ATH_MCI_CONFIG_DISABLE_TUNING);
1392 break;
1393 default: 1249 default:
1394 break; 1250 break;
1395 } 1251 }
@@ -1397,3 +1253,173 @@ u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type, u32 *p_data)
1397 return value; 1253 return value;
1398} 1254}
1399EXPORT_SYMBOL(ar9003_mci_state); 1255EXPORT_SYMBOL(ar9003_mci_state);
1256
1257void ar9003_mci_bt_gain_ctrl(struct ath_hw *ah)
1258{
1259 struct ath_common *common = ath9k_hw_common(ah);
1260 struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
1261
1262 ath_dbg(common, MCI, "Give LNA and SPDT control to BT\n");
1263
1264 ar9003_mci_send_lna_take(ah, true);
1265 udelay(50);
1266
1267 REG_SET_BIT(ah, AR_PHY_GLB_CONTROL, AR_BTCOEX_CTRL_BT_OWN_SPDT_CTRL);
1268 mci->is_2g = false;
1269 mci->update_2g5g = true;
1270 ar9003_mci_send_2g5g_status(ah, true);
1271
1272 /* Force another 2g5g update at next scanning */
1273 mci->update_2g5g = true;
1274}
1275
1276void ar9003_mci_set_power_awake(struct ath_hw *ah)
1277{
1278 u32 btcoex_ctrl2, diag_sw;
1279 int i;
1280 u8 lna_ctrl, bt_sleep;
1281
1282 for (i = 0; i < AH_WAIT_TIMEOUT; i++) {
1283 btcoex_ctrl2 = REG_READ(ah, AR_BTCOEX_CTRL2);
1284 if (btcoex_ctrl2 != 0xdeadbeef)
1285 break;
1286 udelay(AH_TIME_QUANTUM);
1287 }
1288 REG_WRITE(ah, AR_BTCOEX_CTRL2, (btcoex_ctrl2 | BIT(23)));
1289
1290 for (i = 0; i < AH_WAIT_TIMEOUT; i++) {
1291 diag_sw = REG_READ(ah, AR_DIAG_SW);
1292 if (diag_sw != 0xdeadbeef)
1293 break;
1294 udelay(AH_TIME_QUANTUM);
1295 }
1296 REG_WRITE(ah, AR_DIAG_SW, (diag_sw | BIT(27) | BIT(19) | BIT(18)));
1297 lna_ctrl = REG_READ(ah, AR_OBS_BUS_CTRL) & 0x3;
1298 bt_sleep = REG_READ(ah, AR_MCI_RX_STATUS) & AR_MCI_RX_REMOTE_SLEEP;
1299
1300 REG_WRITE(ah, AR_BTCOEX_CTRL2, btcoex_ctrl2);
1301 REG_WRITE(ah, AR_DIAG_SW, diag_sw);
1302
1303 if (bt_sleep && (lna_ctrl == 2)) {
1304 REG_SET_BIT(ah, AR_BTCOEX_RC, 0x1);
1305 REG_CLR_BIT(ah, AR_BTCOEX_RC, 0x1);
1306 udelay(50);
1307 }
1308}
1309
1310void ar9003_mci_check_gpm_offset(struct ath_hw *ah)
1311{
1312 struct ath_common *common = ath9k_hw_common(ah);
1313 struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
1314 u32 offset;
1315
1316 /*
1317 * This should only be called before "MAC Warm Reset" or "MCI Reset Rx".
1318 */
1319 offset = MS(REG_READ(ah, AR_MCI_GPM_1), AR_MCI_GPM_WRITE_PTR);
1320 if (mci->gpm_idx == offset)
1321 return;
1322 ath_dbg(common, MCI, "GPM cached write pointer mismatch %d %d\n",
1323 mci->gpm_idx, offset);
1324 mci->query_bt = true;
1325 mci->need_flush_btinfo = true;
1326 mci->gpm_idx = 0;
1327}
1328
1329u32 ar9003_mci_get_next_gpm_offset(struct ath_hw *ah, bool first, u32 *more)
1330{
1331 struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
1332 u32 offset, more_gpm = 0, gpm_ptr;
1333
1334 if (first) {
1335 gpm_ptr = MS(REG_READ(ah, AR_MCI_GPM_1), AR_MCI_GPM_WRITE_PTR);
1336 mci->gpm_idx = gpm_ptr;
1337 return gpm_ptr;
1338 }
1339
1340 /*
1341 * This could be useful to avoid new GPM message interrupt which
1342 * may lead to spurious interrupt after power sleep, or multiple
1343 * entry of ath_mci_intr().
1344 * Adding empty GPM check by returning HAL_MCI_GPM_INVALID can
1345 * alleviate this effect, but clearing GPM RX interrupt bit is
1346 * safe, because whether this is called from hw or driver code
1347 * there must be an interrupt bit set/triggered initially
1348 */
1349 REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
1350 AR_MCI_INTERRUPT_RX_MSG_GPM);
1351
1352 gpm_ptr = MS(REG_READ(ah, AR_MCI_GPM_1), AR_MCI_GPM_WRITE_PTR);
1353 offset = gpm_ptr;
1354
1355 if (!offset)
1356 offset = mci->gpm_len - 1;
1357 else if (offset >= mci->gpm_len) {
1358 if (offset != 0xFFFF)
1359 offset = 0;
1360 } else {
1361 offset--;
1362 }
1363
1364 if ((offset == 0xFFFF) || (gpm_ptr == mci->gpm_idx)) {
1365 offset = MCI_GPM_INVALID;
1366 more_gpm = MCI_GPM_NOMORE;
1367 goto out;
1368 }
1369 for (;;) {
1370 u32 temp_index;
1371
1372 /* skip reserved GPM if any */
1373
1374 if (offset != mci->gpm_idx)
1375 more_gpm = MCI_GPM_MORE;
1376 else
1377 more_gpm = MCI_GPM_NOMORE;
1378
1379 temp_index = mci->gpm_idx;
1380 mci->gpm_idx++;
1381
1382 if (mci->gpm_idx >= mci->gpm_len)
1383 mci->gpm_idx = 0;
1384
1385 if (ar9003_mci_is_gpm_valid(ah, temp_index)) {
1386 offset = temp_index;
1387 break;
1388 }
1389
1390 if (more_gpm == MCI_GPM_NOMORE) {
1391 offset = MCI_GPM_INVALID;
1392 break;
1393 }
1394 }
1395
1396 if (offset != MCI_GPM_INVALID)
1397 offset <<= 4;
1398out:
1399 if (more)
1400 *more = more_gpm;
1401
1402 return offset;
1403}
1404EXPORT_SYMBOL(ar9003_mci_get_next_gpm_offset);
1405
1406void ar9003_mci_set_bt_version(struct ath_hw *ah, u8 major, u8 minor)
1407{
1408 struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
1409
1410 mci->bt_ver_major = major;
1411 mci->bt_ver_minor = minor;
1412 mci->bt_version_known = true;
1413 ath_dbg(ath9k_hw_common(ah), MCI, "MCI BT version set: %d.%d\n",
1414 mci->bt_ver_major, mci->bt_ver_minor);
1415}
1416EXPORT_SYMBOL(ar9003_mci_set_bt_version);
1417
1418void ar9003_mci_send_wlan_channels(struct ath_hw *ah)
1419{
1420 struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
1421
1422 mci->wlan_channels_update = true;
1423 ar9003_mci_send_coex_wlan_channels(ah, true);
1424}
1425EXPORT_SYMBOL(ar9003_mci_send_wlan_channels);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mci.h b/drivers/net/wireless/ath/ath9k/ar9003_mci.h
index 4842f6c06b8..d33b8e12885 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_mci.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mci.h
@@ -189,30 +189,18 @@ enum mci_bt_state {
189/* Type of state query */ 189/* Type of state query */
190enum mci_state_type { 190enum mci_state_type {
191 MCI_STATE_ENABLE, 191 MCI_STATE_ENABLE,
192 MCI_STATE_INIT_GPM_OFFSET,
193 MCI_STATE_NEXT_GPM_OFFSET,
194 MCI_STATE_LAST_GPM_OFFSET,
195 MCI_STATE_BT,
196 MCI_STATE_SET_BT_SLEEP,
197 MCI_STATE_SET_BT_AWAKE, 192 MCI_STATE_SET_BT_AWAKE,
198 MCI_STATE_SET_BT_CAL_START, 193 MCI_STATE_SET_BT_CAL_START,
199 MCI_STATE_SET_BT_CAL, 194 MCI_STATE_SET_BT_CAL,
200 MCI_STATE_LAST_SCHD_MSG_OFFSET, 195 MCI_STATE_LAST_SCHD_MSG_OFFSET,
201 MCI_STATE_REMOTE_SLEEP, 196 MCI_STATE_REMOTE_SLEEP,
202 MCI_STATE_CONT_RSSI_POWER,
203 MCI_STATE_CONT_PRIORITY,
204 MCI_STATE_CONT_TXRX,
205 MCI_STATE_RESET_REQ_WAKE, 197 MCI_STATE_RESET_REQ_WAKE,
206 MCI_STATE_SEND_WLAN_COEX_VERSION, 198 MCI_STATE_SEND_WLAN_COEX_VERSION,
207 MCI_STATE_SET_BT_COEX_VERSION,
208 MCI_STATE_SEND_WLAN_CHANNELS,
209 MCI_STATE_SEND_VERSION_QUERY, 199 MCI_STATE_SEND_VERSION_QUERY,
210 MCI_STATE_SEND_STATUS_QUERY, 200 MCI_STATE_SEND_STATUS_QUERY,
211 MCI_STATE_NEED_FLUSH_BT_INFO,
212 MCI_STATE_SET_CONCUR_TX_PRI, 201 MCI_STATE_SET_CONCUR_TX_PRI,
213 MCI_STATE_RECOVER_RX, 202 MCI_STATE_RECOVER_RX,
214 MCI_STATE_NEED_FTP_STOMP, 203 MCI_STATE_NEED_FTP_STOMP,
215 MCI_STATE_NEED_TUNING,
216 MCI_STATE_DEBUG, 204 MCI_STATE_DEBUG,
217 MCI_STATE_MAX 205 MCI_STATE_MAX
218}; 206};
@@ -260,28 +248,26 @@ enum mci_gpm_coex_opcode {
260bool ar9003_mci_send_message(struct ath_hw *ah, u8 header, u32 flag, 248bool ar9003_mci_send_message(struct ath_hw *ah, u8 header, u32 flag,
261 u32 *payload, u8 len, bool wait_done, 249 u32 *payload, u8 len, bool wait_done,
262 bool check_bt); 250 bool check_bt);
263u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type, u32 *p_data); 251u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type);
264void ar9003_mci_setup(struct ath_hw *ah, u32 gpm_addr, void *gpm_buf, 252void ar9003_mci_setup(struct ath_hw *ah, u32 gpm_addr, void *gpm_buf,
265 u16 len, u32 sched_addr); 253 u16 len, u32 sched_addr);
266void ar9003_mci_cleanup(struct ath_hw *ah); 254void ar9003_mci_cleanup(struct ath_hw *ah);
267void ar9003_mci_get_interrupt(struct ath_hw *ah, u32 *raw_intr, 255void ar9003_mci_get_interrupt(struct ath_hw *ah, u32 *raw_intr,
268 u32 *rx_msg_intr); 256 u32 *rx_msg_intr);
269 257u32 ar9003_mci_get_next_gpm_offset(struct ath_hw *ah, bool first, u32 *more);
258void ar9003_mci_set_bt_version(struct ath_hw *ah, u8 major, u8 minor);
259void ar9003_mci_send_wlan_channels(struct ath_hw *ah);
270/* 260/*
271 * These functions are used by ath9k_hw. 261 * These functions are used by ath9k_hw.
272 */ 262 */
273 263
274#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT 264#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
275 265
276static inline bool ar9003_mci_is_ready(struct ath_hw *ah)
277{
278 return ah->btcoex_hw.mci.ready;
279}
280void ar9003_mci_stop_bt(struct ath_hw *ah, bool save_fullsleep); 266void ar9003_mci_stop_bt(struct ath_hw *ah, bool save_fullsleep);
281void ar9003_mci_init_cal_req(struct ath_hw *ah, bool *is_reusable); 267void ar9003_mci_init_cal_req(struct ath_hw *ah, bool *is_reusable);
282void ar9003_mci_init_cal_done(struct ath_hw *ah); 268void ar9003_mci_init_cal_done(struct ath_hw *ah);
283void ar9003_mci_set_full_sleep(struct ath_hw *ah); 269void ar9003_mci_set_full_sleep(struct ath_hw *ah);
284void ar9003_mci_2g5g_switch(struct ath_hw *ah, bool wait_done); 270void ar9003_mci_2g5g_switch(struct ath_hw *ah, bool force);
285void ar9003_mci_check_bt(struct ath_hw *ah); 271void ar9003_mci_check_bt(struct ath_hw *ah);
286bool ar9003_mci_start_reset(struct ath_hw *ah, struct ath9k_channel *chan); 272bool ar9003_mci_start_reset(struct ath_hw *ah, struct ath9k_channel *chan);
287int ar9003_mci_end_reset(struct ath_hw *ah, struct ath9k_channel *chan, 273int ar9003_mci_end_reset(struct ath_hw *ah, struct ath9k_channel *chan,
@@ -289,13 +275,12 @@ int ar9003_mci_end_reset(struct ath_hw *ah, struct ath9k_channel *chan,
289void ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g, 275void ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g,
290 bool is_full_sleep); 276 bool is_full_sleep);
291void ar9003_mci_get_isr(struct ath_hw *ah, enum ath9k_int *masked); 277void ar9003_mci_get_isr(struct ath_hw *ah, enum ath9k_int *masked);
278void ar9003_mci_bt_gain_ctrl(struct ath_hw *ah);
279void ar9003_mci_set_power_awake(struct ath_hw *ah);
280void ar9003_mci_check_gpm_offset(struct ath_hw *ah);
292 281
293#else 282#else
294 283
295static inline bool ar9003_mci_is_ready(struct ath_hw *ah)
296{
297 return false;
298}
299static inline void ar9003_mci_stop_bt(struct ath_hw *ah, bool save_fullsleep) 284static inline void ar9003_mci_stop_bt(struct ath_hw *ah, bool save_fullsleep)
300{ 285{
301} 286}
@@ -330,6 +315,15 @@ static inline void ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g,
330static inline void ar9003_mci_get_isr(struct ath_hw *ah, enum ath9k_int *masked) 315static inline void ar9003_mci_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
331{ 316{
332} 317}
318static inline void ar9003_mci_bt_gain_ctrl(struct ath_hw *ah)
319{
320}
321static inline void ar9003_mci_set_power_awake(struct ath_hw *ah)
322{
323}
324static inline void ar9003_mci_check_gpm_offset(struct ath_hw *ah)
325{
326}
333#endif /* CONFIG_ATH9K_BTCOEX_SUPPORT */ 327#endif /* CONFIG_ATH9K_BTCOEX_SUPPORT */
334 328
335#endif 329#endif
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
index 11abb972be1..6b91ebb158f 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
@@ -173,7 +173,7 @@ static void ar9003_hw_spur_mitigate_mrc_cck(struct ath_hw *ah,
173 int cur_bb_spur, negative = 0, cck_spur_freq; 173 int cur_bb_spur, negative = 0, cck_spur_freq;
174 int i; 174 int i;
175 int range, max_spur_cnts, synth_freq; 175 int range, max_spur_cnts, synth_freq;
176 u8 *spur_fbin_ptr = NULL; 176 u8 *spur_fbin_ptr = ar9003_get_spur_chan_ptr(ah, IS_CHAN_2GHZ(chan));
177 177
178 /* 178 /*
179 * Need to verify range +/- 10 MHz in control channel, otherwise spur 179 * Need to verify range +/- 10 MHz in control channel, otherwise spur
@@ -181,8 +181,6 @@ static void ar9003_hw_spur_mitigate_mrc_cck(struct ath_hw *ah,
181 */ 181 */
182 182
183 if (AR_SREV_9485(ah) || AR_SREV_9340(ah) || AR_SREV_9330(ah)) { 183 if (AR_SREV_9485(ah) || AR_SREV_9340(ah) || AR_SREV_9330(ah)) {
184 spur_fbin_ptr = ar9003_get_spur_chan_ptr(ah,
185 IS_CHAN_2GHZ(chan));
186 if (spur_fbin_ptr[0] == 0) /* No spur */ 184 if (spur_fbin_ptr[0] == 0) /* No spur */
187 return; 185 return;
188 max_spur_cnts = 5; 186 max_spur_cnts = 5;
@@ -676,6 +674,10 @@ static int ar9003_hw_process_ini(struct ath_hw *ah,
676 if (chan->channel == 2484) 674 if (chan->channel == 2484)
677 ar9003_hw_prog_ini(ah, &ah->ini_japan2484, 1); 675 ar9003_hw_prog_ini(ah, &ah->ini_japan2484, 1);
678 676
677 if (AR_SREV_9462(ah))
678 REG_WRITE(ah, AR_GLB_SWREG_DISCONT_MODE,
679 AR_GLB_SWREG_DISCONT_EN_BT_WLAN);
680
679 ah->modes_index = modesIndex; 681 ah->modes_index = modesIndex;
680 ar9003_hw_override_ini(ah); 682 ar9003_hw_override_ini(ah);
681 ar9003_hw_set_channel_regs(ah, chan); 683 ar9003_hw_set_channel_regs(ah, chan);
@@ -821,18 +823,18 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah,
821 REG_CLR_BIT(ah, AR_PHY_SFCORR_LOW, 823 REG_CLR_BIT(ah, AR_PHY_SFCORR_LOW,
822 AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW); 824 AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW);
823 825
824 if (!on != aniState->ofdmWeakSigDetectOff) { 826 if (on != aniState->ofdmWeakSigDetect) {
825 ath_dbg(common, ANI, 827 ath_dbg(common, ANI,
826 "** ch %d: ofdm weak signal: %s=>%s\n", 828 "** ch %d: ofdm weak signal: %s=>%s\n",
827 chan->channel, 829 chan->channel,
828 !aniState->ofdmWeakSigDetectOff ? 830 aniState->ofdmWeakSigDetect ?
829 "on" : "off", 831 "on" : "off",
830 on ? "on" : "off"); 832 on ? "on" : "off");
831 if (on) 833 if (on)
832 ah->stats.ast_ani_ofdmon++; 834 ah->stats.ast_ani_ofdmon++;
833 else 835 else
834 ah->stats.ast_ani_ofdmoff++; 836 ah->stats.ast_ani_ofdmoff++;
835 aniState->ofdmWeakSigDetectOff = !on; 837 aniState->ofdmWeakSigDetect = on;
836 } 838 }
837 break; 839 break;
838 } 840 }
@@ -851,7 +853,7 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah,
851 * from INI file & cap value 853 * from INI file & cap value
852 */ 854 */
853 value = firstep_table[level] - 855 value = firstep_table[level] -
854 firstep_table[ATH9K_ANI_FIRSTEP_LVL_NEW] + 856 firstep_table[ATH9K_ANI_FIRSTEP_LVL] +
855 aniState->iniDef.firstep; 857 aniState->iniDef.firstep;
856 if (value < ATH9K_SIG_FIRSTEP_SETTING_MIN) 858 if (value < ATH9K_SIG_FIRSTEP_SETTING_MIN)
857 value = ATH9K_SIG_FIRSTEP_SETTING_MIN; 859 value = ATH9K_SIG_FIRSTEP_SETTING_MIN;
@@ -866,7 +868,7 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah,
866 * from INI file & cap value 868 * from INI file & cap value
867 */ 869 */
868 value2 = firstep_table[level] - 870 value2 = firstep_table[level] -
869 firstep_table[ATH9K_ANI_FIRSTEP_LVL_NEW] + 871 firstep_table[ATH9K_ANI_FIRSTEP_LVL] +
870 aniState->iniDef.firstepLow; 872 aniState->iniDef.firstepLow;
871 if (value2 < ATH9K_SIG_FIRSTEP_SETTING_MIN) 873 if (value2 < ATH9K_SIG_FIRSTEP_SETTING_MIN)
872 value2 = ATH9K_SIG_FIRSTEP_SETTING_MIN; 874 value2 = ATH9K_SIG_FIRSTEP_SETTING_MIN;
@@ -882,7 +884,7 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah,
882 chan->channel, 884 chan->channel,
883 aniState->firstepLevel, 885 aniState->firstepLevel,
884 level, 886 level,
885 ATH9K_ANI_FIRSTEP_LVL_NEW, 887 ATH9K_ANI_FIRSTEP_LVL,
886 value, 888 value,
887 aniState->iniDef.firstep); 889 aniState->iniDef.firstep);
888 ath_dbg(common, ANI, 890 ath_dbg(common, ANI,
@@ -890,7 +892,7 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah,
890 chan->channel, 892 chan->channel,
891 aniState->firstepLevel, 893 aniState->firstepLevel,
892 level, 894 level,
893 ATH9K_ANI_FIRSTEP_LVL_NEW, 895 ATH9K_ANI_FIRSTEP_LVL,
894 value2, 896 value2,
895 aniState->iniDef.firstepLow); 897 aniState->iniDef.firstepLow);
896 if (level > aniState->firstepLevel) 898 if (level > aniState->firstepLevel)
@@ -915,7 +917,7 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah,
915 * from INI file & cap value 917 * from INI file & cap value
916 */ 918 */
917 value = cycpwrThr1_table[level] - 919 value = cycpwrThr1_table[level] -
918 cycpwrThr1_table[ATH9K_ANI_SPUR_IMMUNE_LVL_NEW] + 920 cycpwrThr1_table[ATH9K_ANI_SPUR_IMMUNE_LVL] +
919 aniState->iniDef.cycpwrThr1; 921 aniState->iniDef.cycpwrThr1;
920 if (value < ATH9K_SIG_SPUR_IMM_SETTING_MIN) 922 if (value < ATH9K_SIG_SPUR_IMM_SETTING_MIN)
921 value = ATH9K_SIG_SPUR_IMM_SETTING_MIN; 923 value = ATH9K_SIG_SPUR_IMM_SETTING_MIN;
@@ -931,7 +933,7 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah,
931 * from INI file & cap value 933 * from INI file & cap value
932 */ 934 */
933 value2 = cycpwrThr1_table[level] - 935 value2 = cycpwrThr1_table[level] -
934 cycpwrThr1_table[ATH9K_ANI_SPUR_IMMUNE_LVL_NEW] + 936 cycpwrThr1_table[ATH9K_ANI_SPUR_IMMUNE_LVL] +
935 aniState->iniDef.cycpwrThr1Ext; 937 aniState->iniDef.cycpwrThr1Ext;
936 if (value2 < ATH9K_SIG_SPUR_IMM_SETTING_MIN) 938 if (value2 < ATH9K_SIG_SPUR_IMM_SETTING_MIN)
937 value2 = ATH9K_SIG_SPUR_IMM_SETTING_MIN; 939 value2 = ATH9K_SIG_SPUR_IMM_SETTING_MIN;
@@ -946,7 +948,7 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah,
946 chan->channel, 948 chan->channel,
947 aniState->spurImmunityLevel, 949 aniState->spurImmunityLevel,
948 level, 950 level,
949 ATH9K_ANI_SPUR_IMMUNE_LVL_NEW, 951 ATH9K_ANI_SPUR_IMMUNE_LVL,
950 value, 952 value,
951 aniState->iniDef.cycpwrThr1); 953 aniState->iniDef.cycpwrThr1);
952 ath_dbg(common, ANI, 954 ath_dbg(common, ANI,
@@ -954,7 +956,7 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah,
954 chan->channel, 956 chan->channel,
955 aniState->spurImmunityLevel, 957 aniState->spurImmunityLevel,
956 level, 958 level,
957 ATH9K_ANI_SPUR_IMMUNE_LVL_NEW, 959 ATH9K_ANI_SPUR_IMMUNE_LVL,
958 value2, 960 value2,
959 aniState->iniDef.cycpwrThr1Ext); 961 aniState->iniDef.cycpwrThr1Ext);
960 if (level > aniState->spurImmunityLevel) 962 if (level > aniState->spurImmunityLevel)
@@ -975,16 +977,16 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah,
975 AR_PHY_MRC_CCK_ENABLE, is_on); 977 AR_PHY_MRC_CCK_ENABLE, is_on);
976 REG_RMW_FIELD(ah, AR_PHY_MRC_CCK_CTRL, 978 REG_RMW_FIELD(ah, AR_PHY_MRC_CCK_CTRL,
977 AR_PHY_MRC_CCK_MUX_REG, is_on); 979 AR_PHY_MRC_CCK_MUX_REG, is_on);
978 if (!is_on != aniState->mrcCCKOff) { 980 if (is_on != aniState->mrcCCK) {
979 ath_dbg(common, ANI, "** ch %d: MRC CCK: %s=>%s\n", 981 ath_dbg(common, ANI, "** ch %d: MRC CCK: %s=>%s\n",
980 chan->channel, 982 chan->channel,
981 !aniState->mrcCCKOff ? "on" : "off", 983 aniState->mrcCCK ? "on" : "off",
982 is_on ? "on" : "off"); 984 is_on ? "on" : "off");
983 if (is_on) 985 if (is_on)
984 ah->stats.ast_ani_ccklow++; 986 ah->stats.ast_ani_ccklow++;
985 else 987 else
986 ah->stats.ast_ani_cckhigh++; 988 ah->stats.ast_ani_cckhigh++;
987 aniState->mrcCCKOff = !is_on; 989 aniState->mrcCCK = is_on;
988 } 990 }
989 break; 991 break;
990 } 992 }
@@ -998,9 +1000,9 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah,
998 ath_dbg(common, ANI, 1000 ath_dbg(common, ANI,
999 "ANI parameters: SI=%d, ofdmWS=%s FS=%d MRCcck=%s listenTime=%d ofdmErrs=%d cckErrs=%d\n", 1001 "ANI parameters: SI=%d, ofdmWS=%s FS=%d MRCcck=%s listenTime=%d ofdmErrs=%d cckErrs=%d\n",
1000 aniState->spurImmunityLevel, 1002 aniState->spurImmunityLevel,
1001 !aniState->ofdmWeakSigDetectOff ? "on" : "off", 1003 aniState->ofdmWeakSigDetect ? "on" : "off",
1002 aniState->firstepLevel, 1004 aniState->firstepLevel,
1003 !aniState->mrcCCKOff ? "on" : "off", 1005 aniState->mrcCCK ? "on" : "off",
1004 aniState->listenTime, 1006 aniState->listenTime,
1005 aniState->ofdmPhyErrCount, 1007 aniState->ofdmPhyErrCount,
1006 aniState->cckPhyErrCount); 1008 aniState->cckPhyErrCount);
@@ -1107,10 +1109,10 @@ static void ar9003_hw_ani_cache_ini_regs(struct ath_hw *ah)
1107 AR_PHY_EXT_CYCPWR_THR1); 1109 AR_PHY_EXT_CYCPWR_THR1);
1108 1110
1109 /* these levels just got reset to defaults by the INI */ 1111 /* these levels just got reset to defaults by the INI */
1110 aniState->spurImmunityLevel = ATH9K_ANI_SPUR_IMMUNE_LVL_NEW; 1112 aniState->spurImmunityLevel = ATH9K_ANI_SPUR_IMMUNE_LVL;
1111 aniState->firstepLevel = ATH9K_ANI_FIRSTEP_LVL_NEW; 1113 aniState->firstepLevel = ATH9K_ANI_FIRSTEP_LVL;
1112 aniState->ofdmWeakSigDetectOff = !ATH9K_ANI_USE_OFDM_WEAK_SIG; 1114 aniState->ofdmWeakSigDetect = ATH9K_ANI_USE_OFDM_WEAK_SIG;
1113 aniState->mrcCCKOff = !ATH9K_ANI_ENABLE_MRC_CCK; 1115 aniState->mrcCCK = true;
1114} 1116}
1115 1117
1116static void ar9003_hw_set_radar_params(struct ath_hw *ah, 1118static void ar9003_hw_set_radar_params(struct ath_hw *ah,
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.h b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
index 7268a48a92a..ed662c3bae5 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
@@ -820,18 +820,26 @@
820#define AR_PHY_CHAN_INFO_MEMORY_CAPTURE_MASK 0x0001 820#define AR_PHY_CHAN_INFO_MEMORY_CAPTURE_MASK 0x0001
821#define AR_PHY_RX_DELAY_DELAY 0x00003FFF 821#define AR_PHY_RX_DELAY_DELAY 0x00003FFF
822#define AR_PHY_CCK_TX_CTRL_JAPAN 0x00000010 822#define AR_PHY_CCK_TX_CTRL_JAPAN 0x00000010
823#define AR_PHY_SPECTRAL_SCAN_ENABLE 0x00000001 823
824#define AR_PHY_SPECTRAL_SCAN_ENABLE_S 0 824#define AR_PHY_SPECTRAL_SCAN_ENABLE 0x00000001
825#define AR_PHY_SPECTRAL_SCAN_ACTIVE 0x00000002 825#define AR_PHY_SPECTRAL_SCAN_ENABLE_S 0
826#define AR_PHY_SPECTRAL_SCAN_ACTIVE_S 1 826#define AR_PHY_SPECTRAL_SCAN_ACTIVE 0x00000002
827#define AR_PHY_SPECTRAL_SCAN_FFT_PERIOD 0x000000F0 827#define AR_PHY_SPECTRAL_SCAN_ACTIVE_S 1
828#define AR_PHY_SPECTRAL_SCAN_FFT_PERIOD_S 4 828#define AR_PHY_SPECTRAL_SCAN_FFT_PERIOD 0x000000F0
829#define AR_PHY_SPECTRAL_SCAN_PERIOD 0x0000FF00 829#define AR_PHY_SPECTRAL_SCAN_FFT_PERIOD_S 4
830#define AR_PHY_SPECTRAL_SCAN_PERIOD_S 8 830#define AR_PHY_SPECTRAL_SCAN_PERIOD 0x0000FF00
831#define AR_PHY_SPECTRAL_SCAN_COUNT 0x00FF0000 831#define AR_PHY_SPECTRAL_SCAN_PERIOD_S 8
832#define AR_PHY_SPECTRAL_SCAN_COUNT_S 16 832#define AR_PHY_SPECTRAL_SCAN_COUNT 0x0FFF0000
833#define AR_PHY_SPECTRAL_SCAN_SHORT_REPEAT 0x01000000 833#define AR_PHY_SPECTRAL_SCAN_COUNT_S 16
834#define AR_PHY_SPECTRAL_SCAN_SHORT_REPEAT_S 24 834#define AR_PHY_SPECTRAL_SCAN_SHORT_REPEAT 0x10000000
835#define AR_PHY_SPECTRAL_SCAN_SHORT_REPEAT_S 28
836#define AR_PHY_SPECTRAL_SCAN_PRIORITY 0x20000000
837#define AR_PHY_SPECTRAL_SCAN_PRIORITY_S 29
838#define AR_PHY_SPECTRAL_SCAN_USE_ERR5 0x40000000
839#define AR_PHY_SPECTRAL_SCAN_USE_ERR5_S 30
840#define AR_PHY_SPECTRAL_SCAN_COMPRESSED_RPT 0x80000000
841#define AR_PHY_SPECTRAL_SCAN_COMPRESSED_RPT_S 31
842
835#define AR_PHY_CHANNEL_STATUS_RX_CLEAR 0x00000004 843#define AR_PHY_CHANNEL_STATUS_RX_CLEAR 0x00000004
836#define AR_PHY_RTT_CTRL_ENA_RADIO_RETENTION 0x00000001 844#define AR_PHY_RTT_CTRL_ENA_RADIO_RETENTION 0x00000001
837#define AR_PHY_RTT_CTRL_ENA_RADIO_RETENTION_S 0 845#define AR_PHY_RTT_CTRL_ENA_RADIO_RETENTION_S 0
diff --git a/drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h b/drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h
index 1bd3a3d2210..6e1756bc383 100644
--- a/drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h
@@ -337,12 +337,7 @@ static const u32 ar9331_modes_low_ob_db_tx_gain_1p1[][5] = {
337 {0x00016284, 0x14d3f000, 0x14d3f000, 0x14d3f000, 0x14d3f000}, 337 {0x00016284, 0x14d3f000, 0x14d3f000, 0x14d3f000, 0x14d3f000},
338}; 338};
339 339
340static const u32 ar9331_1p1_baseband_core_txfir_coeff_japan_2484[][2] = { 340#define ar9331_1p1_baseband_core_txfir_coeff_japan_2484 ar9462_2p0_baseband_core_txfir_coeff_japan_2484
341 /* Addr allmodes */
342 {0x0000a398, 0x00000000},
343 {0x0000a39c, 0x6f7f0301},
344 {0x0000a3a0, 0xca9228ee},
345};
346 341
347static const u32 ar9331_1p1_xtal_25M[][2] = { 342static const u32 ar9331_1p1_xtal_25M[][2] = {
348 /* Addr allmodes */ 343 /* Addr allmodes */
@@ -783,17 +778,7 @@ static const u32 ar9331_modes_high_power_tx_gain_1p1[][5] = {
783 {0x00016284, 0x14d3f000, 0x14d3f000, 0x14d3f000, 0x14d3f000}, 778 {0x00016284, 0x14d3f000, 0x14d3f000, 0x14d3f000, 0x14d3f000},
784}; 779};
785 780
786static const u32 ar9331_1p1_mac_postamble[][5] = { 781#define ar9331_1p1_mac_postamble ar9300_2p2_mac_postamble
787 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
788 {0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160},
789 {0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c},
790 {0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38},
791 {0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00},
792 {0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b},
793 {0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810},
794 {0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a},
795 {0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440},
796};
797 782
798static const u32 ar9331_1p1_soc_preamble[][2] = { 783static const u32 ar9331_1p1_soc_preamble[][2] = {
799 /* Addr allmodes */ 784 /* Addr allmodes */
@@ -1112,38 +1097,4 @@ static const u32 ar9331_common_tx_gain_offset1_1[][1] = {
1112 {0x00000000}, 1097 {0x00000000},
1113}; 1098};
1114 1099
1115static const u32 ar9331_1p1_chansel_xtal_25M[] = {
1116 0x0101479e,
1117 0x0101d027,
1118 0x010258af,
1119 0x0102e138,
1120 0x010369c0,
1121 0x0103f249,
1122 0x01047ad1,
1123 0x0105035a,
1124 0x01058be2,
1125 0x0106146b,
1126 0x01069cf3,
1127 0x0107257c,
1128 0x0107ae04,
1129 0x0108f5b2,
1130};
1131
1132static const u32 ar9331_1p1_chansel_xtal_40M[] = {
1133 0x00a0ccbe,
1134 0x00a12213,
1135 0x00a17769,
1136 0x00a1ccbe,
1137 0x00a22213,
1138 0x00a27769,
1139 0x00a2ccbe,
1140 0x00a32213,
1141 0x00a37769,
1142 0x00a3ccbe,
1143 0x00a42213,
1144 0x00a47769,
1145 0x00a4ccbe,
1146 0x00a5998b,
1147};
1148
1149#endif /* INITVALS_9330_1P1_H */ 1100#endif /* INITVALS_9330_1P1_H */
diff --git a/drivers/net/wireless/ath/ath9k/ar9330_1p2_initvals.h b/drivers/net/wireless/ath/ath9k/ar9330_1p2_initvals.h
index 0e6ca0834b3..57ed8a11217 100644
--- a/drivers/net/wireless/ath/ath9k/ar9330_1p2_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9330_1p2_initvals.h
@@ -1,5 +1,6 @@
1/* 1/*
2 * Copyright (c) 2011 Atheros Communications Inc. 2 * Copyright (c) 2010-2011 Atheros Communications Inc.
3 * Copyright (c) 2011-2012 Qualcomm Atheros Inc.
3 * 4 *
4 * Permission to use, copy, modify, and/or distribute this software for any 5 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 6 * purpose with or without fee is hereby granted, provided that the above
@@ -17,8 +18,8 @@
17#ifndef INITVALS_9330_1P2_H 18#ifndef INITVALS_9330_1P2_H
18#define INITVALS_9330_1P2_H 19#define INITVALS_9330_1P2_H
19 20
20static const u32 ar9331_modes_lowest_ob_db_tx_gain_1p2[][5] = { 21static const u32 ar9331_modes_high_ob_db_tx_gain_1p2[][5] = {
21 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ 22 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
22 {0x0000a410, 0x000050d7, 0x000050d7, 0x000050d7, 0x000050d7}, 23 {0x0000a410, 0x000050d7, 0x000050d7, 0x000050d7, 0x000050d7},
23 {0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000}, 24 {0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
24 {0x0000a504, 0x05062002, 0x05062002, 0x04000002, 0x04000002}, 25 {0x0000a504, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
@@ -102,8 +103,14 @@ static const u32 ar9331_modes_lowest_ob_db_tx_gain_1p2[][5] = {
102 {0x0000a63c, 0x04011004, 0x04011004, 0x04011004, 0x04011004}, 103 {0x0000a63c, 0x04011004, 0x04011004, 0x04011004, 0x04011004},
103}; 104};
104 105
106#define ar9331_modes_high_power_tx_gain_1p2 ar9331_modes_high_ob_db_tx_gain_1p2
107
108#define ar9331_modes_low_ob_db_tx_gain_1p2 ar9331_modes_high_power_tx_gain_1p2
109
110#define ar9331_modes_lowest_ob_db_tx_gain_1p2 ar9331_modes_low_ob_db_tx_gain_1p2
111
105static const u32 ar9331_1p2_baseband_postamble[][5] = { 112static const u32 ar9331_1p2_baseband_postamble[][5] = {
106 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ 113 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
107 {0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8005, 0xd00a8005}, 114 {0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8005, 0xd00a8005},
108 {0x00009820, 0x206a002e, 0x206a002e, 0x206a002e, 0x206a002e}, 115 {0x00009820, 0x206a002e, 0x206a002e, 0x206a002e, 0x206a002e},
109 {0x00009824, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0}, 116 {0x00009824, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0},
@@ -147,191 +154,6 @@ static const u32 ar9331_1p2_baseband_postamble[][5] = {
147 {0x0000ae18, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 154 {0x0000ae18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
148}; 155};
149 156
150static const u32 ar9331_modes_high_ob_db_tx_gain_1p2[][5] = {
151 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
152 {0x0000a410, 0x000050d7, 0x000050d7, 0x000050d7, 0x000050d7},
153 {0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
154 {0x0000a504, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
155 {0x0000a508, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004},
156 {0x0000a50c, 0x11062202, 0x11062202, 0x0d000200, 0x0d000200},
157 {0x0000a510, 0x17022e00, 0x17022e00, 0x11000202, 0x11000202},
158 {0x0000a514, 0x1d000ec2, 0x1d000ec2, 0x15000400, 0x15000400},
159 {0x0000a518, 0x25020ec0, 0x25020ec0, 0x19000402, 0x19000402},
160 {0x0000a51c, 0x2b020ec3, 0x2b020ec3, 0x1d000404, 0x1d000404},
161 {0x0000a520, 0x2f001f04, 0x2f001f04, 0x23000a00, 0x23000a00},
162 {0x0000a524, 0x35001fc4, 0x35001fc4, 0x27000a02, 0x27000a02},
163 {0x0000a528, 0x3c022f04, 0x3c022f04, 0x2b000a04, 0x2b000a04},
164 {0x0000a52c, 0x41023e85, 0x41023e85, 0x3f001620, 0x3f001620},
165 {0x0000a530, 0x48023ec6, 0x48023ec6, 0x41001621, 0x41001621},
166 {0x0000a534, 0x4d023f01, 0x4d023f01, 0x44001640, 0x44001640},
167 {0x0000a538, 0x53023f4b, 0x53023f4b, 0x46001641, 0x46001641},
168 {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x48001642, 0x48001642},
169 {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x4b001644, 0x4b001644},
170 {0x0000a544, 0x6502feca, 0x6502feca, 0x4e001a81, 0x4e001a81},
171 {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x51001a83, 0x51001a83},
172 {0x0000a54c, 0x7203feca, 0x7203feca, 0x54001c84, 0x54001c84},
173 {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x57001ce3, 0x57001ce3},
174 {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x5b001ce5, 0x5b001ce5},
175 {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x5f001ce9, 0x5f001ce9},
176 {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x66001eec, 0x66001eec},
177 {0x0000a560, 0x900fff0b, 0x900fff0b, 0x66001eec, 0x66001eec},
178 {0x0000a564, 0x960fffcb, 0x960fffcb, 0x66001eec, 0x66001eec},
179 {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x66001eec, 0x66001eec},
180 {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x66001eec, 0x66001eec},
181 {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x66001eec, 0x66001eec},
182 {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x66001eec, 0x66001eec},
183 {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x66001eec, 0x66001eec},
184 {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x66001eec, 0x66001eec},
185 {0x0000a580, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
186 {0x0000a584, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
187 {0x0000a588, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004},
188 {0x0000a58c, 0x11062202, 0x11062202, 0x0b000200, 0x0b000200},
189 {0x0000a590, 0x17022e00, 0x17022e00, 0x0f000202, 0x0f000202},
190 {0x0000a594, 0x1d000ec2, 0x1d000ec2, 0x11000400, 0x11000400},
191 {0x0000a598, 0x25020ec0, 0x25020ec0, 0x15000402, 0x15000402},
192 {0x0000a59c, 0x2b020ec3, 0x2b020ec3, 0x19000404, 0x19000404},
193 {0x0000a5a0, 0x2f001f04, 0x2f001f04, 0x1b000603, 0x1b000603},
194 {0x0000a5a4, 0x35001fc4, 0x35001fc4, 0x1f000a02, 0x1f000a02},
195 {0x0000a5a8, 0x3c022f04, 0x3c022f04, 0x23000a04, 0x23000a04},
196 {0x0000a5ac, 0x41023e85, 0x41023e85, 0x26000a20, 0x26000a20},
197 {0x0000a5b0, 0x48023ec6, 0x48023ec6, 0x2a000e20, 0x2a000e20},
198 {0x0000a5b4, 0x4d023f01, 0x4d023f01, 0x2e000e22, 0x2e000e22},
199 {0x0000a5b8, 0x53023f4b, 0x53023f4b, 0x31000e24, 0x31000e24},
200 {0x0000a5bc, 0x5a027f09, 0x5a027f09, 0x34001640, 0x34001640},
201 {0x0000a5c0, 0x5f027fc9, 0x5f027fc9, 0x38001660, 0x38001660},
202 {0x0000a5c4, 0x6502feca, 0x6502feca, 0x3b001861, 0x3b001861},
203 {0x0000a5c8, 0x6b02ff4a, 0x6b02ff4a, 0x3e001a81, 0x3e001a81},
204 {0x0000a5cc, 0x7203feca, 0x7203feca, 0x42001a83, 0x42001a83},
205 {0x0000a5d0, 0x7703ff0b, 0x7703ff0b, 0x44001c84, 0x44001c84},
206 {0x0000a5d4, 0x7d06ffcb, 0x7d06ffcb, 0x48001ce3, 0x48001ce3},
207 {0x0000a5d8, 0x8407ff0b, 0x8407ff0b, 0x4c001ce5, 0x4c001ce5},
208 {0x0000a5dc, 0x8907ffcb, 0x8907ffcb, 0x50001ce9, 0x50001ce9},
209 {0x0000a5e0, 0x900fff0b, 0x900fff0b, 0x54001ceb, 0x54001ceb},
210 {0x0000a5e4, 0x960fffcb, 0x960fffcb, 0x56001eec, 0x56001eec},
211 {0x0000a5e8, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
212 {0x0000a5ec, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
213 {0x0000a5f0, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
214 {0x0000a5f4, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
215 {0x0000a5f8, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
216 {0x0000a5fc, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
217 {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
218 {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
219 {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
220 {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
221 {0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
222 {0x0000a614, 0x01404000, 0x01404000, 0x01404000, 0x01404000},
223 {0x0000a618, 0x02008501, 0x02008501, 0x02008501, 0x02008501},
224 {0x0000a61c, 0x02008802, 0x02008802, 0x02008802, 0x02008802},
225 {0x0000a620, 0x0300c802, 0x0300c802, 0x0300c802, 0x0300c802},
226 {0x0000a624, 0x0300cc03, 0x0300cc03, 0x0300cc03, 0x0300cc03},
227 {0x0000a628, 0x04011004, 0x04011004, 0x04011004, 0x04011004},
228 {0x0000a62c, 0x04011004, 0x04011004, 0x04011004, 0x04011004},
229 {0x0000a630, 0x04011004, 0x04011004, 0x04011004, 0x04011004},
230 {0x0000a634, 0x04011004, 0x04011004, 0x04011004, 0x04011004},
231 {0x0000a638, 0x04011004, 0x04011004, 0x04011004, 0x04011004},
232 {0x0000a63c, 0x04011004, 0x04011004, 0x04011004, 0x04011004},
233};
234
235static const u32 ar9331_modes_low_ob_db_tx_gain_1p2[][5] = {
236 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
237 {0x0000a410, 0x000050d7, 0x000050d7, 0x000050d7, 0x000050d7},
238 {0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
239 {0x0000a504, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
240 {0x0000a508, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004},
241 {0x0000a50c, 0x11062202, 0x11062202, 0x0d000200, 0x0d000200},
242 {0x0000a510, 0x17022e00, 0x17022e00, 0x11000202, 0x11000202},
243 {0x0000a514, 0x1d000ec2, 0x1d000ec2, 0x15000400, 0x15000400},
244 {0x0000a518, 0x25020ec0, 0x25020ec0, 0x19000402, 0x19000402},
245 {0x0000a51c, 0x2b020ec3, 0x2b020ec3, 0x1d000404, 0x1d000404},
246 {0x0000a520, 0x2f001f04, 0x2f001f04, 0x23000a00, 0x23000a00},
247 {0x0000a524, 0x35001fc4, 0x35001fc4, 0x27000a02, 0x27000a02},
248 {0x0000a528, 0x3c022f04, 0x3c022f04, 0x2b000a04, 0x2b000a04},
249 {0x0000a52c, 0x41023e85, 0x41023e85, 0x3f001620, 0x3f001620},
250 {0x0000a530, 0x48023ec6, 0x48023ec6, 0x41001621, 0x41001621},
251 {0x0000a534, 0x4d023f01, 0x4d023f01, 0x44001640, 0x44001640},
252 {0x0000a538, 0x53023f4b, 0x53023f4b, 0x46001641, 0x46001641},
253 {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x48001642, 0x48001642},
254 {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x4b001644, 0x4b001644},
255 {0x0000a544, 0x6502feca, 0x6502feca, 0x4e001a81, 0x4e001a81},
256 {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x51001a83, 0x51001a83},
257 {0x0000a54c, 0x7203feca, 0x7203feca, 0x54001c84, 0x54001c84},
258 {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x57001ce3, 0x57001ce3},
259 {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x5b001ce5, 0x5b001ce5},
260 {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x5f001ce9, 0x5f001ce9},
261 {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x66001eec, 0x66001eec},
262 {0x0000a560, 0x900fff0b, 0x900fff0b, 0x66001eec, 0x66001eec},
263 {0x0000a564, 0x960fffcb, 0x960fffcb, 0x66001eec, 0x66001eec},
264 {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x66001eec, 0x66001eec},
265 {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x66001eec, 0x66001eec},
266 {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x66001eec, 0x66001eec},
267 {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x66001eec, 0x66001eec},
268 {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x66001eec, 0x66001eec},
269 {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x66001eec, 0x66001eec},
270 {0x0000a580, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
271 {0x0000a584, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
272 {0x0000a588, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004},
273 {0x0000a58c, 0x11062202, 0x11062202, 0x0b000200, 0x0b000200},
274 {0x0000a590, 0x17022e00, 0x17022e00, 0x0f000202, 0x0f000202},
275 {0x0000a594, 0x1d000ec2, 0x1d000ec2, 0x11000400, 0x11000400},
276 {0x0000a598, 0x25020ec0, 0x25020ec0, 0x15000402, 0x15000402},
277 {0x0000a59c, 0x2b020ec3, 0x2b020ec3, 0x19000404, 0x19000404},
278 {0x0000a5a0, 0x2f001f04, 0x2f001f04, 0x1b000603, 0x1b000603},
279 {0x0000a5a4, 0x35001fc4, 0x35001fc4, 0x1f000a02, 0x1f000a02},
280 {0x0000a5a8, 0x3c022f04, 0x3c022f04, 0x23000a04, 0x23000a04},
281 {0x0000a5ac, 0x41023e85, 0x41023e85, 0x26000a20, 0x26000a20},
282 {0x0000a5b0, 0x48023ec6, 0x48023ec6, 0x2a000e20, 0x2a000e20},
283 {0x0000a5b4, 0x4d023f01, 0x4d023f01, 0x2e000e22, 0x2e000e22},
284 {0x0000a5b8, 0x53023f4b, 0x53023f4b, 0x31000e24, 0x31000e24},
285 {0x0000a5bc, 0x5a027f09, 0x5a027f09, 0x34001640, 0x34001640},
286 {0x0000a5c0, 0x5f027fc9, 0x5f027fc9, 0x38001660, 0x38001660},
287 {0x0000a5c4, 0x6502feca, 0x6502feca, 0x3b001861, 0x3b001861},
288 {0x0000a5c8, 0x6b02ff4a, 0x6b02ff4a, 0x3e001a81, 0x3e001a81},
289 {0x0000a5cc, 0x7203feca, 0x7203feca, 0x42001a83, 0x42001a83},
290 {0x0000a5d0, 0x7703ff0b, 0x7703ff0b, 0x44001c84, 0x44001c84},
291 {0x0000a5d4, 0x7d06ffcb, 0x7d06ffcb, 0x48001ce3, 0x48001ce3},
292 {0x0000a5d8, 0x8407ff0b, 0x8407ff0b, 0x4c001ce5, 0x4c001ce5},
293 {0x0000a5dc, 0x8907ffcb, 0x8907ffcb, 0x50001ce9, 0x50001ce9},
294 {0x0000a5e0, 0x900fff0b, 0x900fff0b, 0x54001ceb, 0x54001ceb},
295 {0x0000a5e4, 0x960fffcb, 0x960fffcb, 0x56001eec, 0x56001eec},
296 {0x0000a5e8, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
297 {0x0000a5ec, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
298 {0x0000a5f0, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
299 {0x0000a5f4, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
300 {0x0000a5f8, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
301 {0x0000a5fc, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
302 {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
303 {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
304 {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
305 {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
306 {0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
307 {0x0000a614, 0x01404000, 0x01404000, 0x01404000, 0x01404000},
308 {0x0000a618, 0x02008501, 0x02008501, 0x02008501, 0x02008501},
309 {0x0000a61c, 0x02008802, 0x02008802, 0x02008802, 0x02008802},
310 {0x0000a620, 0x0300c802, 0x0300c802, 0x0300c802, 0x0300c802},
311 {0x0000a624, 0x0300cc03, 0x0300cc03, 0x0300cc03, 0x0300cc03},
312 {0x0000a628, 0x04011004, 0x04011004, 0x04011004, 0x04011004},
313 {0x0000a62c, 0x04011004, 0x04011004, 0x04011004, 0x04011004},
314 {0x0000a630, 0x04011004, 0x04011004, 0x04011004, 0x04011004},
315 {0x0000a634, 0x04011004, 0x04011004, 0x04011004, 0x04011004},
316 {0x0000a638, 0x04011004, 0x04011004, 0x04011004, 0x04011004},
317 {0x0000a63c, 0x04011004, 0x04011004, 0x04011004, 0x04011004},
318};
319
320static const u32 ar9331_1p2_baseband_core_txfir_coeff_japan_2484[][2] = {
321 /* Addr allmodes */
322 {0x0000a398, 0x00000000},
323 {0x0000a39c, 0x6f7f0301},
324 {0x0000a3a0, 0xca9228ee},
325};
326
327static const u32 ar9331_1p2_xtal_25M[][2] = {
328 /* Addr allmodes */
329 {0x00007038, 0x000002f8},
330 {0x00008244, 0x0010f3d7},
331 {0x0000824c, 0x0001e7ae},
332 {0x0001609c, 0x0f508f29},
333};
334
335static const u32 ar9331_1p2_radio_core[][2] = { 157static const u32 ar9331_1p2_radio_core[][2] = {
336 /* Addr allmodes */ 158 /* Addr allmodes */
337 {0x00016000, 0x36db6db6}, 159 {0x00016000, 0x36db6db6},
@@ -397,684 +219,24 @@ static const u32 ar9331_1p2_radio_core[][2] = {
397 {0x000163d4, 0x00000000}, 219 {0x000163d4, 0x00000000},
398}; 220};
399 221
400static const u32 ar9331_1p2_soc_postamble[][5] = { 222#define ar9331_1p2_baseband_core_txfir_coeff_japan_2484 ar9331_1p1_baseband_core_txfir_coeff_japan_2484
401 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
402 {0x00007010, 0x00000022, 0x00000022, 0x00000022, 0x00000022},
403};
404 223
405static const u32 ar9331_common_wo_xlna_rx_gain_1p2[][2] = { 224#define ar9331_1p2_xtal_25M ar9331_1p1_xtal_25M
406 /* Addr allmodes */
407 {0x0000a000, 0x00060005},
408 {0x0000a004, 0x00810080},
409 {0x0000a008, 0x00830082},
410 {0x0000a00c, 0x00850084},
411 {0x0000a010, 0x01820181},
412 {0x0000a014, 0x01840183},
413 {0x0000a018, 0x01880185},
414 {0x0000a01c, 0x018a0189},
415 {0x0000a020, 0x02850284},
416 {0x0000a024, 0x02890288},
417 {0x0000a028, 0x028b028a},
418 {0x0000a02c, 0x03850384},
419 {0x0000a030, 0x03890388},
420 {0x0000a034, 0x038b038a},
421 {0x0000a038, 0x038d038c},
422 {0x0000a03c, 0x03910390},
423 {0x0000a040, 0x03930392},
424 {0x0000a044, 0x03950394},
425 {0x0000a048, 0x00000396},
426 {0x0000a04c, 0x00000000},
427 {0x0000a050, 0x00000000},
428 {0x0000a054, 0x00000000},
429 {0x0000a058, 0x00000000},
430 {0x0000a05c, 0x00000000},
431 {0x0000a060, 0x00000000},
432 {0x0000a064, 0x00000000},
433 {0x0000a068, 0x00000000},
434 {0x0000a06c, 0x00000000},
435 {0x0000a070, 0x00000000},
436 {0x0000a074, 0x00000000},
437 {0x0000a078, 0x00000000},
438 {0x0000a07c, 0x00000000},
439 {0x0000a080, 0x28282828},
440 {0x0000a084, 0x28282828},
441 {0x0000a088, 0x28282828},
442 {0x0000a08c, 0x28282828},
443 {0x0000a090, 0x28282828},
444 {0x0000a094, 0x24242428},
445 {0x0000a098, 0x171e1e1e},
446 {0x0000a09c, 0x02020b0b},
447 {0x0000a0a0, 0x02020202},
448 {0x0000a0a4, 0x00000000},
449 {0x0000a0a8, 0x00000000},
450 {0x0000a0ac, 0x00000000},
451 {0x0000a0b0, 0x00000000},
452 {0x0000a0b4, 0x00000000},
453 {0x0000a0b8, 0x00000000},
454 {0x0000a0bc, 0x00000000},
455 {0x0000a0c0, 0x22072208},
456 {0x0000a0c4, 0x22052206},
457 {0x0000a0c8, 0x22032204},
458 {0x0000a0cc, 0x22012202},
459 {0x0000a0d0, 0x221f2200},
460 {0x0000a0d4, 0x221d221e},
461 {0x0000a0d8, 0x33023303},
462 {0x0000a0dc, 0x33003301},
463 {0x0000a0e0, 0x331e331f},
464 {0x0000a0e4, 0x4402331d},
465 {0x0000a0e8, 0x44004401},
466 {0x0000a0ec, 0x441e441f},
467 {0x0000a0f0, 0x55025503},
468 {0x0000a0f4, 0x55005501},
469 {0x0000a0f8, 0x551e551f},
470 {0x0000a0fc, 0x6602551d},
471 {0x0000a100, 0x66006601},
472 {0x0000a104, 0x661e661f},
473 {0x0000a108, 0x7703661d},
474 {0x0000a10c, 0x77017702},
475 {0x0000a110, 0x00007700},
476 {0x0000a114, 0x00000000},
477 {0x0000a118, 0x00000000},
478 {0x0000a11c, 0x00000000},
479 {0x0000a120, 0x00000000},
480 {0x0000a124, 0x00000000},
481 {0x0000a128, 0x00000000},
482 {0x0000a12c, 0x00000000},
483 {0x0000a130, 0x00000000},
484 {0x0000a134, 0x00000000},
485 {0x0000a138, 0x00000000},
486 {0x0000a13c, 0x00000000},
487 {0x0000a140, 0x001f0000},
488 {0x0000a144, 0x111f1100},
489 {0x0000a148, 0x111d111e},
490 {0x0000a14c, 0x111b111c},
491 {0x0000a150, 0x22032204},
492 {0x0000a154, 0x22012202},
493 {0x0000a158, 0x221f2200},
494 {0x0000a15c, 0x221d221e},
495 {0x0000a160, 0x33013302},
496 {0x0000a164, 0x331f3300},
497 {0x0000a168, 0x4402331e},
498 {0x0000a16c, 0x44004401},
499 {0x0000a170, 0x441e441f},
500 {0x0000a174, 0x55015502},
501 {0x0000a178, 0x551f5500},
502 {0x0000a17c, 0x6602551e},
503 {0x0000a180, 0x66006601},
504 {0x0000a184, 0x661e661f},
505 {0x0000a188, 0x7703661d},
506 {0x0000a18c, 0x77017702},
507 {0x0000a190, 0x00007700},
508 {0x0000a194, 0x00000000},
509 {0x0000a198, 0x00000000},
510 {0x0000a19c, 0x00000000},
511 {0x0000a1a0, 0x00000000},
512 {0x0000a1a4, 0x00000000},
513 {0x0000a1a8, 0x00000000},
514 {0x0000a1ac, 0x00000000},
515 {0x0000a1b0, 0x00000000},
516 {0x0000a1b4, 0x00000000},
517 {0x0000a1b8, 0x00000000},
518 {0x0000a1bc, 0x00000000},
519 {0x0000a1c0, 0x00000000},
520 {0x0000a1c4, 0x00000000},
521 {0x0000a1c8, 0x00000000},
522 {0x0000a1cc, 0x00000000},
523 {0x0000a1d0, 0x00000000},
524 {0x0000a1d4, 0x00000000},
525 {0x0000a1d8, 0x00000000},
526 {0x0000a1dc, 0x00000000},
527 {0x0000a1e0, 0x00000000},
528 {0x0000a1e4, 0x00000000},
529 {0x0000a1e8, 0x00000000},
530 {0x0000a1ec, 0x00000000},
531 {0x0000a1f0, 0x00000396},
532 {0x0000a1f4, 0x00000396},
533 {0x0000a1f8, 0x00000396},
534 {0x0000a1fc, 0x00000296},
535};
536 225
537static const u32 ar9331_1p2_baseband_core[][2] = { 226#define ar9331_1p2_xtal_40M ar9331_1p1_xtal_40M
538 /* Addr allmodes */
539 {0x00009800, 0xafe68e30},
540 {0x00009804, 0xfd14e000},
541 {0x00009808, 0x9c0a8f6b},
542 {0x0000980c, 0x04800000},
543 {0x00009814, 0x9280c00a},
544 {0x00009818, 0x00000000},
545 {0x0000981c, 0x00020028},
546 {0x00009834, 0x5f3ca3de},
547 {0x00009838, 0x0108ecff},
548 {0x0000983c, 0x14750600},
549 {0x00009880, 0x201fff00},
550 {0x00009884, 0x00001042},
551 {0x000098a4, 0x00200400},
552 {0x000098b0, 0x32840bbe},
553 {0x000098d0, 0x004b6a8e},
554 {0x000098d4, 0x00000820},
555 {0x000098dc, 0x00000000},
556 {0x000098f0, 0x00000000},
557 {0x000098f4, 0x00000000},
558 {0x00009c04, 0x00000000},
559 {0x00009c08, 0x03200000},
560 {0x00009c0c, 0x00000000},
561 {0x00009c10, 0x00000000},
562 {0x00009c14, 0x00046384},
563 {0x00009c18, 0x05b6b440},
564 {0x00009c1c, 0x00b6b440},
565 {0x00009d00, 0xc080a333},
566 {0x00009d04, 0x40206c10},
567 {0x00009d08, 0x009c4060},
568 {0x00009d0c, 0x1883800a},
569 {0x00009d10, 0x01834061},
570 {0x00009d14, 0x00c00400},
571 {0x00009d18, 0x00000000},
572 {0x00009e08, 0x0038233c},
573 {0x00009e24, 0x9927b515},
574 {0x00009e28, 0x12ef0200},
575 {0x00009e30, 0x06336f77},
576 {0x00009e34, 0x6af6532f},
577 {0x00009e38, 0x0cc80c00},
578 {0x00009e40, 0x0d261820},
579 {0x00009e4c, 0x00001004},
580 {0x00009e50, 0x00ff03f1},
581 {0x00009fc0, 0x803e4788},
582 {0x00009fc4, 0x0001efb5},
583 {0x00009fcc, 0x40000014},
584 {0x0000a20c, 0x00000000},
585 {0x0000a220, 0x00000000},
586 {0x0000a224, 0x00000000},
587 {0x0000a228, 0x10002310},
588 {0x0000a23c, 0x00000000},
589 {0x0000a244, 0x0c000000},
590 {0x0000a2a0, 0x00000001},
591 {0x0000a2c0, 0x00000001},
592 {0x0000a2c8, 0x00000000},
593 {0x0000a2cc, 0x18c43433},
594 {0x0000a2d4, 0x00000000},
595 {0x0000a2dc, 0x00000000},
596 {0x0000a2e0, 0x00000000},
597 {0x0000a2e4, 0x00000000},
598 {0x0000a2e8, 0x00000000},
599 {0x0000a2ec, 0x00000000},
600 {0x0000a2f0, 0x00000000},
601 {0x0000a2f4, 0x00000000},
602 {0x0000a2f8, 0x00000000},
603 {0x0000a344, 0x00000000},
604 {0x0000a34c, 0x00000000},
605 {0x0000a350, 0x0000a000},
606 {0x0000a364, 0x00000000},
607 {0x0000a370, 0x00000000},
608 {0x0000a390, 0x00000001},
609 {0x0000a394, 0x00000444},
610 {0x0000a398, 0x001f0e0f},
611 {0x0000a39c, 0x0075393f},
612 {0x0000a3a0, 0xb79f6427},
613 {0x0000a3a4, 0x00000000},
614 {0x0000a3a8, 0xaaaaaaaa},
615 {0x0000a3ac, 0x3c466478},
616 {0x0000a3c0, 0x20202020},
617 {0x0000a3c4, 0x22222220},
618 {0x0000a3c8, 0x20200020},
619 {0x0000a3cc, 0x20202020},
620 {0x0000a3d0, 0x20202020},
621 {0x0000a3d4, 0x20202020},
622 {0x0000a3d8, 0x20202020},
623 {0x0000a3dc, 0x20202020},
624 {0x0000a3e0, 0x20202020},
625 {0x0000a3e4, 0x20202020},
626 {0x0000a3e8, 0x20202020},
627 {0x0000a3ec, 0x20202020},
628 {0x0000a3f0, 0x00000000},
629 {0x0000a3f4, 0x00000006},
630 {0x0000a3f8, 0x0cdbd380},
631 {0x0000a3fc, 0x000f0f01},
632 {0x0000a400, 0x8fa91f01},
633 {0x0000a404, 0x00000000},
634 {0x0000a408, 0x0e79e5c6},
635 {0x0000a40c, 0x00820820},
636 {0x0000a414, 0x1ce739ce},
637 {0x0000a418, 0x2d001dce},
638 {0x0000a41c, 0x1ce739ce},
639 {0x0000a420, 0x000001ce},
640 {0x0000a424, 0x1ce739ce},
641 {0x0000a428, 0x000001ce},
642 {0x0000a42c, 0x1ce739ce},
643 {0x0000a430, 0x1ce739ce},
644 {0x0000a434, 0x00000000},
645 {0x0000a438, 0x00001801},
646 {0x0000a43c, 0x00000000},
647 {0x0000a440, 0x00000000},
648 {0x0000a444, 0x00000000},
649 {0x0000a448, 0x04000000},
650 {0x0000a44c, 0x00000001},
651 {0x0000a450, 0x00010000},
652 {0x0000a458, 0x00000000},
653 {0x0000a640, 0x00000000},
654 {0x0000a644, 0x3fad9d74},
655 {0x0000a648, 0x0048060a},
656 {0x0000a64c, 0x00003c37},
657 {0x0000a670, 0x03020100},
658 {0x0000a674, 0x09080504},
659 {0x0000a678, 0x0d0c0b0a},
660 {0x0000a67c, 0x13121110},
661 {0x0000a680, 0x31301514},
662 {0x0000a684, 0x35343332},
663 {0x0000a688, 0x00000036},
664 {0x0000a690, 0x00000838},
665 {0x0000a7c0, 0x00000000},
666 {0x0000a7c4, 0xfffffffc},
667 {0x0000a7c8, 0x00000000},
668 {0x0000a7cc, 0x00000000},
669 {0x0000a7d0, 0x00000000},
670 {0x0000a7d4, 0x00000004},
671 {0x0000a7dc, 0x00000001},
672};
673 227
674static const u32 ar9331_modes_high_power_tx_gain_1p2[][5] = { 228#define ar9331_1p2_baseband_core ar9331_1p1_baseband_core
675 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
676 {0x0000a410, 0x000050d7, 0x000050d7, 0x000050d7, 0x000050d7},
677 {0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
678 {0x0000a504, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
679 {0x0000a508, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004},
680 {0x0000a50c, 0x11062202, 0x11062202, 0x0d000200, 0x0d000200},
681 {0x0000a510, 0x17022e00, 0x17022e00, 0x11000202, 0x11000202},
682 {0x0000a514, 0x1d000ec2, 0x1d000ec2, 0x15000400, 0x15000400},
683 {0x0000a518, 0x25020ec0, 0x25020ec0, 0x19000402, 0x19000402},
684 {0x0000a51c, 0x2b020ec3, 0x2b020ec3, 0x1d000404, 0x1d000404},
685 {0x0000a520, 0x2f001f04, 0x2f001f04, 0x23000a00, 0x23000a00},
686 {0x0000a524, 0x35001fc4, 0x35001fc4, 0x27000a02, 0x27000a02},
687 {0x0000a528, 0x3c022f04, 0x3c022f04, 0x2b000a04, 0x2b000a04},
688 {0x0000a52c, 0x41023e85, 0x41023e85, 0x3f001620, 0x3f001620},
689 {0x0000a530, 0x48023ec6, 0x48023ec6, 0x41001621, 0x41001621},
690 {0x0000a534, 0x4d023f01, 0x4d023f01, 0x44001640, 0x44001640},
691 {0x0000a538, 0x53023f4b, 0x53023f4b, 0x46001641, 0x46001641},
692 {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x48001642, 0x48001642},
693 {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x4b001644, 0x4b001644},
694 {0x0000a544, 0x6502feca, 0x6502feca, 0x4e001a81, 0x4e001a81},
695 {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x51001a83, 0x51001a83},
696 {0x0000a54c, 0x7203feca, 0x7203feca, 0x54001c84, 0x54001c84},
697 {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x57001ce3, 0x57001ce3},
698 {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x5b001ce5, 0x5b001ce5},
699 {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x5f001ce9, 0x5f001ce9},
700 {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x66001eec, 0x66001eec},
701 {0x0000a560, 0x900fff0b, 0x900fff0b, 0x66001eec, 0x66001eec},
702 {0x0000a564, 0x960fffcb, 0x960fffcb, 0x66001eec, 0x66001eec},
703 {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x66001eec, 0x66001eec},
704 {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x66001eec, 0x66001eec},
705 {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x66001eec, 0x66001eec},
706 {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x66001eec, 0x66001eec},
707 {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x66001eec, 0x66001eec},
708 {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x66001eec, 0x66001eec},
709 {0x0000a580, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
710 {0x0000a584, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
711 {0x0000a588, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004},
712 {0x0000a58c, 0x11062202, 0x11062202, 0x0b000200, 0x0b000200},
713 {0x0000a590, 0x17022e00, 0x17022e00, 0x0f000202, 0x0f000202},
714 {0x0000a594, 0x1d000ec2, 0x1d000ec2, 0x11000400, 0x11000400},
715 {0x0000a598, 0x25020ec0, 0x25020ec0, 0x15000402, 0x15000402},
716 {0x0000a59c, 0x2b020ec3, 0x2b020ec3, 0x19000404, 0x19000404},
717 {0x0000a5a0, 0x2f001f04, 0x2f001f04, 0x1b000603, 0x1b000603},
718 {0x0000a5a4, 0x35001fc4, 0x35001fc4, 0x1f000a02, 0x1f000a02},
719 {0x0000a5a8, 0x3c022f04, 0x3c022f04, 0x23000a04, 0x23000a04},
720 {0x0000a5ac, 0x41023e85, 0x41023e85, 0x26000a20, 0x26000a20},
721 {0x0000a5b0, 0x48023ec6, 0x48023ec6, 0x2a000e20, 0x2a000e20},
722 {0x0000a5b4, 0x4d023f01, 0x4d023f01, 0x2e000e22, 0x2e000e22},
723 {0x0000a5b8, 0x53023f4b, 0x53023f4b, 0x31000e24, 0x31000e24},
724 {0x0000a5bc, 0x5a027f09, 0x5a027f09, 0x34001640, 0x34001640},
725 {0x0000a5c0, 0x5f027fc9, 0x5f027fc9, 0x38001660, 0x38001660},
726 {0x0000a5c4, 0x6502feca, 0x6502feca, 0x3b001861, 0x3b001861},
727 {0x0000a5c8, 0x6b02ff4a, 0x6b02ff4a, 0x3e001a81, 0x3e001a81},
728 {0x0000a5cc, 0x7203feca, 0x7203feca, 0x42001a83, 0x42001a83},
729 {0x0000a5d0, 0x7703ff0b, 0x7703ff0b, 0x44001c84, 0x44001c84},
730 {0x0000a5d4, 0x7d06ffcb, 0x7d06ffcb, 0x48001ce3, 0x48001ce3},
731 {0x0000a5d8, 0x8407ff0b, 0x8407ff0b, 0x4c001ce5, 0x4c001ce5},
732 {0x0000a5dc, 0x8907ffcb, 0x8907ffcb, 0x50001ce9, 0x50001ce9},
733 {0x0000a5e0, 0x900fff0b, 0x900fff0b, 0x54001ceb, 0x54001ceb},
734 {0x0000a5e4, 0x960fffcb, 0x960fffcb, 0x56001eec, 0x56001eec},
735 {0x0000a5e8, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
736 {0x0000a5ec, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
737 {0x0000a5f0, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
738 {0x0000a5f4, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
739 {0x0000a5f8, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
740 {0x0000a5fc, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
741 {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
742 {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
743 {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
744 {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
745 {0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
746 {0x0000a614, 0x01404000, 0x01404000, 0x01404000, 0x01404000},
747 {0x0000a618, 0x02008501, 0x02008501, 0x02008501, 0x02008501},
748 {0x0000a61c, 0x02008802, 0x02008802, 0x02008802, 0x02008802},
749 {0x0000a620, 0x0300c802, 0x0300c802, 0x0300c802, 0x0300c802},
750 {0x0000a624, 0x0300cc03, 0x0300cc03, 0x0300cc03, 0x0300cc03},
751 {0x0000a628, 0x04011004, 0x04011004, 0x04011004, 0x04011004},
752 {0x0000a62c, 0x04011004, 0x04011004, 0x04011004, 0x04011004},
753 {0x0000a630, 0x04011004, 0x04011004, 0x04011004, 0x04011004},
754 {0x0000a634, 0x04011004, 0x04011004, 0x04011004, 0x04011004},
755 {0x0000a638, 0x04011004, 0x04011004, 0x04011004, 0x04011004},
756 {0x0000a63c, 0x04011004, 0x04011004, 0x04011004, 0x04011004},
757};
758 229
759static const u32 ar9331_1p2_mac_postamble[][5] = { 230#define ar9331_1p2_soc_postamble ar9331_1p1_soc_postamble
760 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
761 {0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160},
762 {0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c},
763 {0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38},
764 {0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00},
765 {0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b},
766 {0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810},
767 {0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a},
768 {0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440},
769};
770 231
771static const u32 ar9331_1p2_soc_preamble[][2] = { 232#define ar9331_1p2_mac_postamble ar9331_1p1_mac_postamble
772 /* Addr allmodes */
773 {0x00007020, 0x00000000},
774 {0x00007034, 0x00000002},
775 {0x00007038, 0x000002f8},
776};
777 233
778static const u32 ar9331_1p2_xtal_40M[][2] = { 234#define ar9331_1p2_soc_preamble ar9331_1p1_soc_preamble
779 /* Addr allmodes */
780 {0x00007038, 0x000004c2},
781 {0x00008244, 0x0010f400},
782 {0x0000824c, 0x0001e800},
783 {0x0001609c, 0x0b283f31},
784};
785 235
786static const u32 ar9331_1p2_mac_core[][2] = { 236#define ar9331_1p2_mac_core ar9331_1p1_mac_core
787 /* Addr allmodes */
788 {0x00000008, 0x00000000},
789 {0x00000030, 0x00020085},
790 {0x00000034, 0x00000005},
791 {0x00000040, 0x00000000},
792 {0x00000044, 0x00000000},
793 {0x00000048, 0x00000008},
794 {0x0000004c, 0x00000010},
795 {0x00000050, 0x00000000},
796 {0x00001040, 0x002ffc0f},
797 {0x00001044, 0x002ffc0f},
798 {0x00001048, 0x002ffc0f},
799 {0x0000104c, 0x002ffc0f},
800 {0x00001050, 0x002ffc0f},
801 {0x00001054, 0x002ffc0f},
802 {0x00001058, 0x002ffc0f},
803 {0x0000105c, 0x002ffc0f},
804 {0x00001060, 0x002ffc0f},
805 {0x00001064, 0x002ffc0f},
806 {0x000010f0, 0x00000100},
807 {0x00001270, 0x00000000},
808 {0x000012b0, 0x00000000},
809 {0x000012f0, 0x00000000},
810 {0x0000143c, 0x00000000},
811 {0x0000147c, 0x00000000},
812 {0x00008000, 0x00000000},
813 {0x00008004, 0x00000000},
814 {0x00008008, 0x00000000},
815 {0x0000800c, 0x00000000},
816 {0x00008018, 0x00000000},
817 {0x00008020, 0x00000000},
818 {0x00008038, 0x00000000},
819 {0x0000803c, 0x00000000},
820 {0x00008040, 0x00000000},
821 {0x00008044, 0x00000000},
822 {0x00008048, 0x00000000},
823 {0x0000804c, 0xffffffff},
824 {0x00008054, 0x00000000},
825 {0x00008058, 0x00000000},
826 {0x0000805c, 0x000fc78f},
827 {0x00008060, 0x0000000f},
828 {0x00008064, 0x00000000},
829 {0x00008070, 0x00000310},
830 {0x00008074, 0x00000020},
831 {0x00008078, 0x00000000},
832 {0x0000809c, 0x0000000f},
833 {0x000080a0, 0x00000000},
834 {0x000080a4, 0x02ff0000},
835 {0x000080a8, 0x0e070605},
836 {0x000080ac, 0x0000000d},
837 {0x000080b0, 0x00000000},
838 {0x000080b4, 0x00000000},
839 {0x000080b8, 0x00000000},
840 {0x000080bc, 0x00000000},
841 {0x000080c0, 0x2a800000},
842 {0x000080c4, 0x06900168},
843 {0x000080c8, 0x13881c20},
844 {0x000080cc, 0x01f40000},
845 {0x000080d0, 0x00252500},
846 {0x000080d4, 0x00a00000},
847 {0x000080d8, 0x00400000},
848 {0x000080dc, 0x00000000},
849 {0x000080e0, 0xffffffff},
850 {0x000080e4, 0x0000ffff},
851 {0x000080e8, 0x3f3f3f3f},
852 {0x000080ec, 0x00000000},
853 {0x000080f0, 0x00000000},
854 {0x000080f4, 0x00000000},
855 {0x000080fc, 0x00020000},
856 {0x00008100, 0x00000000},
857 {0x00008108, 0x00000052},
858 {0x0000810c, 0x00000000},
859 {0x00008110, 0x00000000},
860 {0x00008114, 0x000007ff},
861 {0x00008118, 0x000000aa},
862 {0x0000811c, 0x00003210},
863 {0x00008124, 0x00000000},
864 {0x00008128, 0x00000000},
865 {0x0000812c, 0x00000000},
866 {0x00008130, 0x00000000},
867 {0x00008134, 0x00000000},
868 {0x00008138, 0x00000000},
869 {0x0000813c, 0x0000ffff},
870 {0x00008144, 0xffffffff},
871 {0x00008168, 0x00000000},
872 {0x0000816c, 0x00000000},
873 {0x00008170, 0x18486200},
874 {0x00008174, 0x33332210},
875 {0x00008178, 0x00000000},
876 {0x0000817c, 0x00020000},
877 {0x000081c0, 0x00000000},
878 {0x000081c4, 0x33332210},
879 {0x000081c8, 0x00000000},
880 {0x000081cc, 0x00000000},
881 {0x000081d4, 0x00000000},
882 {0x000081ec, 0x00000000},
883 {0x000081f0, 0x00000000},
884 {0x000081f4, 0x00000000},
885 {0x000081f8, 0x00000000},
886 {0x000081fc, 0x00000000},
887 {0x00008240, 0x00100000},
888 {0x00008248, 0x00000800},
889 {0x00008250, 0x00000000},
890 {0x00008254, 0x00000000},
891 {0x00008258, 0x00000000},
892 {0x0000825c, 0x40000000},
893 {0x00008260, 0x00080922},
894 {0x00008264, 0x9d400010},
895 {0x00008268, 0xffffffff},
896 {0x0000826c, 0x0000ffff},
897 {0x00008270, 0x00000000},
898 {0x00008274, 0x40000000},
899 {0x00008278, 0x003e4180},
900 {0x0000827c, 0x00000004},
901 {0x00008284, 0x0000002c},
902 {0x00008288, 0x0000002c},
903 {0x0000828c, 0x000000ff},
904 {0x00008294, 0x00000000},
905 {0x00008298, 0x00000000},
906 {0x0000829c, 0x00000000},
907 {0x00008300, 0x00000140},
908 {0x00008314, 0x00000000},
909 {0x0000831c, 0x0000010d},
910 {0x00008328, 0x00000000},
911 {0x0000832c, 0x00000007},
912 {0x00008330, 0x00000302},
913 {0x00008334, 0x00000700},
914 {0x00008338, 0x00ff0000},
915 {0x0000833c, 0x02400000},
916 {0x00008340, 0x000107ff},
917 {0x00008344, 0xaa48105b},
918 {0x00008348, 0x008f0000},
919 {0x0000835c, 0x00000000},
920 {0x00008360, 0xffffffff},
921 {0x00008364, 0xffffffff},
922 {0x00008368, 0x00000000},
923 {0x00008370, 0x00000000},
924 {0x00008374, 0x000000ff},
925 {0x00008378, 0x00000000},
926 {0x0000837c, 0x00000000},
927 {0x00008380, 0xffffffff},
928 {0x00008384, 0xffffffff},
929 {0x00008390, 0xffffffff},
930 {0x00008394, 0xffffffff},
931 {0x00008398, 0x00000000},
932 {0x0000839c, 0x00000000},
933 {0x000083a0, 0x00000000},
934 {0x000083a4, 0x0000fa14},
935 {0x000083a8, 0x000f0c00},
936 {0x000083ac, 0x33332210},
937 {0x000083b0, 0x33332210},
938 {0x000083b4, 0x33332210},
939 {0x000083b8, 0x33332210},
940 {0x000083bc, 0x00000000},
941 {0x000083c0, 0x00000000},
942 {0x000083c4, 0x00000000},
943 {0x000083c8, 0x00000000},
944 {0x000083cc, 0x00000200},
945 {0x000083d0, 0x000301ff},
946};
947 237
948static const u32 ar9331_common_rx_gain_1p2[][2] = { 238#define ar9331_common_wo_xlna_rx_gain_1p2 ar9331_common_wo_xlna_rx_gain_1p1
949 /* Addr allmodes */ 239
950 {0x0000a000, 0x00010000}, 240#define ar9331_common_rx_gain_1p2 ar9485_common_rx_gain_1_1
951 {0x0000a004, 0x00030002},
952 {0x0000a008, 0x00050004},
953 {0x0000a00c, 0x00810080},
954 {0x0000a010, 0x01800082},
955 {0x0000a014, 0x01820181},
956 {0x0000a018, 0x01840183},
957 {0x0000a01c, 0x01880185},
958 {0x0000a020, 0x018a0189},
959 {0x0000a024, 0x02850284},
960 {0x0000a028, 0x02890288},
961 {0x0000a02c, 0x03850384},
962 {0x0000a030, 0x03890388},
963 {0x0000a034, 0x038b038a},
964 {0x0000a038, 0x038d038c},
965 {0x0000a03c, 0x03910390},
966 {0x0000a040, 0x03930392},
967 {0x0000a044, 0x03950394},
968 {0x0000a048, 0x00000396},
969 {0x0000a04c, 0x00000000},
970 {0x0000a050, 0x00000000},
971 {0x0000a054, 0x00000000},
972 {0x0000a058, 0x00000000},
973 {0x0000a05c, 0x00000000},
974 {0x0000a060, 0x00000000},
975 {0x0000a064, 0x00000000},
976 {0x0000a068, 0x00000000},
977 {0x0000a06c, 0x00000000},
978 {0x0000a070, 0x00000000},
979 {0x0000a074, 0x00000000},
980 {0x0000a078, 0x00000000},
981 {0x0000a07c, 0x00000000},
982 {0x0000a080, 0x28282828},
983 {0x0000a084, 0x28282828},
984 {0x0000a088, 0x28282828},
985 {0x0000a08c, 0x28282828},
986 {0x0000a090, 0x28282828},
987 {0x0000a094, 0x21212128},
988 {0x0000a098, 0x171c1c1c},
989 {0x0000a09c, 0x02020212},
990 {0x0000a0a0, 0x00000202},
991 {0x0000a0a4, 0x00000000},
992 {0x0000a0a8, 0x00000000},
993 {0x0000a0ac, 0x00000000},
994 {0x0000a0b0, 0x00000000},
995 {0x0000a0b4, 0x00000000},
996 {0x0000a0b8, 0x00000000},
997 {0x0000a0bc, 0x00000000},
998 {0x0000a0c0, 0x001f0000},
999 {0x0000a0c4, 0x111f1100},
1000 {0x0000a0c8, 0x111d111e},
1001 {0x0000a0cc, 0x111b111c},
1002 {0x0000a0d0, 0x22032204},
1003 {0x0000a0d4, 0x22012202},
1004 {0x0000a0d8, 0x221f2200},
1005 {0x0000a0dc, 0x221d221e},
1006 {0x0000a0e0, 0x33013302},
1007 {0x0000a0e4, 0x331f3300},
1008 {0x0000a0e8, 0x4402331e},
1009 {0x0000a0ec, 0x44004401},
1010 {0x0000a0f0, 0x441e441f},
1011 {0x0000a0f4, 0x55015502},
1012 {0x0000a0f8, 0x551f5500},
1013 {0x0000a0fc, 0x6602551e},
1014 {0x0000a100, 0x66006601},
1015 {0x0000a104, 0x661e661f},
1016 {0x0000a108, 0x7703661d},
1017 {0x0000a10c, 0x77017702},
1018 {0x0000a110, 0x00007700},
1019 {0x0000a114, 0x00000000},
1020 {0x0000a118, 0x00000000},
1021 {0x0000a11c, 0x00000000},
1022 {0x0000a120, 0x00000000},
1023 {0x0000a124, 0x00000000},
1024 {0x0000a128, 0x00000000},
1025 {0x0000a12c, 0x00000000},
1026 {0x0000a130, 0x00000000},
1027 {0x0000a134, 0x00000000},
1028 {0x0000a138, 0x00000000},
1029 {0x0000a13c, 0x00000000},
1030 {0x0000a140, 0x001f0000},
1031 {0x0000a144, 0x111f1100},
1032 {0x0000a148, 0x111d111e},
1033 {0x0000a14c, 0x111b111c},
1034 {0x0000a150, 0x22032204},
1035 {0x0000a154, 0x22012202},
1036 {0x0000a158, 0x221f2200},
1037 {0x0000a15c, 0x221d221e},
1038 {0x0000a160, 0x33013302},
1039 {0x0000a164, 0x331f3300},
1040 {0x0000a168, 0x4402331e},
1041 {0x0000a16c, 0x44004401},
1042 {0x0000a170, 0x441e441f},
1043 {0x0000a174, 0x55015502},
1044 {0x0000a178, 0x551f5500},
1045 {0x0000a17c, 0x6602551e},
1046 {0x0000a180, 0x66006601},
1047 {0x0000a184, 0x661e661f},
1048 {0x0000a188, 0x7703661d},
1049 {0x0000a18c, 0x77017702},
1050 {0x0000a190, 0x00007700},
1051 {0x0000a194, 0x00000000},
1052 {0x0000a198, 0x00000000},
1053 {0x0000a19c, 0x00000000},
1054 {0x0000a1a0, 0x00000000},
1055 {0x0000a1a4, 0x00000000},
1056 {0x0000a1a8, 0x00000000},
1057 {0x0000a1ac, 0x00000000},
1058 {0x0000a1b0, 0x00000000},
1059 {0x0000a1b4, 0x00000000},
1060 {0x0000a1b8, 0x00000000},
1061 {0x0000a1bc, 0x00000000},
1062 {0x0000a1c0, 0x00000000},
1063 {0x0000a1c4, 0x00000000},
1064 {0x0000a1c8, 0x00000000},
1065 {0x0000a1cc, 0x00000000},
1066 {0x0000a1d0, 0x00000000},
1067 {0x0000a1d4, 0x00000000},
1068 {0x0000a1d8, 0x00000000},
1069 {0x0000a1dc, 0x00000000},
1070 {0x0000a1e0, 0x00000000},
1071 {0x0000a1e4, 0x00000000},
1072 {0x0000a1e8, 0x00000000},
1073 {0x0000a1ec, 0x00000000},
1074 {0x0000a1f0, 0x00000396},
1075 {0x0000a1f4, 0x00000396},
1076 {0x0000a1f8, 0x00000396},
1077 {0x0000a1fc, 0x00000296},
1078};
1079 241
1080#endif /* INITVALS_9330_1P2_H */ 242#endif /* INITVALS_9330_1P2_H */
diff --git a/drivers/net/wireless/ath/ath9k/ar9340_initvals.h b/drivers/net/wireless/ath/ath9k/ar9340_initvals.h
index 815a8af1bee..1d8235e19f0 100644
--- a/drivers/net/wireless/ath/ath9k/ar9340_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9340_initvals.h
@@ -1,5 +1,6 @@
1/* 1/*
2 * Copyright (c) 2011 Atheros Communications Inc. 2 * Copyright (c) 2010-2011 Atheros Communications Inc.
3 * Copyright (c) 2011-2012 Qualcomm Atheros Inc.
3 * 4 *
4 * Permission to use, copy, modify, and/or distribute this software for any 5 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 6 * purpose with or without fee is hereby granted, provided that the above
@@ -18,16 +19,16 @@
18#define INITVALS_9340_H 19#define INITVALS_9340_H
19 20
20static const u32 ar9340_1p0_radio_postamble[][5] = { 21static const u32 ar9340_1p0_radio_postamble[][5] = {
21 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ 22 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
22 {0x000160ac, 0xa4646800, 0xa4646800, 0xa4646800, 0xa4646800}, 23 {0x000160ac, 0xa4646800, 0xa4646800, 0xa4646800, 0xa4646800},
23 {0x0001610c, 0x08000000, 0x08000000, 0x00000000, 0x00000000}, 24 {0x0001610c, 0x08000000, 0x00000000, 0x00000000, 0x00000000},
24 {0x00016140, 0x10804000, 0x10804000, 0x50804000, 0x50804000}, 25 {0x00016140, 0x10804000, 0x10804000, 0x50804000, 0x50804000},
25 {0x0001650c, 0x08000000, 0x08000000, 0x00000000, 0x00000000}, 26 {0x0001650c, 0x08000000, 0x00000000, 0x00000000, 0x00000000},
26 {0x00016540, 0x10804000, 0x10804000, 0x50804000, 0x50804000}, 27 {0x00016540, 0x10804000, 0x10804000, 0x50804000, 0x50804000},
27}; 28};
28 29
29static const u32 ar9340Modes_lowest_ob_db_tx_gain_table_1p0[][5] = { 30static const u32 ar9340Modes_lowest_ob_db_tx_gain_table_1p0[][5] = {
30 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ 31 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
31 {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9}, 32 {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
32 {0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 33 {0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
33 {0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002}, 34 {0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002},
@@ -99,21 +100,10 @@ static const u32 ar9340Modes_lowest_ob_db_tx_gain_table_1p0[][5] = {
99 {0x00016448, 0x24925266, 0x24925266, 0x24925266, 0x24925266}, 100 {0x00016448, 0x24925266, 0x24925266, 0x24925266, 0x24925266},
100}; 101};
101 102
102static const u32 ar9340Modes_fast_clock_1p0[][3] = { 103#define ar9340Modes_fast_clock_1p0 ar9300Modes_fast_clock_2p2
103 /* Addr 5G_HT20 5G_HT40 */
104 {0x00001030, 0x00000268, 0x000004d0},
105 {0x00001070, 0x0000018c, 0x00000318},
106 {0x000010b0, 0x00000fd0, 0x00001fa0},
107 {0x00008014, 0x044c044c, 0x08980898},
108 {0x0000801c, 0x148ec02b, 0x148ec057},
109 {0x00008318, 0x000044c0, 0x00008980},
110 {0x00009e00, 0x03721821, 0x03721821},
111 {0x0000a230, 0x0000000b, 0x00000016},
112 {0x0000a254, 0x00000898, 0x00001130},
113};
114 104
115static const u32 ar9340_1p0_radio_core[][2] = { 105static const u32 ar9340_1p0_radio_core[][2] = {
116 /* Addr allmodes */ 106 /* Addr allmodes */
117 {0x00016000, 0x36db6db6}, 107 {0x00016000, 0x36db6db6},
118 {0x00016004, 0x6db6db40}, 108 {0x00016004, 0x6db6db40},
119 {0x00016008, 0x73f00000}, 109 {0x00016008, 0x73f00000},
@@ -146,15 +136,13 @@ static const u32 ar9340_1p0_radio_core[][2] = {
146 {0x00016100, 0x04cb0001}, 136 {0x00016100, 0x04cb0001},
147 {0x00016104, 0xfff80000}, 137 {0x00016104, 0xfff80000},
148 {0x00016108, 0x00080010}, 138 {0x00016108, 0x00080010},
149 {0x0001610c, 0x00000000},
150 {0x00016140, 0x50804008}, 139 {0x00016140, 0x50804008},
151 {0x00016144, 0x01884080}, 140 {0x00016144, 0x01884080},
152 {0x00016148, 0x000080c0}, 141 {0x00016148, 0x000080c0},
153 {0x00016280, 0x01000015}, 142 {0x00016280, 0x01000015},
154 {0x00016284, 0x05530000}, 143 {0x00016284, 0x15530000},
155 {0x00016288, 0x00318000}, 144 {0x00016288, 0x00318000},
156 {0x0001628c, 0x50000000}, 145 {0x0001628c, 0x50000000},
157 {0x00016290, 0x4080294f},
158 {0x00016380, 0x00000000}, 146 {0x00016380, 0x00000000},
159 {0x00016384, 0x00000000}, 147 {0x00016384, 0x00000000},
160 {0x00016388, 0x00800700}, 148 {0x00016388, 0x00800700},
@@ -219,52 +207,43 @@ static const u32 ar9340_1p0_radio_core[][2] = {
219}; 207};
220 208
221static const u32 ar9340_1p0_radio_core_40M[][2] = { 209static const u32 ar9340_1p0_radio_core_40M[][2] = {
210 /* Addr allmodes */
222 {0x0001609c, 0x02566f3a}, 211 {0x0001609c, 0x02566f3a},
223 {0x000160ac, 0xa4647c00}, 212 {0x000160ac, 0xa4647c00},
224 {0x000160b0, 0x01885f5a}, 213 {0x000160b0, 0x01885f5a},
225}; 214};
226 215
227static const u32 ar9340_1p0_mac_postamble[][5] = { 216#define ar9340_1p0_mac_postamble ar9300_2p2_mac_postamble
228 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
229 {0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160},
230 {0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c},
231 {0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38},
232 {0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00},
233 {0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b},
234 {0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810},
235 {0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a},
236 {0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440},
237};
238 217
239static const u32 ar9340_1p0_soc_postamble[][5] = { 218#define ar9340_1p0_soc_postamble ar9300_2p2_soc_postamble
240 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
241 {0x00007010, 0x00000023, 0x00000023, 0x00000023, 0x00000023},
242};
243 219
244static const u32 ar9340_1p0_baseband_postamble[][5] = { 220static const u32 ar9340_1p0_baseband_postamble[][5] = {
245 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ 221 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
246 {0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8011, 0xd00a8011}, 222 {0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8011, 0xd00a8011},
247 {0x00009820, 0x206a022e, 0x206a022e, 0x206a022e, 0x206a022e}, 223 {0x00009820, 0x206a022e, 0x206a022e, 0x206a022e, 0x206a022e},
248 {0x00009824, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0}, 224 {0x00009824, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0},
249 {0x00009828, 0x06903081, 0x06903081, 0x06903881, 0x06903881}, 225 {0x00009828, 0x06903081, 0x06903081, 0x06903881, 0x06903881},
250 {0x0000982c, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4}, 226 {0x0000982c, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4},
251 {0x00009830, 0x0000059c, 0x0000059c, 0x0000119c, 0x0000119c}, 227 {0x00009830, 0x0000059c, 0x0000059c, 0x0000119c, 0x0000119c},
252 {0x00009c00, 0x00000044, 0x000000c4, 0x000000c4, 0x00000044}, 228 {0x00009c00, 0x000000c4, 0x000000c4, 0x000000c4, 0x000000c4},
253 {0x00009e00, 0x0372161e, 0x0372161e, 0x037216a0, 0x037216a0}, 229 {0x00009e00, 0x0372111a, 0x0372111a, 0x037216a0, 0x037216a0},
254 {0x00009e04, 0x00182020, 0x00182020, 0x00182020, 0x00182020}, 230 {0x00009e04, 0x001c2020, 0x001c2020, 0x001c2020, 0x001c2020},
255 {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000e2}, 231 {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000e2},
256 {0x00009e10, 0x7ec88d2e, 0x7ec88d2e, 0x7ec88d2e, 0x7ec88d2e}, 232 {0x00009e10, 0x7ec88d2e, 0x7ec88d2e, 0x7ec88d2e, 0x7ec88d2e},
257 {0x00009e14, 0x31395d5e, 0x3139605e, 0x3139605e, 0x31395d5e}, 233 {0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3379605e, 0x33795d5e},
258 {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 234 {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
259 {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c}, 235 {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
260 {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce}, 236 {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
261 {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021}, 237 {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
238 {0x00009e3c, 0xcf946220, 0xcf946220, 0xcf946222, 0xcf946222},
262 {0x00009e44, 0x02321e27, 0x02321e27, 0x02291e27, 0x02291e27}, 239 {0x00009e44, 0x02321e27, 0x02321e27, 0x02291e27, 0x02291e27},
263 {0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012}, 240 {0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
264 {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000}, 241 {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
265 {0x0000a204, 0x00003fc0, 0x00003fc4, 0x00003fc4, 0x00003fc0}, 242 {0x0000a204, 0x00003ec0, 0x00003ec4, 0x00003ec4, 0x00003ec0},
266 {0x0000a208, 0x00000104, 0x00000104, 0x00000004, 0x00000004}, 243 {0x0000a208, 0x00000104, 0x00000104, 0x00000004, 0x00000004},
244 {0x0000a22c, 0x07e26a2f, 0x07e26a2f, 0x01026a2f, 0x01026a2f},
267 {0x0000a230, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b}, 245 {0x0000a230, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b},
246 {0x0000a234, 0x00000fff, 0x10000fff, 0x10000fff, 0x00000fff},
268 {0x0000a238, 0xffb81018, 0xffb81018, 0xffb81018, 0xffb81018}, 247 {0x0000a238, 0xffb81018, 0xffb81018, 0xffb81018, 0xffb81018},
269 {0x0000a250, 0x00000000, 0x00000000, 0x00000210, 0x00000108}, 248 {0x0000a250, 0x00000000, 0x00000000, 0x00000210, 0x00000108},
270 {0x0000a254, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898}, 249 {0x0000a254, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898},
@@ -277,11 +256,11 @@ static const u32 ar9340_1p0_baseband_postamble[][5] = {
277 {0x0000a288, 0x00000220, 0x00000220, 0x00000110, 0x00000110}, 256 {0x0000a288, 0x00000220, 0x00000220, 0x00000110, 0x00000110},
278 {0x0000a28c, 0x00011111, 0x00011111, 0x00022222, 0x00022222}, 257 {0x0000a28c, 0x00011111, 0x00011111, 0x00022222, 0x00022222},
279 {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18}, 258 {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
280 {0x0000a2d0, 0x00071981, 0x00071981, 0x00071981, 0x00071982}, 259 {0x0000a2d0, 0x00041983, 0x00041983, 0x00041982, 0x00041982},
281 {0x0000a2d8, 0xf999a83a, 0xf999a83a, 0xf999a83a, 0xf999a83a}, 260 {0x0000a2d8, 0x7999a83a, 0x7999a83a, 0x7999a83a, 0x7999a83a},
282 {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 261 {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
283 {0x0000a830, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c}, 262 {0x0000a830, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
284 {0x0000ae04, 0x00180000, 0x00180000, 0x00180000, 0x00180000}, 263 {0x0000ae04, 0x001c0000, 0x001c0000, 0x001c0000, 0x001c0000},
285 {0x0000ae18, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 264 {0x0000ae18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
286 {0x0000ae1c, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c}, 265 {0x0000ae1c, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
287 {0x0000ae20, 0x000001b5, 0x000001b5, 0x000001ce, 0x000001ce}, 266 {0x0000ae20, 0x000001b5, 0x000001b5, 0x000001ce, 0x000001ce},
@@ -289,21 +268,21 @@ static const u32 ar9340_1p0_baseband_postamble[][5] = {
289}; 268};
290 269
291static const u32 ar9340_1p0_baseband_core[][2] = { 270static const u32 ar9340_1p0_baseband_core[][2] = {
292 /* Addr allmodes */ 271 /* Addr allmodes */
293 {0x00009800, 0xafe68e30}, 272 {0x00009800, 0xafe68e30},
294 {0x00009804, 0xfd14e000}, 273 {0x00009804, 0xfd14e000},
295 {0x00009808, 0x9c0a9f6b}, 274 {0x00009808, 0x9c0a9f6b},
296 {0x0000980c, 0x04900000}, 275 {0x0000980c, 0x04900000},
297 {0x00009814, 0xb280c00a}, 276 {0x00009814, 0x3280c00a},
298 {0x00009818, 0x00000000}, 277 {0x00009818, 0x00000000},
299 {0x0000981c, 0x00020028}, 278 {0x0000981c, 0x00020028},
300 {0x00009834, 0x5f3ca3de}, 279 {0x00009834, 0x6400a190},
301 {0x00009838, 0x0108ecff}, 280 {0x00009838, 0x0108ecff},
302 {0x0000983c, 0x14750600}, 281 {0x0000983c, 0x14000600},
303 {0x00009880, 0x201fff00}, 282 {0x00009880, 0x201fff00},
304 {0x00009884, 0x00001042}, 283 {0x00009884, 0x00001042},
305 {0x000098a4, 0x00200400}, 284 {0x000098a4, 0x00200400},
306 {0x000098b0, 0x52440bbe}, 285 {0x000098b0, 0x32840bbe},
307 {0x000098d0, 0x004b6a8e}, 286 {0x000098d0, 0x004b6a8e},
308 {0x000098d4, 0x00000820}, 287 {0x000098d4, 0x00000820},
309 {0x000098dc, 0x00000000}, 288 {0x000098dc, 0x00000000},
@@ -329,7 +308,6 @@ static const u32 ar9340_1p0_baseband_core[][2] = {
329 {0x00009e30, 0x06336f77}, 308 {0x00009e30, 0x06336f77},
330 {0x00009e34, 0x6af6532f}, 309 {0x00009e34, 0x6af6532f},
331 {0x00009e38, 0x0cc80c00}, 310 {0x00009e38, 0x0cc80c00},
332 {0x00009e3c, 0xcf946222},
333 {0x00009e40, 0x0d261820}, 311 {0x00009e40, 0x0d261820},
334 {0x00009e4c, 0x00001004}, 312 {0x00009e4c, 0x00001004},
335 {0x00009e50, 0x00ff03f1}, 313 {0x00009e50, 0x00ff03f1},
@@ -342,8 +320,6 @@ static const u32 ar9340_1p0_baseband_core[][2] = {
342 {0x0000a220, 0x00000000}, 320 {0x0000a220, 0x00000000},
343 {0x0000a224, 0x00000000}, 321 {0x0000a224, 0x00000000},
344 {0x0000a228, 0x10002310}, 322 {0x0000a228, 0x10002310},
345 {0x0000a22c, 0x01036a1e},
346 {0x0000a234, 0x10000fff},
347 {0x0000a23c, 0x00000000}, 323 {0x0000a23c, 0x00000000},
348 {0x0000a244, 0x0c000000}, 324 {0x0000a244, 0x0c000000},
349 {0x0000a2a0, 0x00000001}, 325 {0x0000a2a0, 0x00000001},
@@ -351,10 +327,6 @@ static const u32 ar9340_1p0_baseband_core[][2] = {
351 {0x0000a2c8, 0x00000000}, 327 {0x0000a2c8, 0x00000000},
352 {0x0000a2cc, 0x18c43433}, 328 {0x0000a2cc, 0x18c43433},
353 {0x0000a2d4, 0x00000000}, 329 {0x0000a2d4, 0x00000000},
354 {0x0000a2dc, 0x00000000},
355 {0x0000a2e0, 0x00000000},
356 {0x0000a2e4, 0x00000000},
357 {0x0000a2e8, 0x00000000},
358 {0x0000a2ec, 0x00000000}, 330 {0x0000a2ec, 0x00000000},
359 {0x0000a2f0, 0x00000000}, 331 {0x0000a2f0, 0x00000000},
360 {0x0000a2f4, 0x00000000}, 332 {0x0000a2f4, 0x00000000},
@@ -385,7 +357,7 @@ static const u32 ar9340_1p0_baseband_core[][2] = {
385 {0x0000a3e8, 0x20202020}, 357 {0x0000a3e8, 0x20202020},
386 {0x0000a3ec, 0x20202020}, 358 {0x0000a3ec, 0x20202020},
387 {0x0000a3f0, 0x00000000}, 359 {0x0000a3f0, 0x00000000},
388 {0x0000a3f4, 0x00000246}, 360 {0x0000a3f4, 0x00000000},
389 {0x0000a3f8, 0x0cdbd380}, 361 {0x0000a3f8, 0x0cdbd380},
390 {0x0000a3fc, 0x000f0f01}, 362 {0x0000a3fc, 0x000f0f01},
391 {0x0000a400, 0x8fa91f01}, 363 {0x0000a400, 0x8fa91f01},
@@ -402,33 +374,17 @@ static const u32 ar9340_1p0_baseband_core[][2] = {
402 {0x0000a430, 0x1ce739ce}, 374 {0x0000a430, 0x1ce739ce},
403 {0x0000a434, 0x00000000}, 375 {0x0000a434, 0x00000000},
404 {0x0000a438, 0x00001801}, 376 {0x0000a438, 0x00001801},
405 {0x0000a43c, 0x00000000}, 377 {0x0000a43c, 0x00100000},
406 {0x0000a440, 0x00000000}, 378 {0x0000a440, 0x00000000},
407 {0x0000a444, 0x00000000}, 379 {0x0000a444, 0x00000000},
408 {0x0000a448, 0x04000080}, 380 {0x0000a448, 0x05000080},
409 {0x0000a44c, 0x00000001}, 381 {0x0000a44c, 0x00000001},
410 {0x0000a450, 0x00010000}, 382 {0x0000a450, 0x00010000},
411 {0x0000a458, 0x00000000}, 383 {0x0000a458, 0x00000000},
412 {0x0000a600, 0x00000000},
413 {0x0000a604, 0x00000000},
414 {0x0000a608, 0x00000000},
415 {0x0000a60c, 0x00000000},
416 {0x0000a610, 0x00000000},
417 {0x0000a614, 0x00000000},
418 {0x0000a618, 0x00000000},
419 {0x0000a61c, 0x00000000},
420 {0x0000a620, 0x00000000},
421 {0x0000a624, 0x00000000},
422 {0x0000a628, 0x00000000},
423 {0x0000a62c, 0x00000000},
424 {0x0000a630, 0x00000000},
425 {0x0000a634, 0x00000000},
426 {0x0000a638, 0x00000000},
427 {0x0000a63c, 0x00000000},
428 {0x0000a640, 0x00000000}, 384 {0x0000a640, 0x00000000},
429 {0x0000a644, 0x3fad9d74}, 385 {0x0000a644, 0x3fad9d74},
430 {0x0000a648, 0x0048060a}, 386 {0x0000a648, 0x0048060a},
431 {0x0000a64c, 0x00000637}, 387 {0x0000a64c, 0x00003c37},
432 {0x0000a670, 0x03020100}, 388 {0x0000a670, 0x03020100},
433 {0x0000a674, 0x09080504}, 389 {0x0000a674, 0x09080504},
434 {0x0000a678, 0x0d0c0b0a}, 390 {0x0000a678, 0x0d0c0b0a},
@@ -451,10 +407,6 @@ static const u32 ar9340_1p0_baseband_core[][2] = {
451 {0x0000a8f4, 0x00000000}, 407 {0x0000a8f4, 0x00000000},
452 {0x0000b2d0, 0x00000080}, 408 {0x0000b2d0, 0x00000080},
453 {0x0000b2d4, 0x00000000}, 409 {0x0000b2d4, 0x00000000},
454 {0x0000b2dc, 0x00000000},
455 {0x0000b2e0, 0x00000000},
456 {0x0000b2e4, 0x00000000},
457 {0x0000b2e8, 0x00000000},
458 {0x0000b2ec, 0x00000000}, 410 {0x0000b2ec, 0x00000000},
459 {0x0000b2f0, 0x00000000}, 411 {0x0000b2f0, 0x00000000},
460 {0x0000b2f4, 0x00000000}, 412 {0x0000b2f4, 0x00000000},
@@ -465,80 +417,108 @@ static const u32 ar9340_1p0_baseband_core[][2] = {
465}; 417};
466 418
467static const u32 ar9340Modes_high_power_tx_gain_table_1p0[][5] = { 419static const u32 ar9340Modes_high_power_tx_gain_table_1p0[][5] = {
468 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ 420 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
421 {0x0000a2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
422 {0x0000a2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
423 {0x0000a2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
424 {0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
425 {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
426 {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
427 {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
428 {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
429 {0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
430 {0x0000a614, 0x01404000, 0x01404000, 0x01404000, 0x01404000},
431 {0x0000a618, 0x01404501, 0x01404501, 0x01404501, 0x01404501},
432 {0x0000a61c, 0x02008802, 0x02008802, 0x02008501, 0x02008501},
433 {0x0000a620, 0x0300cc03, 0x0300cc03, 0x0280ca03, 0x0280ca03},
434 {0x0000a624, 0x0300cc03, 0x0300cc03, 0x03010c04, 0x03010c04},
435 {0x0000a628, 0x0300cc03, 0x0300cc03, 0x04014c04, 0x04014c04},
436 {0x0000a62c, 0x03810c03, 0x03810c03, 0x04015005, 0x04015005},
437 {0x0000a630, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
438 {0x0000a634, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
439 {0x0000a638, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
440 {0x0000a63c, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
441 {0x0000b2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
442 {0x0000b2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
443 {0x0000b2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
444 {0x0000b2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
469 {0x0000a410, 0x000050d8, 0x000050d8, 0x000050d9, 0x000050d9}, 445 {0x0000a410, 0x000050d8, 0x000050d8, 0x000050d9, 0x000050d9},
470 {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000}, 446 {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
471 {0x0000a504, 0x04002222, 0x04002222, 0x04000002, 0x04000002}, 447 {0x0000a504, 0x04002222, 0x04002222, 0x02000001, 0x02000001},
472 {0x0000a508, 0x09002421, 0x09002421, 0x08000004, 0x08000004}, 448 {0x0000a508, 0x09002421, 0x09002421, 0x05000003, 0x05000003},
473 {0x0000a50c, 0x0d002621, 0x0d002621, 0x0b000200, 0x0b000200}, 449 {0x0000a50c, 0x0d002621, 0x0d002621, 0x0a000005, 0x0a000005},
474 {0x0000a510, 0x13004620, 0x13004620, 0x0f000202, 0x0f000202}, 450 {0x0000a510, 0x13004620, 0x13004620, 0x0e000201, 0x0e000201},
475 {0x0000a514, 0x19004a20, 0x19004a20, 0x11000400, 0x11000400}, 451 {0x0000a514, 0x19004a20, 0x19004a20, 0x11000203, 0x11000203},
476 {0x0000a518, 0x1d004e20, 0x1d004e20, 0x15000402, 0x15000402}, 452 {0x0000a518, 0x1d004e20, 0x1d004e20, 0x14000401, 0x14000401},
477 {0x0000a51c, 0x21005420, 0x21005420, 0x19000404, 0x19000404}, 453 {0x0000a51c, 0x21005420, 0x21005420, 0x18000403, 0x18000403},
478 {0x0000a520, 0x26005e20, 0x26005e20, 0x1b000603, 0x1b000603}, 454 {0x0000a520, 0x26005e20, 0x26005e20, 0x1b000602, 0x1b000602},
479 {0x0000a524, 0x2b005e40, 0x2b005e40, 0x1f000a02, 0x1f000a02}, 455 {0x0000a524, 0x2b005e40, 0x2b005e40, 0x1f000802, 0x1f000802},
480 {0x0000a528, 0x2f005e42, 0x2f005e42, 0x23000a04, 0x23000a04}, 456 {0x0000a528, 0x2f005e42, 0x2f005e42, 0x21000620, 0x21000620},
481 {0x0000a52c, 0x33005e44, 0x33005e44, 0x26000a20, 0x26000a20}, 457 {0x0000a52c, 0x33005e44, 0x33005e44, 0x25000820, 0x25000820},
482 {0x0000a530, 0x38005e65, 0x38005e65, 0x2a000e20, 0x2a000e20}, 458 {0x0000a530, 0x38005e65, 0x38005e65, 0x29000822, 0x29000822},
483 {0x0000a534, 0x3c005e69, 0x3c005e69, 0x2e000e22, 0x2e000e22}, 459 {0x0000a534, 0x3c005e69, 0x3c005e69, 0x2d000824, 0x2d000824},
484 {0x0000a538, 0x40005e6b, 0x40005e6b, 0x31000e24, 0x31000e24}, 460 {0x0000a538, 0x40005e6b, 0x40005e6b, 0x30000828, 0x30000828},
485 {0x0000a53c, 0x44005e6d, 0x44005e6d, 0x34001640, 0x34001640}, 461 {0x0000a53c, 0x44005e6d, 0x44005e6d, 0x3400082a, 0x3400082a},
486 {0x0000a540, 0x49005e72, 0x49005e72, 0x38001660, 0x38001660}, 462 {0x0000a540, 0x49005e72, 0x49005e72, 0x38000849, 0x38000849},
487 {0x0000a544, 0x4e005eb2, 0x4e005eb2, 0x3b001861, 0x3b001861}, 463 {0x0000a544, 0x4e005eb2, 0x4e005eb2, 0x3b000a2c, 0x3b000a2c},
488 {0x0000a548, 0x53005f12, 0x53005f12, 0x3e001a81, 0x3e001a81}, 464 {0x0000a548, 0x53005f12, 0x53005f12, 0x3e000e2b, 0x3e000e2b},
489 {0x0000a54c, 0x59025eb5, 0x59025eb5, 0x42001a83, 0x42001a83}, 465 {0x0000a54c, 0x59025eb5, 0x59025eb5, 0x42000e2d, 0x42000e2d},
490 {0x0000a550, 0x5e025f12, 0x5e025f12, 0x44001c84, 0x44001c84}, 466 {0x0000a550, 0x5e025f12, 0x5e025f12, 0x4500124a, 0x4500124a},
491 {0x0000a554, 0x61027f12, 0x61027f12, 0x48001ce3, 0x48001ce3}, 467 {0x0000a554, 0x61027f12, 0x61027f12, 0x4900124c, 0x4900124c},
492 {0x0000a558, 0x6702bf12, 0x6702bf12, 0x4c001ce5, 0x4c001ce5}, 468 {0x0000a558, 0x6702bf12, 0x6702bf12, 0x4c00126c, 0x4c00126c},
493 {0x0000a55c, 0x6b02bf14, 0x6b02bf14, 0x50001ce9, 0x50001ce9}, 469 {0x0000a55c, 0x6b02bf14, 0x6b02bf14, 0x4f00128c, 0x4f00128c},
494 {0x0000a560, 0x6f02bf16, 0x6f02bf16, 0x54001ceb, 0x54001ceb}, 470 {0x0000a560, 0x6f02bf16, 0x6f02bf16, 0x52001290, 0x52001290},
495 {0x0000a564, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec}, 471 {0x0000a564, 0x6f02bf16, 0x6f02bf16, 0x56001292, 0x56001292},
496 {0x0000a568, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec}, 472 {0x0000a568, 0x6f02bf16, 0x6f02bf16, 0x56001292, 0x56001292},
497 {0x0000a56c, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec}, 473 {0x0000a56c, 0x6f02bf16, 0x6f02bf16, 0x56001292, 0x56001292},
498 {0x0000a570, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec}, 474 {0x0000a570, 0x6f02bf16, 0x6f02bf16, 0x56001292, 0x56001292},
499 {0x0000a574, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec}, 475 {0x0000a574, 0x6f02bf16, 0x6f02bf16, 0x56001292, 0x56001292},
500 {0x0000a578, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec}, 476 {0x0000a578, 0x6f02bf16, 0x6f02bf16, 0x56001292, 0x56001292},
501 {0x0000a57c, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec}, 477 {0x0000a57c, 0x6f02bf16, 0x6f02bf16, 0x56001292, 0x56001292},
502 {0x0000a580, 0x00802220, 0x00802220, 0x00800000, 0x00800000}, 478 {0x0000a580, 0x00802220, 0x00802220, 0x00800000, 0x00800000},
503 {0x0000a584, 0x04802222, 0x04802222, 0x04800002, 0x04800002}, 479 {0x0000a584, 0x04802222, 0x04802222, 0x02800001, 0x02800001},
504 {0x0000a588, 0x09802421, 0x09802421, 0x08800004, 0x08800004}, 480 {0x0000a588, 0x09802421, 0x09802421, 0x05800003, 0x05800003},
505 {0x0000a58c, 0x0d802621, 0x0d802621, 0x0b800200, 0x0b800200}, 481 {0x0000a58c, 0x0d802621, 0x0d802621, 0x0a800005, 0x0a800005},
506 {0x0000a590, 0x13804620, 0x13804620, 0x0f800202, 0x0f800202}, 482 {0x0000a590, 0x13804620, 0x13804620, 0x0e800201, 0x0e800201},
507 {0x0000a594, 0x19804a20, 0x19804a20, 0x11800400, 0x11800400}, 483 {0x0000a594, 0x19804a20, 0x19804a20, 0x11800203, 0x11800203},
508 {0x0000a598, 0x1d804e20, 0x1d804e20, 0x15800402, 0x15800402}, 484 {0x0000a598, 0x1d804e20, 0x1d804e20, 0x14800401, 0x14800401},
509 {0x0000a59c, 0x21805420, 0x21805420, 0x19800404, 0x19800404}, 485 {0x0000a59c, 0x21805420, 0x21805420, 0x18800403, 0x18800403},
510 {0x0000a5a0, 0x26805e20, 0x26805e20, 0x1b800603, 0x1b800603}, 486 {0x0000a5a0, 0x26805e20, 0x26805e20, 0x1b800602, 0x1b800602},
511 {0x0000a5a4, 0x2b805e40, 0x2b805e40, 0x1f800a02, 0x1f800a02}, 487 {0x0000a5a4, 0x2b805e40, 0x2b805e40, 0x1f800802, 0x1f800802},
512 {0x0000a5a8, 0x2f805e42, 0x2f805e42, 0x23800a04, 0x23800a04}, 488 {0x0000a5a8, 0x2f805e42, 0x2f805e42, 0x21800620, 0x21800620},
513 {0x0000a5ac, 0x33805e44, 0x33805e44, 0x26800a20, 0x26800a20}, 489 {0x0000a5ac, 0x33805e44, 0x33805e44, 0x25800820, 0x25800820},
514 {0x0000a5b0, 0x38805e65, 0x38805e65, 0x2a800e20, 0x2a800e20}, 490 {0x0000a5b0, 0x38805e65, 0x38805e65, 0x29800822, 0x29800822},
515 {0x0000a5b4, 0x3c805e69, 0x3c805e69, 0x2e800e22, 0x2e800e22}, 491 {0x0000a5b4, 0x3c805e69, 0x3c805e69, 0x2d800824, 0x2d800824},
516 {0x0000a5b8, 0x40805e6b, 0x40805e6b, 0x31800e24, 0x31800e24}, 492 {0x0000a5b8, 0x40805e6b, 0x40805e6b, 0x30800828, 0x30800828},
517 {0x0000a5bc, 0x44805e6d, 0x44805e6d, 0x34801640, 0x34801640}, 493 {0x0000a5bc, 0x44805e6d, 0x44805e6d, 0x3480082a, 0x3480082a},
518 {0x0000a5c0, 0x49805e72, 0x49805e72, 0x38801660, 0x38801660}, 494 {0x0000a5c0, 0x49805e72, 0x49805e72, 0x38800849, 0x38800849},
519 {0x0000a5c4, 0x4e805eb2, 0x4e805eb2, 0x3b801861, 0x3b801861}, 495 {0x0000a5c4, 0x4e805eb2, 0x4e805eb2, 0x3b800a2c, 0x3b800a2c},
520 {0x0000a5c8, 0x53805f12, 0x53805f12, 0x3e801a81, 0x3e801a81}, 496 {0x0000a5c8, 0x53805f12, 0x53805f12, 0x3e800e2b, 0x3e800e2b},
521 {0x0000a5cc, 0x59825eb2, 0x59825eb2, 0x42801a83, 0x42801a83}, 497 {0x0000a5cc, 0x59825eb2, 0x59825eb2, 0x42800e2d, 0x42800e2d},
522 {0x0000a5d0, 0x5e825f12, 0x5e825f12, 0x44801c84, 0x44801c84}, 498 {0x0000a5d0, 0x5e825f12, 0x5e825f12, 0x4580124a, 0x4580124a},
523 {0x0000a5d4, 0x61827f12, 0x61827f12, 0x48801ce3, 0x48801ce3}, 499 {0x0000a5d4, 0x61827f12, 0x61827f12, 0x4980124c, 0x4980124c},
524 {0x0000a5d8, 0x6782bf12, 0x6782bf12, 0x4c801ce5, 0x4c801ce5}, 500 {0x0000a5d8, 0x6782bf12, 0x6782bf12, 0x4c80126c, 0x4c80126c},
525 {0x0000a5dc, 0x6b82bf14, 0x6b82bf14, 0x50801ce9, 0x50801ce9}, 501 {0x0000a5dc, 0x6b82bf14, 0x6b82bf14, 0x4f80128c, 0x4f80128c},
526 {0x0000a5e0, 0x6f82bf16, 0x6f82bf16, 0x54801ceb, 0x54801ceb}, 502 {0x0000a5e0, 0x6f82bf16, 0x6f82bf16, 0x52801290, 0x52801290},
527 {0x0000a5e4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec}, 503 {0x0000a5e4, 0x6f82bf16, 0x6f82bf16, 0x56801292, 0x56801292},
528 {0x0000a5e8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec}, 504 {0x0000a5e8, 0x6f82bf16, 0x6f82bf16, 0x56801292, 0x56801292},
529 {0x0000a5ec, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec}, 505 {0x0000a5ec, 0x6f82bf16, 0x6f82bf16, 0x56801292, 0x56801292},
530 {0x0000a5f0, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec}, 506 {0x0000a5f0, 0x6f82bf16, 0x6f82bf16, 0x56801292, 0x56801292},
531 {0x0000a5f4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec}, 507 {0x0000a5f4, 0x6f82bf16, 0x6f82bf16, 0x56801292, 0x56801292},
532 {0x0000a5f8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec}, 508 {0x0000a5f8, 0x6f82bf16, 0x6f82bf16, 0x56801292, 0x56801292},
533 {0x0000a5fc, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec}, 509 {0x0000a5fc, 0x6f82bf16, 0x6f82bf16, 0x56801292, 0x56801292},
534 {0x00016044, 0x056db2db, 0x056db2db, 0x056db2db, 0x056db2db}, 510 {0x00016044, 0x056db2db, 0x056db2db, 0x022492db, 0x022492db},
535 {0x00016048, 0x24925266, 0x24925266, 0x24925266, 0x24925266}, 511 {0x00016048, 0x24925266, 0x24925266, 0x24925266, 0x24925266},
536 {0x00016444, 0x056db2db, 0x056db2db, 0x056db2db, 0x056db2db}, 512 {0x00016444, 0x056db2db, 0x056db2db, 0x022492db, 0x022492db},
537 {0x00016448, 0x24925266, 0x24925266, 0x24925266, 0x24925266}, 513 {0x00016448, 0x24925266, 0x24925266, 0x24925266, 0x24925266},
538}; 514};
539 515
540static const u32 ar9340Modes_high_ob_db_tx_gain_table_1p0[][5] = { 516static const u32 ar9340Modes_high_ob_db_tx_gain_table_1p0[][5] = {
541 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ 517 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
518 {0x0000a2dc, 0x01feee00, 0x01feee00, 0x03aaa352, 0x03aaa352},
519 {0x0000a2e0, 0x0000f000, 0x0000f000, 0x03ccc584, 0x03ccc584},
520 {0x0000a2e4, 0x01ff0000, 0x01ff0000, 0x03f0f800, 0x03f0f800},
521 {0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
542 {0x0000a410, 0x000050d8, 0x000050d8, 0x000050d9, 0x000050d9}, 522 {0x0000a410, 0x000050d8, 0x000050d8, 0x000050d9, 0x000050d9},
543 {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000}, 523 {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
544 {0x0000a504, 0x04002222, 0x04002222, 0x04000002, 0x04000002}, 524 {0x0000a504, 0x04002222, 0x04002222, 0x04000002, 0x04000002},
@@ -559,7 +539,7 @@ static const u32 ar9340Modes_high_ob_db_tx_gain_table_1p0[][5] = {
559 {0x0000a540, 0x49005e72, 0x49005e72, 0x38001660, 0x38001660}, 539 {0x0000a540, 0x49005e72, 0x49005e72, 0x38001660, 0x38001660},
560 {0x0000a544, 0x4e005eb2, 0x4e005eb2, 0x3b001861, 0x3b001861}, 540 {0x0000a544, 0x4e005eb2, 0x4e005eb2, 0x3b001861, 0x3b001861},
561 {0x0000a548, 0x53005f12, 0x53005f12, 0x3e001a81, 0x3e001a81}, 541 {0x0000a548, 0x53005f12, 0x53005f12, 0x3e001a81, 0x3e001a81},
562 {0x0000a54c, 0x59025eb5, 0x59025eb5, 0x42001a83, 0x42001a83}, 542 {0x0000a54c, 0x59025eb2, 0x59025eb2, 0x42001a83, 0x42001a83},
563 {0x0000a550, 0x5e025f12, 0x5e025f12, 0x44001c84, 0x44001c84}, 543 {0x0000a550, 0x5e025f12, 0x5e025f12, 0x44001c84, 0x44001c84},
564 {0x0000a554, 0x61027f12, 0x61027f12, 0x48001ce3, 0x48001ce3}, 544 {0x0000a554, 0x61027f12, 0x61027f12, 0x48001ce3, 0x48001ce3},
565 {0x0000a558, 0x6702bf12, 0x6702bf12, 0x4c001ce5, 0x4c001ce5}, 545 {0x0000a558, 0x6702bf12, 0x6702bf12, 0x4c001ce5, 0x4c001ce5},
@@ -604,13 +584,43 @@ static const u32 ar9340Modes_high_ob_db_tx_gain_table_1p0[][5] = {
604 {0x0000a5f4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec}, 584 {0x0000a5f4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
605 {0x0000a5f8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec}, 585 {0x0000a5f8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
606 {0x0000a5fc, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec}, 586 {0x0000a5fc, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
587 {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
588 {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
589 {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
590 {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
591 {0x0000a610, 0x00804000, 0x00804000, 0x00000000, 0x00000000},
592 {0x0000a614, 0x00804201, 0x00804201, 0x01404000, 0x01404000},
593 {0x0000a618, 0x0280c802, 0x0280c802, 0x01404501, 0x01404501},
594 {0x0000a61c, 0x0280ca03, 0x0280ca03, 0x02008501, 0x02008501},
595 {0x0000a620, 0x04c15104, 0x04c15104, 0x0280ca03, 0x0280ca03},
596 {0x0000a624, 0x04c15305, 0x04c15305, 0x03010c04, 0x03010c04},
597 {0x0000a628, 0x04c15305, 0x04c15305, 0x04014c04, 0x04014c04},
598 {0x0000a62c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
599 {0x0000a630, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
600 {0x0000a634, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
601 {0x0000a638, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
602 {0x0000a63c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
603 {0x0000b2dc, 0x01feee00, 0x01feee00, 0x03aaa352, 0x03aaa352},
604 {0x0000b2e0, 0x0000f000, 0x0000f000, 0x03ccc584, 0x03ccc584},
605 {0x0000b2e4, 0x01ff0000, 0x01ff0000, 0x03f0f800, 0x03f0f800},
606 {0x0000b2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
607 {0x00016044, 0x03b6d2e4, 0x03b6d2e4, 0x03b6d2e4, 0x03b6d2e4}, 607 {0x00016044, 0x03b6d2e4, 0x03b6d2e4, 0x03b6d2e4, 0x03b6d2e4},
608 {0x00016048, 0x8e481266, 0x8e481266, 0x8e481266, 0x8e481266}, 608 {0x00016048, 0x8e481666, 0x8e481666, 0x8e481266, 0x8e481266},
609 {0x00016280, 0x01000015, 0x01000015, 0x01001015, 0x01001015},
609 {0x00016444, 0x03b6d2e4, 0x03b6d2e4, 0x03b6d2e4, 0x03b6d2e4}, 610 {0x00016444, 0x03b6d2e4, 0x03b6d2e4, 0x03b6d2e4, 0x03b6d2e4},
610 {0x00016448, 0x8e481266, 0x8e481266, 0x8e481266, 0x8e481266}, 611 {0x00016448, 0x8e481666, 0x8e481666, 0x8e481266, 0x8e481266},
611}; 612};
613
612static const u32 ar9340Modes_ub124_tx_gain_table_1p0[][5] = { 614static const u32 ar9340Modes_ub124_tx_gain_table_1p0[][5] = {
613 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ 615 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
616 {0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8005, 0xd00a8005},
617 {0x00009820, 0x206a022e, 0x206a022e, 0x206a00ae, 0x206a00ae},
618 {0x00009830, 0x0000059c, 0x0000059c, 0x0000059c, 0x0000059c},
619 {0x00009e10, 0x7ec88d2e, 0x7ec88d2e, 0x7ec82d2e, 0x7ec82d2e},
620 {0x0000a2dc, 0xfef5d402, 0xfef5d402, 0xfdab5b52, 0xfdab5b52},
621 {0x0000a2e0, 0xfe896600, 0xfe896600, 0xfd339c84, 0xfd339c84},
622 {0x0000a2e4, 0xff01f800, 0xff01f800, 0xfec3e000, 0xfec3e000},
623 {0x0000a2e8, 0xfffe0000, 0xfffe0000, 0xfffc0000, 0xfffc0000},
614 {0x0000a410, 0x000050d8, 0x000050d8, 0x000050d9, 0x000050d9}, 624 {0x0000a410, 0x000050d8, 0x000050d8, 0x000050d9, 0x000050d9},
615 {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000}, 625 {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
616 {0x0000a504, 0x04002222, 0x04002222, 0x04000002, 0x04000002}, 626 {0x0000a504, 0x04002222, 0x04002222, 0x04000002, 0x04000002},
@@ -676,15 +686,34 @@ static const u32 ar9340Modes_ub124_tx_gain_table_1p0[][5] = {
676 {0x0000a5f4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec}, 686 {0x0000a5f4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
677 {0x0000a5f8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec}, 687 {0x0000a5f8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
678 {0x0000a5fc, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec}, 688 {0x0000a5fc, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
679 {0x00016044, 0x036db2db, 0x036db2db, 0x036db2db, 0x036db2db}, 689 {0x00016044, 0x03b6d2e4, 0x03b6d2e4, 0x03b6d2e4, 0x03b6d2e4},
680 {0x00016048, 0x69b65266, 0x69b65266, 0x69b65266, 0x69b65266}, 690 {0x00016048, 0x8e480086, 0x8e480086, 0x8e480086, 0x8e480086},
681 {0x00016444, 0x036db2db, 0x036db2db, 0x036db2db, 0x036db2db}, 691 {0x00016444, 0x03b6d2e4, 0x03b6d2e4, 0x03b6d2e4, 0x03b6d2e4},
682 {0x00016448, 0x69b65266, 0x69b65266, 0x69b65266, 0x69b65266}, 692 {0x00016448, 0x8e480086, 0x8e480086, 0x8e480086, 0x8e480086},
693 {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
694 {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
695 {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
696 {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
697 {0x0000a610, 0x00804000, 0x00804000, 0x00000000, 0x00000000},
698 {0x0000a614, 0x00804201, 0x00804201, 0x01404000, 0x01404000},
699 {0x0000a618, 0x0280c802, 0x0280c802, 0x01404501, 0x01404501},
700 {0x0000a61c, 0x0280ca03, 0x0280ca03, 0x02008501, 0x02008501},
701 {0x0000a620, 0x04c15104, 0x04c15104, 0x0280ca03, 0x0280ca03},
702 {0x0000a624, 0x04c15305, 0x04c15305, 0x03010c04, 0x03010c04},
703 {0x0000a628, 0x04c15305, 0x04c15305, 0x04014c04, 0x04014c04},
704 {0x0000a62c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
705 {0x0000a630, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
706 {0x0000a634, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
707 {0x0000a638, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
708 {0x0000a63c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
709 {0x0000b2dc, 0xfef5d402, 0xfef5d402, 0xfdab5b52, 0xfdab5b52},
710 {0x0000b2e0, 0xfe896600, 0xfe896600, 0xfd339c84, 0xfd339c84},
711 {0x0000b2e4, 0xff01f800, 0xff01f800, 0xfec3e000, 0xfec3e000},
712 {0x0000b2e8, 0xfffe0000, 0xfffe0000, 0xfffc0000, 0xfffc0000},
683}; 713};
684 714
685
686static const u32 ar9340Common_rx_gain_table_1p0[][2] = { 715static const u32 ar9340Common_rx_gain_table_1p0[][2] = {
687 /* Addr allmodes */ 716 /* Addr allmodes */
688 {0x0000a000, 0x00010000}, 717 {0x0000a000, 0x00010000},
689 {0x0000a004, 0x00030002}, 718 {0x0000a004, 0x00030002},
690 {0x0000a008, 0x00050004}, 719 {0x0000a008, 0x00050004},
@@ -845,14 +874,14 @@ static const u32 ar9340Common_rx_gain_table_1p0[][2] = {
845 {0x0000b074, 0x00000000}, 874 {0x0000b074, 0x00000000},
846 {0x0000b078, 0x00000000}, 875 {0x0000b078, 0x00000000},
847 {0x0000b07c, 0x00000000}, 876 {0x0000b07c, 0x00000000},
848 {0x0000b080, 0x32323232}, 877 {0x0000b080, 0x23232323},
849 {0x0000b084, 0x2f2f3232}, 878 {0x0000b084, 0x21232323},
850 {0x0000b088, 0x23282a2d}, 879 {0x0000b088, 0x19191c1e},
851 {0x0000b08c, 0x1c1e2123}, 880 {0x0000b08c, 0x12141417},
852 {0x0000b090, 0x14171919}, 881 {0x0000b090, 0x07070e0e},
853 {0x0000b094, 0x0e0e1214}, 882 {0x0000b094, 0x03030305},
854 {0x0000b098, 0x03050707}, 883 {0x0000b098, 0x00000003},
855 {0x0000b09c, 0x00030303}, 884 {0x0000b09c, 0x00000000},
856 {0x0000b0a0, 0x00000000}, 885 {0x0000b0a0, 0x00000000},
857 {0x0000b0a4, 0x00000000}, 886 {0x0000b0a4, 0x00000000},
858 {0x0000b0a8, 0x00000000}, 887 {0x0000b0a8, 0x00000000},
@@ -944,7 +973,11 @@ static const u32 ar9340Common_rx_gain_table_1p0[][2] = {
944}; 973};
945 974
946static const u32 ar9340Modes_low_ob_db_tx_gain_table_1p0[][5] = { 975static const u32 ar9340Modes_low_ob_db_tx_gain_table_1p0[][5] = {
947 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ 976 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
977 {0x0000a2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
978 {0x0000a2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
979 {0x0000a2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
980 {0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
948 {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9}, 981 {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
949 {0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 982 {0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
950 {0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002}, 983 {0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002},
@@ -952,8 +985,8 @@ static const u32 ar9340Modes_low_ob_db_tx_gain_table_1p0[][5] = {
952 {0x0000a50c, 0x10000023, 0x10000023, 0x0b000200, 0x0b000200}, 985 {0x0000a50c, 0x10000023, 0x10000023, 0x0b000200, 0x0b000200},
953 {0x0000a510, 0x16000220, 0x16000220, 0x0f000202, 0x0f000202}, 986 {0x0000a510, 0x16000220, 0x16000220, 0x0f000202, 0x0f000202},
954 {0x0000a514, 0x1c000223, 0x1c000223, 0x12000400, 0x12000400}, 987 {0x0000a514, 0x1c000223, 0x1c000223, 0x12000400, 0x12000400},
955 {0x0000a518, 0x21020220, 0x21020220, 0x16000402, 0x16000402}, 988 {0x0000a518, 0x21002220, 0x21002220, 0x16000402, 0x16000402},
956 {0x0000a51c, 0x27020223, 0x27020223, 0x19000404, 0x19000404}, 989 {0x0000a51c, 0x27002223, 0x27002223, 0x19000404, 0x19000404},
957 {0x0000a520, 0x2b022220, 0x2b022220, 0x1c000603, 0x1c000603}, 990 {0x0000a520, 0x2b022220, 0x2b022220, 0x1c000603, 0x1c000603},
958 {0x0000a524, 0x2f022222, 0x2f022222, 0x21000a02, 0x21000a02}, 991 {0x0000a524, 0x2f022222, 0x2f022222, 0x21000a02, 0x21000a02},
959 {0x0000a528, 0x34022225, 0x34022225, 0x25000a04, 0x25000a04}, 992 {0x0000a528, 0x34022225, 0x34022225, 0x25000a04, 0x25000a04},
@@ -965,19 +998,19 @@ static const u32 ar9340Modes_low_ob_db_tx_gain_table_1p0[][5] = {
965 {0x0000a540, 0x4e02246c, 0x4e02246c, 0x3c001660, 0x3c001660}, 998 {0x0000a540, 0x4e02246c, 0x4e02246c, 0x3c001660, 0x3c001660},
966 {0x0000a544, 0x5302266c, 0x5302266c, 0x3f001861, 0x3f001861}, 999 {0x0000a544, 0x5302266c, 0x5302266c, 0x3f001861, 0x3f001861},
967 {0x0000a548, 0x5702286c, 0x5702286c, 0x43001a81, 0x43001a81}, 1000 {0x0000a548, 0x5702286c, 0x5702286c, 0x43001a81, 0x43001a81},
968 {0x0000a54c, 0x5c04286b, 0x5c04286b, 0x47001a83, 0x47001a83}, 1001 {0x0000a54c, 0x5c02486b, 0x5c02486b, 0x47001a83, 0x47001a83},
969 {0x0000a550, 0x61042a6c, 0x61042a6c, 0x4a001c84, 0x4a001c84}, 1002 {0x0000a550, 0x61024a6c, 0x61024a6c, 0x4a001c84, 0x4a001c84},
970 {0x0000a554, 0x66062a6c, 0x66062a6c, 0x4e001ce3, 0x4e001ce3}, 1003 {0x0000a554, 0x66026a6c, 0x66026a6c, 0x4e001ce3, 0x4e001ce3},
971 {0x0000a558, 0x6b062e6c, 0x6b062e6c, 0x52001ce5, 0x52001ce5}, 1004 {0x0000a558, 0x6b026e6c, 0x6b026e6c, 0x52001ce5, 0x52001ce5},
972 {0x0000a55c, 0x7006308c, 0x7006308c, 0x56001ce9, 0x56001ce9}, 1005 {0x0000a55c, 0x7002708c, 0x7002708c, 0x56001ce9, 0x56001ce9},
973 {0x0000a560, 0x730a308a, 0x730a308a, 0x5a001ceb, 0x5a001ceb}, 1006 {0x0000a560, 0x7302b08a, 0x7302b08a, 0x5a001ceb, 0x5a001ceb},
974 {0x0000a564, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec}, 1007 {0x0000a564, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
975 {0x0000a568, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec}, 1008 {0x0000a568, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
976 {0x0000a56c, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec}, 1009 {0x0000a56c, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
977 {0x0000a570, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec}, 1010 {0x0000a570, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
978 {0x0000a574, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec}, 1011 {0x0000a574, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
979 {0x0000a578, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec}, 1012 {0x0000a578, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
980 {0x0000a57c, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec}, 1013 {0x0000a57c, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
981 {0x0000a580, 0x00800000, 0x00800000, 0x00800000, 0x00800000}, 1014 {0x0000a580, 0x00800000, 0x00800000, 0x00800000, 0x00800000},
982 {0x0000a584, 0x06800003, 0x06800003, 0x04800002, 0x04800002}, 1015 {0x0000a584, 0x06800003, 0x06800003, 0x04800002, 0x04800002},
983 {0x0000a588, 0x0a800020, 0x0a800020, 0x08800004, 0x08800004}, 1016 {0x0000a588, 0x0a800020, 0x0a800020, 0x08800004, 0x08800004},
@@ -1010,14 +1043,40 @@ static const u32 ar9340Modes_low_ob_db_tx_gain_table_1p0[][5] = {
1010 {0x0000a5f4, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec}, 1043 {0x0000a5f4, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
1011 {0x0000a5f8, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec}, 1044 {0x0000a5f8, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
1012 {0x0000a5fc, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec}, 1045 {0x0000a5fc, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
1046 {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1047 {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1048 {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1049 {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1050 {0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1051 {0x0000a614, 0x01404000, 0x01404000, 0x01404000, 0x01404000},
1052 {0x0000a618, 0x01404501, 0x01404501, 0x01404501, 0x01404501},
1053 {0x0000a61c, 0x02008802, 0x02008802, 0x02008501, 0x02008501},
1054 {0x0000a620, 0x0300cc03, 0x0300cc03, 0x0280ca03, 0x0280ca03},
1055 {0x0000a624, 0x0300cc03, 0x0300cc03, 0x03010c04, 0x03010c04},
1056 {0x0000a628, 0x0300cc03, 0x0300cc03, 0x04014c04, 0x04014c04},
1057 {0x0000a62c, 0x03810c03, 0x03810c03, 0x04015005, 0x04015005},
1058 {0x0000a630, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
1059 {0x0000a634, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
1060 {0x0000a638, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
1061 {0x0000a63c, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
1062 {0x0000b2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
1063 {0x0000b2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
1064 {0x0000b2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
1065 {0x0000b2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
1013 {0x00016044, 0x056db2db, 0x056db2db, 0x056db2db, 0x056db2db}, 1066 {0x00016044, 0x056db2db, 0x056db2db, 0x056db2db, 0x056db2db},
1014 {0x00016048, 0x24925266, 0x24925266, 0x24925266, 0x24925266}, 1067 {0x00016048, 0x24925666, 0x24925666, 0x24925266, 0x24925266},
1068 {0x00016280, 0x01000015, 0x01000015, 0x01001015, 0x01001015},
1069 {0x00016288, 0xf0318000, 0xf0318000, 0xf0318000, 0xf0318000},
1015 {0x00016444, 0x056db2db, 0x056db2db, 0x056db2db, 0x056db2db}, 1070 {0x00016444, 0x056db2db, 0x056db2db, 0x056db2db, 0x056db2db},
1016 {0x00016448, 0x24925266, 0x24925266, 0x24925266, 0x24925266}, 1071 {0x00016448, 0x24925666, 0x24925666, 0x24925266, 0x24925266},
1017}; 1072};
1018 1073
1019static const u32 ar9340Modes_mixed_ob_db_tx_gain_table_1p0[][5] = { 1074static const u32 ar9340Modes_mixed_ob_db_tx_gain_table_1p0[][5] = {
1020 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ 1075 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
1076 {0x0000a2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
1077 {0x0000a2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
1078 {0x0000a2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
1079 {0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
1021 {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9}, 1080 {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
1022 {0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 1081 {0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1023 {0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002}, 1082 {0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002},
@@ -1025,8 +1084,8 @@ static const u32 ar9340Modes_mixed_ob_db_tx_gain_table_1p0[][5] = {
1025 {0x0000a50c, 0x10000023, 0x10000023, 0x0b000200, 0x0b000200}, 1084 {0x0000a50c, 0x10000023, 0x10000023, 0x0b000200, 0x0b000200},
1026 {0x0000a510, 0x16000220, 0x16000220, 0x0f000202, 0x0f000202}, 1085 {0x0000a510, 0x16000220, 0x16000220, 0x0f000202, 0x0f000202},
1027 {0x0000a514, 0x1c000223, 0x1c000223, 0x11000400, 0x11000400}, 1086 {0x0000a514, 0x1c000223, 0x1c000223, 0x11000400, 0x11000400},
1028 {0x0000a518, 0x21020220, 0x21020220, 0x15000402, 0x15000402}, 1087 {0x0000a518, 0x21002220, 0x21002220, 0x15000402, 0x15000402},
1029 {0x0000a51c, 0x27020223, 0x27020223, 0x19000404, 0x19000404}, 1088 {0x0000a51c, 0x27002223, 0x27002223, 0x19000404, 0x19000404},
1030 {0x0000a520, 0x2b022220, 0x2b022220, 0x1b000603, 0x1b000603}, 1089 {0x0000a520, 0x2b022220, 0x2b022220, 0x1b000603, 0x1b000603},
1031 {0x0000a524, 0x2f022222, 0x2f022222, 0x1f000a02, 0x1f000a02}, 1090 {0x0000a524, 0x2f022222, 0x2f022222, 0x1f000a02, 0x1f000a02},
1032 {0x0000a528, 0x34022225, 0x34022225, 0x23000a04, 0x23000a04}, 1091 {0x0000a528, 0x34022225, 0x34022225, 0x23000a04, 0x23000a04},
@@ -1038,19 +1097,19 @@ static const u32 ar9340Modes_mixed_ob_db_tx_gain_table_1p0[][5] = {
1038 {0x0000a540, 0x4e02246c, 0x4e02246c, 0x38001660, 0x38001660}, 1097 {0x0000a540, 0x4e02246c, 0x4e02246c, 0x38001660, 0x38001660},
1039 {0x0000a544, 0x5302266c, 0x5302266c, 0x3b001861, 0x3b001861}, 1098 {0x0000a544, 0x5302266c, 0x5302266c, 0x3b001861, 0x3b001861},
1040 {0x0000a548, 0x5702286c, 0x5702286c, 0x3e001a81, 0x3e001a81}, 1099 {0x0000a548, 0x5702286c, 0x5702286c, 0x3e001a81, 0x3e001a81},
1041 {0x0000a54c, 0x5c04286b, 0x5c04286b, 0x42001a83, 0x42001a83}, 1100 {0x0000a54c, 0x5c02486b, 0x5c02486b, 0x42001a83, 0x42001a83},
1042 {0x0000a550, 0x61042a6c, 0x61042a6c, 0x44001c84, 0x44001c84}, 1101 {0x0000a550, 0x61024a6c, 0x61024a6c, 0x44001c84, 0x44001c84},
1043 {0x0000a554, 0x66062a6c, 0x66062a6c, 0x48001ce3, 0x48001ce3}, 1102 {0x0000a554, 0x66026a6c, 0x66026a6c, 0x48001ce3, 0x48001ce3},
1044 {0x0000a558, 0x6b062e6c, 0x6b062e6c, 0x4c001ce5, 0x4c001ce5}, 1103 {0x0000a558, 0x6b026e6c, 0x6b026e6c, 0x4c001ce5, 0x4c001ce5},
1045 {0x0000a55c, 0x7006308c, 0x7006308c, 0x50001ce9, 0x50001ce9}, 1104 {0x0000a55c, 0x7002708c, 0x7002708c, 0x50001ce9, 0x50001ce9},
1046 {0x0000a560, 0x730a308a, 0x730a308a, 0x54001ceb, 0x54001ceb}, 1105 {0x0000a560, 0x7302b08a, 0x7302b08a, 0x54001ceb, 0x54001ceb},
1047 {0x0000a564, 0x770a308c, 0x770a308c, 0x56001eec, 0x56001eec}, 1106 {0x0000a564, 0x7702b08c, 0x7702b08c, 0x56001eec, 0x56001eec},
1048 {0x0000a568, 0x770a308c, 0x770a308c, 0x56001eec, 0x56001eec}, 1107 {0x0000a568, 0x7702b08c, 0x7702b08c, 0x56001eec, 0x56001eec},
1049 {0x0000a56c, 0x770a308c, 0x770a308c, 0x56001eec, 0x56001eec}, 1108 {0x0000a56c, 0x7702b08c, 0x7702b08c, 0x56001eec, 0x56001eec},
1050 {0x0000a570, 0x770a308c, 0x770a308c, 0x56001eec, 0x56001eec}, 1109 {0x0000a570, 0x7702b08c, 0x7702b08c, 0x56001eec, 0x56001eec},
1051 {0x0000a574, 0x770a308c, 0x770a308c, 0x56001eec, 0x56001eec}, 1110 {0x0000a574, 0x7702b08c, 0x7702b08c, 0x56001eec, 0x56001eec},
1052 {0x0000a578, 0x770a308c, 0x770a308c, 0x56001eec, 0x56001eec}, 1111 {0x0000a578, 0x7702b08c, 0x7702b08c, 0x56001eec, 0x56001eec},
1053 {0x0000a57c, 0x770a308c, 0x770a308c, 0x56001eec, 0x56001eec}, 1112 {0x0000a57c, 0x7702b08c, 0x7702b08c, 0x56001eec, 0x56001eec},
1054 {0x0000a580, 0x00800000, 0x00800000, 0x00800000, 0x00800000}, 1113 {0x0000a580, 0x00800000, 0x00800000, 0x00800000, 0x00800000},
1055 {0x0000a584, 0x06800003, 0x06800003, 0x04800002, 0x04800002}, 1114 {0x0000a584, 0x06800003, 0x06800003, 0x04800002, 0x04800002},
1056 {0x0000a588, 0x0a800020, 0x0a800020, 0x08800004, 0x08800004}, 1115 {0x0000a588, 0x0a800020, 0x0a800020, 0x08800004, 0x08800004},
@@ -1083,14 +1142,36 @@ static const u32 ar9340Modes_mixed_ob_db_tx_gain_table_1p0[][5] = {
1083 {0x0000a5f4, 0x778a308c, 0x778a308c, 0x56801eec, 0x56801eec}, 1142 {0x0000a5f4, 0x778a308c, 0x778a308c, 0x56801eec, 0x56801eec},
1084 {0x0000a5f8, 0x778a308c, 0x778a308c, 0x56801eec, 0x56801eec}, 1143 {0x0000a5f8, 0x778a308c, 0x778a308c, 0x56801eec, 0x56801eec},
1085 {0x0000a5fc, 0x778a308c, 0x778a308c, 0x56801eec, 0x56801eec}, 1144 {0x0000a5fc, 0x778a308c, 0x778a308c, 0x56801eec, 0x56801eec},
1145 {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1146 {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1147 {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1148 {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1149 {0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1150 {0x0000a614, 0x01404000, 0x01404000, 0x01404000, 0x01404000},
1151 {0x0000a618, 0x01404501, 0x01404501, 0x01404501, 0x01404501},
1152 {0x0000a61c, 0x02008802, 0x02008802, 0x02008501, 0x02008501},
1153 {0x0000a620, 0x0300cc03, 0x0300cc03, 0x0280ca03, 0x0280ca03},
1154 {0x0000a624, 0x0300cc03, 0x0300cc03, 0x03010c04, 0x03010c04},
1155 {0x0000a628, 0x0300cc03, 0x0300cc03, 0x04014c04, 0x04014c04},
1156 {0x0000a62c, 0x03810c03, 0x03810c03, 0x04015005, 0x04015005},
1157 {0x0000a630, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
1158 {0x0000a634, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
1159 {0x0000a638, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
1160 {0x0000a63c, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
1161 {0x0000b2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
1162 {0x0000b2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
1163 {0x0000b2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
1164 {0x0000b2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
1086 {0x00016044, 0x056db2db, 0x056db2db, 0x03b6d2e4, 0x03b6d2e4}, 1165 {0x00016044, 0x056db2db, 0x056db2db, 0x03b6d2e4, 0x03b6d2e4},
1087 {0x00016048, 0x24927266, 0x24927266, 0x8e483266, 0x8e483266}, 1166 {0x00016048, 0x24925666, 0x24925666, 0x8e481266, 0x8e481266},
1167 {0x00016280, 0x01000015, 0x01000015, 0x01001015, 0x01001015},
1168 {0x00016288, 0x30318000, 0x30318000, 0x00318000, 0x00318000},
1088 {0x00016444, 0x056db2db, 0x056db2db, 0x03b6d2e4, 0x03b6d2e4}, 1169 {0x00016444, 0x056db2db, 0x056db2db, 0x03b6d2e4, 0x03b6d2e4},
1089 {0x00016448, 0x24927266, 0x24927266, 0x8e482266, 0x8e482266}, 1170 {0x00016448, 0x24925666, 0x24925666, 0x8e481266, 0x8e481266},
1090}; 1171};
1091 1172
1092static const u32 ar9340_1p0_mac_core[][2] = { 1173static const u32 ar9340_1p0_mac_core[][2] = {
1093 /* Addr allmodes */ 1174 /* Addr allmodes */
1094 {0x00000008, 0x00000000}, 1175 {0x00000008, 0x00000000},
1095 {0x00000030, 0x00020085}, 1176 {0x00000030, 0x00020085},
1096 {0x00000034, 0x00000005}, 1177 {0x00000034, 0x00000005},
@@ -1119,6 +1200,7 @@ static const u32 ar9340_1p0_mac_core[][2] = {
1119 {0x00008004, 0x00000000}, 1200 {0x00008004, 0x00000000},
1120 {0x00008008, 0x00000000}, 1201 {0x00008008, 0x00000000},
1121 {0x0000800c, 0x00000000}, 1202 {0x0000800c, 0x00000000},
1203 {0x00008010, 0x00080800},
1122 {0x00008018, 0x00000000}, 1204 {0x00008018, 0x00000000},
1123 {0x00008020, 0x00000000}, 1205 {0x00008020, 0x00000000},
1124 {0x00008038, 0x00000000}, 1206 {0x00008038, 0x00000000},
@@ -1146,7 +1228,7 @@ static const u32 ar9340_1p0_mac_core[][2] = {
1146 {0x000080bc, 0x00000000}, 1228 {0x000080bc, 0x00000000},
1147 {0x000080c0, 0x2a800000}, 1229 {0x000080c0, 0x2a800000},
1148 {0x000080c4, 0x06900168}, 1230 {0x000080c4, 0x06900168},
1149 {0x000080c8, 0x13881c20}, 1231 {0x000080c8, 0x13881c22},
1150 {0x000080cc, 0x01f40000}, 1232 {0x000080cc, 0x01f40000},
1151 {0x000080d0, 0x00252500}, 1233 {0x000080d0, 0x00252500},
1152 {0x000080d4, 0x00a00000}, 1234 {0x000080d4, 0x00a00000},
@@ -1250,276 +1332,17 @@ static const u32 ar9340_1p0_mac_core[][2] = {
1250 {0x000083c4, 0x00000000}, 1332 {0x000083c4, 0x00000000},
1251 {0x000083c8, 0x00000000}, 1333 {0x000083c8, 0x00000000},
1252 {0x000083cc, 0x00000200}, 1334 {0x000083cc, 0x00000200},
1253 {0x000083d0, 0x000301ff}, 1335 {0x000083d0, 0x000101ff},
1254}; 1336};
1255 1337
1256static const u32 ar9340Common_wo_xlna_rx_gain_table_1p0[][2] = { 1338#define ar9340Common_wo_xlna_rx_gain_table_1p0 ar9300Common_wo_xlna_rx_gain_table_2p2
1257 /* Addr allmodes */
1258 {0x0000a000, 0x00010000},
1259 {0x0000a004, 0x00030002},
1260 {0x0000a008, 0x00050004},
1261 {0x0000a00c, 0x00810080},
1262 {0x0000a010, 0x00830082},
1263 {0x0000a014, 0x01810180},
1264 {0x0000a018, 0x01830182},
1265 {0x0000a01c, 0x01850184},
1266 {0x0000a020, 0x01890188},
1267 {0x0000a024, 0x018b018a},
1268 {0x0000a028, 0x018d018c},
1269 {0x0000a02c, 0x03820190},
1270 {0x0000a030, 0x03840383},
1271 {0x0000a034, 0x03880385},
1272 {0x0000a038, 0x038a0389},
1273 {0x0000a03c, 0x038c038b},
1274 {0x0000a040, 0x0390038d},
1275 {0x0000a044, 0x03920391},
1276 {0x0000a048, 0x03940393},
1277 {0x0000a04c, 0x03960395},
1278 {0x0000a050, 0x00000000},
1279 {0x0000a054, 0x00000000},
1280 {0x0000a058, 0x00000000},
1281 {0x0000a05c, 0x00000000},
1282 {0x0000a060, 0x00000000},
1283 {0x0000a064, 0x00000000},
1284 {0x0000a068, 0x00000000},
1285 {0x0000a06c, 0x00000000},
1286 {0x0000a070, 0x00000000},
1287 {0x0000a074, 0x00000000},
1288 {0x0000a078, 0x00000000},
1289 {0x0000a07c, 0x00000000},
1290 {0x0000a080, 0x29292929},
1291 {0x0000a084, 0x29292929},
1292 {0x0000a088, 0x29292929},
1293 {0x0000a08c, 0x29292929},
1294 {0x0000a090, 0x22292929},
1295 {0x0000a094, 0x1d1d2222},
1296 {0x0000a098, 0x0c111117},
1297 {0x0000a09c, 0x00030303},
1298 {0x0000a0a0, 0x00000000},
1299 {0x0000a0a4, 0x00000000},
1300 {0x0000a0a8, 0x00000000},
1301 {0x0000a0ac, 0x00000000},
1302 {0x0000a0b0, 0x00000000},
1303 {0x0000a0b4, 0x00000000},
1304 {0x0000a0b8, 0x00000000},
1305 {0x0000a0bc, 0x00000000},
1306 {0x0000a0c0, 0x001f0000},
1307 {0x0000a0c4, 0x01000101},
1308 {0x0000a0c8, 0x011e011f},
1309 {0x0000a0cc, 0x011c011d},
1310 {0x0000a0d0, 0x02030204},
1311 {0x0000a0d4, 0x02010202},
1312 {0x0000a0d8, 0x021f0200},
1313 {0x0000a0dc, 0x0302021e},
1314 {0x0000a0e0, 0x03000301},
1315 {0x0000a0e4, 0x031e031f},
1316 {0x0000a0e8, 0x0402031d},
1317 {0x0000a0ec, 0x04000401},
1318 {0x0000a0f0, 0x041e041f},
1319 {0x0000a0f4, 0x0502041d},
1320 {0x0000a0f8, 0x05000501},
1321 {0x0000a0fc, 0x051e051f},
1322 {0x0000a100, 0x06010602},
1323 {0x0000a104, 0x061f0600},
1324 {0x0000a108, 0x061d061e},
1325 {0x0000a10c, 0x07020703},
1326 {0x0000a110, 0x07000701},
1327 {0x0000a114, 0x00000000},
1328 {0x0000a118, 0x00000000},
1329 {0x0000a11c, 0x00000000},
1330 {0x0000a120, 0x00000000},
1331 {0x0000a124, 0x00000000},
1332 {0x0000a128, 0x00000000},
1333 {0x0000a12c, 0x00000000},
1334 {0x0000a130, 0x00000000},
1335 {0x0000a134, 0x00000000},
1336 {0x0000a138, 0x00000000},
1337 {0x0000a13c, 0x00000000},
1338 {0x0000a140, 0x001f0000},
1339 {0x0000a144, 0x01000101},
1340 {0x0000a148, 0x011e011f},
1341 {0x0000a14c, 0x011c011d},
1342 {0x0000a150, 0x02030204},
1343 {0x0000a154, 0x02010202},
1344 {0x0000a158, 0x021f0200},
1345 {0x0000a15c, 0x0302021e},
1346 {0x0000a160, 0x03000301},
1347 {0x0000a164, 0x031e031f},
1348 {0x0000a168, 0x0402031d},
1349 {0x0000a16c, 0x04000401},
1350 {0x0000a170, 0x041e041f},
1351 {0x0000a174, 0x0502041d},
1352 {0x0000a178, 0x05000501},
1353 {0x0000a17c, 0x051e051f},
1354 {0x0000a180, 0x06010602},
1355 {0x0000a184, 0x061f0600},
1356 {0x0000a188, 0x061d061e},
1357 {0x0000a18c, 0x07020703},
1358 {0x0000a190, 0x07000701},
1359 {0x0000a194, 0x00000000},
1360 {0x0000a198, 0x00000000},
1361 {0x0000a19c, 0x00000000},
1362 {0x0000a1a0, 0x00000000},
1363 {0x0000a1a4, 0x00000000},
1364 {0x0000a1a8, 0x00000000},
1365 {0x0000a1ac, 0x00000000},
1366 {0x0000a1b0, 0x00000000},
1367 {0x0000a1b4, 0x00000000},
1368 {0x0000a1b8, 0x00000000},
1369 {0x0000a1bc, 0x00000000},
1370 {0x0000a1c0, 0x00000000},
1371 {0x0000a1c4, 0x00000000},
1372 {0x0000a1c8, 0x00000000},
1373 {0x0000a1cc, 0x00000000},
1374 {0x0000a1d0, 0x00000000},
1375 {0x0000a1d4, 0x00000000},
1376 {0x0000a1d8, 0x00000000},
1377 {0x0000a1dc, 0x00000000},
1378 {0x0000a1e0, 0x00000000},
1379 {0x0000a1e4, 0x00000000},
1380 {0x0000a1e8, 0x00000000},
1381 {0x0000a1ec, 0x00000000},
1382 {0x0000a1f0, 0x00000396},
1383 {0x0000a1f4, 0x00000396},
1384 {0x0000a1f8, 0x00000396},
1385 {0x0000a1fc, 0x00000196},
1386 {0x0000b000, 0x00010000},
1387 {0x0000b004, 0x00030002},
1388 {0x0000b008, 0x00050004},
1389 {0x0000b00c, 0x00810080},
1390 {0x0000b010, 0x00830082},
1391 {0x0000b014, 0x01810180},
1392 {0x0000b018, 0x01830182},
1393 {0x0000b01c, 0x01850184},
1394 {0x0000b020, 0x02810280},
1395 {0x0000b024, 0x02830282},
1396 {0x0000b028, 0x02850284},
1397 {0x0000b02c, 0x02890288},
1398 {0x0000b030, 0x028b028a},
1399 {0x0000b034, 0x0388028c},
1400 {0x0000b038, 0x038a0389},
1401 {0x0000b03c, 0x038c038b},
1402 {0x0000b040, 0x0390038d},
1403 {0x0000b044, 0x03920391},
1404 {0x0000b048, 0x03940393},
1405 {0x0000b04c, 0x03960395},
1406 {0x0000b050, 0x00000000},
1407 {0x0000b054, 0x00000000},
1408 {0x0000b058, 0x00000000},
1409 {0x0000b05c, 0x00000000},
1410 {0x0000b060, 0x00000000},
1411 {0x0000b064, 0x00000000},
1412 {0x0000b068, 0x00000000},
1413 {0x0000b06c, 0x00000000},
1414 {0x0000b070, 0x00000000},
1415 {0x0000b074, 0x00000000},
1416 {0x0000b078, 0x00000000},
1417 {0x0000b07c, 0x00000000},
1418 {0x0000b080, 0x32323232},
1419 {0x0000b084, 0x2f2f3232},
1420 {0x0000b088, 0x23282a2d},
1421 {0x0000b08c, 0x1c1e2123},
1422 {0x0000b090, 0x14171919},
1423 {0x0000b094, 0x0e0e1214},
1424 {0x0000b098, 0x03050707},
1425 {0x0000b09c, 0x00030303},
1426 {0x0000b0a0, 0x00000000},
1427 {0x0000b0a4, 0x00000000},
1428 {0x0000b0a8, 0x00000000},
1429 {0x0000b0ac, 0x00000000},
1430 {0x0000b0b0, 0x00000000},
1431 {0x0000b0b4, 0x00000000},
1432 {0x0000b0b8, 0x00000000},
1433 {0x0000b0bc, 0x00000000},
1434 {0x0000b0c0, 0x003f0020},
1435 {0x0000b0c4, 0x00400041},
1436 {0x0000b0c8, 0x0140005f},
1437 {0x0000b0cc, 0x0160015f},
1438 {0x0000b0d0, 0x017e017f},
1439 {0x0000b0d4, 0x02410242},
1440 {0x0000b0d8, 0x025f0240},
1441 {0x0000b0dc, 0x027f0260},
1442 {0x0000b0e0, 0x0341027e},
1443 {0x0000b0e4, 0x035f0340},
1444 {0x0000b0e8, 0x037f0360},
1445 {0x0000b0ec, 0x04400441},
1446 {0x0000b0f0, 0x0460045f},
1447 {0x0000b0f4, 0x0541047f},
1448 {0x0000b0f8, 0x055f0540},
1449 {0x0000b0fc, 0x057f0560},
1450 {0x0000b100, 0x06400641},
1451 {0x0000b104, 0x0660065f},
1452 {0x0000b108, 0x067e067f},
1453 {0x0000b10c, 0x07410742},
1454 {0x0000b110, 0x075f0740},
1455 {0x0000b114, 0x077f0760},
1456 {0x0000b118, 0x07800781},
1457 {0x0000b11c, 0x07a0079f},
1458 {0x0000b120, 0x07c107bf},
1459 {0x0000b124, 0x000007c0},
1460 {0x0000b128, 0x00000000},
1461 {0x0000b12c, 0x00000000},
1462 {0x0000b130, 0x00000000},
1463 {0x0000b134, 0x00000000},
1464 {0x0000b138, 0x00000000},
1465 {0x0000b13c, 0x00000000},
1466 {0x0000b140, 0x003f0020},
1467 {0x0000b144, 0x00400041},
1468 {0x0000b148, 0x0140005f},
1469 {0x0000b14c, 0x0160015f},
1470 {0x0000b150, 0x017e017f},
1471 {0x0000b154, 0x02410242},
1472 {0x0000b158, 0x025f0240},
1473 {0x0000b15c, 0x027f0260},
1474 {0x0000b160, 0x0341027e},
1475 {0x0000b164, 0x035f0340},
1476 {0x0000b168, 0x037f0360},
1477 {0x0000b16c, 0x04400441},
1478 {0x0000b170, 0x0460045f},
1479 {0x0000b174, 0x0541047f},
1480 {0x0000b178, 0x055f0540},
1481 {0x0000b17c, 0x057f0560},
1482 {0x0000b180, 0x06400641},
1483 {0x0000b184, 0x0660065f},
1484 {0x0000b188, 0x067e067f},
1485 {0x0000b18c, 0x07410742},
1486 {0x0000b190, 0x075f0740},
1487 {0x0000b194, 0x077f0760},
1488 {0x0000b198, 0x07800781},
1489 {0x0000b19c, 0x07a0079f},
1490 {0x0000b1a0, 0x07c107bf},
1491 {0x0000b1a4, 0x000007c0},
1492 {0x0000b1a8, 0x00000000},
1493 {0x0000b1ac, 0x00000000},
1494 {0x0000b1b0, 0x00000000},
1495 {0x0000b1b4, 0x00000000},
1496 {0x0000b1b8, 0x00000000},
1497 {0x0000b1bc, 0x00000000},
1498 {0x0000b1c0, 0x00000000},
1499 {0x0000b1c4, 0x00000000},
1500 {0x0000b1c8, 0x00000000},
1501 {0x0000b1cc, 0x00000000},
1502 {0x0000b1d0, 0x00000000},
1503 {0x0000b1d4, 0x00000000},
1504 {0x0000b1d8, 0x00000000},
1505 {0x0000b1dc, 0x00000000},
1506 {0x0000b1e0, 0x00000000},
1507 {0x0000b1e4, 0x00000000},
1508 {0x0000b1e8, 0x00000000},
1509 {0x0000b1ec, 0x00000000},
1510 {0x0000b1f0, 0x00000396},
1511 {0x0000b1f4, 0x00000396},
1512 {0x0000b1f8, 0x00000396},
1513 {0x0000b1fc, 0x00000196},
1514};
1515 1339
1516static const u32 ar9340_1p0_soc_preamble[][2] = { 1340static const u32 ar9340_1p0_soc_preamble[][2] = {
1517 /* Addr allmodes */ 1341 /* Addr allmodes */
1518 {0x000040a4, 0x00a0c1c9},
1519 {0x00007008, 0x00000000}, 1342 {0x00007008, 0x00000000},
1520 {0x00007020, 0x00000000}, 1343 {0x00007020, 0x00000000},
1521 {0x00007034, 0x00000002}, 1344 {0x00007034, 0x00000002},
1522 {0x00007038, 0x000004c2}, 1345 {0x00007038, 0x000004c2},
1523}; 1346};
1524 1347
1525#endif 1348#endif /* INITVALS_9340_H */
diff --git a/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
index 1d6658e139b..4ef7dcccaa2 100644
--- a/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
@@ -1,5 +1,6 @@
1/* 1/*
2 * Copyright (c) 2010 Atheros Communications Inc. 2 * Copyright (c) 2010-2011 Atheros Communications Inc.
3 * Copyright (c) 2011-2012 Qualcomm Atheros Inc.
3 * 4 *
4 * Permission to use, copy, modify, and/or distribute this software for any 5 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 6 * purpose with or without fee is hereby granted, provided that the above
@@ -52,7 +53,7 @@ static const u32 ar9462_2p0_baseband_postamble[][5] = {
52 {0x00009e04, 0x001c2020, 0x001c2020, 0x001c2020, 0x001c2020}, 53 {0x00009e04, 0x001c2020, 0x001c2020, 0x001c2020, 0x001c2020},
53 {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000d8}, 54 {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000d8},
54 {0x00009e10, 0x92c88d2e, 0x7ec88d2e, 0x7ec84d2e, 0x7ec86d2e}, 55 {0x00009e10, 0x92c88d2e, 0x7ec88d2e, 0x7ec84d2e, 0x7ec86d2e},
55 {0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3376605e, 0x33795d5e}, 56 {0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3376605e, 0x32395d5e},
56 {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 57 {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
57 {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c}, 58 {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
58 {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce}, 59 {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
@@ -61,7 +62,7 @@ static const u32 ar9462_2p0_baseband_postamble[][5] = {
61 {0x00009e44, 0x62321e27, 0x62321e27, 0xfe291e27, 0xfe291e27}, 62 {0x00009e44, 0x62321e27, 0x62321e27, 0xfe291e27, 0xfe291e27},
62 {0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012}, 63 {0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
63 {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000}, 64 {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
64 {0x0000a204, 0x013187c0, 0x013187c4, 0x013187c4, 0x013187c0}, 65 {0x0000a204, 0x01318fc0, 0x01318fc4, 0x01318fc4, 0x01318fc0},
65 {0x0000a208, 0x00000104, 0x00000104, 0x00000004, 0x00000004}, 66 {0x0000a208, 0x00000104, 0x00000104, 0x00000004, 0x00000004},
66 {0x0000a22c, 0x01026a2f, 0x01026a27, 0x01026a2f, 0x01026a2f}, 67 {0x0000a22c, 0x01026a2f, 0x01026a27, 0x01026a2f, 0x01026a2f},
67 {0x0000a230, 0x0000400a, 0x00004014, 0x00004016, 0x0000400b}, 68 {0x0000a230, 0x0000400a, 0x00004014, 0x00004016, 0x0000400b},
@@ -958,7 +959,7 @@ static const u32 ar9462_2p0_radio_core[][2] = {
958 {0x0001604c, 0x2699e04f}, 959 {0x0001604c, 0x2699e04f},
959 {0x00016050, 0x6db6db6c}, 960 {0x00016050, 0x6db6db6c},
960 {0x00016058, 0x6c200000}, 961 {0x00016058, 0x6c200000},
961 {0x00016080, 0x00040000}, 962 {0x00016080, 0x000c0000},
962 {0x00016084, 0x9a68048c}, 963 {0x00016084, 0x9a68048c},
963 {0x00016088, 0x54214514}, 964 {0x00016088, 0x54214514},
964 {0x0001608c, 0x1203040b}, 965 {0x0001608c, 0x1203040b},
@@ -981,7 +982,7 @@ static const u32 ar9462_2p0_radio_core[][2] = {
981 {0x00016144, 0x02084080}, 982 {0x00016144, 0x02084080},
982 {0x00016148, 0x000080c0}, 983 {0x00016148, 0x000080c0},
983 {0x00016280, 0x050a0001}, 984 {0x00016280, 0x050a0001},
984 {0x00016284, 0x3d841400}, 985 {0x00016284, 0x3d841418},
985 {0x00016288, 0x00000000}, 986 {0x00016288, 0x00000000},
986 {0x0001628c, 0xe3000000}, 987 {0x0001628c, 0xe3000000},
987 {0x00016290, 0xa1005080}, 988 {0x00016290, 0xa1005080},
@@ -1007,6 +1008,7 @@ static const u32 ar9462_2p0_radio_core[][2] = {
1007 1008
1008static const u32 ar9462_2p0_soc_preamble[][2] = { 1009static const u32 ar9462_2p0_soc_preamble[][2] = {
1009 /* Addr allmodes */ 1010 /* Addr allmodes */
1011 {0x000040a4, 0x00a0c1c9},
1010 {0x00007020, 0x00000000}, 1012 {0x00007020, 0x00000000},
1011 {0x00007034, 0x00000002}, 1013 {0x00007034, 0x00000002},
1012 {0x00007038, 0x000004c2}, 1014 {0x00007038, 0x000004c2},
diff --git a/drivers/net/wireless/ath/ath9k/ar9485_initvals.h b/drivers/net/wireless/ath/ath9k/ar9485_initvals.h
index d16d029f81a..fb4497fc7a3 100644
--- a/drivers/net/wireless/ath/ath9k/ar9485_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9485_initvals.h
@@ -1,5 +1,6 @@
1/* 1/*
2 * Copyright (c) 2010-2011 Atheros Communications Inc. 2 * Copyright (c) 2010-2011 Atheros Communications Inc.
3 * Copyright (c) 2011-2012 Qualcomm Atheros Inc.
3 * 4 *
4 * Permission to use, copy, modify, and/or distribute this software for any 5 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 6 * purpose with or without fee is hereby granted, provided that the above
@@ -17,360 +18,151 @@
17#ifndef INITVALS_9485_H 18#ifndef INITVALS_9485_H
18#define INITVALS_9485_H 19#define INITVALS_9485_H
19 20
20static const u32 ar9485_1_1_mac_core[][2] = { 21/* AR9485 1.0 */
21 /* Addr allmodes */
22 {0x00000008, 0x00000000},
23 {0x00000030, 0x00020085},
24 {0x00000034, 0x00000005},
25 {0x00000040, 0x00000000},
26 {0x00000044, 0x00000000},
27 {0x00000048, 0x00000008},
28 {0x0000004c, 0x00000010},
29 {0x00000050, 0x00000000},
30 {0x00001040, 0x002ffc0f},
31 {0x00001044, 0x002ffc0f},
32 {0x00001048, 0x002ffc0f},
33 {0x0000104c, 0x002ffc0f},
34 {0x00001050, 0x002ffc0f},
35 {0x00001054, 0x002ffc0f},
36 {0x00001058, 0x002ffc0f},
37 {0x0000105c, 0x002ffc0f},
38 {0x00001060, 0x002ffc0f},
39 {0x00001064, 0x002ffc0f},
40 {0x000010f0, 0x00000100},
41 {0x00001270, 0x00000000},
42 {0x000012b0, 0x00000000},
43 {0x000012f0, 0x00000000},
44 {0x0000143c, 0x00000000},
45 {0x0000147c, 0x00000000},
46 {0x00008000, 0x00000000},
47 {0x00008004, 0x00000000},
48 {0x00008008, 0x00000000},
49 {0x0000800c, 0x00000000},
50 {0x00008018, 0x00000000},
51 {0x00008020, 0x00000000},
52 {0x00008038, 0x00000000},
53 {0x0000803c, 0x00000000},
54 {0x00008040, 0x00000000},
55 {0x00008044, 0x00000000},
56 {0x00008048, 0x00000000},
57 {0x0000804c, 0xffffffff},
58 {0x00008054, 0x00000000},
59 {0x00008058, 0x00000000},
60 {0x0000805c, 0x000fc78f},
61 {0x00008060, 0x0000000f},
62 {0x00008064, 0x00000000},
63 {0x00008070, 0x00000310},
64 {0x00008074, 0x00000020},
65 {0x00008078, 0x00000000},
66 {0x0000809c, 0x0000000f},
67 {0x000080a0, 0x00000000},
68 {0x000080a4, 0x02ff0000},
69 {0x000080a8, 0x0e070605},
70 {0x000080ac, 0x0000000d},
71 {0x000080b0, 0x00000000},
72 {0x000080b4, 0x00000000},
73 {0x000080b8, 0x00000000},
74 {0x000080bc, 0x00000000},
75 {0x000080c0, 0x2a800000},
76 {0x000080c4, 0x06900168},
77 {0x000080c8, 0x13881c22},
78 {0x000080cc, 0x01f40000},
79 {0x000080d0, 0x00252500},
80 {0x000080d4, 0x00a00000},
81 {0x000080d8, 0x00400000},
82 {0x000080dc, 0x00000000},
83 {0x000080e0, 0xffffffff},
84 {0x000080e4, 0x0000ffff},
85 {0x000080e8, 0x3f3f3f3f},
86 {0x000080ec, 0x00000000},
87 {0x000080f0, 0x00000000},
88 {0x000080f4, 0x00000000},
89 {0x000080fc, 0x00020000},
90 {0x00008100, 0x00000000},
91 {0x00008108, 0x00000052},
92 {0x0000810c, 0x00000000},
93 {0x00008110, 0x00000000},
94 {0x00008114, 0x000007ff},
95 {0x00008118, 0x000000aa},
96 {0x0000811c, 0x00003210},
97 {0x00008124, 0x00000000},
98 {0x00008128, 0x00000000},
99 {0x0000812c, 0x00000000},
100 {0x00008130, 0x00000000},
101 {0x00008134, 0x00000000},
102 {0x00008138, 0x00000000},
103 {0x0000813c, 0x0000ffff},
104 {0x00008144, 0xffffffff},
105 {0x00008168, 0x00000000},
106 {0x0000816c, 0x00000000},
107 {0x00008170, 0x18486200},
108 {0x00008174, 0x33332210},
109 {0x00008178, 0x00000000},
110 {0x0000817c, 0x00020000},
111 {0x000081c0, 0x00000000},
112 {0x000081c4, 0x33332210},
113 {0x000081d4, 0x00000000},
114 {0x000081ec, 0x00000000},
115 {0x000081f0, 0x00000000},
116 {0x000081f4, 0x00000000},
117 {0x000081f8, 0x00000000},
118 {0x000081fc, 0x00000000},
119 {0x00008240, 0x00100000},
120 {0x00008244, 0x0010f400},
121 {0x00008248, 0x00000800},
122 {0x0000824c, 0x0001e800},
123 {0x00008250, 0x00000000},
124 {0x00008254, 0x00000000},
125 {0x00008258, 0x00000000},
126 {0x0000825c, 0x40000000},
127 {0x00008260, 0x00080922},
128 {0x00008264, 0x9ca00010},
129 {0x00008268, 0xffffffff},
130 {0x0000826c, 0x0000ffff},
131 {0x00008270, 0x00000000},
132 {0x00008274, 0x40000000},
133 {0x00008278, 0x003e4180},
134 {0x0000827c, 0x00000004},
135 {0x00008284, 0x0000002c},
136 {0x00008288, 0x0000002c},
137 {0x0000828c, 0x000000ff},
138 {0x00008294, 0x00000000},
139 {0x00008298, 0x00000000},
140 {0x0000829c, 0x00000000},
141 {0x00008300, 0x00000140},
142 {0x00008314, 0x00000000},
143 {0x0000831c, 0x0000010d},
144 {0x00008328, 0x00000000},
145 {0x0000832c, 0x00000007},
146 {0x00008330, 0x00000302},
147 {0x00008334, 0x00000700},
148 {0x00008338, 0x00ff0000},
149 {0x0000833c, 0x02400000},
150 {0x00008340, 0x000107ff},
151 {0x00008344, 0xa248105b},
152 {0x00008348, 0x008f0000},
153 {0x0000835c, 0x00000000},
154 {0x00008360, 0xffffffff},
155 {0x00008364, 0xffffffff},
156 {0x00008368, 0x00000000},
157 {0x00008370, 0x00000000},
158 {0x00008374, 0x000000ff},
159 {0x00008378, 0x00000000},
160 {0x0000837c, 0x00000000},
161 {0x00008380, 0xffffffff},
162 {0x00008384, 0xffffffff},
163 {0x00008390, 0xffffffff},
164 {0x00008394, 0xffffffff},
165 {0x00008398, 0x00000000},
166 {0x0000839c, 0x00000000},
167 {0x000083a0, 0x00000000},
168 {0x000083a4, 0x0000fa14},
169 {0x000083a8, 0x000f0c00},
170 {0x000083ac, 0x33332210},
171 {0x000083b0, 0x33332210},
172 {0x000083b4, 0x33332210},
173 {0x000083b8, 0x33332210},
174 {0x000083bc, 0x00000000},
175 {0x000083c0, 0x00000000},
176 {0x000083c4, 0x00000000},
177 {0x000083c8, 0x00000000},
178 {0x000083cc, 0x00000200},
179 {0x000083d0, 0x000301ff},
180};
181 22
182static const u32 ar9485_1_1_baseband_core[][2] = { 23#define ar9485_1_1_mac_postamble ar9300_2p2_mac_postamble
183 /* Addr allmodes */
184 {0x00009800, 0xafe68e30},
185 {0x00009804, 0xfd14e000},
186 {0x00009808, 0x9c0a8f6b},
187 {0x0000980c, 0x04800000},
188 {0x00009814, 0x9280c00a},
189 {0x00009818, 0x00000000},
190 {0x0000981c, 0x00020028},
191 {0x00009834, 0x5f3ca3de},
192 {0x00009838, 0x0108ecff},
193 {0x0000983c, 0x14750600},
194 {0x00009880, 0x201fff00},
195 {0x00009884, 0x00001042},
196 {0x000098a4, 0x00200400},
197 {0x000098b0, 0x52440bbe},
198 {0x000098d0, 0x004b6a8e},
199 {0x000098d4, 0x00000820},
200 {0x000098dc, 0x00000000},
201 {0x000098f0, 0x00000000},
202 {0x000098f4, 0x00000000},
203 {0x00009c04, 0x00000000},
204 {0x00009c08, 0x03200000},
205 {0x00009c0c, 0x00000000},
206 {0x00009c10, 0x00000000},
207 {0x00009c14, 0x00046384},
208 {0x00009c18, 0x05b6b440},
209 {0x00009c1c, 0x00b6b440},
210 {0x00009d00, 0xc080a333},
211 {0x00009d04, 0x40206c10},
212 {0x00009d08, 0x009c4060},
213 {0x00009d0c, 0x1883800a},
214 {0x00009d10, 0x01834061},
215 {0x00009d14, 0x00c00400},
216 {0x00009d18, 0x00000000},
217 {0x00009d1c, 0x00000000},
218 {0x00009e08, 0x0038233c},
219 {0x00009e24, 0x9927b515},
220 {0x00009e28, 0x12ef0200},
221 {0x00009e30, 0x06336f77},
222 {0x00009e34, 0x6af6532f},
223 {0x00009e38, 0x0cc80c00},
224 {0x00009e40, 0x0d261820},
225 {0x00009e4c, 0x00001004},
226 {0x00009e50, 0x00ff03f1},
227 {0x00009fc0, 0x80be4788},
228 {0x00009fc4, 0x0001efb5},
229 {0x00009fcc, 0x40000014},
230 {0x0000a20c, 0x00000000},
231 {0x0000a210, 0x00000000},
232 {0x0000a220, 0x00000000},
233 {0x0000a224, 0x00000000},
234 {0x0000a228, 0x10002310},
235 {0x0000a23c, 0x00000000},
236 {0x0000a244, 0x0c000000},
237 {0x0000a2a0, 0x00000001},
238 {0x0000a2c0, 0x00000001},
239 {0x0000a2c8, 0x00000000},
240 {0x0000a2cc, 0x18c43433},
241 {0x0000a2d4, 0x00000000},
242 {0x0000a2dc, 0x00000000},
243 {0x0000a2e0, 0x00000000},
244 {0x0000a2e4, 0x00000000},
245 {0x0000a2e8, 0x00000000},
246 {0x0000a2ec, 0x00000000},
247 {0x0000a2f0, 0x00000000},
248 {0x0000a2f4, 0x00000000},
249 {0x0000a2f8, 0x00000000},
250 {0x0000a344, 0x00000000},
251 {0x0000a34c, 0x00000000},
252 {0x0000a350, 0x0000a000},
253 {0x0000a364, 0x00000000},
254 {0x0000a370, 0x00000000},
255 {0x0000a390, 0x00000001},
256 {0x0000a394, 0x00000444},
257 {0x0000a398, 0x001f0e0f},
258 {0x0000a39c, 0x0075393f},
259 {0x0000a3a0, 0xb79f6427},
260 {0x0000a3a4, 0x000000ff},
261 {0x0000a3a8, 0x3b3b3b3b},
262 {0x0000a3ac, 0x2f2f2f2f},
263 {0x0000a3c0, 0x20202020},
264 {0x0000a3c4, 0x22222220},
265 {0x0000a3c8, 0x20200020},
266 {0x0000a3cc, 0x20202020},
267 {0x0000a3d0, 0x20202020},
268 {0x0000a3d4, 0x20202020},
269 {0x0000a3d8, 0x20202020},
270 {0x0000a3dc, 0x20202020},
271 {0x0000a3e0, 0x20202020},
272 {0x0000a3e4, 0x20202020},
273 {0x0000a3e8, 0x20202020},
274 {0x0000a3ec, 0x20202020},
275 {0x0000a3f0, 0x00000000},
276 {0x0000a3f4, 0x00000006},
277 {0x0000a3f8, 0x0cdbd380},
278 {0x0000a3fc, 0x000f0f01},
279 {0x0000a400, 0x8fa91f01},
280 {0x0000a404, 0x00000000},
281 {0x0000a408, 0x0e79e5c6},
282 {0x0000a40c, 0x00820820},
283 {0x0000a414, 0x1ce739cf},
284 {0x0000a418, 0x2d0019ce},
285 {0x0000a41c, 0x1ce739ce},
286 {0x0000a420, 0x000001ce},
287 {0x0000a424, 0x1ce739ce},
288 {0x0000a428, 0x000001ce},
289 {0x0000a42c, 0x1ce739ce},
290 {0x0000a430, 0x1ce739ce},
291 {0x0000a434, 0x00000000},
292 {0x0000a438, 0x00001801},
293 {0x0000a43c, 0x00000000},
294 {0x0000a440, 0x00000000},
295 {0x0000a444, 0x00000000},
296 {0x0000a448, 0x04000000},
297 {0x0000a44c, 0x00000001},
298 {0x0000a450, 0x00010000},
299 {0x0000a5c4, 0xbfad9d74},
300 {0x0000a5c8, 0x0048060a},
301 {0x0000a5cc, 0x00000637},
302 {0x0000a760, 0x03020100},
303 {0x0000a764, 0x09080504},
304 {0x0000a768, 0x0d0c0b0a},
305 {0x0000a76c, 0x13121110},
306 {0x0000a770, 0x31301514},
307 {0x0000a774, 0x35343332},
308 {0x0000a778, 0x00000036},
309 {0x0000a780, 0x00000838},
310 {0x0000a7c0, 0x00000000},
311 {0x0000a7c4, 0xfffffffc},
312 {0x0000a7c8, 0x00000000},
313 {0x0000a7cc, 0x00000000},
314 {0x0000a7d0, 0x00000000},
315 {0x0000a7d4, 0x00000004},
316 {0x0000a7dc, 0x00000000},
317};
318 24
319static const u32 ar9485Common_1_1[][2] = { 25static const u32 ar9485_1_1_pcie_phy_pll_on_clkreq_disable_L1[][2] = {
320 /* Addr allmodes */ 26 /* Addr allmodes */
321 {0x00007010, 0x00000022}, 27 {0x00018c00, 0x18012e5e},
322 {0x00007020, 0x00000000}, 28 {0x00018c04, 0x000801d8},
323 {0x00007034, 0x00000002}, 29 {0x00018c08, 0x0000080c},
324 {0x00007038, 0x000004c2},
325}; 30};
326 31
327static const u32 ar9485_1_1_baseband_postamble[][5] = { 32static const u32 ar9485Common_wo_xlna_rx_gain_1_1[][2] = {
328 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ 33 /* Addr allmodes */
329 {0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8005, 0xd00a8005}, 34 {0x0000a000, 0x00060005},
330 {0x00009820, 0x206a002e, 0x206a002e, 0x206a002e, 0x206a002e}, 35 {0x0000a004, 0x00810080},
331 {0x00009824, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0}, 36 {0x0000a008, 0x00830082},
332 {0x00009828, 0x06903081, 0x06903081, 0x06903881, 0x06903881}, 37 {0x0000a00c, 0x00850084},
333 {0x0000982c, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4}, 38 {0x0000a010, 0x01820181},
334 {0x00009830, 0x0000059c, 0x0000059c, 0x0000059c, 0x0000059c}, 39 {0x0000a014, 0x01840183},
335 {0x00009c00, 0x00000044, 0x00000044, 0x00000044, 0x00000044}, 40 {0x0000a018, 0x01880185},
336 {0x00009e00, 0x0372161e, 0x0372161e, 0x037216a0, 0x037216a0}, 41 {0x0000a01c, 0x018a0189},
337 {0x00009e04, 0x00182020, 0x00182020, 0x00182020, 0x00182020}, 42 {0x0000a020, 0x02850284},
338 {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000e2}, 43 {0x0000a024, 0x02890288},
339 {0x00009e10, 0x7ec88d2e, 0x7ec88d2e, 0x7ec80d2e, 0x7ec80d2e}, 44 {0x0000a028, 0x028b028a},
340 {0x00009e14, 0x31395d5e, 0x3139605e, 0x3139605e, 0x31395d5e}, 45 {0x0000a02c, 0x03850384},
341 {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 46 {0x0000a030, 0x03890388},
342 {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c}, 47 {0x0000a034, 0x038b038a},
343 {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce}, 48 {0x0000a038, 0x038d038c},
344 {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021}, 49 {0x0000a03c, 0x03910390},
345 {0x00009e3c, 0xcf946220, 0xcf946220, 0xcf946222, 0xcf946222}, 50 {0x0000a040, 0x03930392},
346 {0x00009e44, 0x02321e27, 0x02321e27, 0x02282324, 0x02282324}, 51 {0x0000a044, 0x03950394},
347 {0x00009e48, 0x5030201a, 0x5030201a, 0x50302010, 0x50302010}, 52 {0x0000a048, 0x00000396},
348 {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000}, 53 {0x0000a04c, 0x00000000},
349 {0x0000a204, 0x01303fc0, 0x01303fc4, 0x01303fc4, 0x01303fc0}, 54 {0x0000a050, 0x00000000},
350 {0x0000a208, 0x00000104, 0x00000104, 0x00000004, 0x00000004}, 55 {0x0000a054, 0x00000000},
351 {0x0000a230, 0x0000400a, 0x00004014, 0x00004016, 0x0000400b}, 56 {0x0000a058, 0x00000000},
352 {0x0000a234, 0x10000fff, 0x10000fff, 0x10000fff, 0x10000fff}, 57 {0x0000a05c, 0x00000000},
353 {0x0000a238, 0xffb81018, 0xffb81018, 0xffb81018, 0xffb81018}, 58 {0x0000a060, 0x00000000},
354 {0x0000a250, 0x00000000, 0x00000000, 0x00000210, 0x00000108}, 59 {0x0000a064, 0x00000000},
355 {0x0000a254, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898}, 60 {0x0000a068, 0x00000000},
356 {0x0000a258, 0x02020002, 0x02020002, 0x02020002, 0x02020002}, 61 {0x0000a06c, 0x00000000},
357 {0x0000a25c, 0x01000e0e, 0x01000e0e, 0x01000e0e, 0x01000e0e}, 62 {0x0000a070, 0x00000000},
358 {0x0000a260, 0x3a021501, 0x3a021501, 0x3a021501, 0x3a021501}, 63 {0x0000a074, 0x00000000},
359 {0x0000a264, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e}, 64 {0x0000a078, 0x00000000},
360 {0x0000a280, 0x00000007, 0x00000007, 0x0000000b, 0x0000000b}, 65 {0x0000a07c, 0x00000000},
361 {0x0000a284, 0x00000000, 0x00000000, 0x000002a0, 0x000002a0}, 66 {0x0000a080, 0x28282828},
362 {0x0000a288, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 67 {0x0000a084, 0x28282828},
363 {0x0000a28c, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 68 {0x0000a088, 0x28282828},
364 {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18}, 69 {0x0000a08c, 0x28282828},
365 {0x0000a2d0, 0x00071981, 0x00071981, 0x00071982, 0x00071982}, 70 {0x0000a090, 0x28282828},
366 {0x0000a2d8, 0xf999a83a, 0xf999a83a, 0xf999a83a, 0xf999a83a}, 71 {0x0000a094, 0x24242428},
367 {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 72 {0x0000a098, 0x171e1e1e},
368 {0x0000be04, 0x00802020, 0x00802020, 0x00802020, 0x00802020}, 73 {0x0000a09c, 0x02020b0b},
369 {0x0000be18, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 74 {0x0000a0a0, 0x02020202},
75 {0x0000a0a4, 0x00000000},
76 {0x0000a0a8, 0x00000000},
77 {0x0000a0ac, 0x00000000},
78 {0x0000a0b0, 0x00000000},
79 {0x0000a0b4, 0x00000000},
80 {0x0000a0b8, 0x00000000},
81 {0x0000a0bc, 0x00000000},
82 {0x0000a0c0, 0x22072208},
83 {0x0000a0c4, 0x22052206},
84 {0x0000a0c8, 0x22032204},
85 {0x0000a0cc, 0x22012202},
86 {0x0000a0d0, 0x221f2200},
87 {0x0000a0d4, 0x221d221e},
88 {0x0000a0d8, 0x33023303},
89 {0x0000a0dc, 0x33003301},
90 {0x0000a0e0, 0x331e331f},
91 {0x0000a0e4, 0x4402331d},
92 {0x0000a0e8, 0x44004401},
93 {0x0000a0ec, 0x441e441f},
94 {0x0000a0f0, 0x55025503},
95 {0x0000a0f4, 0x55005501},
96 {0x0000a0f8, 0x551e551f},
97 {0x0000a0fc, 0x6602551d},
98 {0x0000a100, 0x66006601},
99 {0x0000a104, 0x661e661f},
100 {0x0000a108, 0x7703661d},
101 {0x0000a10c, 0x77017702},
102 {0x0000a110, 0x00007700},
103 {0x0000a114, 0x00000000},
104 {0x0000a118, 0x00000000},
105 {0x0000a11c, 0x00000000},
106 {0x0000a120, 0x00000000},
107 {0x0000a124, 0x00000000},
108 {0x0000a128, 0x00000000},
109 {0x0000a12c, 0x00000000},
110 {0x0000a130, 0x00000000},
111 {0x0000a134, 0x00000000},
112 {0x0000a138, 0x00000000},
113 {0x0000a13c, 0x00000000},
114 {0x0000a140, 0x001f0000},
115 {0x0000a144, 0x111f1100},
116 {0x0000a148, 0x111d111e},
117 {0x0000a14c, 0x111b111c},
118 {0x0000a150, 0x22032204},
119 {0x0000a154, 0x22012202},
120 {0x0000a158, 0x221f2200},
121 {0x0000a15c, 0x221d221e},
122 {0x0000a160, 0x33013302},
123 {0x0000a164, 0x331f3300},
124 {0x0000a168, 0x4402331e},
125 {0x0000a16c, 0x44004401},
126 {0x0000a170, 0x441e441f},
127 {0x0000a174, 0x55015502},
128 {0x0000a178, 0x551f5500},
129 {0x0000a17c, 0x6602551e},
130 {0x0000a180, 0x66006601},
131 {0x0000a184, 0x661e661f},
132 {0x0000a188, 0x7703661d},
133 {0x0000a18c, 0x77017702},
134 {0x0000a190, 0x00007700},
135 {0x0000a194, 0x00000000},
136 {0x0000a198, 0x00000000},
137 {0x0000a19c, 0x00000000},
138 {0x0000a1a0, 0x00000000},
139 {0x0000a1a4, 0x00000000},
140 {0x0000a1a8, 0x00000000},
141 {0x0000a1ac, 0x00000000},
142 {0x0000a1b0, 0x00000000},
143 {0x0000a1b4, 0x00000000},
144 {0x0000a1b8, 0x00000000},
145 {0x0000a1bc, 0x00000000},
146 {0x0000a1c0, 0x00000000},
147 {0x0000a1c4, 0x00000000},
148 {0x0000a1c8, 0x00000000},
149 {0x0000a1cc, 0x00000000},
150 {0x0000a1d0, 0x00000000},
151 {0x0000a1d4, 0x00000000},
152 {0x0000a1d8, 0x00000000},
153 {0x0000a1dc, 0x00000000},
154 {0x0000a1e0, 0x00000000},
155 {0x0000a1e4, 0x00000000},
156 {0x0000a1e8, 0x00000000},
157 {0x0000a1ec, 0x00000000},
158 {0x0000a1f0, 0x00000396},
159 {0x0000a1f4, 0x00000396},
160 {0x0000a1f8, 0x00000396},
161 {0x0000a1fc, 0x00000296},
370}; 162};
371 163
372static const u32 ar9485Modes_high_ob_db_tx_gain_1_1[][5] = { 164static const u32 ar9485Modes_high_power_tx_gain_1_1[][5] = {
373 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ 165 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
374 {0x000098bc, 0x00000002, 0x00000002, 0x00000002, 0x00000002}, 166 {0x000098bc, 0x00000002, 0x00000002, 0x00000002, 0x00000002},
375 {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8}, 167 {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8},
376 {0x0000a458, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 168 {0x0000a458, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
@@ -442,102 +234,34 @@ static const u32 ar9485Modes_high_ob_db_tx_gain_1_1[][5] = {
442 {0x00016048, 0x6c924260, 0x6c924260, 0x6c924260, 0x6c924260}, 234 {0x00016048, 0x6c924260, 0x6c924260, 0x6c924260, 0x6c924260},
443}; 235};
444 236
445static const u32 ar9485_modes_lowest_ob_db_tx_gain_1_1[][5] = { 237#define ar9485Modes_high_ob_db_tx_gain_1_1 ar9485Modes_high_power_tx_gain_1_1
446 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
447 {0x000098bc, 0x00000002, 0x00000002, 0x00000002, 0x00000002},
448 {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8},
449 {0x0000a458, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
450 {0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
451 {0x0000a504, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
452 {0x0000a508, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004},
453 {0x0000a50c, 0x11062202, 0x11062202, 0x0d000200, 0x0d000200},
454 {0x0000a510, 0x17022e00, 0x17022e00, 0x11000202, 0x11000202},
455 {0x0000a514, 0x1d000ec2, 0x1d000ec2, 0x15000400, 0x15000400},
456 {0x0000a518, 0x25020ec0, 0x25020ec0, 0x19000402, 0x19000402},
457 {0x0000a51c, 0x2b020ec3, 0x2b020ec3, 0x1d000404, 0x1d000404},
458 {0x0000a520, 0x2f001f04, 0x2f001f04, 0x21000603, 0x21000603},
459 {0x0000a524, 0x35001fc4, 0x35001fc4, 0x25000605, 0x25000605},
460 {0x0000a528, 0x3c022f04, 0x3c022f04, 0x2a000a03, 0x2a000a03},
461 {0x0000a52c, 0x41023e85, 0x41023e85, 0x2c000a04, 0x2c000a04},
462 {0x0000a530, 0x48023ec6, 0x48023ec6, 0x34000e20, 0x34000e20},
463 {0x0000a534, 0x4d023f01, 0x4d023f01, 0x35000e21, 0x35000e21},
464 {0x0000a538, 0x53023f4b, 0x53023f4b, 0x43000e62, 0x43000e62},
465 {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x45000e63, 0x45000e63},
466 {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x49000e65, 0x49000e65},
467 {0x0000a544, 0x6502feca, 0x6502feca, 0x4b000e66, 0x4b000e66},
468 {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x4d001645, 0x4d001645},
469 {0x0000a54c, 0x7203feca, 0x7203feca, 0x51001865, 0x51001865},
470 {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x55001a86, 0x55001a86},
471 {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x57001ce9, 0x57001ce9},
472 {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x5a001ceb, 0x5a001ceb},
473 {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x5e001eeb, 0x5e001eeb},
474 {0x0000a560, 0x900fff0b, 0x900fff0b, 0x5e001eeb, 0x5e001eeb},
475 {0x0000a564, 0x960fffcb, 0x960fffcb, 0x5e001eeb, 0x5e001eeb},
476 {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
477 {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
478 {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
479 {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
480 {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
481 {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
482 {0x0000b500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
483 {0x0000b504, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
484 {0x0000b508, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
485 {0x0000b50c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
486 {0x0000b510, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
487 {0x0000b514, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
488 {0x0000b518, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
489 {0x0000b51c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
490 {0x0000b520, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
491 {0x0000b524, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
492 {0x0000b528, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
493 {0x0000b52c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
494 {0x0000b530, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
495 {0x0000b534, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
496 {0x0000b538, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
497 {0x0000b53c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
498 {0x0000b540, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
499 {0x0000b544, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
500 {0x0000b548, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
501 {0x0000b54c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
502 {0x0000b550, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
503 {0x0000b554, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
504 {0x0000b558, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
505 {0x0000b55c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
506 {0x0000b560, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
507 {0x0000b564, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
508 {0x0000b568, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
509 {0x0000b56c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
510 {0x0000b570, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
511 {0x0000b574, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
512 {0x0000b578, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
513 {0x0000b57c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
514 {0x00016044, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db},
515 {0x00016048, 0x6c924260, 0x6c924260, 0x6c924260, 0x6c924260},
516};
517 238
518static const u32 ar9485_1_1_radio_postamble[][2] = { 239#define ar9485Modes_low_ob_db_tx_gain_1_1 ar9485Modes_high_ob_db_tx_gain_1_1
519 /* Addr allmodes */
520 {0x0001609c, 0x0b283f31},
521 {0x000160ac, 0x24611800},
522 {0x000160b0, 0x03284f3e},
523 {0x0001610c, 0x00170000},
524 {0x00016140, 0x50804008},
525};
526 240
527static const u32 ar9485_1_1_mac_postamble[][5] = { 241#define ar9485_modes_lowest_ob_db_tx_gain_1_1 ar9485Modes_low_ob_db_tx_gain_1_1
528 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ 242
529 {0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160}, 243static const u32 ar9485_1_1[][2] = {
530 {0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c}, 244 /* Addr allmodes */
531 {0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38}, 245 {0x0000a580, 0x00000000},
532 {0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00}, 246 {0x0000a584, 0x00000000},
533 {0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b}, 247 {0x0000a588, 0x00000000},
534 {0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810}, 248 {0x0000a58c, 0x00000000},
535 {0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a}, 249 {0x0000a590, 0x00000000},
536 {0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440}, 250 {0x0000a594, 0x00000000},
251 {0x0000a598, 0x00000000},
252 {0x0000a59c, 0x00000000},
253 {0x0000a5a0, 0x00000000},
254 {0x0000a5a4, 0x00000000},
255 {0x0000a5a8, 0x00000000},
256 {0x0000a5ac, 0x00000000},
257 {0x0000a5b0, 0x00000000},
258 {0x0000a5b4, 0x00000000},
259 {0x0000a5b8, 0x00000000},
260 {0x0000a5bc, 0x00000000},
537}; 261};
538 262
539static const u32 ar9485_1_1_radio_core[][2] = { 263static const u32 ar9485_1_1_radio_core[][2] = {
540 /* Addr allmodes */ 264 /* Addr allmodes */
541 {0x00016000, 0x36db6db6}, 265 {0x00016000, 0x36db6db6},
542 {0x00016004, 0x6db6db40}, 266 {0x00016004, 0x6db6db40},
543 {0x00016008, 0x73800000}, 267 {0x00016008, 0x73800000},
@@ -601,294 +325,145 @@ static const u32 ar9485_1_1_radio_core[][2] = {
601 {0x00016c44, 0x12000000}, 325 {0x00016c44, 0x12000000},
602}; 326};
603 327
604static const u32 ar9485_1_1_pcie_phy_pll_on_clkreq_enable_L1[][2] = { 328static const u32 ar9485_1_1_baseband_core[][2] = {
605 /* Addr allmodes */ 329 /* Addr allmodes */
606 {0x00018c00, 0x18052e5e}, 330 {0x00009800, 0xafe68e30},
607 {0x00018c04, 0x000801d8}, 331 {0x00009804, 0xfd14e000},
608 {0x00018c08, 0x0000080c}, 332 {0x00009808, 0x9c0a8f6b},
609}; 333 {0x0000980c, 0x04800000},
610 334 {0x00009814, 0x9280c00a},
611static const u32 ar9485Modes_high_power_tx_gain_1_1[][5] = { 335 {0x00009818, 0x00000000},
612 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ 336 {0x0000981c, 0x00020028},
613 {0x000098bc, 0x00000002, 0x00000002, 0x00000002, 0x00000002}, 337 {0x00009834, 0x5f3ca3de},
614 {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8}, 338 {0x00009838, 0x0108ecff},
615 {0x0000a458, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 339 {0x0000983c, 0x14750600},
616 {0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000}, 340 {0x00009880, 0x201fff00},
617 {0x0000a504, 0x05062002, 0x05062002, 0x04000002, 0x04000002}, 341 {0x00009884, 0x00001042},
618 {0x0000a508, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004}, 342 {0x000098a4, 0x00200400},
619 {0x0000a50c, 0x11062202, 0x11062202, 0x0d000200, 0x0d000200}, 343 {0x000098b0, 0x52440bbe},
620 {0x0000a510, 0x17022e00, 0x17022e00, 0x11000202, 0x11000202}, 344 {0x000098d0, 0x004b6a8e},
621 {0x0000a514, 0x1d000ec2, 0x1d000ec2, 0x15000400, 0x15000400}, 345 {0x000098d4, 0x00000820},
622 {0x0000a518, 0x25020ec0, 0x25020ec0, 0x19000402, 0x19000402}, 346 {0x000098dc, 0x00000000},
623 {0x0000a51c, 0x2b020ec3, 0x2b020ec3, 0x1d000404, 0x1d000404}, 347 {0x000098f0, 0x00000000},
624 {0x0000a520, 0x2f001f04, 0x2f001f04, 0x21000603, 0x21000603}, 348 {0x000098f4, 0x00000000},
625 {0x0000a524, 0x35001fc4, 0x35001fc4, 0x25000605, 0x25000605}, 349 {0x00009c04, 0x00000000},
626 {0x0000a528, 0x3c022f04, 0x3c022f04, 0x2a000a03, 0x2a000a03}, 350 {0x00009c08, 0x03200000},
627 {0x0000a52c, 0x41023e85, 0x41023e85, 0x2c000a04, 0x2c000a04}, 351 {0x00009c0c, 0x00000000},
628 {0x0000a530, 0x48023ec6, 0x48023ec6, 0x34000e20, 0x34000e20}, 352 {0x00009c10, 0x00000000},
629 {0x0000a534, 0x4d023f01, 0x4d023f01, 0x35000e21, 0x35000e21}, 353 {0x00009c14, 0x00046384},
630 {0x0000a538, 0x53023f4b, 0x53023f4b, 0x43000e62, 0x43000e62}, 354 {0x00009c18, 0x05b6b440},
631 {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x45000e63, 0x45000e63}, 355 {0x00009c1c, 0x00b6b440},
632 {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x49000e65, 0x49000e65}, 356 {0x00009d00, 0xc080a333},
633 {0x0000a544, 0x6502feca, 0x6502feca, 0x4b000e66, 0x4b000e66}, 357 {0x00009d04, 0x40206c10},
634 {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x4d001645, 0x4d001645}, 358 {0x00009d08, 0x009c4060},
635 {0x0000a54c, 0x7203feca, 0x7203feca, 0x51001865, 0x51001865}, 359 {0x00009d0c, 0x1883800a},
636 {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x55001a86, 0x55001a86}, 360 {0x00009d10, 0x01834061},
637 {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x57001ce9, 0x57001ce9}, 361 {0x00009d14, 0x00c00400},
638 {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x5a001ceb, 0x5a001ceb}, 362 {0x00009d18, 0x00000000},
639 {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x5e001eeb, 0x5e001eeb}, 363 {0x00009d1c, 0x00000000},
640 {0x0000a560, 0x900fff0b, 0x900fff0b, 0x5e001eeb, 0x5e001eeb}, 364 {0x00009e08, 0x0038233c},
641 {0x0000a564, 0x960fffcb, 0x960fffcb, 0x5e001eeb, 0x5e001eeb}, 365 {0x00009e24, 0x9927b515},
642 {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb}, 366 {0x00009e28, 0x12ef0200},
643 {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb}, 367 {0x00009e30, 0x06336f77},
644 {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb}, 368 {0x00009e34, 0x6af6532f},
645 {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb}, 369 {0x00009e38, 0x0cc80c00},
646 {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb}, 370 {0x00009e40, 0x0d261820},
647 {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb}, 371 {0x00009e4c, 0x00001004},
648 {0x0000b500, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 372 {0x00009e50, 0x00ff03f1},
649 {0x0000b504, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 373 {0x00009fc0, 0x80be4788},
650 {0x0000b508, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 374 {0x00009fc4, 0x0001efb5},
651 {0x0000b50c, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 375 {0x00009fcc, 0x40000014},
652 {0x0000b510, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 376 {0x0000a20c, 0x00000000},
653 {0x0000b514, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 377 {0x0000a210, 0x00000000},
654 {0x0000b518, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 378 {0x0000a220, 0x00000000},
655 {0x0000b51c, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 379 {0x0000a224, 0x00000000},
656 {0x0000b520, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 380 {0x0000a228, 0x10002310},
657 {0x0000b524, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 381 {0x0000a23c, 0x00000000},
658 {0x0000b528, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 382 {0x0000a244, 0x0c000000},
659 {0x0000b52c, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 383 {0x0000a2a0, 0x00000001},
660 {0x0000b530, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 384 {0x0000a2c0, 0x00000001},
661 {0x0000b534, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 385 {0x0000a2c8, 0x00000000},
662 {0x0000b538, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 386 {0x0000a2cc, 0x18c43433},
663 {0x0000b53c, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 387 {0x0000a2d4, 0x00000000},
664 {0x0000b540, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 388 {0x0000a2dc, 0x00000000},
665 {0x0000b544, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 389 {0x0000a2e0, 0x00000000},
666 {0x0000b548, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 390 {0x0000a2e4, 0x00000000},
667 {0x0000b54c, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 391 {0x0000a2e8, 0x00000000},
668 {0x0000b550, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 392 {0x0000a2ec, 0x00000000},
669 {0x0000b554, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 393 {0x0000a2f0, 0x00000000},
670 {0x0000b558, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 394 {0x0000a2f4, 0x00000000},
671 {0x0000b55c, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 395 {0x0000a2f8, 0x00000000},
672 {0x0000b560, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 396 {0x0000a344, 0x00000000},
673 {0x0000b564, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 397 {0x0000a34c, 0x00000000},
674 {0x0000b568, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 398 {0x0000a350, 0x0000a000},
675 {0x0000b56c, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 399 {0x0000a364, 0x00000000},
676 {0x0000b570, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 400 {0x0000a370, 0x00000000},
677 {0x0000b574, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 401 {0x0000a390, 0x00000001},
678 {0x0000b578, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 402 {0x0000a394, 0x00000444},
679 {0x0000b57c, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 403 {0x0000a398, 0x001f0e0f},
680 {0x00016044, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db}, 404 {0x0000a39c, 0x0075393f},
681 {0x00016048, 0x6c924260, 0x6c924260, 0x6c924260, 0x6c924260}, 405 {0x0000a3a0, 0xb79f6427},
682}; 406 {0x0000a3a4, 0x000000ff},
683 407 {0x0000a3a8, 0x3b3b3b3b},
684static const u32 ar9485_1_1[][2] = { 408 {0x0000a3ac, 0x2f2f2f2f},
685 /* Addr allmodes */ 409 {0x0000a3c0, 0x20202020},
686 {0x0000a580, 0x00000000}, 410 {0x0000a3c4, 0x22222220},
687 {0x0000a584, 0x00000000}, 411 {0x0000a3c8, 0x20200020},
688 {0x0000a588, 0x00000000}, 412 {0x0000a3cc, 0x20202020},
689 {0x0000a58c, 0x00000000}, 413 {0x0000a3d0, 0x20202020},
690 {0x0000a590, 0x00000000}, 414 {0x0000a3d4, 0x20202020},
691 {0x0000a594, 0x00000000}, 415 {0x0000a3d8, 0x20202020},
692 {0x0000a598, 0x00000000}, 416 {0x0000a3dc, 0x20202020},
693 {0x0000a59c, 0x00000000}, 417 {0x0000a3e0, 0x20202020},
694 {0x0000a5a0, 0x00000000}, 418 {0x0000a3e4, 0x20202020},
695 {0x0000a5a4, 0x00000000}, 419 {0x0000a3e8, 0x20202020},
696 {0x0000a5a8, 0x00000000}, 420 {0x0000a3ec, 0x20202020},
697 {0x0000a5ac, 0x00000000}, 421 {0x0000a3f0, 0x00000000},
698 {0x0000a5b0, 0x00000000}, 422 {0x0000a3f4, 0x00000006},
699 {0x0000a5b4, 0x00000000}, 423 {0x0000a3f8, 0x0cdbd380},
700 {0x0000a5b8, 0x00000000}, 424 {0x0000a3fc, 0x000f0f01},
701 {0x0000a5bc, 0x00000000}, 425 {0x0000a400, 0x8fa91f01},
702}; 426 {0x0000a404, 0x00000000},
703 427 {0x0000a408, 0x0e79e5c6},
704static const u32 ar9485_modes_green_ob_db_tx_gain_1_1[][5] = { 428 {0x0000a40c, 0x00820820},
705 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ 429 {0x0000a414, 0x1ce739cf},
706 {0x000098bc, 0x00000003, 0x00000003, 0x00000003, 0x00000003}, 430 {0x0000a418, 0x2d0019ce},
707 {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8}, 431 {0x0000a41c, 0x1ce739ce},
708 {0x0000a458, 0x80000000, 0x80000000, 0x80000000, 0x80000000}, 432 {0x0000a420, 0x000001ce},
709 {0x0000a500, 0x00022200, 0x00022200, 0x00000006, 0x00000006}, 433 {0x0000a424, 0x1ce739ce},
710 {0x0000a504, 0x05062002, 0x05062002, 0x03000201, 0x03000201}, 434 {0x0000a428, 0x000001ce},
711 {0x0000a508, 0x0c002e00, 0x0c002e00, 0x06000203, 0x06000203}, 435 {0x0000a42c, 0x1ce739ce},
712 {0x0000a50c, 0x11062202, 0x11062202, 0x0a000401, 0x0a000401}, 436 {0x0000a430, 0x1ce739ce},
713 {0x0000a510, 0x17022e00, 0x17022e00, 0x0e000403, 0x0e000403}, 437 {0x0000a434, 0x00000000},
714 {0x0000a514, 0x1d000ec2, 0x1d000ec2, 0x12000405, 0x12000405}, 438 {0x0000a438, 0x00001801},
715 {0x0000a518, 0x25020ec0, 0x25020ec0, 0x15000604, 0x15000604}, 439 {0x0000a43c, 0x00000000},
716 {0x0000a51c, 0x2b020ec3, 0x2b020ec3, 0x18000605, 0x18000605}, 440 {0x0000a440, 0x00000000},
717 {0x0000a520, 0x2f001f04, 0x2f001f04, 0x1c000a04, 0x1c000a04}, 441 {0x0000a444, 0x00000000},
718 {0x0000a524, 0x35001fc4, 0x35001fc4, 0x21000a06, 0x21000a06}, 442 {0x0000a448, 0x04000000},
719 {0x0000a528, 0x3c022f04, 0x3c022f04, 0x29000a24, 0x29000a24}, 443 {0x0000a44c, 0x00000001},
720 {0x0000a52c, 0x41023e85, 0x41023e85, 0x2f000e21, 0x2f000e21}, 444 {0x0000a450, 0x00010000},
721 {0x0000a530, 0x48023ec6, 0x48023ec6, 0x31000e20, 0x31000e20}, 445 {0x0000a5c4, 0xbfad9d74},
722 {0x0000a534, 0x4d023f01, 0x4d023f01, 0x33000e20, 0x33000e20}, 446 {0x0000a5c8, 0x0048060a},
723 {0x0000a538, 0x53023f4b, 0x53023f4b, 0x43000e62, 0x43000e62}, 447 {0x0000a5cc, 0x00000637},
724 {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x45000e63, 0x45000e63}, 448 {0x0000a760, 0x03020100},
725 {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x49000e65, 0x49000e65}, 449 {0x0000a764, 0x09080504},
726 {0x0000a544, 0x6502feca, 0x6502feca, 0x4b000e66, 0x4b000e66}, 450 {0x0000a768, 0x0d0c0b0a},
727 {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x4d001645, 0x4d001645}, 451 {0x0000a76c, 0x13121110},
728 {0x0000a54c, 0x7203feca, 0x7203feca, 0x51001865, 0x51001865}, 452 {0x0000a770, 0x31301514},
729 {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x55001a86, 0x55001a86}, 453 {0x0000a774, 0x35343332},
730 {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x57001ce9, 0x57001ce9}, 454 {0x0000a778, 0x00000036},
731 {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x5a001ceb, 0x5a001ceb}, 455 {0x0000a780, 0x00000838},
732 {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x5e001eeb, 0x5e001eeb}, 456 {0x0000a7c0, 0x00000000},
733 {0x0000a560, 0x900fff0b, 0x900fff0b, 0x5e001eeb, 0x5e001eeb}, 457 {0x0000a7c4, 0xfffffffc},
734 {0x0000a564, 0x960fffcb, 0x960fffcb, 0x5e001eeb, 0x5e001eeb}, 458 {0x0000a7c8, 0x00000000},
735 {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb}, 459 {0x0000a7cc, 0x00000000},
736 {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb}, 460 {0x0000a7d0, 0x00000000},
737 {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb}, 461 {0x0000a7d4, 0x00000004},
738 {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb}, 462 {0x0000a7dc, 0x00000000},
739 {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
740 {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
741 {0x0000b500, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
742 {0x0000b504, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
743 {0x0000b508, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
744 {0x0000b50c, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
745 {0x0000b510, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
746 {0x0000b514, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
747 {0x0000b518, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
748 {0x0000b51c, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
749 {0x0000b520, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
750 {0x0000b524, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
751 {0x0000b528, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
752 {0x0000b52c, 0x0000002a, 0x0000002a, 0x0000002a, 0x0000002a},
753 {0x0000b530, 0x0000003a, 0x0000003a, 0x0000003a, 0x0000003a},
754 {0x0000b534, 0x0000004a, 0x0000004a, 0x0000004a, 0x0000004a},
755 {0x0000b538, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
756 {0x0000b53c, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
757 {0x0000b540, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
758 {0x0000b544, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
759 {0x0000b548, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
760 {0x0000b54c, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
761 {0x0000b550, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
762 {0x0000b554, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
763 {0x0000b558, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
764 {0x0000b55c, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
765 {0x0000b560, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
766 {0x0000b564, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
767 {0x0000b568, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
768 {0x0000b56c, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
769 {0x0000b570, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
770 {0x0000b574, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
771 {0x0000b578, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
772 {0x0000b57c, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
773 {0x00016044, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db},
774 {0x00016048, 0x6c924260, 0x6c924260, 0x6c924260, 0x6c924260},
775};
776
777static const u32 ar9485_1_1_pcie_phy_clkreq_disable_L1[][2] = {
778 /* Addr allmodes */
779 {0x00018c00, 0x18013e5e},
780 {0x00018c04, 0x000801d8},
781 {0x00018c08, 0x0000080c},
782};
783
784static const u32 ar9485_1_1_soc_preamble[][2] = {
785 /* Addr allmodes */
786 {0x00004014, 0xba280400},
787 {0x00004090, 0x00aa10aa},
788 {0x000040a4, 0x00a0c9c9},
789 {0x00007010, 0x00000022},
790 {0x00007020, 0x00000000},
791 {0x00007034, 0x00000002},
792 {0x00007038, 0x000004c2},
793 {0x00007048, 0x00000002},
794};
795
796static const u32 ar9485_1_1_baseband_core_txfir_coeff_japan_2484[][2] = {
797 /* Addr allmodes */
798 {0x0000a398, 0x00000000},
799 {0x0000a39c, 0x6f7f0301},
800 {0x0000a3a0, 0xca9228ee},
801};
802
803static const u32 ar9485Modes_low_ob_db_tx_gain_1_1[][5] = {
804 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
805 {0x000098bc, 0x00000002, 0x00000002, 0x00000002, 0x00000002},
806 {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8},
807 {0x0000a458, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
808 {0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
809 {0x0000a504, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
810 {0x0000a508, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004},
811 {0x0000a50c, 0x11062202, 0x11062202, 0x0d000200, 0x0d000200},
812 {0x0000a510, 0x17022e00, 0x17022e00, 0x11000202, 0x11000202},
813 {0x0000a514, 0x1d000ec2, 0x1d000ec2, 0x15000400, 0x15000400},
814 {0x0000a518, 0x25020ec0, 0x25020ec0, 0x19000402, 0x19000402},
815 {0x0000a51c, 0x2b020ec3, 0x2b020ec3, 0x1d000404, 0x1d000404},
816 {0x0000a520, 0x2f001f04, 0x2f001f04, 0x21000603, 0x21000603},
817 {0x0000a524, 0x35001fc4, 0x35001fc4, 0x25000605, 0x25000605},
818 {0x0000a528, 0x3c022f04, 0x3c022f04, 0x2a000a03, 0x2a000a03},
819 {0x0000a52c, 0x41023e85, 0x41023e85, 0x2c000a04, 0x2c000a04},
820 {0x0000a530, 0x48023ec6, 0x48023ec6, 0x34000e20, 0x34000e20},
821 {0x0000a534, 0x4d023f01, 0x4d023f01, 0x35000e21, 0x35000e21},
822 {0x0000a538, 0x53023f4b, 0x53023f4b, 0x43000e62, 0x43000e62},
823 {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x45000e63, 0x45000e63},
824 {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x49000e65, 0x49000e65},
825 {0x0000a544, 0x6502feca, 0x6502feca, 0x4b000e66, 0x4b000e66},
826 {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x4d001645, 0x4d001645},
827 {0x0000a54c, 0x7203feca, 0x7203feca, 0x51001865, 0x51001865},
828 {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x55001a86, 0x55001a86},
829 {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x57001ce9, 0x57001ce9},
830 {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x5a001ceb, 0x5a001ceb},
831 {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x5e001eeb, 0x5e001eeb},
832 {0x0000a560, 0x900fff0b, 0x900fff0b, 0x5e001eeb, 0x5e001eeb},
833 {0x0000a564, 0x960fffcb, 0x960fffcb, 0x5e001eeb, 0x5e001eeb},
834 {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
835 {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
836 {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
837 {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
838 {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
839 {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
840 {0x0000b500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
841 {0x0000b504, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
842 {0x0000b508, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
843 {0x0000b50c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
844 {0x0000b510, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
845 {0x0000b514, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
846 {0x0000b518, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
847 {0x0000b51c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
848 {0x0000b520, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
849 {0x0000b524, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
850 {0x0000b528, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
851 {0x0000b52c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
852 {0x0000b530, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
853 {0x0000b534, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
854 {0x0000b538, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
855 {0x0000b53c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
856 {0x0000b540, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
857 {0x0000b544, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
858 {0x0000b548, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
859 {0x0000b54c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
860 {0x0000b550, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
861 {0x0000b554, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
862 {0x0000b558, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
863 {0x0000b55c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
864 {0x0000b560, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
865 {0x0000b564, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
866 {0x0000b568, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
867 {0x0000b56c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
868 {0x0000b570, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
869 {0x0000b574, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
870 {0x0000b578, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
871 {0x0000b57c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
872 {0x00016044, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db},
873 {0x00016048, 0x6c924260, 0x6c924260, 0x6c924260, 0x6c924260},
874};
875
876static const u32 ar9485_fast_clock_1_1_baseband_postamble[][3] = {
877 /* Addr 5G_HT2 5G_HT40 */
878 {0x00009e00, 0x03721821, 0x03721821},
879 {0x0000a230, 0x0000400b, 0x00004016},
880 {0x0000a254, 0x00000898, 0x00001130},
881};
882
883static const u32 ar9485_1_1_pcie_phy_pll_on_clkreq_disable_L1[][2] = {
884 /* Addr allmodes */
885 {0x00018c00, 0x18012e5e},
886 {0x00018c04, 0x000801d8},
887 {0x00018c08, 0x0000080c},
888}; 463};
889 464
890static const u32 ar9485_common_rx_gain_1_1[][2] = { 465static const u32 ar9485_common_rx_gain_1_1[][2] = {
891 /* Addr allmodes */ 466 /* Addr allmodes */
892 {0x0000a000, 0x00010000}, 467 {0x0000a000, 0x00010000},
893 {0x0000a004, 0x00030002}, 468 {0x0000a004, 0x00030002},
894 {0x0000a008, 0x00050004}, 469 {0x0000a008, 0x00050004},
@@ -1019,143 +594,260 @@ static const u32 ar9485_common_rx_gain_1_1[][2] = {
1019 {0x0000a1fc, 0x00000296}, 594 {0x0000a1fc, 0x00000296},
1020}; 595};
1021 596
597static const u32 ar9485_1_1_pcie_phy_pll_on_clkreq_enable_L1[][2] = {
598 /* Addr allmodes */
599 {0x00018c00, 0x18052e5e},
600 {0x00018c04, 0x000801d8},
601 {0x00018c08, 0x0000080c},
602};
603
1022static const u32 ar9485_1_1_pcie_phy_clkreq_enable_L1[][2] = { 604static const u32 ar9485_1_1_pcie_phy_clkreq_enable_L1[][2] = {
1023 /* Addr allmodes */ 605 /* Addr allmodes */
1024 {0x00018c00, 0x18053e5e}, 606 {0x00018c00, 0x18053e5e},
1025 {0x00018c04, 0x000801d8}, 607 {0x00018c04, 0x000801d8},
1026 {0x00018c08, 0x0000080c}, 608 {0x00018c08, 0x0000080c},
1027}; 609};
1028 610
1029static const u32 ar9485Common_wo_xlna_rx_gain_1_1[][2] = { 611static const u32 ar9485_1_1_soc_preamble[][2] = {
1030 /* Addr allmodes */ 612 /* Addr allmodes */
1031 {0x0000a000, 0x00060005}, 613 {0x00004014, 0xba280400},
1032 {0x0000a004, 0x00810080}, 614 {0x00004090, 0x00aa10aa},
1033 {0x0000a008, 0x00830082}, 615 {0x000040a4, 0x00a0c9c9},
1034 {0x0000a00c, 0x00850084}, 616 {0x00007010, 0x00000022},
1035 {0x0000a010, 0x01820181}, 617 {0x00007020, 0x00000000},
1036 {0x0000a014, 0x01840183}, 618 {0x00007034, 0x00000002},
1037 {0x0000a018, 0x01880185}, 619 {0x00007038, 0x000004c2},
1038 {0x0000a01c, 0x018a0189}, 620 {0x00007048, 0x00000002},
1039 {0x0000a020, 0x02850284}, 621};
1040 {0x0000a024, 0x02890288}, 622
1041 {0x0000a028, 0x028b028a}, 623static const u32 ar9485_fast_clock_1_1_baseband_postamble[][3] = {
1042 {0x0000a02c, 0x03850384}, 624 /* Addr 5G_HT20 5G_HT40 */
1043 {0x0000a030, 0x03890388}, 625 {0x00009e00, 0x03721821, 0x03721821},
1044 {0x0000a034, 0x038b038a}, 626 {0x0000a230, 0x0000400b, 0x00004016},
1045 {0x0000a038, 0x038d038c}, 627 {0x0000a254, 0x00000898, 0x00001130},
1046 {0x0000a03c, 0x03910390}, 628};
1047 {0x0000a040, 0x03930392}, 629
1048 {0x0000a044, 0x03950394}, 630static const u32 ar9485_1_1_baseband_postamble[][5] = {
1049 {0x0000a048, 0x00000396}, 631 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
1050 {0x0000a04c, 0x00000000}, 632 {0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8005, 0xd00a8005},
1051 {0x0000a050, 0x00000000}, 633 {0x00009820, 0x206a002e, 0x206a002e, 0x206a002e, 0x206a002e},
1052 {0x0000a054, 0x00000000}, 634 {0x00009824, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0},
1053 {0x0000a058, 0x00000000}, 635 {0x00009828, 0x06903081, 0x06903081, 0x06903881, 0x06903881},
1054 {0x0000a05c, 0x00000000}, 636 {0x0000982c, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4},
1055 {0x0000a060, 0x00000000}, 637 {0x00009830, 0x0000059c, 0x0000059c, 0x0000059c, 0x0000059c},
1056 {0x0000a064, 0x00000000}, 638 {0x00009c00, 0x00000044, 0x00000044, 0x00000044, 0x00000044},
1057 {0x0000a068, 0x00000000}, 639 {0x00009e00, 0x0372161e, 0x0372161e, 0x037216a0, 0x037216a0},
1058 {0x0000a06c, 0x00000000}, 640 {0x00009e04, 0x00182020, 0x00182020, 0x00182020, 0x00182020},
1059 {0x0000a070, 0x00000000}, 641 {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000e2},
1060 {0x0000a074, 0x00000000}, 642 {0x00009e10, 0x7ec88d2e, 0x7ec88d2e, 0x7ec80d2e, 0x7ec80d2e},
1061 {0x0000a078, 0x00000000}, 643 {0x00009e14, 0x31395d5e, 0x3139605e, 0x3139605e, 0x31395d5e},
1062 {0x0000a07c, 0x00000000}, 644 {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1063 {0x0000a080, 0x28282828}, 645 {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
1064 {0x0000a084, 0x28282828}, 646 {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
1065 {0x0000a088, 0x28282828}, 647 {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
1066 {0x0000a08c, 0x28282828}, 648 {0x00009e3c, 0xcf946220, 0xcf946220, 0xcf946222, 0xcf946222},
1067 {0x0000a090, 0x28282828}, 649 {0x00009e44, 0x02321e27, 0x02321e27, 0x02282324, 0x02282324},
1068 {0x0000a094, 0x24242428}, 650 {0x00009e48, 0x5030201a, 0x5030201a, 0x50302010, 0x50302010},
1069 {0x0000a098, 0x171e1e1e}, 651 {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
1070 {0x0000a09c, 0x02020b0b}, 652 {0x0000a204, 0x01303fc0, 0x01303fc4, 0x01303fc4, 0x01303fc0},
1071 {0x0000a0a0, 0x02020202}, 653 {0x0000a208, 0x00000104, 0x00000104, 0x00000004, 0x00000004},
1072 {0x0000a0a4, 0x00000000}, 654 {0x0000a230, 0x0000400a, 0x00004014, 0x00004016, 0x0000400b},
1073 {0x0000a0a8, 0x00000000}, 655 {0x0000a234, 0x10000fff, 0x10000fff, 0x10000fff, 0x10000fff},
1074 {0x0000a0ac, 0x00000000}, 656 {0x0000a238, 0xffb81018, 0xffb81018, 0xffb81018, 0xffb81018},
1075 {0x0000a0b0, 0x00000000}, 657 {0x0000a250, 0x00000000, 0x00000000, 0x00000210, 0x00000108},
1076 {0x0000a0b4, 0x00000000}, 658 {0x0000a254, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898},
1077 {0x0000a0b8, 0x00000000}, 659 {0x0000a258, 0x02020002, 0x02020002, 0x02020002, 0x02020002},
1078 {0x0000a0bc, 0x00000000}, 660 {0x0000a25c, 0x01000e0e, 0x01000e0e, 0x01000e0e, 0x01000e0e},
1079 {0x0000a0c0, 0x22072208}, 661 {0x0000a260, 0x3a021501, 0x3a021501, 0x3a021501, 0x3a021501},
1080 {0x0000a0c4, 0x22052206}, 662 {0x0000a264, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e},
1081 {0x0000a0c8, 0x22032204}, 663 {0x0000a280, 0x00000007, 0x00000007, 0x0000000b, 0x0000000b},
1082 {0x0000a0cc, 0x22012202}, 664 {0x0000a284, 0x00000000, 0x00000000, 0x000002a0, 0x000002a0},
1083 {0x0000a0d0, 0x221f2200}, 665 {0x0000a288, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1084 {0x0000a0d4, 0x221d221e}, 666 {0x0000a28c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1085 {0x0000a0d8, 0x33023303}, 667 {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
1086 {0x0000a0dc, 0x33003301}, 668 {0x0000a2d0, 0x00071981, 0x00071981, 0x00071982, 0x00071982},
1087 {0x0000a0e0, 0x331e331f}, 669 {0x0000a2d8, 0xf999a83a, 0xf999a83a, 0xf999a83a, 0xf999a83a},
1088 {0x0000a0e4, 0x4402331d}, 670 {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1089 {0x0000a0e8, 0x44004401}, 671 {0x0000be04, 0x00802020, 0x00802020, 0x00802020, 0x00802020},
1090 {0x0000a0ec, 0x441e441f}, 672 {0x0000be18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1091 {0x0000a0f0, 0x55025503}, 673};
1092 {0x0000a0f4, 0x55005501}, 674
1093 {0x0000a0f8, 0x551e551f}, 675static const u32 ar9485_1_1_pcie_phy_clkreq_disable_L1[][2] = {
1094 {0x0000a0fc, 0x6602551d}, 676 /* Addr allmodes */
1095 {0x0000a100, 0x66006601}, 677 {0x00018c00, 0x18013e5e},
1096 {0x0000a104, 0x661e661f}, 678 {0x00018c04, 0x000801d8},
1097 {0x0000a108, 0x7703661d}, 679 {0x00018c08, 0x0000080c},
1098 {0x0000a10c, 0x77017702}, 680};
1099 {0x0000a110, 0x00007700}, 681
1100 {0x0000a114, 0x00000000}, 682static const u32 ar9485_1_1_radio_postamble[][2] = {
1101 {0x0000a118, 0x00000000}, 683 /* Addr allmodes */
1102 {0x0000a11c, 0x00000000}, 684 {0x0001609c, 0x0b283f31},
1103 {0x0000a120, 0x00000000}, 685 {0x000160ac, 0x24611800},
1104 {0x0000a124, 0x00000000}, 686 {0x000160b0, 0x03284f3e},
1105 {0x0000a128, 0x00000000}, 687 {0x0001610c, 0x00170000},
1106 {0x0000a12c, 0x00000000}, 688 {0x00016140, 0x50804008},
1107 {0x0000a130, 0x00000000}, 689};
1108 {0x0000a134, 0x00000000}, 690
1109 {0x0000a138, 0x00000000}, 691static const u32 ar9485_1_1_mac_core[][2] = {
1110 {0x0000a13c, 0x00000000}, 692 /* Addr allmodes */
1111 {0x0000a140, 0x001f0000}, 693 {0x00000008, 0x00000000},
1112 {0x0000a144, 0x111f1100}, 694 {0x00000030, 0x00020085},
1113 {0x0000a148, 0x111d111e}, 695 {0x00000034, 0x00000005},
1114 {0x0000a14c, 0x111b111c}, 696 {0x00000040, 0x00000000},
1115 {0x0000a150, 0x22032204}, 697 {0x00000044, 0x00000000},
1116 {0x0000a154, 0x22012202}, 698 {0x00000048, 0x00000008},
1117 {0x0000a158, 0x221f2200}, 699 {0x0000004c, 0x00000010},
1118 {0x0000a15c, 0x221d221e}, 700 {0x00000050, 0x00000000},
1119 {0x0000a160, 0x33013302}, 701 {0x00001040, 0x002ffc0f},
1120 {0x0000a164, 0x331f3300}, 702 {0x00001044, 0x002ffc0f},
1121 {0x0000a168, 0x4402331e}, 703 {0x00001048, 0x002ffc0f},
1122 {0x0000a16c, 0x44004401}, 704 {0x0000104c, 0x002ffc0f},
1123 {0x0000a170, 0x441e441f}, 705 {0x00001050, 0x002ffc0f},
1124 {0x0000a174, 0x55015502}, 706 {0x00001054, 0x002ffc0f},
1125 {0x0000a178, 0x551f5500}, 707 {0x00001058, 0x002ffc0f},
1126 {0x0000a17c, 0x6602551e}, 708 {0x0000105c, 0x002ffc0f},
1127 {0x0000a180, 0x66006601}, 709 {0x00001060, 0x002ffc0f},
1128 {0x0000a184, 0x661e661f}, 710 {0x00001064, 0x002ffc0f},
1129 {0x0000a188, 0x7703661d}, 711 {0x000010f0, 0x00000100},
1130 {0x0000a18c, 0x77017702}, 712 {0x00001270, 0x00000000},
1131 {0x0000a190, 0x00007700}, 713 {0x000012b0, 0x00000000},
1132 {0x0000a194, 0x00000000}, 714 {0x000012f0, 0x00000000},
1133 {0x0000a198, 0x00000000}, 715 {0x0000143c, 0x00000000},
1134 {0x0000a19c, 0x00000000}, 716 {0x0000147c, 0x00000000},
1135 {0x0000a1a0, 0x00000000}, 717 {0x00008000, 0x00000000},
1136 {0x0000a1a4, 0x00000000}, 718 {0x00008004, 0x00000000},
1137 {0x0000a1a8, 0x00000000}, 719 {0x00008008, 0x00000000},
1138 {0x0000a1ac, 0x00000000}, 720 {0x0000800c, 0x00000000},
1139 {0x0000a1b0, 0x00000000}, 721 {0x00008018, 0x00000000},
1140 {0x0000a1b4, 0x00000000}, 722 {0x00008020, 0x00000000},
1141 {0x0000a1b8, 0x00000000}, 723 {0x00008038, 0x00000000},
1142 {0x0000a1bc, 0x00000000}, 724 {0x0000803c, 0x00000000},
1143 {0x0000a1c0, 0x00000000}, 725 {0x00008040, 0x00000000},
1144 {0x0000a1c4, 0x00000000}, 726 {0x00008044, 0x00000000},
1145 {0x0000a1c8, 0x00000000}, 727 {0x00008048, 0x00000000},
1146 {0x0000a1cc, 0x00000000}, 728 {0x0000804c, 0xffffffff},
1147 {0x0000a1d0, 0x00000000}, 729 {0x00008054, 0x00000000},
1148 {0x0000a1d4, 0x00000000}, 730 {0x00008058, 0x00000000},
1149 {0x0000a1d8, 0x00000000}, 731 {0x0000805c, 0x000fc78f},
1150 {0x0000a1dc, 0x00000000}, 732 {0x00008060, 0x0000000f},
1151 {0x0000a1e0, 0x00000000}, 733 {0x00008064, 0x00000000},
1152 {0x0000a1e4, 0x00000000}, 734 {0x00008070, 0x00000310},
1153 {0x0000a1e8, 0x00000000}, 735 {0x00008074, 0x00000020},
1154 {0x0000a1ec, 0x00000000}, 736 {0x00008078, 0x00000000},
1155 {0x0000a1f0, 0x00000396}, 737 {0x0000809c, 0x0000000f},
1156 {0x0000a1f4, 0x00000396}, 738 {0x000080a0, 0x00000000},
1157 {0x0000a1f8, 0x00000396}, 739 {0x000080a4, 0x02ff0000},
1158 {0x0000a1fc, 0x00000296}, 740 {0x000080a8, 0x0e070605},
741 {0x000080ac, 0x0000000d},
742 {0x000080b0, 0x00000000},
743 {0x000080b4, 0x00000000},
744 {0x000080b8, 0x00000000},
745 {0x000080bc, 0x00000000},
746 {0x000080c0, 0x2a800000},
747 {0x000080c4, 0x06900168},
748 {0x000080c8, 0x13881c22},
749 {0x000080cc, 0x01f40000},
750 {0x000080d0, 0x00252500},
751 {0x000080d4, 0x00a00000},
752 {0x000080d8, 0x00400000},
753 {0x000080dc, 0x00000000},
754 {0x000080e0, 0xffffffff},
755 {0x000080e4, 0x0000ffff},
756 {0x000080e8, 0x3f3f3f3f},
757 {0x000080ec, 0x00000000},
758 {0x000080f0, 0x00000000},
759 {0x000080f4, 0x00000000},
760 {0x000080fc, 0x00020000},
761 {0x00008100, 0x00000000},
762 {0x00008108, 0x00000052},
763 {0x0000810c, 0x00000000},
764 {0x00008110, 0x00000000},
765 {0x00008114, 0x000007ff},
766 {0x00008118, 0x000000aa},
767 {0x0000811c, 0x00003210},
768 {0x00008124, 0x00000000},
769 {0x00008128, 0x00000000},
770 {0x0000812c, 0x00000000},
771 {0x00008130, 0x00000000},
772 {0x00008134, 0x00000000},
773 {0x00008138, 0x00000000},
774 {0x0000813c, 0x0000ffff},
775 {0x00008144, 0xffffffff},
776 {0x00008168, 0x00000000},
777 {0x0000816c, 0x00000000},
778 {0x00008170, 0x18486200},
779 {0x00008174, 0x33332210},
780 {0x00008178, 0x00000000},
781 {0x0000817c, 0x00020000},
782 {0x000081c0, 0x00000000},
783 {0x000081c4, 0x33332210},
784 {0x000081d4, 0x00000000},
785 {0x000081ec, 0x00000000},
786 {0x000081f0, 0x00000000},
787 {0x000081f4, 0x00000000},
788 {0x000081f8, 0x00000000},
789 {0x000081fc, 0x00000000},
790 {0x00008240, 0x00100000},
791 {0x00008244, 0x0010f400},
792 {0x00008248, 0x00000800},
793 {0x0000824c, 0x0001e800},
794 {0x00008250, 0x00000000},
795 {0x00008254, 0x00000000},
796 {0x00008258, 0x00000000},
797 {0x0000825c, 0x40000000},
798 {0x00008260, 0x00080922},
799 {0x00008264, 0x9ca00010},
800 {0x00008268, 0xffffffff},
801 {0x0000826c, 0x0000ffff},
802 {0x00008270, 0x00000000},
803 {0x00008274, 0x40000000},
804 {0x00008278, 0x003e4180},
805 {0x0000827c, 0x00000004},
806 {0x00008284, 0x0000002c},
807 {0x00008288, 0x0000002c},
808 {0x0000828c, 0x000000ff},
809 {0x00008294, 0x00000000},
810 {0x00008298, 0x00000000},
811 {0x0000829c, 0x00000000},
812 {0x00008300, 0x00000140},
813 {0x00008314, 0x00000000},
814 {0x0000831c, 0x0000010d},
815 {0x00008328, 0x00000000},
816 {0x0000832c, 0x00000007},
817 {0x00008330, 0x00000302},
818 {0x00008334, 0x00000700},
819 {0x00008338, 0x00ff0000},
820 {0x0000833c, 0x02400000},
821 {0x00008340, 0x000107ff},
822 {0x00008344, 0xa248105b},
823 {0x00008348, 0x008f0000},
824 {0x0000835c, 0x00000000},
825 {0x00008360, 0xffffffff},
826 {0x00008364, 0xffffffff},
827 {0x00008368, 0x00000000},
828 {0x00008370, 0x00000000},
829 {0x00008374, 0x000000ff},
830 {0x00008378, 0x00000000},
831 {0x0000837c, 0x00000000},
832 {0x00008380, 0xffffffff},
833 {0x00008384, 0xffffffff},
834 {0x00008390, 0xffffffff},
835 {0x00008394, 0xffffffff},
836 {0x00008398, 0x00000000},
837 {0x0000839c, 0x00000000},
838 {0x000083a0, 0x00000000},
839 {0x000083a4, 0x0000fa14},
840 {0x000083a8, 0x000f0c00},
841 {0x000083ac, 0x33332210},
842 {0x000083b0, 0x33332210},
843 {0x000083b4, 0x33332210},
844 {0x000083b8, 0x33332210},
845 {0x000083bc, 0x00000000},
846 {0x000083c0, 0x00000000},
847 {0x000083c4, 0x00000000},
848 {0x000083c8, 0x00000000},
849 {0x000083cc, 0x00000200},
850 {0x000083d0, 0x000301ff},
1159}; 851};
1160 852
1161#endif 853#endif /* INITVALS_9485_H */
diff --git a/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h
index 06b3f0df9fa..6e1915aee71 100644
--- a/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h
@@ -1,5 +1,6 @@
1/* 1/*
2 * Copyright (c) 2010 Atheros Communications Inc. 2 * Copyright (c) 2010-2011 Atheros Communications Inc.
3 * Copyright (c) 2011-2012 Qualcomm Atheros Inc.
3 * 4 *
4 * Permission to use, copy, modify, and/or distribute this software for any 5 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 6 * purpose with or without fee is hereby granted, provided that the above
@@ -19,18 +20,7 @@
19 20
20/* AR9580 1.0 */ 21/* AR9580 1.0 */
21 22
22static const u32 ar9580_1p0_modes_fast_clock[][3] = { 23#define ar9580_1p0_modes_fast_clock ar9300Modes_fast_clock_2p2
23 /* Addr 5G_HT20 5G_HT40 */
24 {0x00001030, 0x00000268, 0x000004d0},
25 {0x00001070, 0x0000018c, 0x00000318},
26 {0x000010b0, 0x00000fd0, 0x00001fa0},
27 {0x00008014, 0x044c044c, 0x08980898},
28 {0x0000801c, 0x148ec02b, 0x148ec057},
29 {0x00008318, 0x000044c0, 0x00008980},
30 {0x00009e00, 0x0372131c, 0x0372131c},
31 {0x0000a230, 0x0000000b, 0x00000016},
32 {0x0000a254, 0x00000898, 0x00001130},
33};
34 24
35static const u32 ar9580_1p0_radio_postamble[][5] = { 25static const u32 ar9580_1p0_radio_postamble[][5] = {
36 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ 26 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
@@ -208,17 +198,7 @@ static const u32 ar9580_1p0_baseband_core[][2] = {
208 {0x0000c420, 0x00000000}, 198 {0x0000c420, 0x00000000},
209}; 199};
210 200
211static const u32 ar9580_1p0_mac_postamble[][5] = { 201#define ar9580_1p0_mac_postamble ar9300_2p2_mac_postamble
212 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
213 {0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160},
214 {0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c},
215 {0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38},
216 {0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00},
217 {0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b},
218 {0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810},
219 {0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a},
220 {0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440},
221};
222 202
223static const u32 ar9580_1p0_low_ob_db_tx_gain_table[][5] = { 203static const u32 ar9580_1p0_low_ob_db_tx_gain_table[][5] = {
224 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ 204 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
@@ -326,111 +306,7 @@ static const u32 ar9580_1p0_low_ob_db_tx_gain_table[][5] = {
326 {0x00016868, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c}, 306 {0x00016868, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
327}; 307};
328 308
329static const u32 ar9580_1p0_high_power_tx_gain_table[][5] = { 309#define ar9580_1p0_high_power_tx_gain_table ar9580_1p0_low_ob_db_tx_gain_table
330 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
331 {0x0000a2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
332 {0x0000a2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
333 {0x0000a2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
334 {0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
335 {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
336 {0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
337 {0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002},
338 {0x0000a508, 0x0a000020, 0x0a000020, 0x08000004, 0x08000004},
339 {0x0000a50c, 0x10000023, 0x10000023, 0x0b000200, 0x0b000200},
340 {0x0000a510, 0x16000220, 0x16000220, 0x0f000202, 0x0f000202},
341 {0x0000a514, 0x1c000223, 0x1c000223, 0x12000400, 0x12000400},
342 {0x0000a518, 0x21002220, 0x21002220, 0x16000402, 0x16000402},
343 {0x0000a51c, 0x27002223, 0x27002223, 0x19000404, 0x19000404},
344 {0x0000a520, 0x2b022220, 0x2b022220, 0x1c000603, 0x1c000603},
345 {0x0000a524, 0x2f022222, 0x2f022222, 0x21000a02, 0x21000a02},
346 {0x0000a528, 0x34022225, 0x34022225, 0x25000a04, 0x25000a04},
347 {0x0000a52c, 0x3a02222a, 0x3a02222a, 0x28000a20, 0x28000a20},
348 {0x0000a530, 0x3e02222c, 0x3e02222c, 0x2c000e20, 0x2c000e20},
349 {0x0000a534, 0x4202242a, 0x4202242a, 0x30000e22, 0x30000e22},
350 {0x0000a538, 0x4702244a, 0x4702244a, 0x34000e24, 0x34000e24},
351 {0x0000a53c, 0x4b02244c, 0x4b02244c, 0x38001640, 0x38001640},
352 {0x0000a540, 0x4e02246c, 0x4e02246c, 0x3c001660, 0x3c001660},
353 {0x0000a544, 0x5302266c, 0x5302266c, 0x3f001861, 0x3f001861},
354 {0x0000a548, 0x5702286c, 0x5702286c, 0x43001a81, 0x43001a81},
355 {0x0000a54c, 0x5c02486b, 0x5c02486b, 0x47001a83, 0x47001a83},
356 {0x0000a550, 0x61024a6c, 0x61024a6c, 0x4a001c84, 0x4a001c84},
357 {0x0000a554, 0x66026a6c, 0x66026a6c, 0x4e001ce3, 0x4e001ce3},
358 {0x0000a558, 0x6b026e6c, 0x6b026e6c, 0x52001ce5, 0x52001ce5},
359 {0x0000a55c, 0x7002708c, 0x7002708c, 0x56001ce9, 0x56001ce9},
360 {0x0000a560, 0x7302b08a, 0x7302b08a, 0x5a001ceb, 0x5a001ceb},
361 {0x0000a564, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
362 {0x0000a568, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
363 {0x0000a56c, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
364 {0x0000a570, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
365 {0x0000a574, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
366 {0x0000a578, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
367 {0x0000a57c, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
368 {0x0000a580, 0x00800000, 0x00800000, 0x00800000, 0x00800000},
369 {0x0000a584, 0x06800003, 0x06800003, 0x04800002, 0x04800002},
370 {0x0000a588, 0x0a800020, 0x0a800020, 0x08800004, 0x08800004},
371 {0x0000a58c, 0x10800023, 0x10800023, 0x0b800200, 0x0b800200},
372 {0x0000a590, 0x16800220, 0x16800220, 0x0f800202, 0x0f800202},
373 {0x0000a594, 0x1c800223, 0x1c800223, 0x12800400, 0x12800400},
374 {0x0000a598, 0x21802220, 0x21802220, 0x16800402, 0x16800402},
375 {0x0000a59c, 0x27802223, 0x27802223, 0x19800404, 0x19800404},
376 {0x0000a5a0, 0x2b822220, 0x2b822220, 0x1c800603, 0x1c800603},
377 {0x0000a5a4, 0x2f822222, 0x2f822222, 0x21800a02, 0x21800a02},
378 {0x0000a5a8, 0x34822225, 0x34822225, 0x25800a04, 0x25800a04},
379 {0x0000a5ac, 0x3a82222a, 0x3a82222a, 0x28800a20, 0x28800a20},
380 {0x0000a5b0, 0x3e82222c, 0x3e82222c, 0x2c800e20, 0x2c800e20},
381 {0x0000a5b4, 0x4282242a, 0x4282242a, 0x30800e22, 0x30800e22},
382 {0x0000a5b8, 0x4782244a, 0x4782244a, 0x34800e24, 0x34800e24},
383 {0x0000a5bc, 0x4b82244c, 0x4b82244c, 0x38801640, 0x38801640},
384 {0x0000a5c0, 0x4e82246c, 0x4e82246c, 0x3c801660, 0x3c801660},
385 {0x0000a5c4, 0x5382266c, 0x5382266c, 0x3f801861, 0x3f801861},
386 {0x0000a5c8, 0x5782286c, 0x5782286c, 0x43801a81, 0x43801a81},
387 {0x0000a5cc, 0x5c82486b, 0x5c82486b, 0x47801a83, 0x47801a83},
388 {0x0000a5d0, 0x61824a6c, 0x61824a6c, 0x4a801c84, 0x4a801c84},
389 {0x0000a5d4, 0x66826a6c, 0x66826a6c, 0x4e801ce3, 0x4e801ce3},
390 {0x0000a5d8, 0x6b826e6c, 0x6b826e6c, 0x52801ce5, 0x52801ce5},
391 {0x0000a5dc, 0x7082708c, 0x7082708c, 0x56801ce9, 0x56801ce9},
392 {0x0000a5e0, 0x7382b08a, 0x7382b08a, 0x5a801ceb, 0x5a801ceb},
393 {0x0000a5e4, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
394 {0x0000a5e8, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
395 {0x0000a5ec, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
396 {0x0000a5f0, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
397 {0x0000a5f4, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
398 {0x0000a5f8, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
399 {0x0000a5fc, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
400 {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
401 {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
402 {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
403 {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
404 {0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
405 {0x0000a614, 0x01404000, 0x01404000, 0x01404000, 0x01404000},
406 {0x0000a618, 0x01404501, 0x01404501, 0x01404501, 0x01404501},
407 {0x0000a61c, 0x02008802, 0x02008802, 0x02008501, 0x02008501},
408 {0x0000a620, 0x0300cc03, 0x0300cc03, 0x0280ca03, 0x0280ca03},
409 {0x0000a624, 0x0300cc03, 0x0300cc03, 0x03010c04, 0x03010c04},
410 {0x0000a628, 0x0300cc03, 0x0300cc03, 0x04014c04, 0x04014c04},
411 {0x0000a62c, 0x03810c03, 0x03810c03, 0x04015005, 0x04015005},
412 {0x0000a630, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
413 {0x0000a634, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
414 {0x0000a638, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
415 {0x0000a63c, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
416 {0x0000b2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
417 {0x0000b2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
418 {0x0000b2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
419 {0x0000b2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
420 {0x0000c2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
421 {0x0000c2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
422 {0x0000c2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
423 {0x0000c2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
424 {0x00016044, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
425 {0x00016048, 0x66480001, 0x66480001, 0x66480001, 0x66480001},
426 {0x00016068, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
427 {0x00016444, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
428 {0x00016448, 0x66480001, 0x66480001, 0x66480001, 0x66480001},
429 {0x00016468, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
430 {0x00016844, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
431 {0x00016848, 0x66480001, 0x66480001, 0x66480001, 0x66480001},
432 {0x00016868, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
433};
434 310
435static const u32 ar9580_1p0_lowest_ob_db_tx_gain_table[][5] = { 311static const u32 ar9580_1p0_lowest_ob_db_tx_gain_table[][5] = {
436 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ 312 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
@@ -538,12 +414,7 @@ static const u32 ar9580_1p0_lowest_ob_db_tx_gain_table[][5] = {
538 {0x00016868, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c}, 414 {0x00016868, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
539}; 415};
540 416
541static const u32 ar9580_1p0_baseband_core_txfir_coeff_japan_2484[][2] = { 417#define ar9580_1p0_baseband_core_txfir_coeff_japan_2484 ar9462_2p0_baseband_core_txfir_coeff_japan_2484
542 /* Addr allmodes */
543 {0x0000a398, 0x00000000},
544 {0x0000a39c, 0x6f7f0301},
545 {0x0000a3a0, 0xca9228ee},
546};
547 418
548static const u32 ar9580_1p0_mac_core[][2] = { 419static const u32 ar9580_1p0_mac_core[][2] = {
549 /* Addr allmodes */ 420 /* Addr allmodes */
@@ -808,376 +679,11 @@ static const u32 ar9580_1p0_mixed_ob_db_tx_gain_table[][5] = {
808 {0x00016868, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c}, 679 {0x00016868, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
809}; 680};
810 681
811static const u32 ar9580_1p0_wo_xlna_rx_gain_table[][2] = { 682#define ar9580_1p0_wo_xlna_rx_gain_table ar9300Common_wo_xlna_rx_gain_table_2p2
812 /* Addr allmodes */
813 {0x0000a000, 0x00010000},
814 {0x0000a004, 0x00030002},
815 {0x0000a008, 0x00050004},
816 {0x0000a00c, 0x00810080},
817 {0x0000a010, 0x00830082},
818 {0x0000a014, 0x01810180},
819 {0x0000a018, 0x01830182},
820 {0x0000a01c, 0x01850184},
821 {0x0000a020, 0x01890188},
822 {0x0000a024, 0x018b018a},
823 {0x0000a028, 0x018d018c},
824 {0x0000a02c, 0x03820190},
825 {0x0000a030, 0x03840383},
826 {0x0000a034, 0x03880385},
827 {0x0000a038, 0x038a0389},
828 {0x0000a03c, 0x038c038b},
829 {0x0000a040, 0x0390038d},
830 {0x0000a044, 0x03920391},
831 {0x0000a048, 0x03940393},
832 {0x0000a04c, 0x03960395},
833 {0x0000a050, 0x00000000},
834 {0x0000a054, 0x00000000},
835 {0x0000a058, 0x00000000},
836 {0x0000a05c, 0x00000000},
837 {0x0000a060, 0x00000000},
838 {0x0000a064, 0x00000000},
839 {0x0000a068, 0x00000000},
840 {0x0000a06c, 0x00000000},
841 {0x0000a070, 0x00000000},
842 {0x0000a074, 0x00000000},
843 {0x0000a078, 0x00000000},
844 {0x0000a07c, 0x00000000},
845 {0x0000a080, 0x29292929},
846 {0x0000a084, 0x29292929},
847 {0x0000a088, 0x29292929},
848 {0x0000a08c, 0x29292929},
849 {0x0000a090, 0x22292929},
850 {0x0000a094, 0x1d1d2222},
851 {0x0000a098, 0x0c111117},
852 {0x0000a09c, 0x00030303},
853 {0x0000a0a0, 0x00000000},
854 {0x0000a0a4, 0x00000000},
855 {0x0000a0a8, 0x00000000},
856 {0x0000a0ac, 0x00000000},
857 {0x0000a0b0, 0x00000000},
858 {0x0000a0b4, 0x00000000},
859 {0x0000a0b8, 0x00000000},
860 {0x0000a0bc, 0x00000000},
861 {0x0000a0c0, 0x001f0000},
862 {0x0000a0c4, 0x01000101},
863 {0x0000a0c8, 0x011e011f},
864 {0x0000a0cc, 0x011c011d},
865 {0x0000a0d0, 0x02030204},
866 {0x0000a0d4, 0x02010202},
867 {0x0000a0d8, 0x021f0200},
868 {0x0000a0dc, 0x0302021e},
869 {0x0000a0e0, 0x03000301},
870 {0x0000a0e4, 0x031e031f},
871 {0x0000a0e8, 0x0402031d},
872 {0x0000a0ec, 0x04000401},
873 {0x0000a0f0, 0x041e041f},
874 {0x0000a0f4, 0x0502041d},
875 {0x0000a0f8, 0x05000501},
876 {0x0000a0fc, 0x051e051f},
877 {0x0000a100, 0x06010602},
878 {0x0000a104, 0x061f0600},
879 {0x0000a108, 0x061d061e},
880 {0x0000a10c, 0x07020703},
881 {0x0000a110, 0x07000701},
882 {0x0000a114, 0x00000000},
883 {0x0000a118, 0x00000000},
884 {0x0000a11c, 0x00000000},
885 {0x0000a120, 0x00000000},
886 {0x0000a124, 0x00000000},
887 {0x0000a128, 0x00000000},
888 {0x0000a12c, 0x00000000},
889 {0x0000a130, 0x00000000},
890 {0x0000a134, 0x00000000},
891 {0x0000a138, 0x00000000},
892 {0x0000a13c, 0x00000000},
893 {0x0000a140, 0x001f0000},
894 {0x0000a144, 0x01000101},
895 {0x0000a148, 0x011e011f},
896 {0x0000a14c, 0x011c011d},
897 {0x0000a150, 0x02030204},
898 {0x0000a154, 0x02010202},
899 {0x0000a158, 0x021f0200},
900 {0x0000a15c, 0x0302021e},
901 {0x0000a160, 0x03000301},
902 {0x0000a164, 0x031e031f},
903 {0x0000a168, 0x0402031d},
904 {0x0000a16c, 0x04000401},
905 {0x0000a170, 0x041e041f},
906 {0x0000a174, 0x0502041d},
907 {0x0000a178, 0x05000501},
908 {0x0000a17c, 0x051e051f},
909 {0x0000a180, 0x06010602},
910 {0x0000a184, 0x061f0600},
911 {0x0000a188, 0x061d061e},
912 {0x0000a18c, 0x07020703},
913 {0x0000a190, 0x07000701},
914 {0x0000a194, 0x00000000},
915 {0x0000a198, 0x00000000},
916 {0x0000a19c, 0x00000000},
917 {0x0000a1a0, 0x00000000},
918 {0x0000a1a4, 0x00000000},
919 {0x0000a1a8, 0x00000000},
920 {0x0000a1ac, 0x00000000},
921 {0x0000a1b0, 0x00000000},
922 {0x0000a1b4, 0x00000000},
923 {0x0000a1b8, 0x00000000},
924 {0x0000a1bc, 0x00000000},
925 {0x0000a1c0, 0x00000000},
926 {0x0000a1c4, 0x00000000},
927 {0x0000a1c8, 0x00000000},
928 {0x0000a1cc, 0x00000000},
929 {0x0000a1d0, 0x00000000},
930 {0x0000a1d4, 0x00000000},
931 {0x0000a1d8, 0x00000000},
932 {0x0000a1dc, 0x00000000},
933 {0x0000a1e0, 0x00000000},
934 {0x0000a1e4, 0x00000000},
935 {0x0000a1e8, 0x00000000},
936 {0x0000a1ec, 0x00000000},
937 {0x0000a1f0, 0x00000396},
938 {0x0000a1f4, 0x00000396},
939 {0x0000a1f8, 0x00000396},
940 {0x0000a1fc, 0x00000196},
941 {0x0000b000, 0x00010000},
942 {0x0000b004, 0x00030002},
943 {0x0000b008, 0x00050004},
944 {0x0000b00c, 0x00810080},
945 {0x0000b010, 0x00830082},
946 {0x0000b014, 0x01810180},
947 {0x0000b018, 0x01830182},
948 {0x0000b01c, 0x01850184},
949 {0x0000b020, 0x02810280},
950 {0x0000b024, 0x02830282},
951 {0x0000b028, 0x02850284},
952 {0x0000b02c, 0x02890288},
953 {0x0000b030, 0x028b028a},
954 {0x0000b034, 0x0388028c},
955 {0x0000b038, 0x038a0389},
956 {0x0000b03c, 0x038c038b},
957 {0x0000b040, 0x0390038d},
958 {0x0000b044, 0x03920391},
959 {0x0000b048, 0x03940393},
960 {0x0000b04c, 0x03960395},
961 {0x0000b050, 0x00000000},
962 {0x0000b054, 0x00000000},
963 {0x0000b058, 0x00000000},
964 {0x0000b05c, 0x00000000},
965 {0x0000b060, 0x00000000},
966 {0x0000b064, 0x00000000},
967 {0x0000b068, 0x00000000},
968 {0x0000b06c, 0x00000000},
969 {0x0000b070, 0x00000000},
970 {0x0000b074, 0x00000000},
971 {0x0000b078, 0x00000000},
972 {0x0000b07c, 0x00000000},
973 {0x0000b080, 0x32323232},
974 {0x0000b084, 0x2f2f3232},
975 {0x0000b088, 0x23282a2d},
976 {0x0000b08c, 0x1c1e2123},
977 {0x0000b090, 0x14171919},
978 {0x0000b094, 0x0e0e1214},
979 {0x0000b098, 0x03050707},
980 {0x0000b09c, 0x00030303},
981 {0x0000b0a0, 0x00000000},
982 {0x0000b0a4, 0x00000000},
983 {0x0000b0a8, 0x00000000},
984 {0x0000b0ac, 0x00000000},
985 {0x0000b0b0, 0x00000000},
986 {0x0000b0b4, 0x00000000},
987 {0x0000b0b8, 0x00000000},
988 {0x0000b0bc, 0x00000000},
989 {0x0000b0c0, 0x003f0020},
990 {0x0000b0c4, 0x00400041},
991 {0x0000b0c8, 0x0140005f},
992 {0x0000b0cc, 0x0160015f},
993 {0x0000b0d0, 0x017e017f},
994 {0x0000b0d4, 0x02410242},
995 {0x0000b0d8, 0x025f0240},
996 {0x0000b0dc, 0x027f0260},
997 {0x0000b0e0, 0x0341027e},
998 {0x0000b0e4, 0x035f0340},
999 {0x0000b0e8, 0x037f0360},
1000 {0x0000b0ec, 0x04400441},
1001 {0x0000b0f0, 0x0460045f},
1002 {0x0000b0f4, 0x0541047f},
1003 {0x0000b0f8, 0x055f0540},
1004 {0x0000b0fc, 0x057f0560},
1005 {0x0000b100, 0x06400641},
1006 {0x0000b104, 0x0660065f},
1007 {0x0000b108, 0x067e067f},
1008 {0x0000b10c, 0x07410742},
1009 {0x0000b110, 0x075f0740},
1010 {0x0000b114, 0x077f0760},
1011 {0x0000b118, 0x07800781},
1012 {0x0000b11c, 0x07a0079f},
1013 {0x0000b120, 0x07c107bf},
1014 {0x0000b124, 0x000007c0},
1015 {0x0000b128, 0x00000000},
1016 {0x0000b12c, 0x00000000},
1017 {0x0000b130, 0x00000000},
1018 {0x0000b134, 0x00000000},
1019 {0x0000b138, 0x00000000},
1020 {0x0000b13c, 0x00000000},
1021 {0x0000b140, 0x003f0020},
1022 {0x0000b144, 0x00400041},
1023 {0x0000b148, 0x0140005f},
1024 {0x0000b14c, 0x0160015f},
1025 {0x0000b150, 0x017e017f},
1026 {0x0000b154, 0x02410242},
1027 {0x0000b158, 0x025f0240},
1028 {0x0000b15c, 0x027f0260},
1029 {0x0000b160, 0x0341027e},
1030 {0x0000b164, 0x035f0340},
1031 {0x0000b168, 0x037f0360},
1032 {0x0000b16c, 0x04400441},
1033 {0x0000b170, 0x0460045f},
1034 {0x0000b174, 0x0541047f},
1035 {0x0000b178, 0x055f0540},
1036 {0x0000b17c, 0x057f0560},
1037 {0x0000b180, 0x06400641},
1038 {0x0000b184, 0x0660065f},
1039 {0x0000b188, 0x067e067f},
1040 {0x0000b18c, 0x07410742},
1041 {0x0000b190, 0x075f0740},
1042 {0x0000b194, 0x077f0760},
1043 {0x0000b198, 0x07800781},
1044 {0x0000b19c, 0x07a0079f},
1045 {0x0000b1a0, 0x07c107bf},
1046 {0x0000b1a4, 0x000007c0},
1047 {0x0000b1a8, 0x00000000},
1048 {0x0000b1ac, 0x00000000},
1049 {0x0000b1b0, 0x00000000},
1050 {0x0000b1b4, 0x00000000},
1051 {0x0000b1b8, 0x00000000},
1052 {0x0000b1bc, 0x00000000},
1053 {0x0000b1c0, 0x00000000},
1054 {0x0000b1c4, 0x00000000},
1055 {0x0000b1c8, 0x00000000},
1056 {0x0000b1cc, 0x00000000},
1057 {0x0000b1d0, 0x00000000},
1058 {0x0000b1d4, 0x00000000},
1059 {0x0000b1d8, 0x00000000},
1060 {0x0000b1dc, 0x00000000},
1061 {0x0000b1e0, 0x00000000},
1062 {0x0000b1e4, 0x00000000},
1063 {0x0000b1e8, 0x00000000},
1064 {0x0000b1ec, 0x00000000},
1065 {0x0000b1f0, 0x00000396},
1066 {0x0000b1f4, 0x00000396},
1067 {0x0000b1f8, 0x00000396},
1068 {0x0000b1fc, 0x00000196},
1069};
1070 683
1071static const u32 ar9580_1p0_soc_postamble[][5] = { 684#define ar9580_1p0_soc_postamble ar9300_2p2_soc_postamble
1072 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
1073 {0x00007010, 0x00000023, 0x00000023, 0x00000023, 0x00000023},
1074};
1075 685
1076static const u32 ar9580_1p0_high_ob_db_tx_gain_table[][5] = { 686#define ar9580_1p0_high_ob_db_tx_gain_table ar9300Modes_high_ob_db_tx_gain_table_2p2
1077 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
1078 {0x0000a2dc, 0x01feee00, 0x01feee00, 0x03aaa352, 0x03aaa352},
1079 {0x0000a2e0, 0x0000f000, 0x0000f000, 0x03ccc584, 0x03ccc584},
1080 {0x0000a2e4, 0x01ff0000, 0x01ff0000, 0x03f0f800, 0x03f0f800},
1081 {0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
1082 {0x0000a410, 0x000050d8, 0x000050d8, 0x000050d9, 0x000050d9},
1083 {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
1084 {0x0000a504, 0x04002222, 0x04002222, 0x04000002, 0x04000002},
1085 {0x0000a508, 0x09002421, 0x09002421, 0x08000004, 0x08000004},
1086 {0x0000a50c, 0x0d002621, 0x0d002621, 0x0b000200, 0x0b000200},
1087 {0x0000a510, 0x13004620, 0x13004620, 0x0f000202, 0x0f000202},
1088 {0x0000a514, 0x19004a20, 0x19004a20, 0x11000400, 0x11000400},
1089 {0x0000a518, 0x1d004e20, 0x1d004e20, 0x15000402, 0x15000402},
1090 {0x0000a51c, 0x21005420, 0x21005420, 0x19000404, 0x19000404},
1091 {0x0000a520, 0x26005e20, 0x26005e20, 0x1b000603, 0x1b000603},
1092 {0x0000a524, 0x2b005e40, 0x2b005e40, 0x1f000a02, 0x1f000a02},
1093 {0x0000a528, 0x2f005e42, 0x2f005e42, 0x23000a04, 0x23000a04},
1094 {0x0000a52c, 0x33005e44, 0x33005e44, 0x26000a20, 0x26000a20},
1095 {0x0000a530, 0x38005e65, 0x38005e65, 0x2a000e20, 0x2a000e20},
1096 {0x0000a534, 0x3c005e69, 0x3c005e69, 0x2e000e22, 0x2e000e22},
1097 {0x0000a538, 0x40005e6b, 0x40005e6b, 0x31000e24, 0x31000e24},
1098 {0x0000a53c, 0x44005e6d, 0x44005e6d, 0x34001640, 0x34001640},
1099 {0x0000a540, 0x49005e72, 0x49005e72, 0x38001660, 0x38001660},
1100 {0x0000a544, 0x4e005eb2, 0x4e005eb2, 0x3b001861, 0x3b001861},
1101 {0x0000a548, 0x53005f12, 0x53005f12, 0x3e001a81, 0x3e001a81},
1102 {0x0000a54c, 0x59025eb2, 0x59025eb2, 0x42001a83, 0x42001a83},
1103 {0x0000a550, 0x5e025f12, 0x5e025f12, 0x44001c84, 0x44001c84},
1104 {0x0000a554, 0x61027f12, 0x61027f12, 0x48001ce3, 0x48001ce3},
1105 {0x0000a558, 0x6702bf12, 0x6702bf12, 0x4c001ce5, 0x4c001ce5},
1106 {0x0000a55c, 0x6b02bf14, 0x6b02bf14, 0x50001ce9, 0x50001ce9},
1107 {0x0000a560, 0x6f02bf16, 0x6f02bf16, 0x54001ceb, 0x54001ceb},
1108 {0x0000a564, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
1109 {0x0000a568, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
1110 {0x0000a56c, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
1111 {0x0000a570, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
1112 {0x0000a574, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
1113 {0x0000a578, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
1114 {0x0000a57c, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
1115 {0x0000a580, 0x00802220, 0x00802220, 0x00800000, 0x00800000},
1116 {0x0000a584, 0x04802222, 0x04802222, 0x04800002, 0x04800002},
1117 {0x0000a588, 0x09802421, 0x09802421, 0x08800004, 0x08800004},
1118 {0x0000a58c, 0x0d802621, 0x0d802621, 0x0b800200, 0x0b800200},
1119 {0x0000a590, 0x13804620, 0x13804620, 0x0f800202, 0x0f800202},
1120 {0x0000a594, 0x19804a20, 0x19804a20, 0x11800400, 0x11800400},
1121 {0x0000a598, 0x1d804e20, 0x1d804e20, 0x15800402, 0x15800402},
1122 {0x0000a59c, 0x21805420, 0x21805420, 0x19800404, 0x19800404},
1123 {0x0000a5a0, 0x26805e20, 0x26805e20, 0x1b800603, 0x1b800603},
1124 {0x0000a5a4, 0x2b805e40, 0x2b805e40, 0x1f800a02, 0x1f800a02},
1125 {0x0000a5a8, 0x2f805e42, 0x2f805e42, 0x23800a04, 0x23800a04},
1126 {0x0000a5ac, 0x33805e44, 0x33805e44, 0x26800a20, 0x26800a20},
1127 {0x0000a5b0, 0x38805e65, 0x38805e65, 0x2a800e20, 0x2a800e20},
1128 {0x0000a5b4, 0x3c805e69, 0x3c805e69, 0x2e800e22, 0x2e800e22},
1129 {0x0000a5b8, 0x40805e6b, 0x40805e6b, 0x31800e24, 0x31800e24},
1130 {0x0000a5bc, 0x44805e6d, 0x44805e6d, 0x34801640, 0x34801640},
1131 {0x0000a5c0, 0x49805e72, 0x49805e72, 0x38801660, 0x38801660},
1132 {0x0000a5c4, 0x4e805eb2, 0x4e805eb2, 0x3b801861, 0x3b801861},
1133 {0x0000a5c8, 0x53805f12, 0x53805f12, 0x3e801a81, 0x3e801a81},
1134 {0x0000a5cc, 0x59825eb2, 0x59825eb2, 0x42801a83, 0x42801a83},
1135 {0x0000a5d0, 0x5e825f12, 0x5e825f12, 0x44801c84, 0x44801c84},
1136 {0x0000a5d4, 0x61827f12, 0x61827f12, 0x48801ce3, 0x48801ce3},
1137 {0x0000a5d8, 0x6782bf12, 0x6782bf12, 0x4c801ce5, 0x4c801ce5},
1138 {0x0000a5dc, 0x6b82bf14, 0x6b82bf14, 0x50801ce9, 0x50801ce9},
1139 {0x0000a5e0, 0x6f82bf16, 0x6f82bf16, 0x54801ceb, 0x54801ceb},
1140 {0x0000a5e4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
1141 {0x0000a5e8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
1142 {0x0000a5ec, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
1143 {0x0000a5f0, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
1144 {0x0000a5f4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
1145 {0x0000a5f8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
1146 {0x0000a5fc, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
1147 {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1148 {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1149 {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1150 {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1151 {0x0000a610, 0x00804000, 0x00804000, 0x00000000, 0x00000000},
1152 {0x0000a614, 0x00804201, 0x00804201, 0x01404000, 0x01404000},
1153 {0x0000a618, 0x0280c802, 0x0280c802, 0x01404501, 0x01404501},
1154 {0x0000a61c, 0x0280ca03, 0x0280ca03, 0x02008501, 0x02008501},
1155 {0x0000a620, 0x04c15104, 0x04c15104, 0x0280ca03, 0x0280ca03},
1156 {0x0000a624, 0x04c15305, 0x04c15305, 0x03010c04, 0x03010c04},
1157 {0x0000a628, 0x04c15305, 0x04c15305, 0x04014c04, 0x04014c04},
1158 {0x0000a62c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
1159 {0x0000a630, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
1160 {0x0000a634, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
1161 {0x0000a638, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
1162 {0x0000a63c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
1163 {0x0000b2dc, 0x01feee00, 0x01feee00, 0x03aaa352, 0x03aaa352},
1164 {0x0000b2e0, 0x0000f000, 0x0000f000, 0x03ccc584, 0x03ccc584},
1165 {0x0000b2e4, 0x01ff0000, 0x01ff0000, 0x03f0f800, 0x03f0f800},
1166 {0x0000b2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
1167 {0x0000c2dc, 0x01feee00, 0x01feee00, 0x03aaa352, 0x03aaa352},
1168 {0x0000c2e0, 0x0000f000, 0x0000f000, 0x03ccc584, 0x03ccc584},
1169 {0x0000c2e4, 0x01ff0000, 0x01ff0000, 0x03f0f800, 0x03f0f800},
1170 {0x0000c2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
1171 {0x00016044, 0x056db2e4, 0x056db2e4, 0x056db2e4, 0x056db2e4},
1172 {0x00016048, 0x8e480001, 0x8e480001, 0x8e480001, 0x8e480001},
1173 {0x00016068, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
1174 {0x00016444, 0x056db2e4, 0x056db2e4, 0x056db2e4, 0x056db2e4},
1175 {0x00016448, 0x8e480001, 0x8e480001, 0x8e480001, 0x8e480001},
1176 {0x00016468, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
1177 {0x00016844, 0x056db2e4, 0x056db2e4, 0x056db2e4, 0x056db2e4},
1178 {0x00016848, 0x8e480001, 0x8e480001, 0x8e480001, 0x8e480001},
1179 {0x00016868, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
1180};
1181 687
1182static const u32 ar9580_1p0_soc_preamble[][2] = { 688static const u32 ar9580_1p0_soc_preamble[][2] = {
1183 /* Addr allmodes */ 689 /* Addr allmodes */
@@ -1189,265 +695,7 @@ static const u32 ar9580_1p0_soc_preamble[][2] = {
1189 {0x00007048, 0x00000008}, 695 {0x00007048, 0x00000008},
1190}; 696};
1191 697
1192static const u32 ar9580_1p0_rx_gain_table[][2] = { 698#define ar9580_1p0_rx_gain_table ar9462_common_rx_gain_table_2p0
1193 /* Addr allmodes */
1194 {0x0000a000, 0x00010000},
1195 {0x0000a004, 0x00030002},
1196 {0x0000a008, 0x00050004},
1197 {0x0000a00c, 0x00810080},
1198 {0x0000a010, 0x00830082},
1199 {0x0000a014, 0x01810180},
1200 {0x0000a018, 0x01830182},
1201 {0x0000a01c, 0x01850184},
1202 {0x0000a020, 0x01890188},
1203 {0x0000a024, 0x018b018a},
1204 {0x0000a028, 0x018d018c},
1205 {0x0000a02c, 0x01910190},
1206 {0x0000a030, 0x01930192},
1207 {0x0000a034, 0x01950194},
1208 {0x0000a038, 0x038a0196},
1209 {0x0000a03c, 0x038c038b},
1210 {0x0000a040, 0x0390038d},
1211 {0x0000a044, 0x03920391},
1212 {0x0000a048, 0x03940393},
1213 {0x0000a04c, 0x03960395},
1214 {0x0000a050, 0x00000000},
1215 {0x0000a054, 0x00000000},
1216 {0x0000a058, 0x00000000},
1217 {0x0000a05c, 0x00000000},
1218 {0x0000a060, 0x00000000},
1219 {0x0000a064, 0x00000000},
1220 {0x0000a068, 0x00000000},
1221 {0x0000a06c, 0x00000000},
1222 {0x0000a070, 0x00000000},
1223 {0x0000a074, 0x00000000},
1224 {0x0000a078, 0x00000000},
1225 {0x0000a07c, 0x00000000},
1226 {0x0000a080, 0x22222229},
1227 {0x0000a084, 0x1d1d1d1d},
1228 {0x0000a088, 0x1d1d1d1d},
1229 {0x0000a08c, 0x1d1d1d1d},
1230 {0x0000a090, 0x171d1d1d},
1231 {0x0000a094, 0x11111717},
1232 {0x0000a098, 0x00030311},
1233 {0x0000a09c, 0x00000000},
1234 {0x0000a0a0, 0x00000000},
1235 {0x0000a0a4, 0x00000000},
1236 {0x0000a0a8, 0x00000000},
1237 {0x0000a0ac, 0x00000000},
1238 {0x0000a0b0, 0x00000000},
1239 {0x0000a0b4, 0x00000000},
1240 {0x0000a0b8, 0x00000000},
1241 {0x0000a0bc, 0x00000000},
1242 {0x0000a0c0, 0x001f0000},
1243 {0x0000a0c4, 0x01000101},
1244 {0x0000a0c8, 0x011e011f},
1245 {0x0000a0cc, 0x011c011d},
1246 {0x0000a0d0, 0x02030204},
1247 {0x0000a0d4, 0x02010202},
1248 {0x0000a0d8, 0x021f0200},
1249 {0x0000a0dc, 0x0302021e},
1250 {0x0000a0e0, 0x03000301},
1251 {0x0000a0e4, 0x031e031f},
1252 {0x0000a0e8, 0x0402031d},
1253 {0x0000a0ec, 0x04000401},
1254 {0x0000a0f0, 0x041e041f},
1255 {0x0000a0f4, 0x0502041d},
1256 {0x0000a0f8, 0x05000501},
1257 {0x0000a0fc, 0x051e051f},
1258 {0x0000a100, 0x06010602},
1259 {0x0000a104, 0x061f0600},
1260 {0x0000a108, 0x061d061e},
1261 {0x0000a10c, 0x07020703},
1262 {0x0000a110, 0x07000701},
1263 {0x0000a114, 0x00000000},
1264 {0x0000a118, 0x00000000},
1265 {0x0000a11c, 0x00000000},
1266 {0x0000a120, 0x00000000},
1267 {0x0000a124, 0x00000000},
1268 {0x0000a128, 0x00000000},
1269 {0x0000a12c, 0x00000000},
1270 {0x0000a130, 0x00000000},
1271 {0x0000a134, 0x00000000},
1272 {0x0000a138, 0x00000000},
1273 {0x0000a13c, 0x00000000},
1274 {0x0000a140, 0x001f0000},
1275 {0x0000a144, 0x01000101},
1276 {0x0000a148, 0x011e011f},
1277 {0x0000a14c, 0x011c011d},
1278 {0x0000a150, 0x02030204},
1279 {0x0000a154, 0x02010202},
1280 {0x0000a158, 0x021f0200},
1281 {0x0000a15c, 0x0302021e},
1282 {0x0000a160, 0x03000301},
1283 {0x0000a164, 0x031e031f},
1284 {0x0000a168, 0x0402031d},
1285 {0x0000a16c, 0x04000401},
1286 {0x0000a170, 0x041e041f},
1287 {0x0000a174, 0x0502041d},
1288 {0x0000a178, 0x05000501},
1289 {0x0000a17c, 0x051e051f},
1290 {0x0000a180, 0x06010602},
1291 {0x0000a184, 0x061f0600},
1292 {0x0000a188, 0x061d061e},
1293 {0x0000a18c, 0x07020703},
1294 {0x0000a190, 0x07000701},
1295 {0x0000a194, 0x00000000},
1296 {0x0000a198, 0x00000000},
1297 {0x0000a19c, 0x00000000},
1298 {0x0000a1a0, 0x00000000},
1299 {0x0000a1a4, 0x00000000},
1300 {0x0000a1a8, 0x00000000},
1301 {0x0000a1ac, 0x00000000},
1302 {0x0000a1b0, 0x00000000},
1303 {0x0000a1b4, 0x00000000},
1304 {0x0000a1b8, 0x00000000},
1305 {0x0000a1bc, 0x00000000},
1306 {0x0000a1c0, 0x00000000},
1307 {0x0000a1c4, 0x00000000},
1308 {0x0000a1c8, 0x00000000},
1309 {0x0000a1cc, 0x00000000},
1310 {0x0000a1d0, 0x00000000},
1311 {0x0000a1d4, 0x00000000},
1312 {0x0000a1d8, 0x00000000},
1313 {0x0000a1dc, 0x00000000},
1314 {0x0000a1e0, 0x00000000},
1315 {0x0000a1e4, 0x00000000},
1316 {0x0000a1e8, 0x00000000},
1317 {0x0000a1ec, 0x00000000},
1318 {0x0000a1f0, 0x00000396},
1319 {0x0000a1f4, 0x00000396},
1320 {0x0000a1f8, 0x00000396},
1321 {0x0000a1fc, 0x00000196},
1322 {0x0000b000, 0x00010000},
1323 {0x0000b004, 0x00030002},
1324 {0x0000b008, 0x00050004},
1325 {0x0000b00c, 0x00810080},
1326 {0x0000b010, 0x00830082},
1327 {0x0000b014, 0x01810180},
1328 {0x0000b018, 0x01830182},
1329 {0x0000b01c, 0x01850184},
1330 {0x0000b020, 0x02810280},
1331 {0x0000b024, 0x02830282},
1332 {0x0000b028, 0x02850284},
1333 {0x0000b02c, 0x02890288},
1334 {0x0000b030, 0x028b028a},
1335 {0x0000b034, 0x0388028c},
1336 {0x0000b038, 0x038a0389},
1337 {0x0000b03c, 0x038c038b},
1338 {0x0000b040, 0x0390038d},
1339 {0x0000b044, 0x03920391},
1340 {0x0000b048, 0x03940393},
1341 {0x0000b04c, 0x03960395},
1342 {0x0000b050, 0x00000000},
1343 {0x0000b054, 0x00000000},
1344 {0x0000b058, 0x00000000},
1345 {0x0000b05c, 0x00000000},
1346 {0x0000b060, 0x00000000},
1347 {0x0000b064, 0x00000000},
1348 {0x0000b068, 0x00000000},
1349 {0x0000b06c, 0x00000000},
1350 {0x0000b070, 0x00000000},
1351 {0x0000b074, 0x00000000},
1352 {0x0000b078, 0x00000000},
1353 {0x0000b07c, 0x00000000},
1354 {0x0000b080, 0x2a2d2f32},
1355 {0x0000b084, 0x21232328},
1356 {0x0000b088, 0x19191c1e},
1357 {0x0000b08c, 0x12141417},
1358 {0x0000b090, 0x07070e0e},
1359 {0x0000b094, 0x03030305},
1360 {0x0000b098, 0x00000003},
1361 {0x0000b09c, 0x00000000},
1362 {0x0000b0a0, 0x00000000},
1363 {0x0000b0a4, 0x00000000},
1364 {0x0000b0a8, 0x00000000},
1365 {0x0000b0ac, 0x00000000},
1366 {0x0000b0b0, 0x00000000},
1367 {0x0000b0b4, 0x00000000},
1368 {0x0000b0b8, 0x00000000},
1369 {0x0000b0bc, 0x00000000},
1370 {0x0000b0c0, 0x003f0020},
1371 {0x0000b0c4, 0x00400041},
1372 {0x0000b0c8, 0x0140005f},
1373 {0x0000b0cc, 0x0160015f},
1374 {0x0000b0d0, 0x017e017f},
1375 {0x0000b0d4, 0x02410242},
1376 {0x0000b0d8, 0x025f0240},
1377 {0x0000b0dc, 0x027f0260},
1378 {0x0000b0e0, 0x0341027e},
1379 {0x0000b0e4, 0x035f0340},
1380 {0x0000b0e8, 0x037f0360},
1381 {0x0000b0ec, 0x04400441},
1382 {0x0000b0f0, 0x0460045f},
1383 {0x0000b0f4, 0x0541047f},
1384 {0x0000b0f8, 0x055f0540},
1385 {0x0000b0fc, 0x057f0560},
1386 {0x0000b100, 0x06400641},
1387 {0x0000b104, 0x0660065f},
1388 {0x0000b108, 0x067e067f},
1389 {0x0000b10c, 0x07410742},
1390 {0x0000b110, 0x075f0740},
1391 {0x0000b114, 0x077f0760},
1392 {0x0000b118, 0x07800781},
1393 {0x0000b11c, 0x07a0079f},
1394 {0x0000b120, 0x07c107bf},
1395 {0x0000b124, 0x000007c0},
1396 {0x0000b128, 0x00000000},
1397 {0x0000b12c, 0x00000000},
1398 {0x0000b130, 0x00000000},
1399 {0x0000b134, 0x00000000},
1400 {0x0000b138, 0x00000000},
1401 {0x0000b13c, 0x00000000},
1402 {0x0000b140, 0x003f0020},
1403 {0x0000b144, 0x00400041},
1404 {0x0000b148, 0x0140005f},
1405 {0x0000b14c, 0x0160015f},
1406 {0x0000b150, 0x017e017f},
1407 {0x0000b154, 0x02410242},
1408 {0x0000b158, 0x025f0240},
1409 {0x0000b15c, 0x027f0260},
1410 {0x0000b160, 0x0341027e},
1411 {0x0000b164, 0x035f0340},
1412 {0x0000b168, 0x037f0360},
1413 {0x0000b16c, 0x04400441},
1414 {0x0000b170, 0x0460045f},
1415 {0x0000b174, 0x0541047f},
1416 {0x0000b178, 0x055f0540},
1417 {0x0000b17c, 0x057f0560},
1418 {0x0000b180, 0x06400641},
1419 {0x0000b184, 0x0660065f},
1420 {0x0000b188, 0x067e067f},
1421 {0x0000b18c, 0x07410742},
1422 {0x0000b190, 0x075f0740},
1423 {0x0000b194, 0x077f0760},
1424 {0x0000b198, 0x07800781},
1425 {0x0000b19c, 0x07a0079f},
1426 {0x0000b1a0, 0x07c107bf},
1427 {0x0000b1a4, 0x000007c0},
1428 {0x0000b1a8, 0x00000000},
1429 {0x0000b1ac, 0x00000000},
1430 {0x0000b1b0, 0x00000000},
1431 {0x0000b1b4, 0x00000000},
1432 {0x0000b1b8, 0x00000000},
1433 {0x0000b1bc, 0x00000000},
1434 {0x0000b1c0, 0x00000000},
1435 {0x0000b1c4, 0x00000000},
1436 {0x0000b1c8, 0x00000000},
1437 {0x0000b1cc, 0x00000000},
1438 {0x0000b1d0, 0x00000000},
1439 {0x0000b1d4, 0x00000000},
1440 {0x0000b1d8, 0x00000000},
1441 {0x0000b1dc, 0x00000000},
1442 {0x0000b1e0, 0x00000000},
1443 {0x0000b1e4, 0x00000000},
1444 {0x0000b1e8, 0x00000000},
1445 {0x0000b1ec, 0x00000000},
1446 {0x0000b1f0, 0x00000396},
1447 {0x0000b1f4, 0x00000396},
1448 {0x0000b1f8, 0x00000396},
1449 {0x0000b1fc, 0x00000196},
1450};
1451 699
1452static const u32 ar9580_1p0_radio_core[][2] = { 700static const u32 ar9580_1p0_radio_core[][2] = {
1453 /* Addr allmodes */ 701 /* Addr allmodes */
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index 4866550ddd9..fe39eb4c42a 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -308,6 +308,7 @@ struct ath_rx {
308 u8 defant; 308 u8 defant;
309 u8 rxotherant; 309 u8 rxotherant;
310 u32 *rxlink; 310 u32 *rxlink;
311 u32 num_pkts;
311 unsigned int rxfilter; 312 unsigned int rxfilter;
312 spinlock_t rxbuflock; 313 spinlock_t rxbuflock;
313 struct list_head rxbuf; 314 struct list_head rxbuf;
@@ -326,6 +327,9 @@ int ath_rx_init(struct ath_softc *sc, int nbufs);
326void ath_rx_cleanup(struct ath_softc *sc); 327void ath_rx_cleanup(struct ath_softc *sc);
327int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp); 328int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp);
328struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype); 329struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype);
330void ath_txq_lock(struct ath_softc *sc, struct ath_txq *txq);
331void ath_txq_unlock(struct ath_softc *sc, struct ath_txq *txq);
332void ath_txq_unlock_complete(struct ath_softc *sc, struct ath_txq *txq);
329void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq); 333void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq);
330bool ath_drain_all_txq(struct ath_softc *sc, bool retry_tx); 334bool ath_drain_all_txq(struct ath_softc *sc, bool retry_tx);
331void ath_draintxq(struct ath_softc *sc, 335void ath_draintxq(struct ath_softc *sc,
@@ -415,9 +419,9 @@ int ath_beaconq_config(struct ath_softc *sc);
415void ath_set_beacon(struct ath_softc *sc); 419void ath_set_beacon(struct ath_softc *sc);
416void ath9k_set_beaconing_status(struct ath_softc *sc, bool status); 420void ath9k_set_beaconing_status(struct ath_softc *sc, bool status);
417 421
418/*******/ 422/*******************/
419/* ANI */ 423/* Link Monitoring */
420/*******/ 424/*******************/
421 425
422#define ATH_STA_SHORT_CALINTERVAL 1000 /* 1 second */ 426#define ATH_STA_SHORT_CALINTERVAL 1000 /* 1 second */
423#define ATH_AP_SHORT_CALINTERVAL 100 /* 100 ms */ 427#define ATH_AP_SHORT_CALINTERVAL 100 /* 100 ms */
@@ -428,7 +432,9 @@ void ath9k_set_beaconing_status(struct ath_softc *sc, bool status);
428#define ATH_RESTART_CALINTERVAL 1200000 /* 20 minutes */ 432#define ATH_RESTART_CALINTERVAL 1200000 /* 20 minutes */
429 433
430#define ATH_PAPRD_TIMEOUT 100 /* msecs */ 434#define ATH_PAPRD_TIMEOUT 100 /* msecs */
435#define ATH_PLL_WORK_INTERVAL 100
431 436
437void ath_tx_complete_poll_work(struct work_struct *work);
432void ath_reset_work(struct work_struct *work); 438void ath_reset_work(struct work_struct *work);
433void ath_hw_check(struct work_struct *work); 439void ath_hw_check(struct work_struct *work);
434void ath_hw_pll_work(struct work_struct *work); 440void ath_hw_pll_work(struct work_struct *work);
@@ -437,22 +443,31 @@ void ath_start_rx_poll(struct ath_softc *sc, u8 nbeacon);
437void ath_paprd_calibrate(struct work_struct *work); 443void ath_paprd_calibrate(struct work_struct *work);
438void ath_ani_calibrate(unsigned long data); 444void ath_ani_calibrate(unsigned long data);
439void ath_start_ani(struct ath_common *common); 445void ath_start_ani(struct ath_common *common);
446int ath_update_survey_stats(struct ath_softc *sc);
447void ath_update_survey_nf(struct ath_softc *sc, int channel);
440 448
441/**********/ 449/**********/
442/* BTCOEX */ 450/* BTCOEX */
443/**********/ 451/**********/
444 452
453enum bt_op_flags {
454 BT_OP_PRIORITY_DETECTED,
455 BT_OP_SCAN,
456};
457
445struct ath_btcoex { 458struct ath_btcoex {
446 bool hw_timer_enabled; 459 bool hw_timer_enabled;
447 spinlock_t btcoex_lock; 460 spinlock_t btcoex_lock;
448 struct timer_list period_timer; /* Timer for BT period */ 461 struct timer_list period_timer; /* Timer for BT period */
449 u32 bt_priority_cnt; 462 u32 bt_priority_cnt;
450 unsigned long bt_priority_time; 463 unsigned long bt_priority_time;
464 unsigned long op_flags;
451 int bt_stomp_type; /* Types of BT stomping */ 465 int bt_stomp_type; /* Types of BT stomping */
452 u32 btcoex_no_stomp; /* in usec */ 466 u32 btcoex_no_stomp; /* in usec */
453 u32 btcoex_period; /* in usec */ 467 u32 btcoex_period; /* in usec */
454 u32 btscan_no_stomp; /* in usec */ 468 u32 btscan_no_stomp; /* in usec */
455 u32 duty_cycle; 469 u32 duty_cycle;
470 u32 bt_wait_time;
456 struct ath_gen_timer *no_stomp_timer; /* Timer for no BT stomping */ 471 struct ath_gen_timer *no_stomp_timer; /* Timer for no BT stomping */
457 struct ath_mci_profile mci; 472 struct ath_mci_profile mci;
458}; 473};
@@ -514,8 +529,10 @@ static inline void ath_deinit_leds(struct ath_softc *sc)
514} 529}
515#endif 530#endif
516 531
517 532/*******************************/
518/* Antenna diversity/combining */ 533/* Antenna diversity/combining */
534/*******************************/
535
519#define ATH_ANT_RX_CURRENT_SHIFT 4 536#define ATH_ANT_RX_CURRENT_SHIFT 4
520#define ATH_ANT_RX_MAIN_SHIFT 2 537#define ATH_ANT_RX_MAIN_SHIFT 2
521#define ATH_ANT_RX_MASK 0x3 538#define ATH_ANT_RX_MASK 0x3
@@ -568,6 +585,9 @@ struct ath_ant_comb {
568 unsigned long scan_start_time; 585 unsigned long scan_start_time;
569}; 586};
570 587
588void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs);
589void ath_ant_comb_update(struct ath_softc *sc);
590
571/********************/ 591/********************/
572/* Main driver core */ 592/* Main driver core */
573/********************/ 593/********************/
@@ -585,15 +605,15 @@ struct ath_ant_comb {
585#define ATH_TXPOWER_MAX 100 /* .5 dBm units */ 605#define ATH_TXPOWER_MAX 100 /* .5 dBm units */
586#define ATH_RATE_DUMMY_MARKER 0 606#define ATH_RATE_DUMMY_MARKER 0
587 607
588#define SC_OP_INVALID BIT(0) 608enum sc_op_flags {
589#define SC_OP_BEACONS BIT(1) 609 SC_OP_INVALID,
590#define SC_OP_OFFCHANNEL BIT(2) 610 SC_OP_BEACONS,
591#define SC_OP_RXFLUSH BIT(3) 611 SC_OP_RXFLUSH,
592#define SC_OP_TSF_RESET BIT(4) 612 SC_OP_TSF_RESET,
593#define SC_OP_BT_PRIORITY_DETECTED BIT(5) 613 SC_OP_ANI_RUN,
594#define SC_OP_BT_SCAN BIT(6) 614 SC_OP_PRIM_STA_VIF,
595#define SC_OP_ANI_RUN BIT(7) 615 SC_OP_HW_RESET,
596#define SC_OP_PRIM_STA_VIF BIT(8) 616};
597 617
598/* Powersave flags */ 618/* Powersave flags */
599#define PS_WAIT_FOR_BEACON BIT(0) 619#define PS_WAIT_FOR_BEACON BIT(0)
@@ -639,9 +659,9 @@ struct ath_softc {
639 struct completion paprd_complete; 659 struct completion paprd_complete;
640 660
641 unsigned int hw_busy_count; 661 unsigned int hw_busy_count;
662 unsigned long sc_flags;
642 663
643 u32 intrstatus; 664 u32 intrstatus;
644 u32 sc_flags; /* SC_OP_* */
645 u16 ps_flags; /* PS_* */ 665 u16 ps_flags; /* PS_* */
646 u16 curtxpow; 666 u16 curtxpow;
647 bool ps_enabled; 667 bool ps_enabled;
@@ -679,6 +699,7 @@ struct ath_softc {
679#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT 699#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
680 struct ath_btcoex btcoex; 700 struct ath_btcoex btcoex;
681 struct ath_mci_coex mci_coex; 701 struct ath_mci_coex mci_coex;
702 struct work_struct mci_work;
682#endif 703#endif
683 704
684 struct ath_descdma txsdma; 705 struct ath_descdma txsdma;
@@ -701,6 +722,7 @@ extern int ath9k_modparam_nohwcrypt;
701extern int led_blink; 722extern int led_blink;
702extern bool is_ath9k_unloaded; 723extern bool is_ath9k_unloaded;
703 724
725u8 ath9k_parse_mpdudensity(u8 mpdudensity);
704irqreturn_t ath_isr(int irq, void *dev); 726irqreturn_t ath_isr(int irq, void *dev);
705int ath9k_init_device(u16 devid, struct ath_softc *sc, 727int ath9k_init_device(u16 devid, struct ath_softc *sc,
706 const struct ath_bus_ops *bus_ops); 728 const struct ath_bus_ops *bus_ops);
@@ -737,5 +759,4 @@ void ath9k_calculate_iter_data(struct ieee80211_hw *hw,
737 struct ieee80211_vif *vif, 759 struct ieee80211_vif *vif,
738 struct ath9k_vif_iter_data *iter_data); 760 struct ath9k_vif_iter_data *iter_data);
739 761
740
741#endif /* ATH9K_H */ 762#endif /* ATH9K_H */
diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c
index 11bc55e3d69..40775da8941 100644
--- a/drivers/net/wireless/ath/ath9k/beacon.c
+++ b/drivers/net/wireless/ath/ath9k/beacon.c
@@ -48,7 +48,10 @@ int ath_beaconq_config(struct ath_softc *sc)
48 txq = sc->tx.txq_map[WME_AC_BE]; 48 txq = sc->tx.txq_map[WME_AC_BE];
49 ath9k_hw_get_txq_props(ah, txq->axq_qnum, &qi_be); 49 ath9k_hw_get_txq_props(ah, txq->axq_qnum, &qi_be);
50 qi.tqi_aifs = qi_be.tqi_aifs; 50 qi.tqi_aifs = qi_be.tqi_aifs;
51 qi.tqi_cwmin = 4*qi_be.tqi_cwmin; 51 if (ah->slottime == ATH9K_SLOT_TIME_20)
52 qi.tqi_cwmin = 2*qi_be.tqi_cwmin;
53 else
54 qi.tqi_cwmin = 4*qi_be.tqi_cwmin;
52 qi.tqi_cwmax = qi_be.tqi_cwmax; 55 qi.tqi_cwmax = qi_be.tqi_cwmax;
53 } 56 }
54 57
@@ -387,7 +390,7 @@ void ath_beacon_tasklet(unsigned long data)
387 } else if (sc->beacon.bmisscnt >= BSTUCK_THRESH) { 390 } else if (sc->beacon.bmisscnt >= BSTUCK_THRESH) {
388 ath_dbg(common, BSTUCK, "beacon is officially stuck\n"); 391 ath_dbg(common, BSTUCK, "beacon is officially stuck\n");
389 sc->beacon.bmisscnt = 0; 392 sc->beacon.bmisscnt = 0;
390 sc->sc_flags |= SC_OP_TSF_RESET; 393 set_bit(SC_OP_TSF_RESET, &sc->sc_flags);
391 ieee80211_queue_work(sc->hw, &sc->hw_reset_work); 394 ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
392 } 395 }
393 396
@@ -477,16 +480,16 @@ static void ath9k_beacon_init(struct ath_softc *sc,
477 u32 next_beacon, 480 u32 next_beacon,
478 u32 beacon_period) 481 u32 beacon_period)
479{ 482{
480 if (sc->sc_flags & SC_OP_TSF_RESET) { 483 if (test_bit(SC_OP_TSF_RESET, &sc->sc_flags)) {
481 ath9k_ps_wakeup(sc); 484 ath9k_ps_wakeup(sc);
482 ath9k_hw_reset_tsf(sc->sc_ah); 485 ath9k_hw_reset_tsf(sc->sc_ah);
483 } 486 }
484 487
485 ath9k_hw_beaconinit(sc->sc_ah, next_beacon, beacon_period); 488 ath9k_hw_beaconinit(sc->sc_ah, next_beacon, beacon_period);
486 489
487 if (sc->sc_flags & SC_OP_TSF_RESET) { 490 if (test_bit(SC_OP_TSF_RESET, &sc->sc_flags)) {
488 ath9k_ps_restore(sc); 491 ath9k_ps_restore(sc);
489 sc->sc_flags &= ~SC_OP_TSF_RESET; 492 clear_bit(SC_OP_TSF_RESET, &sc->sc_flags);
490 } 493 }
491} 494}
492 495
@@ -516,7 +519,7 @@ static void ath_beacon_config_ap(struct ath_softc *sc,
516 /* Set the computed AP beacon timers */ 519 /* Set the computed AP beacon timers */
517 520
518 ath9k_hw_disable_interrupts(ah); 521 ath9k_hw_disable_interrupts(ah);
519 sc->sc_flags |= SC_OP_TSF_RESET; 522 set_bit(SC_OP_TSF_RESET, &sc->sc_flags);
520 ath9k_beacon_init(sc, nexttbtt, intval); 523 ath9k_beacon_init(sc, nexttbtt, intval);
521 sc->beacon.bmisscnt = 0; 524 sc->beacon.bmisscnt = 0;
522 ath9k_hw_set_interrupts(ah); 525 ath9k_hw_set_interrupts(ah);
@@ -659,7 +662,7 @@ static void ath_beacon_config_adhoc(struct ath_softc *sc,
659 u32 tsf, intval, nexttbtt; 662 u32 tsf, intval, nexttbtt;
660 663
661 ath9k_reset_beacon_status(sc); 664 ath9k_reset_beacon_status(sc);
662 if (!(sc->sc_flags & SC_OP_BEACONS)) 665 if (!test_bit(SC_OP_BEACONS, &sc->sc_flags))
663 ath9k_hw_settsf64(ah, sc->beacon.bc_tstamp); 666 ath9k_hw_settsf64(ah, sc->beacon.bc_tstamp);
664 667
665 intval = TU_TO_USEC(conf->beacon_interval); 668 intval = TU_TO_USEC(conf->beacon_interval);
@@ -724,7 +727,7 @@ static bool ath9k_allow_beacon_config(struct ath_softc *sc,
724 */ 727 */
725 if ((sc->sc_ah->opmode == NL80211_IFTYPE_STATION) && 728 if ((sc->sc_ah->opmode == NL80211_IFTYPE_STATION) &&
726 (vif->type == NL80211_IFTYPE_STATION) && 729 (vif->type == NL80211_IFTYPE_STATION) &&
727 (sc->sc_flags & SC_OP_BEACONS) && 730 test_bit(SC_OP_BEACONS, &sc->sc_flags) &&
728 !avp->primary_sta_vif) { 731 !avp->primary_sta_vif) {
729 ath_dbg(common, CONFIG, 732 ath_dbg(common, CONFIG,
730 "Beacon already configured for a station interface\n"); 733 "Beacon already configured for a station interface\n");
@@ -810,7 +813,7 @@ void ath_set_beacon(struct ath_softc *sc)
810 return; 813 return;
811 } 814 }
812 815
813 sc->sc_flags |= SC_OP_BEACONS; 816 set_bit(SC_OP_BEACONS, &sc->sc_flags);
814} 817}
815 818
816void ath9k_set_beaconing_status(struct ath_softc *sc, bool status) 819void ath9k_set_beaconing_status(struct ath_softc *sc, bool status)
@@ -818,7 +821,7 @@ void ath9k_set_beaconing_status(struct ath_softc *sc, bool status)
818 struct ath_hw *ah = sc->sc_ah; 821 struct ath_hw *ah = sc->sc_ah;
819 822
820 if (!ath_has_valid_bslot(sc)) { 823 if (!ath_has_valid_bslot(sc)) {
821 sc->sc_flags &= ~SC_OP_BEACONS; 824 clear_bit(SC_OP_BEACONS, &sc->sc_flags);
822 return; 825 return;
823 } 826 }
824 827
diff --git a/drivers/net/wireless/ath/ath9k/btcoex.c b/drivers/net/wireless/ath/ath9k/btcoex.c
index 1ca6da80d4a..acd437384fe 100644
--- a/drivers/net/wireless/ath/ath9k/btcoex.c
+++ b/drivers/net/wireless/ath/ath9k/btcoex.c
@@ -336,10 +336,16 @@ static void ar9003_btcoex_bt_stomp(struct ath_hw *ah,
336 enum ath_stomp_type stomp_type) 336 enum ath_stomp_type stomp_type)
337{ 337{
338 struct ath_btcoex_hw *btcoex = &ah->btcoex_hw; 338 struct ath_btcoex_hw *btcoex = &ah->btcoex_hw;
339 const u32 *weight = AR_SREV_9462(ah) ? ar9003_wlan_weights[stomp_type] : 339 const u32 *weight = ar9003_wlan_weights[stomp_type];
340 ar9462_wlan_weights[stomp_type];
341 int i; 340 int i;
342 341
342 if (AR_SREV_9462(ah)) {
343 if ((stomp_type == ATH_BTCOEX_STOMP_LOW) &&
344 btcoex->mci.stomp_ftp)
345 stomp_type = ATH_BTCOEX_STOMP_LOW_FTP;
346 weight = ar9462_wlan_weights[stomp_type];
347 }
348
343 for (i = 0; i < AR9300_NUM_WLAN_WEIGHTS; i++) { 349 for (i = 0; i < AR9300_NUM_WLAN_WEIGHTS; i++) {
344 btcoex->bt_weight[i] = AR9300_BT_WGHT; 350 btcoex->bt_weight[i] = AR9300_BT_WGHT;
345 btcoex->wlan_weight[i] = weight[i]; 351 btcoex->wlan_weight[i] = weight[i];
diff --git a/drivers/net/wireless/ath/ath9k/btcoex.h b/drivers/net/wireless/ath/ath9k/btcoex.h
index 3a1e1cfabd5..20092f98658 100644
--- a/drivers/net/wireless/ath/ath9k/btcoex.h
+++ b/drivers/net/wireless/ath/ath9k/btcoex.h
@@ -36,6 +36,9 @@
36#define ATH_BT_CNT_THRESHOLD 3 36#define ATH_BT_CNT_THRESHOLD 3
37#define ATH_BT_CNT_SCAN_THRESHOLD 15 37#define ATH_BT_CNT_SCAN_THRESHOLD 15
38 38
39#define ATH_BTCOEX_RX_WAIT_TIME 100
40#define ATH_BTCOEX_STOMP_FTP_THRESH 5
41
39#define AR9300_NUM_BT_WEIGHTS 4 42#define AR9300_NUM_BT_WEIGHTS 4
40#define AR9300_NUM_WLAN_WEIGHTS 4 43#define AR9300_NUM_WLAN_WEIGHTS 4
41/* Defines the BT AR_BT_COEX_WGHT used */ 44/* Defines the BT AR_BT_COEX_WGHT used */
@@ -80,6 +83,7 @@ struct ath9k_hw_mci {
80 u8 bt_ver_major; 83 u8 bt_ver_major;
81 u8 bt_ver_minor; 84 u8 bt_ver_minor;
82 u8 bt_state; 85 u8 bt_state;
86 u8 stomp_ftp;
83}; 87};
84 88
85struct ath_btcoex_hw { 89struct ath_btcoex_hw {
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index fde700c4e49..5c3192ffc19 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -205,10 +205,10 @@ static ssize_t write_file_disable_ani(struct file *file,
205 common->disable_ani = !!disable_ani; 205 common->disable_ani = !!disable_ani;
206 206
207 if (disable_ani) { 207 if (disable_ani) {
208 sc->sc_flags &= ~SC_OP_ANI_RUN; 208 clear_bit(SC_OP_ANI_RUN, &sc->sc_flags);
209 del_timer_sync(&common->ani.timer); 209 del_timer_sync(&common->ani.timer);
210 } else { 210 } else {
211 sc->sc_flags |= SC_OP_ANI_RUN; 211 set_bit(SC_OP_ANI_RUN, &sc->sc_flags);
212 ath_start_ani(common); 212 ath_start_ani(common);
213 } 213 }
214 214
@@ -348,8 +348,6 @@ void ath_debug_stat_interrupt(struct ath_softc *sc, enum ath9k_int status)
348 sc->debug.stats.istats.txok++; 348 sc->debug.stats.istats.txok++;
349 if (status & ATH9K_INT_TXURN) 349 if (status & ATH9K_INT_TXURN)
350 sc->debug.stats.istats.txurn++; 350 sc->debug.stats.istats.txurn++;
351 if (status & ATH9K_INT_MIB)
352 sc->debug.stats.istats.mib++;
353 if (status & ATH9K_INT_RXPHY) 351 if (status & ATH9K_INT_RXPHY)
354 sc->debug.stats.istats.rxphyerr++; 352 sc->debug.stats.istats.rxphyerr++;
355 if (status & ATH9K_INT_RXKCM) 353 if (status & ATH9K_INT_RXKCM)
@@ -374,6 +372,8 @@ void ath_debug_stat_interrupt(struct ath_softc *sc, enum ath9k_int status)
374 sc->debug.stats.istats.dtim++; 372 sc->debug.stats.istats.dtim++;
375 if (status & ATH9K_INT_TSFOOR) 373 if (status & ATH9K_INT_TSFOOR)
376 sc->debug.stats.istats.tsfoor++; 374 sc->debug.stats.istats.tsfoor++;
375 if (status & ATH9K_INT_MCI)
376 sc->debug.stats.istats.mci++;
377} 377}
378 378
379static ssize_t read_file_interrupt(struct file *file, char __user *user_buf, 379static ssize_t read_file_interrupt(struct file *file, char __user *user_buf,
@@ -418,6 +418,7 @@ static ssize_t read_file_interrupt(struct file *file, char __user *user_buf,
418 PR_IS("DTIMSYNC", dtimsync); 418 PR_IS("DTIMSYNC", dtimsync);
419 PR_IS("DTIM", dtim); 419 PR_IS("DTIM", dtim);
420 PR_IS("TSFOOR", tsfoor); 420 PR_IS("TSFOOR", tsfoor);
421 PR_IS("MCI", mci);
421 PR_IS("TOTAL", total); 422 PR_IS("TOTAL", total);
422 423
423 len += snprintf(buf + len, mxlen - len, 424 len += snprintf(buf + len, mxlen - len,
@@ -1318,7 +1319,7 @@ static int open_file_bb_mac_samps(struct inode *inode, struct file *file)
1318 u8 chainmask = (ah->rxchainmask << 3) | ah->rxchainmask; 1319 u8 chainmask = (ah->rxchainmask << 3) | ah->rxchainmask;
1319 u8 nread; 1320 u8 nread;
1320 1321
1321 if (sc->sc_flags & SC_OP_INVALID) 1322 if (test_bit(SC_OP_INVALID, &sc->sc_flags))
1322 return -EAGAIN; 1323 return -EAGAIN;
1323 1324
1324 buf = vmalloc(size); 1325 buf = vmalloc(size);
diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h
index c34da09d910..d0f851cea43 100644
--- a/drivers/net/wireless/ath/ath9k/debug.h
+++ b/drivers/net/wireless/ath/ath9k/debug.h
@@ -86,6 +86,7 @@ struct ath_interrupt_stats {
86 u32 dtim; 86 u32 dtim;
87 u32 bb_watchdog; 87 u32 bb_watchdog;
88 u32 tsfoor; 88 u32 tsfoor;
89 u32 mci;
89 90
90 /* Sync-cause stats */ 91 /* Sync-cause stats */
91 u32 sync_cause_all; 92 u32 sync_cause_all;
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_4k.c b/drivers/net/wireless/ath/ath9k/eeprom_4k.c
index 4322ac80c20..7d075105a85 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_4k.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_4k.c
@@ -135,7 +135,7 @@ static u32 ath9k_hw_4k_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
135 if (!dump_base_hdr) { 135 if (!dump_base_hdr) {
136 len += snprintf(buf + len, size - len, 136 len += snprintf(buf + len, size - len,
137 "%20s :\n", "2GHz modal Header"); 137 "%20s :\n", "2GHz modal Header");
138 len += ath9k_dump_4k_modal_eeprom(buf, len, size, 138 len = ath9k_dump_4k_modal_eeprom(buf, len, size,
139 &eep->modalHeader); 139 &eep->modalHeader);
140 goto out; 140 goto out;
141 } 141 }
@@ -188,8 +188,7 @@ static int ath9k_hw_4k_check_eeprom(struct ath_hw *ah)
188{ 188{
189#define EEPROM_4K_SIZE (sizeof(struct ar5416_eeprom_4k) / sizeof(u16)) 189#define EEPROM_4K_SIZE (sizeof(struct ar5416_eeprom_4k) / sizeof(u16))
190 struct ath_common *common = ath9k_hw_common(ah); 190 struct ath_common *common = ath9k_hw_common(ah);
191 struct ar5416_eeprom_4k *eep = 191 struct ar5416_eeprom_4k *eep = &ah->eeprom.map4k;
192 (struct ar5416_eeprom_4k *) &ah->eeprom.map4k;
193 u16 *eepdata, temp, magic, magic2; 192 u16 *eepdata, temp, magic, magic2;
194 u32 sum = 0, el; 193 u32 sum = 0, el;
195 bool need_swap = false; 194 bool need_swap = false;
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_9287.c b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
index aa614767adf..cd742fb944c 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_9287.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
@@ -132,7 +132,7 @@ static u32 ath9k_hw_ar9287_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
132 if (!dump_base_hdr) { 132 if (!dump_base_hdr) {
133 len += snprintf(buf + len, size - len, 133 len += snprintf(buf + len, size - len,
134 "%20s :\n", "2GHz modal Header"); 134 "%20s :\n", "2GHz modal Header");
135 len += ar9287_dump_modal_eeprom(buf, len, size, 135 len = ar9287_dump_modal_eeprom(buf, len, size,
136 &eep->modalHeader); 136 &eep->modalHeader);
137 goto out; 137 goto out;
138 } 138 }
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_def.c b/drivers/net/wireless/ath/ath9k/eeprom_def.c
index b5fba8b18b8..a8ac30a0072 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_def.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_def.c
@@ -211,11 +211,11 @@ static u32 ath9k_hw_def_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
211 if (!dump_base_hdr) { 211 if (!dump_base_hdr) {
212 len += snprintf(buf + len, size - len, 212 len += snprintf(buf + len, size - len,
213 "%20s :\n", "2GHz modal Header"); 213 "%20s :\n", "2GHz modal Header");
214 len += ath9k_def_dump_modal_eeprom(buf, len, size, 214 len = ath9k_def_dump_modal_eeprom(buf, len, size,
215 &eep->modalHeader[0]); 215 &eep->modalHeader[0]);
216 len += snprintf(buf + len, size - len, 216 len += snprintf(buf + len, size - len,
217 "%20s :\n", "5GHz modal Header"); 217 "%20s :\n", "5GHz modal Header");
218 len += ath9k_def_dump_modal_eeprom(buf, len, size, 218 len = ath9k_def_dump_modal_eeprom(buf, len, size,
219 &eep->modalHeader[1]); 219 &eep->modalHeader[1]);
220 goto out; 220 goto out;
221 } 221 }
@@ -264,8 +264,7 @@ static u32 ath9k_hw_def_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
264 264
265static int ath9k_hw_def_check_eeprom(struct ath_hw *ah) 265static int ath9k_hw_def_check_eeprom(struct ath_hw *ah)
266{ 266{
267 struct ar5416_eeprom_def *eep = 267 struct ar5416_eeprom_def *eep = &ah->eeprom.def;
268 (struct ar5416_eeprom_def *) &ah->eeprom.def;
269 struct ath_common *common = ath9k_hw_common(ah); 268 struct ath_common *common = ath9k_hw_common(ah);
270 u16 *eepdata, temp, magic, magic2; 269 u16 *eepdata, temp, magic, magic2;
271 u32 sum = 0, el; 270 u32 sum = 0, el;
diff --git a/drivers/net/wireless/ath/ath9k/gpio.c b/drivers/net/wireless/ath/ath9k/gpio.c
index 281a9af0f1b..9ae6a4d9769 100644
--- a/drivers/net/wireless/ath/ath9k/gpio.c
+++ b/drivers/net/wireless/ath/ath9k/gpio.c
@@ -132,17 +132,18 @@ static void ath_detect_bt_priority(struct ath_softc *sc)
132 132
133 if (time_after(jiffies, btcoex->bt_priority_time + 133 if (time_after(jiffies, btcoex->bt_priority_time +
134 msecs_to_jiffies(ATH_BT_PRIORITY_TIME_THRESHOLD))) { 134 msecs_to_jiffies(ATH_BT_PRIORITY_TIME_THRESHOLD))) {
135 sc->sc_flags &= ~(SC_OP_BT_PRIORITY_DETECTED | SC_OP_BT_SCAN); 135 clear_bit(BT_OP_PRIORITY_DETECTED, &btcoex->op_flags);
136 clear_bit(BT_OP_SCAN, &btcoex->op_flags);
136 /* Detect if colocated bt started scanning */ 137 /* Detect if colocated bt started scanning */
137 if (btcoex->bt_priority_cnt >= ATH_BT_CNT_SCAN_THRESHOLD) { 138 if (btcoex->bt_priority_cnt >= ATH_BT_CNT_SCAN_THRESHOLD) {
138 ath_dbg(ath9k_hw_common(sc->sc_ah), BTCOEX, 139 ath_dbg(ath9k_hw_common(sc->sc_ah), BTCOEX,
139 "BT scan detected\n"); 140 "BT scan detected\n");
140 sc->sc_flags |= (SC_OP_BT_SCAN | 141 set_bit(BT_OP_PRIORITY_DETECTED, &btcoex->op_flags);
141 SC_OP_BT_PRIORITY_DETECTED); 142 set_bit(BT_OP_SCAN, &btcoex->op_flags);
142 } else if (btcoex->bt_priority_cnt >= ATH_BT_CNT_THRESHOLD) { 143 } else if (btcoex->bt_priority_cnt >= ATH_BT_CNT_THRESHOLD) {
143 ath_dbg(ath9k_hw_common(sc->sc_ah), BTCOEX, 144 ath_dbg(ath9k_hw_common(sc->sc_ah), BTCOEX,
144 "BT priority traffic detected\n"); 145 "BT priority traffic detected\n");
145 sc->sc_flags |= SC_OP_BT_PRIORITY_DETECTED; 146 set_bit(BT_OP_PRIORITY_DETECTED, &btcoex->op_flags);
146 } 147 }
147 148
148 btcoex->bt_priority_cnt = 0; 149 btcoex->bt_priority_cnt = 0;
@@ -190,13 +191,26 @@ static void ath_btcoex_period_timer(unsigned long data)
190 struct ath_softc *sc = (struct ath_softc *) data; 191 struct ath_softc *sc = (struct ath_softc *) data;
191 struct ath_hw *ah = sc->sc_ah; 192 struct ath_hw *ah = sc->sc_ah;
192 struct ath_btcoex *btcoex = &sc->btcoex; 193 struct ath_btcoex *btcoex = &sc->btcoex;
194 struct ath_mci_profile *mci = &btcoex->mci;
193 u32 timer_period; 195 u32 timer_period;
194 bool is_btscan; 196 bool is_btscan;
195 197
196 ath9k_ps_wakeup(sc); 198 ath9k_ps_wakeup(sc);
197 if (!(ah->caps.hw_caps & ATH9K_HW_CAP_MCI)) 199 if (!(ah->caps.hw_caps & ATH9K_HW_CAP_MCI))
198 ath_detect_bt_priority(sc); 200 ath_detect_bt_priority(sc);
199 is_btscan = sc->sc_flags & SC_OP_BT_SCAN; 201 is_btscan = test_bit(BT_OP_SCAN, &btcoex->op_flags);
202
203 btcoex->bt_wait_time += btcoex->btcoex_period;
204 if (btcoex->bt_wait_time > ATH_BTCOEX_RX_WAIT_TIME) {
205 if (ar9003_mci_state(ah, MCI_STATE_NEED_FTP_STOMP) &&
206 (mci->num_pan || mci->num_other_acl))
207 ah->btcoex_hw.mci.stomp_ftp =
208 (sc->rx.num_pkts < ATH_BTCOEX_STOMP_FTP_THRESH);
209 else
210 ah->btcoex_hw.mci.stomp_ftp = false;
211 btcoex->bt_wait_time = 0;
212 sc->rx.num_pkts = 0;
213 }
200 214
201 spin_lock_bh(&btcoex->btcoex_lock); 215 spin_lock_bh(&btcoex->btcoex_lock);
202 216
@@ -218,9 +232,8 @@ static void ath_btcoex_period_timer(unsigned long data)
218 } 232 }
219 233
220 ath9k_ps_restore(sc); 234 ath9k_ps_restore(sc);
221 timer_period = btcoex->btcoex_period / 1000; 235 timer_period = btcoex->btcoex_period;
222 mod_timer(&btcoex->period_timer, jiffies + 236 mod_timer(&btcoex->period_timer, jiffies + msecs_to_jiffies(timer_period));
223 msecs_to_jiffies(timer_period));
224} 237}
225 238
226/* 239/*
@@ -233,14 +246,14 @@ static void ath_btcoex_no_stomp_timer(void *arg)
233 struct ath_hw *ah = sc->sc_ah; 246 struct ath_hw *ah = sc->sc_ah;
234 struct ath_btcoex *btcoex = &sc->btcoex; 247 struct ath_btcoex *btcoex = &sc->btcoex;
235 struct ath_common *common = ath9k_hw_common(ah); 248 struct ath_common *common = ath9k_hw_common(ah);
236 bool is_btscan = sc->sc_flags & SC_OP_BT_SCAN;
237 249
238 ath_dbg(common, BTCOEX, "no stomp timer running\n"); 250 ath_dbg(common, BTCOEX, "no stomp timer running\n");
239 251
240 ath9k_ps_wakeup(sc); 252 ath9k_ps_wakeup(sc);
241 spin_lock_bh(&btcoex->btcoex_lock); 253 spin_lock_bh(&btcoex->btcoex_lock);
242 254
243 if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_LOW || is_btscan) 255 if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_LOW ||
256 test_bit(BT_OP_SCAN, &btcoex->op_flags))
244 ath9k_hw_btcoex_bt_stomp(ah, ATH_BTCOEX_STOMP_NONE); 257 ath9k_hw_btcoex_bt_stomp(ah, ATH_BTCOEX_STOMP_NONE);
245 else if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_ALL) 258 else if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_ALL)
246 ath9k_hw_btcoex_bt_stomp(ah, ATH_BTCOEX_STOMP_LOW); 259 ath9k_hw_btcoex_bt_stomp(ah, ATH_BTCOEX_STOMP_LOW);
@@ -254,10 +267,10 @@ static int ath_init_btcoex_timer(struct ath_softc *sc)
254{ 267{
255 struct ath_btcoex *btcoex = &sc->btcoex; 268 struct ath_btcoex *btcoex = &sc->btcoex;
256 269
257 btcoex->btcoex_period = ATH_BTCOEX_DEF_BT_PERIOD * 1000; 270 btcoex->btcoex_period = ATH_BTCOEX_DEF_BT_PERIOD;
258 btcoex->btcoex_no_stomp = (100 - ATH_BTCOEX_DEF_DUTY_CYCLE) * 271 btcoex->btcoex_no_stomp = (100 - ATH_BTCOEX_DEF_DUTY_CYCLE) * 1000 *
259 btcoex->btcoex_period / 100; 272 btcoex->btcoex_period / 100;
260 btcoex->btscan_no_stomp = (100 - ATH_BTCOEX_BTSCAN_DUTY_CYCLE) * 273 btcoex->btscan_no_stomp = (100 - ATH_BTCOEX_BTSCAN_DUTY_CYCLE) * 1000 *
261 btcoex->btcoex_period / 100; 274 btcoex->btcoex_period / 100;
262 275
263 setup_timer(&btcoex->period_timer, ath_btcoex_period_timer, 276 setup_timer(&btcoex->period_timer, ath_btcoex_period_timer,
@@ -292,7 +305,7 @@ void ath9k_btcoex_timer_resume(struct ath_softc *sc)
292 305
293 btcoex->bt_priority_cnt = 0; 306 btcoex->bt_priority_cnt = 0;
294 btcoex->bt_priority_time = jiffies; 307 btcoex->bt_priority_time = jiffies;
295 sc->sc_flags &= ~(SC_OP_BT_PRIORITY_DETECTED | SC_OP_BT_SCAN); 308 btcoex->op_flags &= ~(BT_OP_PRIORITY_DETECTED | BT_OP_SCAN);
296 309
297 mod_timer(&btcoex->period_timer, jiffies); 310 mod_timer(&btcoex->period_timer, jiffies);
298} 311}
@@ -316,12 +329,13 @@ void ath9k_btcoex_timer_pause(struct ath_softc *sc)
316 329
317u16 ath9k_btcoex_aggr_limit(struct ath_softc *sc, u32 max_4ms_framelen) 330u16 ath9k_btcoex_aggr_limit(struct ath_softc *sc, u32 max_4ms_framelen)
318{ 331{
332 struct ath_btcoex *btcoex = &sc->btcoex;
319 struct ath_mci_profile *mci = &sc->btcoex.mci; 333 struct ath_mci_profile *mci = &sc->btcoex.mci;
320 u16 aggr_limit = 0; 334 u16 aggr_limit = 0;
321 335
322 if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_MCI) && mci->aggr_limit) 336 if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_MCI) && mci->aggr_limit)
323 aggr_limit = (max_4ms_framelen * mci->aggr_limit) >> 4; 337 aggr_limit = (max_4ms_framelen * mci->aggr_limit) >> 4;
324 else if (sc->sc_flags & SC_OP_BT_PRIORITY_DETECTED) 338 else if (test_bit(BT_OP_PRIORITY_DETECTED, &btcoex->op_flags))
325 aggr_limit = min((max_4ms_framelen * 3) / 8, 339 aggr_limit = min((max_4ms_framelen * 3) / 8,
326 (u32)ATH_AMPDU_LIMIT_MAX); 340 (u32)ATH_AMPDU_LIMIT_MAX);
327 341
@@ -402,7 +416,7 @@ int ath9k_init_btcoex(struct ath_softc *sc)
402 txq = sc->tx.txq_map[WME_AC_BE]; 416 txq = sc->tx.txq_map[WME_AC_BE];
403 ath9k_hw_init_btcoex_hw(sc->sc_ah, txq->axq_qnum); 417 ath9k_hw_init_btcoex_hw(sc->sc_ah, txq->axq_qnum);
404 sc->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW; 418 sc->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
405 if (AR_SREV_9462(ah)) { 419 if (ath9k_hw_mci_is_enabled(ah)) {
406 sc->btcoex.duty_cycle = ATH_BTCOEX_DEF_DUTY_CYCLE; 420 sc->btcoex.duty_cycle = ATH_BTCOEX_DEF_DUTY_CYCLE;
407 INIT_LIST_HEAD(&sc->btcoex.mci.info); 421 INIT_LIST_HEAD(&sc->btcoex.mci.info);
408 422
diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h
index 135795257d9..936e920fb88 100644
--- a/drivers/net/wireless/ath/ath9k/htc.h
+++ b/drivers/net/wireless/ath/ath9k/htc.h
@@ -453,7 +453,6 @@ struct ath9k_htc_priv {
453 u8 num_sta_assoc_vif; 453 u8 num_sta_assoc_vif;
454 u8 num_ap_vif; 454 u8 num_ap_vif;
455 455
456 u16 op_flags;
457 u16 curtxpow; 456 u16 curtxpow;
458 u16 txpowlimit; 457 u16 txpowlimit;
459 u16 nvifs; 458 u16 nvifs;
@@ -461,6 +460,7 @@ struct ath9k_htc_priv {
461 bool rearm_ani; 460 bool rearm_ani;
462 bool reconfig_beacon; 461 bool reconfig_beacon;
463 unsigned int rxfilter; 462 unsigned int rxfilter;
463 unsigned long op_flags;
464 464
465 struct ath9k_hw_cal_data caldata; 465 struct ath9k_hw_cal_data caldata;
466 struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS]; 466 struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS];
@@ -572,8 +572,6 @@ bool ath9k_htc_setpower(struct ath9k_htc_priv *priv,
572 572
573void ath9k_start_rfkill_poll(struct ath9k_htc_priv *priv); 573void ath9k_start_rfkill_poll(struct ath9k_htc_priv *priv);
574void ath9k_htc_rfkill_poll_state(struct ieee80211_hw *hw); 574void ath9k_htc_rfkill_poll_state(struct ieee80211_hw *hw);
575void ath9k_htc_radio_enable(struct ieee80211_hw *hw);
576void ath9k_htc_radio_disable(struct ieee80211_hw *hw);
577 575
578#ifdef CONFIG_MAC80211_LEDS 576#ifdef CONFIG_MAC80211_LEDS
579void ath9k_init_leds(struct ath9k_htc_priv *priv); 577void ath9k_init_leds(struct ath9k_htc_priv *priv);
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
index 2eadffb7971..77d541feb91 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
@@ -207,9 +207,9 @@ static void ath9k_htc_beacon_config_ap(struct ath9k_htc_priv *priv,
207 else 207 else
208 priv->ah->config.sw_beacon_response_time = MIN_SWBA_RESPONSE; 208 priv->ah->config.sw_beacon_response_time = MIN_SWBA_RESPONSE;
209 209
210 if (priv->op_flags & OP_TSF_RESET) { 210 if (test_bit(OP_TSF_RESET, &priv->op_flags)) {
211 ath9k_hw_reset_tsf(priv->ah); 211 ath9k_hw_reset_tsf(priv->ah);
212 priv->op_flags &= ~OP_TSF_RESET; 212 clear_bit(OP_TSF_RESET, &priv->op_flags);
213 } else { 213 } else {
214 /* 214 /*
215 * Pull nexttbtt forward to reflect the current TSF. 215 * Pull nexttbtt forward to reflect the current TSF.
@@ -221,7 +221,7 @@ static void ath9k_htc_beacon_config_ap(struct ath9k_htc_priv *priv,
221 } while (nexttbtt < tsftu); 221 } while (nexttbtt < tsftu);
222 } 222 }
223 223
224 if (priv->op_flags & OP_ENABLE_BEACON) 224 if (test_bit(OP_ENABLE_BEACON, &priv->op_flags))
225 imask |= ATH9K_INT_SWBA; 225 imask |= ATH9K_INT_SWBA;
226 226
227 ath_dbg(common, CONFIG, 227 ath_dbg(common, CONFIG,
@@ -269,7 +269,7 @@ static void ath9k_htc_beacon_config_adhoc(struct ath9k_htc_priv *priv,
269 else 269 else
270 priv->ah->config.sw_beacon_response_time = MIN_SWBA_RESPONSE; 270 priv->ah->config.sw_beacon_response_time = MIN_SWBA_RESPONSE;
271 271
272 if (priv->op_flags & OP_ENABLE_BEACON) 272 if (test_bit(OP_ENABLE_BEACON, &priv->op_flags))
273 imask |= ATH9K_INT_SWBA; 273 imask |= ATH9K_INT_SWBA;
274 274
275 ath_dbg(common, CONFIG, 275 ath_dbg(common, CONFIG,
@@ -365,7 +365,7 @@ static void ath9k_htc_send_beacon(struct ath9k_htc_priv *priv,
365 vif = priv->cur_beacon_conf.bslot[slot]; 365 vif = priv->cur_beacon_conf.bslot[slot];
366 avp = (struct ath9k_htc_vif *)vif->drv_priv; 366 avp = (struct ath9k_htc_vif *)vif->drv_priv;
367 367
368 if (unlikely(priv->op_flags & OP_SCANNING)) { 368 if (unlikely(test_bit(OP_SCANNING, &priv->op_flags))) {
369 spin_unlock_bh(&priv->beacon_lock); 369 spin_unlock_bh(&priv->beacon_lock);
370 return; 370 return;
371 } 371 }
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c b/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c
index 1c10e2e5c23..07df279c8d4 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c
@@ -37,17 +37,18 @@ static void ath_detect_bt_priority(struct ath9k_htc_priv *priv)
37 37
38 if (time_after(jiffies, btcoex->bt_priority_time + 38 if (time_after(jiffies, btcoex->bt_priority_time +
39 msecs_to_jiffies(ATH_BT_PRIORITY_TIME_THRESHOLD))) { 39 msecs_to_jiffies(ATH_BT_PRIORITY_TIME_THRESHOLD))) {
40 priv->op_flags &= ~(OP_BT_PRIORITY_DETECTED | OP_BT_SCAN); 40 clear_bit(OP_BT_PRIORITY_DETECTED, &priv->op_flags);
41 clear_bit(OP_BT_SCAN, &priv->op_flags);
41 /* Detect if colocated bt started scanning */ 42 /* Detect if colocated bt started scanning */
42 if (btcoex->bt_priority_cnt >= ATH_BT_CNT_SCAN_THRESHOLD) { 43 if (btcoex->bt_priority_cnt >= ATH_BT_CNT_SCAN_THRESHOLD) {
43 ath_dbg(ath9k_hw_common(ah), BTCOEX, 44 ath_dbg(ath9k_hw_common(ah), BTCOEX,
44 "BT scan detected\n"); 45 "BT scan detected\n");
45 priv->op_flags |= (OP_BT_SCAN | 46 set_bit(OP_BT_PRIORITY_DETECTED, &priv->op_flags);
46 OP_BT_PRIORITY_DETECTED); 47 set_bit(OP_BT_SCAN, &priv->op_flags);
47 } else if (btcoex->bt_priority_cnt >= ATH_BT_CNT_THRESHOLD) { 48 } else if (btcoex->bt_priority_cnt >= ATH_BT_CNT_THRESHOLD) {
48 ath_dbg(ath9k_hw_common(ah), BTCOEX, 49 ath_dbg(ath9k_hw_common(ah), BTCOEX,
49 "BT priority traffic detected\n"); 50 "BT priority traffic detected\n");
50 priv->op_flags |= OP_BT_PRIORITY_DETECTED; 51 set_bit(OP_BT_PRIORITY_DETECTED, &priv->op_flags);
51 } 52 }
52 53
53 btcoex->bt_priority_cnt = 0; 54 btcoex->bt_priority_cnt = 0;
@@ -67,26 +68,23 @@ static void ath_btcoex_period_work(struct work_struct *work)
67 struct ath_btcoex *btcoex = &priv->btcoex; 68 struct ath_btcoex *btcoex = &priv->btcoex;
68 struct ath_common *common = ath9k_hw_common(priv->ah); 69 struct ath_common *common = ath9k_hw_common(priv->ah);
69 u32 timer_period; 70 u32 timer_period;
70 bool is_btscan;
71 int ret; 71 int ret;
72 72
73 ath_detect_bt_priority(priv); 73 ath_detect_bt_priority(priv);
74 74
75 is_btscan = !!(priv->op_flags & OP_BT_SCAN);
76
77 ret = ath9k_htc_update_cap_target(priv, 75 ret = ath9k_htc_update_cap_target(priv,
78 !!(priv->op_flags & OP_BT_PRIORITY_DETECTED)); 76 test_bit(OP_BT_PRIORITY_DETECTED, &priv->op_flags));
79 if (ret) { 77 if (ret) {
80 ath_err(common, "Unable to set BTCOEX parameters\n"); 78 ath_err(common, "Unable to set BTCOEX parameters\n");
81 return; 79 return;
82 } 80 }
83 81
84 ath9k_hw_btcoex_bt_stomp(priv->ah, is_btscan ? ATH_BTCOEX_STOMP_ALL : 82 ath9k_hw_btcoex_bt_stomp(priv->ah, test_bit(OP_BT_SCAN, &priv->op_flags) ?
85 btcoex->bt_stomp_type); 83 ATH_BTCOEX_STOMP_ALL : btcoex->bt_stomp_type);
86 84
87 ath9k_hw_btcoex_enable(priv->ah); 85 ath9k_hw_btcoex_enable(priv->ah);
88 timer_period = is_btscan ? btcoex->btscan_no_stomp : 86 timer_period = test_bit(OP_BT_SCAN, &priv->op_flags) ?
89 btcoex->btcoex_no_stomp; 87 btcoex->btscan_no_stomp : btcoex->btcoex_no_stomp;
90 ieee80211_queue_delayed_work(priv->hw, &priv->duty_cycle_work, 88 ieee80211_queue_delayed_work(priv->hw, &priv->duty_cycle_work,
91 msecs_to_jiffies(timer_period)); 89 msecs_to_jiffies(timer_period));
92 ieee80211_queue_delayed_work(priv->hw, &priv->coex_period_work, 90 ieee80211_queue_delayed_work(priv->hw, &priv->coex_period_work,
@@ -104,14 +102,15 @@ static void ath_btcoex_duty_cycle_work(struct work_struct *work)
104 struct ath_hw *ah = priv->ah; 102 struct ath_hw *ah = priv->ah;
105 struct ath_btcoex *btcoex = &priv->btcoex; 103 struct ath_btcoex *btcoex = &priv->btcoex;
106 struct ath_common *common = ath9k_hw_common(ah); 104 struct ath_common *common = ath9k_hw_common(ah);
107 bool is_btscan = priv->op_flags & OP_BT_SCAN;
108 105
109 ath_dbg(common, BTCOEX, "time slice work for bt and wlan\n"); 106 ath_dbg(common, BTCOEX, "time slice work for bt and wlan\n");
110 107
111 if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_LOW || is_btscan) 108 if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_LOW ||
109 test_bit(OP_BT_SCAN, &priv->op_flags))
112 ath9k_hw_btcoex_bt_stomp(ah, ATH_BTCOEX_STOMP_NONE); 110 ath9k_hw_btcoex_bt_stomp(ah, ATH_BTCOEX_STOMP_NONE);
113 else if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_ALL) 111 else if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_ALL)
114 ath9k_hw_btcoex_bt_stomp(ah, ATH_BTCOEX_STOMP_LOW); 112 ath9k_hw_btcoex_bt_stomp(ah, ATH_BTCOEX_STOMP_LOW);
113
115 ath9k_hw_btcoex_enable(priv->ah); 114 ath9k_hw_btcoex_enable(priv->ah);
116} 115}
117 116
@@ -141,7 +140,8 @@ static void ath_htc_resume_btcoex_work(struct ath9k_htc_priv *priv)
141 140
142 btcoex->bt_priority_cnt = 0; 141 btcoex->bt_priority_cnt = 0;
143 btcoex->bt_priority_time = jiffies; 142 btcoex->bt_priority_time = jiffies;
144 priv->op_flags &= ~(OP_BT_PRIORITY_DETECTED | OP_BT_SCAN); 143 clear_bit(OP_BT_PRIORITY_DETECTED, &priv->op_flags);
144 clear_bit(OP_BT_SCAN, &priv->op_flags);
145 ieee80211_queue_delayed_work(priv->hw, &priv->coex_period_work, 0); 145 ieee80211_queue_delayed_work(priv->hw, &priv->coex_period_work, 0);
146} 146}
147 147
@@ -310,95 +310,3 @@ void ath9k_start_rfkill_poll(struct ath9k_htc_priv *priv)
310 if (priv->ah->caps.hw_caps & ATH9K_HW_CAP_RFSILENT) 310 if (priv->ah->caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
311 wiphy_rfkill_start_polling(priv->hw->wiphy); 311 wiphy_rfkill_start_polling(priv->hw->wiphy);
312} 312}
313
314void ath9k_htc_radio_enable(struct ieee80211_hw *hw)
315{
316 struct ath9k_htc_priv *priv = hw->priv;
317 struct ath_hw *ah = priv->ah;
318 struct ath_common *common = ath9k_hw_common(ah);
319 int ret;
320 u8 cmd_rsp;
321
322 if (!ah->curchan)
323 ah->curchan = ath9k_cmn_get_curchannel(hw, ah);
324
325 /* Reset the HW */
326 ret = ath9k_hw_reset(ah, ah->curchan, ah->caldata, false);
327 if (ret) {
328 ath_err(common,
329 "Unable to reset hardware; reset status %d (freq %u MHz)\n",
330 ret, ah->curchan->channel);
331 }
332
333 ath9k_cmn_update_txpow(ah, priv->curtxpow, priv->txpowlimit,
334 &priv->curtxpow);
335
336 /* Start RX */
337 WMI_CMD(WMI_START_RECV_CMDID);
338 ath9k_host_rx_init(priv);
339
340 /* Start TX */
341 htc_start(priv->htc);
342 spin_lock_bh(&priv->tx.tx_lock);
343 priv->tx.flags &= ~ATH9K_HTC_OP_TX_QUEUES_STOP;
344 spin_unlock_bh(&priv->tx.tx_lock);
345 ieee80211_wake_queues(hw);
346
347 WMI_CMD(WMI_ENABLE_INTR_CMDID);
348
349 /* Enable LED */
350 ath9k_hw_cfg_output(ah, ah->led_pin,
351 AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
352 ath9k_hw_set_gpio(ah, ah->led_pin, 0);
353}
354
355void ath9k_htc_radio_disable(struct ieee80211_hw *hw)
356{
357 struct ath9k_htc_priv *priv = hw->priv;
358 struct ath_hw *ah = priv->ah;
359 struct ath_common *common = ath9k_hw_common(ah);
360 int ret;
361 u8 cmd_rsp;
362
363 ath9k_htc_ps_wakeup(priv);
364
365 /* Disable LED */
366 ath9k_hw_set_gpio(ah, ah->led_pin, 1);
367 ath9k_hw_cfg_gpio_input(ah, ah->led_pin);
368
369 WMI_CMD(WMI_DISABLE_INTR_CMDID);
370
371 /* Stop TX */
372 ieee80211_stop_queues(hw);
373 ath9k_htc_tx_drain(priv);
374 WMI_CMD(WMI_DRAIN_TXQ_ALL_CMDID);
375
376 /* Stop RX */
377 WMI_CMD(WMI_STOP_RECV_CMDID);
378
379 /* Clear the WMI event queue */
380 ath9k_wmi_event_drain(priv);
381
382 /*
383 * The MIB counters have to be disabled here,
384 * since the target doesn't do it.
385 */
386 ath9k_hw_disable_mib_counters(ah);
387
388 if (!ah->curchan)
389 ah->curchan = ath9k_cmn_get_curchannel(hw, ah);
390
391 /* Reset the HW */
392 ret = ath9k_hw_reset(ah, ah->curchan, ah->caldata, false);
393 if (ret) {
394 ath_err(common,
395 "Unable to reset hardware; reset status %d (freq %u MHz)\n",
396 ret, ah->curchan->channel);
397 }
398
399 /* Disable the PHY */
400 ath9k_hw_phy_disable(ah);
401
402 ath9k_htc_ps_restore(priv);
403 ath9k_htc_setpower(priv, ATH9K_PM_FULL_SLEEP);
404}
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
index 25213d521bc..a035a380d66 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -611,7 +611,7 @@ static int ath9k_init_priv(struct ath9k_htc_priv *priv,
611 struct ath_common *common; 611 struct ath_common *common;
612 int i, ret = 0, csz = 0; 612 int i, ret = 0, csz = 0;
613 613
614 priv->op_flags |= OP_INVALID; 614 set_bit(OP_INVALID, &priv->op_flags);
615 615
616 ah = kzalloc(sizeof(struct ath_hw), GFP_KERNEL); 616 ah = kzalloc(sizeof(struct ath_hw), GFP_KERNEL);
617 if (!ah) 617 if (!ah)
@@ -718,7 +718,7 @@ static void ath9k_set_hw_capab(struct ath9k_htc_priv *priv,
718 718
719 hw->queues = 4; 719 hw->queues = 4;
720 hw->channel_change_time = 5000; 720 hw->channel_change_time = 5000;
721 hw->max_listen_interval = 10; 721 hw->max_listen_interval = 1;
722 722
723 hw->vif_data_size = sizeof(struct ath9k_htc_vif); 723 hw->vif_data_size = sizeof(struct ath9k_htc_vif);
724 hw->sta_data_size = sizeof(struct ath9k_htc_sta); 724 hw->sta_data_size = sizeof(struct ath9k_htc_sta);
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index abbd6effd60..374c32ed905 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -75,14 +75,19 @@ unlock:
75 75
76void ath9k_htc_ps_restore(struct ath9k_htc_priv *priv) 76void ath9k_htc_ps_restore(struct ath9k_htc_priv *priv)
77{ 77{
78 bool reset;
79
78 mutex_lock(&priv->htc_pm_lock); 80 mutex_lock(&priv->htc_pm_lock);
79 if (--priv->ps_usecount != 0) 81 if (--priv->ps_usecount != 0)
80 goto unlock; 82 goto unlock;
81 83
82 if (priv->ps_idle) 84 if (priv->ps_idle) {
85 ath9k_hw_setrxabort(priv->ah, true);
86 ath9k_hw_stopdmarecv(priv->ah, &reset);
83 ath9k_hw_setpower(priv->ah, ATH9K_PM_FULL_SLEEP); 87 ath9k_hw_setpower(priv->ah, ATH9K_PM_FULL_SLEEP);
84 else if (priv->ps_enabled) 88 } else if (priv->ps_enabled) {
85 ath9k_hw_setpower(priv->ah, ATH9K_PM_NETWORK_SLEEP); 89 ath9k_hw_setpower(priv->ah, ATH9K_PM_NETWORK_SLEEP);
90 }
86 91
87unlock: 92unlock:
88 mutex_unlock(&priv->htc_pm_lock); 93 mutex_unlock(&priv->htc_pm_lock);
@@ -250,7 +255,7 @@ static int ath9k_htc_set_channel(struct ath9k_htc_priv *priv,
250 u8 cmd_rsp; 255 u8 cmd_rsp;
251 int ret; 256 int ret;
252 257
253 if (priv->op_flags & OP_INVALID) 258 if (test_bit(OP_INVALID, &priv->op_flags))
254 return -EIO; 259 return -EIO;
255 260
256 fastcc = !!(hw->conf.flags & IEEE80211_CONF_OFFCHANNEL); 261 fastcc = !!(hw->conf.flags & IEEE80211_CONF_OFFCHANNEL);
@@ -304,7 +309,7 @@ static int ath9k_htc_set_channel(struct ath9k_htc_priv *priv,
304 309
305 htc_start(priv->htc); 310 htc_start(priv->htc);
306 311
307 if (!(priv->op_flags & OP_SCANNING) && 312 if (!test_bit(OP_SCANNING, &priv->op_flags) &&
308 !(hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)) 313 !(hw->conf.flags & IEEE80211_CONF_OFFCHANNEL))
309 ath9k_htc_vif_reconfig(priv); 314 ath9k_htc_vif_reconfig(priv);
310 315
@@ -750,7 +755,7 @@ void ath9k_htc_start_ani(struct ath9k_htc_priv *priv)
750 common->ani.shortcal_timer = timestamp; 755 common->ani.shortcal_timer = timestamp;
751 common->ani.checkani_timer = timestamp; 756 common->ani.checkani_timer = timestamp;
752 757
753 priv->op_flags |= OP_ANI_RUNNING; 758 set_bit(OP_ANI_RUNNING, &priv->op_flags);
754 759
755 ieee80211_queue_delayed_work(common->hw, &priv->ani_work, 760 ieee80211_queue_delayed_work(common->hw, &priv->ani_work,
756 msecs_to_jiffies(ATH_ANI_POLLINTERVAL)); 761 msecs_to_jiffies(ATH_ANI_POLLINTERVAL));
@@ -759,7 +764,7 @@ void ath9k_htc_start_ani(struct ath9k_htc_priv *priv)
759void ath9k_htc_stop_ani(struct ath9k_htc_priv *priv) 764void ath9k_htc_stop_ani(struct ath9k_htc_priv *priv)
760{ 765{
761 cancel_delayed_work_sync(&priv->ani_work); 766 cancel_delayed_work_sync(&priv->ani_work);
762 priv->op_flags &= ~OP_ANI_RUNNING; 767 clear_bit(OP_ANI_RUNNING, &priv->op_flags);
763} 768}
764 769
765void ath9k_htc_ani_work(struct work_struct *work) 770void ath9k_htc_ani_work(struct work_struct *work)
@@ -944,7 +949,7 @@ static int ath9k_htc_start(struct ieee80211_hw *hw)
944 ath_dbg(common, CONFIG, 949 ath_dbg(common, CONFIG,
945 "Failed to update capability in target\n"); 950 "Failed to update capability in target\n");
946 951
947 priv->op_flags &= ~OP_INVALID; 952 clear_bit(OP_INVALID, &priv->op_flags);
948 htc_start(priv->htc); 953 htc_start(priv->htc);
949 954
950 spin_lock_bh(&priv->tx.tx_lock); 955 spin_lock_bh(&priv->tx.tx_lock);
@@ -973,7 +978,7 @@ static void ath9k_htc_stop(struct ieee80211_hw *hw)
973 978
974 mutex_lock(&priv->mutex); 979 mutex_lock(&priv->mutex);
975 980
976 if (priv->op_flags & OP_INVALID) { 981 if (test_bit(OP_INVALID, &priv->op_flags)) {
977 ath_dbg(common, ANY, "Device not present\n"); 982 ath_dbg(common, ANY, "Device not present\n");
978 mutex_unlock(&priv->mutex); 983 mutex_unlock(&priv->mutex);
979 return; 984 return;
@@ -1015,7 +1020,7 @@ static void ath9k_htc_stop(struct ieee80211_hw *hw)
1015 ath9k_htc_ps_restore(priv); 1020 ath9k_htc_ps_restore(priv);
1016 ath9k_htc_setpower(priv, ATH9K_PM_FULL_SLEEP); 1021 ath9k_htc_setpower(priv, ATH9K_PM_FULL_SLEEP);
1017 1022
1018 priv->op_flags |= OP_INVALID; 1023 set_bit(OP_INVALID, &priv->op_flags);
1019 1024
1020 ath_dbg(common, CONFIG, "Driver halt\n"); 1025 ath_dbg(common, CONFIG, "Driver halt\n");
1021 mutex_unlock(&priv->mutex); 1026 mutex_unlock(&priv->mutex);
@@ -1105,7 +1110,7 @@ static int ath9k_htc_add_interface(struct ieee80211_hw *hw,
1105 ath9k_htc_set_opmode(priv); 1110 ath9k_htc_set_opmode(priv);
1106 1111
1107 if ((priv->ah->opmode == NL80211_IFTYPE_AP) && 1112 if ((priv->ah->opmode == NL80211_IFTYPE_AP) &&
1108 !(priv->op_flags & OP_ANI_RUNNING)) { 1113 !test_bit(OP_ANI_RUNNING, &priv->op_flags)) {
1109 ath9k_hw_set_tsfadjust(priv->ah, 1); 1114 ath9k_hw_set_tsfadjust(priv->ah, 1);
1110 ath9k_htc_start_ani(priv); 1115 ath9k_htc_start_ani(priv);
1111 } 1116 }
@@ -1178,24 +1183,20 @@ static int ath9k_htc_config(struct ieee80211_hw *hw, u32 changed)
1178 struct ath9k_htc_priv *priv = hw->priv; 1183 struct ath9k_htc_priv *priv = hw->priv;
1179 struct ath_common *common = ath9k_hw_common(priv->ah); 1184 struct ath_common *common = ath9k_hw_common(priv->ah);
1180 struct ieee80211_conf *conf = &hw->conf; 1185 struct ieee80211_conf *conf = &hw->conf;
1186 bool chip_reset = false;
1187 int ret = 0;
1181 1188
1182 mutex_lock(&priv->mutex); 1189 mutex_lock(&priv->mutex);
1190 ath9k_htc_ps_wakeup(priv);
1183 1191
1184 if (changed & IEEE80211_CONF_CHANGE_IDLE) { 1192 if (changed & IEEE80211_CONF_CHANGE_IDLE) {
1185 bool enable_radio = false;
1186 bool idle = !!(conf->flags & IEEE80211_CONF_IDLE);
1187
1188 mutex_lock(&priv->htc_pm_lock); 1193 mutex_lock(&priv->htc_pm_lock);
1189 if (!idle && priv->ps_idle)
1190 enable_radio = true;
1191 priv->ps_idle = idle;
1192 mutex_unlock(&priv->htc_pm_lock);
1193 1194
1194 if (enable_radio) { 1195 priv->ps_idle = !!(conf->flags & IEEE80211_CONF_IDLE);
1195 ath_dbg(common, CONFIG, "not-idle: enabling radio\n"); 1196 if (priv->ps_idle)
1196 ath9k_htc_setpower(priv, ATH9K_PM_AWAKE); 1197 chip_reset = true;
1197 ath9k_htc_radio_enable(hw); 1198
1198 } 1199 mutex_unlock(&priv->htc_pm_lock);
1199 } 1200 }
1200 1201
1201 /* 1202 /*
@@ -1210,7 +1211,7 @@ static int ath9k_htc_config(struct ieee80211_hw *hw, u32 changed)
1210 ath9k_htc_remove_monitor_interface(priv); 1211 ath9k_htc_remove_monitor_interface(priv);
1211 } 1212 }
1212 1213
1213 if (changed & IEEE80211_CONF_CHANGE_CHANNEL) { 1214 if ((changed & IEEE80211_CONF_CHANGE_CHANNEL) || chip_reset) {
1214 struct ieee80211_channel *curchan = hw->conf.channel; 1215 struct ieee80211_channel *curchan = hw->conf.channel;
1215 int pos = curchan->hw_value; 1216 int pos = curchan->hw_value;
1216 1217
@@ -1223,8 +1224,8 @@ static int ath9k_htc_config(struct ieee80211_hw *hw, u32 changed)
1223 1224
1224 if (ath9k_htc_set_channel(priv, hw, &priv->ah->channels[pos]) < 0) { 1225 if (ath9k_htc_set_channel(priv, hw, &priv->ah->channels[pos]) < 0) {
1225 ath_err(common, "Unable to set channel\n"); 1226 ath_err(common, "Unable to set channel\n");
1226 mutex_unlock(&priv->mutex); 1227 ret = -EINVAL;
1227 return -EINVAL; 1228 goto out;
1228 } 1229 }
1229 1230
1230 } 1231 }
@@ -1246,21 +1247,10 @@ static int ath9k_htc_config(struct ieee80211_hw *hw, u32 changed)
1246 priv->txpowlimit, &priv->curtxpow); 1247 priv->txpowlimit, &priv->curtxpow);
1247 } 1248 }
1248 1249
1249 if (changed & IEEE80211_CONF_CHANGE_IDLE) {
1250 mutex_lock(&priv->htc_pm_lock);
1251 if (!priv->ps_idle) {
1252 mutex_unlock(&priv->htc_pm_lock);
1253 goto out;
1254 }
1255 mutex_unlock(&priv->htc_pm_lock);
1256
1257 ath_dbg(common, CONFIG, "idle: disabling radio\n");
1258 ath9k_htc_radio_disable(hw);
1259 }
1260
1261out: 1250out:
1251 ath9k_htc_ps_restore(priv);
1262 mutex_unlock(&priv->mutex); 1252 mutex_unlock(&priv->mutex);
1263 return 0; 1253 return ret;
1264} 1254}
1265 1255
1266#define SUPPORTED_FILTERS \ 1256#define SUPPORTED_FILTERS \
@@ -1285,7 +1275,7 @@ static void ath9k_htc_configure_filter(struct ieee80211_hw *hw,
1285 changed_flags &= SUPPORTED_FILTERS; 1275 changed_flags &= SUPPORTED_FILTERS;
1286 *total_flags &= SUPPORTED_FILTERS; 1276 *total_flags &= SUPPORTED_FILTERS;
1287 1277
1288 if (priv->op_flags & OP_INVALID) { 1278 if (test_bit(OP_INVALID, &priv->op_flags)) {
1289 ath_dbg(ath9k_hw_common(priv->ah), ANY, 1279 ath_dbg(ath9k_hw_common(priv->ah), ANY,
1290 "Unable to configure filter on invalid state\n"); 1280 "Unable to configure filter on invalid state\n");
1291 mutex_unlock(&priv->mutex); 1281 mutex_unlock(&priv->mutex);
@@ -1516,7 +1506,7 @@ static void ath9k_htc_bss_info_changed(struct ieee80211_hw *hw,
1516 ath_dbg(common, CONFIG, "Beacon enabled for BSS: %pM\n", 1506 ath_dbg(common, CONFIG, "Beacon enabled for BSS: %pM\n",
1517 bss_conf->bssid); 1507 bss_conf->bssid);
1518 ath9k_htc_set_tsfadjust(priv, vif); 1508 ath9k_htc_set_tsfadjust(priv, vif);
1519 priv->op_flags |= OP_ENABLE_BEACON; 1509 set_bit(OP_ENABLE_BEACON, &priv->op_flags);
1520 ath9k_htc_beacon_config(priv, vif); 1510 ath9k_htc_beacon_config(priv, vif);
1521 } 1511 }
1522 1512
@@ -1529,7 +1519,7 @@ static void ath9k_htc_bss_info_changed(struct ieee80211_hw *hw,
1529 ath_dbg(common, CONFIG, 1519 ath_dbg(common, CONFIG,
1530 "Beacon disabled for BSS: %pM\n", 1520 "Beacon disabled for BSS: %pM\n",
1531 bss_conf->bssid); 1521 bss_conf->bssid);
1532 priv->op_flags &= ~OP_ENABLE_BEACON; 1522 clear_bit(OP_ENABLE_BEACON, &priv->op_flags);
1533 ath9k_htc_beacon_config(priv, vif); 1523 ath9k_htc_beacon_config(priv, vif);
1534 } 1524 }
1535 } 1525 }
@@ -1542,7 +1532,7 @@ static void ath9k_htc_bss_info_changed(struct ieee80211_hw *hw,
1542 (priv->nvifs == 1) && 1532 (priv->nvifs == 1) &&
1543 (priv->num_ap_vif == 1) && 1533 (priv->num_ap_vif == 1) &&
1544 (vif->type == NL80211_IFTYPE_AP)) { 1534 (vif->type == NL80211_IFTYPE_AP)) {
1545 priv->op_flags |= OP_TSF_RESET; 1535 set_bit(OP_TSF_RESET, &priv->op_flags);
1546 } 1536 }
1547 ath_dbg(common, CONFIG, 1537 ath_dbg(common, CONFIG,
1548 "Beacon interval changed for BSS: %pM\n", 1538 "Beacon interval changed for BSS: %pM\n",
@@ -1654,7 +1644,7 @@ static void ath9k_htc_sw_scan_start(struct ieee80211_hw *hw)
1654 1644
1655 mutex_lock(&priv->mutex); 1645 mutex_lock(&priv->mutex);
1656 spin_lock_bh(&priv->beacon_lock); 1646 spin_lock_bh(&priv->beacon_lock);
1657 priv->op_flags |= OP_SCANNING; 1647 set_bit(OP_SCANNING, &priv->op_flags);
1658 spin_unlock_bh(&priv->beacon_lock); 1648 spin_unlock_bh(&priv->beacon_lock);
1659 cancel_work_sync(&priv->ps_work); 1649 cancel_work_sync(&priv->ps_work);
1660 ath9k_htc_stop_ani(priv); 1650 ath9k_htc_stop_ani(priv);
@@ -1667,7 +1657,7 @@ static void ath9k_htc_sw_scan_complete(struct ieee80211_hw *hw)
1667 1657
1668 mutex_lock(&priv->mutex); 1658 mutex_lock(&priv->mutex);
1669 spin_lock_bh(&priv->beacon_lock); 1659 spin_lock_bh(&priv->beacon_lock);
1670 priv->op_flags &= ~OP_SCANNING; 1660 clear_bit(OP_SCANNING, &priv->op_flags);
1671 spin_unlock_bh(&priv->beacon_lock); 1661 spin_unlock_bh(&priv->beacon_lock);
1672 ath9k_htc_ps_wakeup(priv); 1662 ath9k_htc_ps_wakeup(priv);
1673 ath9k_htc_vif_reconfig(priv); 1663 ath9k_htc_vif_reconfig(priv);
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
index 3e40a646151..47e61d0da33 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
@@ -916,7 +916,7 @@ void ath9k_host_rx_init(struct ath9k_htc_priv *priv)
916{ 916{
917 ath9k_hw_rxena(priv->ah); 917 ath9k_hw_rxena(priv->ah);
918 ath9k_htc_opmode_init(priv); 918 ath9k_htc_opmode_init(priv);
919 ath9k_hw_startpcureceive(priv->ah, (priv->op_flags & OP_SCANNING)); 919 ath9k_hw_startpcureceive(priv->ah, test_bit(OP_SCANNING, &priv->op_flags));
920 priv->rx.last_rssi = ATH_RSSI_DUMMY_MARKER; 920 priv->rx.last_rssi = ATH_RSSI_DUMMY_MARKER;
921} 921}
922 922
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 995ca8e1302..ebfb2a3c645 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -390,14 +390,6 @@ static void ath9k_hw_disablepcie(struct ath_hw *ah)
390 REG_WRITE(ah, AR_PCIE_SERDES2, 0x00000000); 390 REG_WRITE(ah, AR_PCIE_SERDES2, 0x00000000);
391} 391}
392 392
393static void ath9k_hw_aspm_init(struct ath_hw *ah)
394{
395 struct ath_common *common = ath9k_hw_common(ah);
396
397 if (common->bus_ops->aspm_init)
398 common->bus_ops->aspm_init(common);
399}
400
401/* This should work for all families including legacy */ 393/* This should work for all families including legacy */
402static bool ath9k_hw_chip_test(struct ath_hw *ah) 394static bool ath9k_hw_chip_test(struct ath_hw *ah)
403{ 395{
@@ -693,9 +685,6 @@ static int __ath9k_hw_init(struct ath_hw *ah)
693 if (r) 685 if (r)
694 return r; 686 return r;
695 687
696 if (ah->is_pciexpress)
697 ath9k_hw_aspm_init(ah);
698
699 r = ath9k_hw_init_macaddr(ah); 688 r = ath9k_hw_init_macaddr(ah);
700 if (r) { 689 if (r) {
701 ath_err(common, "Failed to initialize MAC address\n"); 690 ath_err(common, "Failed to initialize MAC address\n");
@@ -1371,6 +1360,9 @@ static bool ath9k_hw_set_reset(struct ath_hw *ah, int type)
1371 } 1360 }
1372 } 1361 }
1373 1362
1363 if (ath9k_hw_mci_is_enabled(ah))
1364 ar9003_mci_check_gpm_offset(ah);
1365
1374 REG_WRITE(ah, AR_RTC_RC, rst_flags); 1366 REG_WRITE(ah, AR_RTC_RC, rst_flags);
1375 1367
1376 REGWRITE_BUFFER_FLUSH(ah); 1368 REGWRITE_BUFFER_FLUSH(ah);
@@ -1455,9 +1447,6 @@ static bool ath9k_hw_set_reset_reg(struct ath_hw *ah, u32 type)
1455 break; 1447 break;
1456 } 1448 }
1457 1449
1458 if (ah->caps.hw_caps & ATH9K_HW_CAP_MCI)
1459 REG_WRITE(ah, AR_RTC_KEEP_AWAKE, 0x2);
1460
1461 return ret; 1450 return ret;
1462} 1451}
1463 1452
@@ -1733,8 +1722,8 @@ static int ath9k_hw_do_fastcc(struct ath_hw *ah, struct ath9k_channel *chan)
1733 ath9k_hw_loadnf(ah, ah->curchan); 1722 ath9k_hw_loadnf(ah, ah->curchan);
1734 ath9k_hw_start_nfcal(ah, true); 1723 ath9k_hw_start_nfcal(ah, true);
1735 1724
1736 if ((ah->caps.hw_caps & ATH9K_HW_CAP_MCI) && ar9003_mci_is_ready(ah)) 1725 if (ath9k_hw_mci_is_enabled(ah))
1737 ar9003_mci_2g5g_switch(ah, true); 1726 ar9003_mci_2g5g_switch(ah, false);
1738 1727
1739 if (AR_SREV_9271(ah)) 1728 if (AR_SREV_9271(ah))
1740 ar9002_hw_load_ani_reg(ah, chan); 1729 ar9002_hw_load_ani_reg(ah, chan);
@@ -1754,10 +1743,9 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
1754 u64 tsf = 0; 1743 u64 tsf = 0;
1755 int i, r; 1744 int i, r;
1756 bool start_mci_reset = false; 1745 bool start_mci_reset = false;
1757 bool mci = !!(ah->caps.hw_caps & ATH9K_HW_CAP_MCI);
1758 bool save_fullsleep = ah->chip_fullsleep; 1746 bool save_fullsleep = ah->chip_fullsleep;
1759 1747
1760 if (mci) { 1748 if (ath9k_hw_mci_is_enabled(ah)) {
1761 start_mci_reset = ar9003_mci_start_reset(ah, chan); 1749 start_mci_reset = ar9003_mci_start_reset(ah, chan);
1762 if (start_mci_reset) 1750 if (start_mci_reset)
1763 return 0; 1751 return 0;
@@ -1786,7 +1774,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
1786 return r; 1774 return r;
1787 } 1775 }
1788 1776
1789 if (mci) 1777 if (ath9k_hw_mci_is_enabled(ah))
1790 ar9003_mci_stop_bt(ah, save_fullsleep); 1778 ar9003_mci_stop_bt(ah, save_fullsleep);
1791 1779
1792 saveDefAntenna = REG_READ(ah, AR_DEF_ANTENNA); 1780 saveDefAntenna = REG_READ(ah, AR_DEF_ANTENNA);
@@ -1844,7 +1832,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
1844 if (r) 1832 if (r)
1845 return r; 1833 return r;
1846 1834
1847 if (mci) 1835 if (ath9k_hw_mci_is_enabled(ah))
1848 ar9003_mci_reset(ah, false, IS_CHAN_2GHZ(chan), save_fullsleep); 1836 ar9003_mci_reset(ah, false, IS_CHAN_2GHZ(chan), save_fullsleep);
1849 1837
1850 /* 1838 /*
@@ -1939,7 +1927,8 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
1939 1927
1940 ath9k_hw_set_dma(ah); 1928 ath9k_hw_set_dma(ah);
1941 1929
1942 REG_WRITE(ah, AR_OBS, 8); 1930 if (!ath9k_hw_mci_is_enabled(ah))
1931 REG_WRITE(ah, AR_OBS, 8);
1943 1932
1944 if (ah->config.rx_intr_mitigation) { 1933 if (ah->config.rx_intr_mitigation) {
1945 REG_RMW_FIELD(ah, AR_RIMT, AR_RIMT_LAST, 500); 1934 REG_RMW_FIELD(ah, AR_RIMT, AR_RIMT_LAST, 500);
@@ -1963,7 +1952,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
1963 ath9k_hw_loadnf(ah, chan); 1952 ath9k_hw_loadnf(ah, chan);
1964 ath9k_hw_start_nfcal(ah, true); 1953 ath9k_hw_start_nfcal(ah, true);
1965 1954
1966 if (mci && ar9003_mci_end_reset(ah, chan, caldata)) 1955 if (ath9k_hw_mci_is_enabled(ah) && ar9003_mci_end_reset(ah, chan, caldata))
1967 return -EIO; 1956 return -EIO;
1968 1957
1969 ENABLE_REGWRITE_BUFFER(ah); 1958 ENABLE_REGWRITE_BUFFER(ah);
@@ -2008,7 +1997,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
2008 if (ath9k_hw_btcoex_is_enabled(ah)) 1997 if (ath9k_hw_btcoex_is_enabled(ah))
2009 ath9k_hw_btcoex_enable(ah); 1998 ath9k_hw_btcoex_enable(ah);
2010 1999
2011 if (mci) 2000 if (ath9k_hw_mci_is_enabled(ah))
2012 ar9003_mci_check_bt(ah); 2001 ar9003_mci_check_bt(ah);
2013 2002
2014 if (AR_SREV_9300_20_OR_LATER(ah)) { 2003 if (AR_SREV_9300_20_OR_LATER(ah)) {
@@ -2031,39 +2020,35 @@ EXPORT_SYMBOL(ath9k_hw_reset);
2031 * Notify Power Mgt is disabled in self-generated frames. 2020 * Notify Power Mgt is disabled in self-generated frames.
2032 * If requested, force chip to sleep. 2021 * If requested, force chip to sleep.
2033 */ 2022 */
2034static void ath9k_set_power_sleep(struct ath_hw *ah, int setChip) 2023static void ath9k_set_power_sleep(struct ath_hw *ah)
2035{ 2024{
2036 REG_SET_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV); 2025 REG_SET_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV);
2037 if (setChip) {
2038 if (AR_SREV_9462(ah)) {
2039 REG_WRITE(ah, AR_TIMER_MODE,
2040 REG_READ(ah, AR_TIMER_MODE) & 0xFFFFFF00);
2041 REG_WRITE(ah, AR_NDP2_TIMER_MODE, REG_READ(ah,
2042 AR_NDP2_TIMER_MODE) & 0xFFFFFF00);
2043 REG_WRITE(ah, AR_SLP32_INC,
2044 REG_READ(ah, AR_SLP32_INC) & 0xFFF00000);
2045 /* xxx Required for WLAN only case ? */
2046 REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_EN, 0);
2047 udelay(100);
2048 }
2049 2026
2050 /* 2027 if (AR_SREV_9462(ah)) {
2051 * Clear the RTC force wake bit to allow the 2028 REG_CLR_BIT(ah, AR_TIMER_MODE, 0xff);
2052 * mac to go to sleep. 2029 REG_CLR_BIT(ah, AR_NDP2_TIMER_MODE, 0xff);
2053 */ 2030 REG_CLR_BIT(ah, AR_SLP32_INC, 0xfffff);
2054 REG_CLR_BIT(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_EN); 2031 /* xxx Required for WLAN only case ? */
2032 REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_EN, 0);
2033 udelay(100);
2034 }
2055 2035
2056 if (AR_SREV_9462(ah)) 2036 /*
2057 udelay(100); 2037 * Clear the RTC force wake bit to allow the
2038 * mac to go to sleep.
2039 */
2040 REG_CLR_BIT(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_EN);
2058 2041
2059 if (!AR_SREV_9100(ah) && !AR_SREV_9300_20_OR_LATER(ah)) 2042 if (ath9k_hw_mci_is_enabled(ah))
2060 REG_WRITE(ah, AR_RC, AR_RC_AHB | AR_RC_HOSTIF); 2043 udelay(100);
2061 2044
2062 /* Shutdown chip. Active low */ 2045 if (!AR_SREV_9100(ah) && !AR_SREV_9300_20_OR_LATER(ah))
2063 if (!AR_SREV_5416(ah) && !AR_SREV_9271(ah)) { 2046 REG_WRITE(ah, AR_RC, AR_RC_AHB | AR_RC_HOSTIF);
2064 REG_CLR_BIT(ah, AR_RTC_RESET, AR_RTC_RESET_EN); 2047
2065 udelay(2); 2048 /* Shutdown chip. Active low */
2066 } 2049 if (!AR_SREV_5416(ah) && !AR_SREV_9271(ah)) {
2050 REG_CLR_BIT(ah, AR_RTC_RESET, AR_RTC_RESET_EN);
2051 udelay(2);
2067 } 2052 }
2068 2053
2069 /* Clear Bit 14 of AR_WA after putting chip into Full Sleep mode. */ 2054 /* Clear Bit 14 of AR_WA after putting chip into Full Sleep mode. */
@@ -2076,44 +2061,38 @@ static void ath9k_set_power_sleep(struct ath_hw *ah, int setChip)
2076 * frames. If request, set power mode of chip to 2061 * frames. If request, set power mode of chip to
2077 * auto/normal. Duration in units of 128us (1/8 TU). 2062 * auto/normal. Duration in units of 128us (1/8 TU).
2078 */ 2063 */
2079static void ath9k_set_power_network_sleep(struct ath_hw *ah, int setChip) 2064static void ath9k_set_power_network_sleep(struct ath_hw *ah)
2080{ 2065{
2081 u32 val; 2066 struct ath9k_hw_capabilities *pCap = &ah->caps;
2082 2067
2083 REG_SET_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV); 2068 REG_SET_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV);
2084 if (setChip) {
2085 struct ath9k_hw_capabilities *pCap = &ah->caps;
2086 2069
2087 if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) { 2070 if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
2088 /* Set WakeOnInterrupt bit; clear ForceWake bit */ 2071 /* Set WakeOnInterrupt bit; clear ForceWake bit */
2089 REG_WRITE(ah, AR_RTC_FORCE_WAKE, 2072 REG_WRITE(ah, AR_RTC_FORCE_WAKE,
2090 AR_RTC_FORCE_WAKE_ON_INT); 2073 AR_RTC_FORCE_WAKE_ON_INT);
2091 } else { 2074 } else {
2092 2075
2093 /* When chip goes into network sleep, it could be waken 2076 /* When chip goes into network sleep, it could be waken
2094 * up by MCI_INT interrupt caused by BT's HW messages 2077 * up by MCI_INT interrupt caused by BT's HW messages
2095 * (LNA_xxx, CONT_xxx) which chould be in a very fast 2078 * (LNA_xxx, CONT_xxx) which chould be in a very fast
2096 * rate (~100us). This will cause chip to leave and 2079 * rate (~100us). This will cause chip to leave and
2097 * re-enter network sleep mode frequently, which in 2080 * re-enter network sleep mode frequently, which in
2098 * consequence will have WLAN MCI HW to generate lots of 2081 * consequence will have WLAN MCI HW to generate lots of
2099 * SYS_WAKING and SYS_SLEEPING messages which will make 2082 * SYS_WAKING and SYS_SLEEPING messages which will make
2100 * BT CPU to busy to process. 2083 * BT CPU to busy to process.
2101 */ 2084 */
2102 if (AR_SREV_9462(ah)) { 2085 if (ath9k_hw_mci_is_enabled(ah))
2103 val = REG_READ(ah, AR_MCI_INTERRUPT_RX_MSG_EN) & 2086 REG_CLR_BIT(ah, AR_MCI_INTERRUPT_RX_MSG_EN,
2104 ~AR_MCI_INTERRUPT_RX_HW_MSG_MASK; 2087 AR_MCI_INTERRUPT_RX_HW_MSG_MASK);
2105 REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_EN, val); 2088 /*
2106 } 2089 * Clear the RTC force wake bit to allow the
2107 /* 2090 * mac to go to sleep.
2108 * Clear the RTC force wake bit to allow the 2091 */
2109 * mac to go to sleep. 2092 REG_CLR_BIT(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_EN);
2110 */ 2093
2111 REG_CLR_BIT(ah, AR_RTC_FORCE_WAKE, 2094 if (ath9k_hw_mci_is_enabled(ah))
2112 AR_RTC_FORCE_WAKE_EN); 2095 udelay(30);
2113
2114 if (AR_SREV_9462(ah))
2115 udelay(30);
2116 }
2117 } 2096 }
2118 2097
2119 /* Clear Bit 14 of AR_WA after putting chip into Net Sleep mode. */ 2098 /* Clear Bit 14 of AR_WA after putting chip into Net Sleep mode. */
@@ -2121,7 +2100,7 @@ static void ath9k_set_power_network_sleep(struct ath_hw *ah, int setChip)
2121 REG_WRITE(ah, AR_WA, ah->WARegVal & ~AR_WA_D3_L1_DISABLE); 2100 REG_WRITE(ah, AR_WA, ah->WARegVal & ~AR_WA_D3_L1_DISABLE);
2122} 2101}
2123 2102
2124static bool ath9k_hw_set_power_awake(struct ath_hw *ah, int setChip) 2103static bool ath9k_hw_set_power_awake(struct ath_hw *ah)
2125{ 2104{
2126 u32 val; 2105 u32 val;
2127 int i; 2106 int i;
@@ -2132,37 +2111,38 @@ static bool ath9k_hw_set_power_awake(struct ath_hw *ah, int setChip)
2132 udelay(10); 2111 udelay(10);
2133 } 2112 }
2134 2113
2135 if (setChip) { 2114 if ((REG_READ(ah, AR_RTC_STATUS) &
2136 if ((REG_READ(ah, AR_RTC_STATUS) & 2115 AR_RTC_STATUS_M) == AR_RTC_STATUS_SHUTDOWN) {
2137 AR_RTC_STATUS_M) == AR_RTC_STATUS_SHUTDOWN) { 2116 if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_POWER_ON)) {
2138 if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_POWER_ON)) { 2117 return false;
2139 return false;
2140 }
2141 if (!AR_SREV_9300_20_OR_LATER(ah))
2142 ath9k_hw_init_pll(ah, NULL);
2143 } 2118 }
2144 if (AR_SREV_9100(ah)) 2119 if (!AR_SREV_9300_20_OR_LATER(ah))
2145 REG_SET_BIT(ah, AR_RTC_RESET, 2120 ath9k_hw_init_pll(ah, NULL);
2146 AR_RTC_RESET_EN); 2121 }
2122 if (AR_SREV_9100(ah))
2123 REG_SET_BIT(ah, AR_RTC_RESET,
2124 AR_RTC_RESET_EN);
2147 2125
2126 REG_SET_BIT(ah, AR_RTC_FORCE_WAKE,
2127 AR_RTC_FORCE_WAKE_EN);
2128 udelay(50);
2129
2130 if (ath9k_hw_mci_is_enabled(ah))
2131 ar9003_mci_set_power_awake(ah);
2132
2133 for (i = POWER_UP_TIME / 50; i > 0; i--) {
2134 val = REG_READ(ah, AR_RTC_STATUS) & AR_RTC_STATUS_M;
2135 if (val == AR_RTC_STATUS_ON)
2136 break;
2137 udelay(50);
2148 REG_SET_BIT(ah, AR_RTC_FORCE_WAKE, 2138 REG_SET_BIT(ah, AR_RTC_FORCE_WAKE,
2149 AR_RTC_FORCE_WAKE_EN); 2139 AR_RTC_FORCE_WAKE_EN);
2150 udelay(50); 2140 }
2151 2141 if (i == 0) {
2152 for (i = POWER_UP_TIME / 50; i > 0; i--) { 2142 ath_err(ath9k_hw_common(ah),
2153 val = REG_READ(ah, AR_RTC_STATUS) & AR_RTC_STATUS_M; 2143 "Failed to wakeup in %uus\n",
2154 if (val == AR_RTC_STATUS_ON) 2144 POWER_UP_TIME / 20);
2155 break; 2145 return false;
2156 udelay(50);
2157 REG_SET_BIT(ah, AR_RTC_FORCE_WAKE,
2158 AR_RTC_FORCE_WAKE_EN);
2159 }
2160 if (i == 0) {
2161 ath_err(ath9k_hw_common(ah),
2162 "Failed to wakeup in %uus\n",
2163 POWER_UP_TIME / 20);
2164 return false;
2165 }
2166 } 2146 }
2167 2147
2168 REG_CLR_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV); 2148 REG_CLR_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV);
@@ -2173,7 +2153,7 @@ static bool ath9k_hw_set_power_awake(struct ath_hw *ah, int setChip)
2173bool ath9k_hw_setpower(struct ath_hw *ah, enum ath9k_power_mode mode) 2153bool ath9k_hw_setpower(struct ath_hw *ah, enum ath9k_power_mode mode)
2174{ 2154{
2175 struct ath_common *common = ath9k_hw_common(ah); 2155 struct ath_common *common = ath9k_hw_common(ah);
2176 int status = true, setChip = true; 2156 int status = true;
2177 static const char *modes[] = { 2157 static const char *modes[] = {
2178 "AWAKE", 2158 "AWAKE",
2179 "FULL-SLEEP", 2159 "FULL-SLEEP",
@@ -2189,25 +2169,17 @@ bool ath9k_hw_setpower(struct ath_hw *ah, enum ath9k_power_mode mode)
2189 2169
2190 switch (mode) { 2170 switch (mode) {
2191 case ATH9K_PM_AWAKE: 2171 case ATH9K_PM_AWAKE:
2192 status = ath9k_hw_set_power_awake(ah, setChip); 2172 status = ath9k_hw_set_power_awake(ah);
2193
2194 if (ah->caps.hw_caps & ATH9K_HW_CAP_MCI)
2195 REG_WRITE(ah, AR_RTC_KEEP_AWAKE, 0x2);
2196
2197 break; 2173 break;
2198 case ATH9K_PM_FULL_SLEEP: 2174 case ATH9K_PM_FULL_SLEEP:
2199 if (ah->caps.hw_caps & ATH9K_HW_CAP_MCI) 2175 if (ath9k_hw_mci_is_enabled(ah))
2200 ar9003_mci_set_full_sleep(ah); 2176 ar9003_mci_set_full_sleep(ah);
2201 2177
2202 ath9k_set_power_sleep(ah, setChip); 2178 ath9k_set_power_sleep(ah);
2203 ah->chip_fullsleep = true; 2179 ah->chip_fullsleep = true;
2204 break; 2180 break;
2205 case ATH9K_PM_NETWORK_SLEEP: 2181 case ATH9K_PM_NETWORK_SLEEP:
2206 2182 ath9k_set_power_network_sleep(ah);
2207 if (ah->caps.hw_caps & ATH9K_HW_CAP_MCI)
2208 REG_WRITE(ah, AR_RTC_KEEP_AWAKE, 0x2);
2209
2210 ath9k_set_power_network_sleep(ah, setChip);
2211 break; 2183 break;
2212 default: 2184 default:
2213 ath_err(common, "Unknown power mode %u\n", mode); 2185 ath_err(common, "Unknown power mode %u\n", mode);
@@ -2777,6 +2749,9 @@ EXPORT_SYMBOL(ath9k_hw_setrxfilter);
2777 2749
2778bool ath9k_hw_phy_disable(struct ath_hw *ah) 2750bool ath9k_hw_phy_disable(struct ath_hw *ah)
2779{ 2751{
2752 if (ath9k_hw_mci_is_enabled(ah))
2753 ar9003_mci_bt_gain_ctrl(ah);
2754
2780 if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_WARM)) 2755 if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_WARM))
2781 return false; 2756 return false;
2782 2757
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index b620c557c2a..94096607cbd 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -824,7 +824,6 @@ struct ath_hw {
824 struct ar5416IniArray ini_japan2484; 824 struct ar5416IniArray ini_japan2484;
825 struct ar5416IniArray iniModes_9271_ANI_reg; 825 struct ar5416IniArray iniModes_9271_ANI_reg;
826 struct ar5416IniArray ini_radio_post_sys2ant; 826 struct ar5416IniArray ini_radio_post_sys2ant;
827 struct ar5416IniArray ini_BTCOEX_MAX_TXPWR;
828 827
829 struct ar5416IniArray iniMac[ATH_INI_NUM_SPLIT]; 828 struct ar5416IniArray iniMac[ATH_INI_NUM_SPLIT];
830 struct ar5416IniArray iniBB[ATH_INI_NUM_SPLIT]; 829 struct ar5416IniArray iniBB[ATH_INI_NUM_SPLIT];
@@ -1020,16 +1019,8 @@ void ar9002_hw_attach_ops(struct ath_hw *ah);
1020void ar9003_hw_attach_ops(struct ath_hw *ah); 1019void ar9003_hw_attach_ops(struct ath_hw *ah);
1021 1020
1022void ar9002_hw_load_ani_reg(struct ath_hw *ah, struct ath9k_channel *chan); 1021void ar9002_hw_load_ani_reg(struct ath_hw *ah, struct ath9k_channel *chan);
1023/* 1022
1024 * ANI work can be shared between all families but a next
1025 * generation implementation of ANI will be used only for AR9003 only
1026 * for now as the other families still need to be tested with the same
1027 * next generation ANI. Feel free to start testing it though for the
1028 * older families (AR5008, AR9001, AR9002) by using modparam_force_new_ani.
1029 */
1030extern int modparam_force_new_ani;
1031void ath9k_ani_reset(struct ath_hw *ah, bool is_scanning); 1023void ath9k_ani_reset(struct ath_hw *ah, bool is_scanning);
1032void ath9k_hw_proc_mib_event(struct ath_hw *ah);
1033void ath9k_hw_ani_monitor(struct ath_hw *ah, struct ath9k_channel *chan); 1024void ath9k_hw_ani_monitor(struct ath_hw *ah, struct ath9k_channel *chan);
1034 1025
1035#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT 1026#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
@@ -1037,6 +1028,12 @@ static inline bool ath9k_hw_btcoex_is_enabled(struct ath_hw *ah)
1037{ 1028{
1038 return ah->btcoex_hw.enabled; 1029 return ah->btcoex_hw.enabled;
1039} 1030}
1031static inline bool ath9k_hw_mci_is_enabled(struct ath_hw *ah)
1032{
1033 return ah->common.btcoex_enabled &&
1034 (ah->caps.hw_caps & ATH9K_HW_CAP_MCI);
1035
1036}
1040void ath9k_hw_btcoex_enable(struct ath_hw *ah); 1037void ath9k_hw_btcoex_enable(struct ath_hw *ah);
1041static inline enum ath_btcoex_scheme 1038static inline enum ath_btcoex_scheme
1042ath9k_hw_get_btcoex_scheme(struct ath_hw *ah) 1039ath9k_hw_get_btcoex_scheme(struct ath_hw *ah)
@@ -1048,6 +1045,10 @@ static inline bool ath9k_hw_btcoex_is_enabled(struct ath_hw *ah)
1048{ 1045{
1049 return false; 1046 return false;
1050} 1047}
1048static inline bool ath9k_hw_mci_is_enabled(struct ath_hw *ah)
1049{
1050 return false;
1051}
1051static inline void ath9k_hw_btcoex_enable(struct ath_hw *ah) 1052static inline void ath9k_hw_btcoex_enable(struct ath_hw *ah)
1052{ 1053{
1053} 1054}
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index dee9e092449..9dfce1a69c7 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -489,6 +489,7 @@ static void ath9k_init_misc(struct ath_softc *sc)
489 489
490 setup_timer(&common->ani.timer, ath_ani_calibrate, (unsigned long)sc); 490 setup_timer(&common->ani.timer, ath_ani_calibrate, (unsigned long)sc);
491 491
492 sc->last_rssi = ATH_RSSI_DUMMY_MARKER;
492 sc->config.txpowlimit = ATH_TXPOWER_MAX; 493 sc->config.txpowlimit = ATH_TXPOWER_MAX;
493 memcpy(common->bssidmask, ath_bcast_mac, ETH_ALEN); 494 memcpy(common->bssidmask, ath_bcast_mac, ETH_ALEN);
494 sc->beacon.slottime = ATH9K_SLOT_TIME_9; 495 sc->beacon.slottime = ATH9K_SLOT_TIME_9;
@@ -560,6 +561,12 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
560 tasklet_init(&sc->bcon_tasklet, ath_beacon_tasklet, 561 tasklet_init(&sc->bcon_tasklet, ath_beacon_tasklet,
561 (unsigned long)sc); 562 (unsigned long)sc);
562 563
564 INIT_WORK(&sc->hw_reset_work, ath_reset_work);
565 INIT_WORK(&sc->hw_check_work, ath_hw_check);
566 INIT_WORK(&sc->paprd_work, ath_paprd_calibrate);
567 INIT_DELAYED_WORK(&sc->hw_pll_work, ath_hw_pll_work);
568 setup_timer(&sc->rx_poll_timer, ath_rx_poll, (unsigned long)sc);
569
563 /* 570 /*
564 * Cache line size is used to size and align various 571 * Cache line size is used to size and align various
565 * structures used to communicate with the hardware. 572 * structures used to communicate with the hardware.
@@ -590,6 +597,9 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
590 ath9k_cmn_init_crypto(sc->sc_ah); 597 ath9k_cmn_init_crypto(sc->sc_ah);
591 ath9k_init_misc(sc); 598 ath9k_init_misc(sc);
592 599
600 if (common->bus_ops->aspm_init)
601 common->bus_ops->aspm_init(common);
602
593 return 0; 603 return 0;
594 604
595err_btcoex: 605err_btcoex:
@@ -782,11 +792,6 @@ int ath9k_init_device(u16 devid, struct ath_softc *sc,
782 ARRAY_SIZE(ath9k_tpt_blink)); 792 ARRAY_SIZE(ath9k_tpt_blink));
783#endif 793#endif
784 794
785 INIT_WORK(&sc->hw_reset_work, ath_reset_work);
786 INIT_WORK(&sc->hw_check_work, ath_hw_check);
787 INIT_WORK(&sc->paprd_work, ath_paprd_calibrate);
788 INIT_DELAYED_WORK(&sc->hw_pll_work, ath_hw_pll_work);
789
790 /* Register with mac80211 */ 795 /* Register with mac80211 */
791 error = ieee80211_register_hw(hw); 796 error = ieee80211_register_hw(hw);
792 if (error) 797 if (error)
@@ -805,9 +810,6 @@ int ath9k_init_device(u16 devid, struct ath_softc *sc,
805 goto error_world; 810 goto error_world;
806 } 811 }
807 812
808 setup_timer(&sc->rx_poll_timer, ath_rx_poll, (unsigned long)sc);
809 sc->last_rssi = ATH_RSSI_DUMMY_MARKER;
810
811 ath_init_leds(sc); 813 ath_init_leds(sc);
812 ath_start_rfkill_poll(sc); 814 ath_start_rfkill_poll(sc);
813 815
diff --git a/drivers/net/wireless/ath/ath9k/link.c b/drivers/net/wireless/ath/ath9k/link.c
new file mode 100644
index 00000000000..91650fe5046
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/link.c
@@ -0,0 +1,510 @@
1/*
2 * Copyright (c) 2012 Qualcomm Atheros, Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include "ath9k.h"
18
19/*
20 * TX polling - checks if the TX engine is stuck somewhere
21 * and issues a chip reset if so.
22 */
23void ath_tx_complete_poll_work(struct work_struct *work)
24{
25 struct ath_softc *sc = container_of(work, struct ath_softc,
26 tx_complete_work.work);
27 struct ath_txq *txq;
28 int i;
29 bool needreset = false;
30#ifdef CONFIG_ATH9K_DEBUGFS
31 sc->tx_complete_poll_work_seen++;
32#endif
33
34 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
35 if (ATH_TXQ_SETUP(sc, i)) {
36 txq = &sc->tx.txq[i];
37 ath_txq_lock(sc, txq);
38 if (txq->axq_depth) {
39 if (txq->axq_tx_inprogress) {
40 needreset = true;
41 ath_txq_unlock(sc, txq);
42 break;
43 } else {
44 txq->axq_tx_inprogress = true;
45 }
46 }
47 ath_txq_unlock_complete(sc, txq);
48 }
49
50 if (needreset) {
51 ath_dbg(ath9k_hw_common(sc->sc_ah), RESET,
52 "tx hung, resetting the chip\n");
53 RESET_STAT_INC(sc, RESET_TYPE_TX_HANG);
54 ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
55 return;
56 }
57
58 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
59 msecs_to_jiffies(ATH_TX_COMPLETE_POLL_INT));
60}
61
62/*
63 * Checks if the BB/MAC is hung.
64 */
65void ath_hw_check(struct work_struct *work)
66{
67 struct ath_softc *sc = container_of(work, struct ath_softc, hw_check_work);
68 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
69 unsigned long flags;
70 int busy;
71 u8 is_alive, nbeacon = 1;
72
73 ath9k_ps_wakeup(sc);
74 is_alive = ath9k_hw_check_alive(sc->sc_ah);
75
76 if (is_alive && !AR_SREV_9300(sc->sc_ah))
77 goto out;
78 else if (!is_alive && AR_SREV_9300(sc->sc_ah)) {
79 ath_dbg(common, RESET,
80 "DCU stuck is detected. Schedule chip reset\n");
81 RESET_STAT_INC(sc, RESET_TYPE_MAC_HANG);
82 goto sched_reset;
83 }
84
85 spin_lock_irqsave(&common->cc_lock, flags);
86 busy = ath_update_survey_stats(sc);
87 spin_unlock_irqrestore(&common->cc_lock, flags);
88
89 ath_dbg(common, RESET, "Possible baseband hang, busy=%d (try %d)\n",
90 busy, sc->hw_busy_count + 1);
91 if (busy >= 99) {
92 if (++sc->hw_busy_count >= 3) {
93 RESET_STAT_INC(sc, RESET_TYPE_BB_HANG);
94 goto sched_reset;
95 }
96 } else if (busy >= 0) {
97 sc->hw_busy_count = 0;
98 nbeacon = 3;
99 }
100
101 ath_start_rx_poll(sc, nbeacon);
102 goto out;
103
104sched_reset:
105 ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
106out:
107 ath9k_ps_restore(sc);
108}
109
110/*
111 * PLL-WAR for AR9485/AR9340
112 */
113static bool ath_hw_pll_rx_hang_check(struct ath_softc *sc, u32 pll_sqsum)
114{
115 static int count;
116 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
117
118 if (pll_sqsum >= 0x40000) {
119 count++;
120 if (count == 3) {
121 ath_dbg(common, RESET, "PLL WAR, resetting the chip\n");
122 RESET_STAT_INC(sc, RESET_TYPE_PLL_HANG);
123 ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
124 count = 0;
125 return true;
126 }
127 } else {
128 count = 0;
129 }
130
131 return false;
132}
133
134void ath_hw_pll_work(struct work_struct *work)
135{
136 u32 pll_sqsum;
137 struct ath_softc *sc = container_of(work, struct ath_softc,
138 hw_pll_work.work);
139 /*
140 * ensure that the PLL WAR is executed only
141 * after the STA is associated (or) if the
142 * beaconing had started in interfaces that
143 * uses beacons.
144 */
145 if (!test_bit(SC_OP_BEACONS, &sc->sc_flags))
146 return;
147
148 ath9k_ps_wakeup(sc);
149 pll_sqsum = ar9003_get_pll_sqsum_dvc(sc->sc_ah);
150 ath9k_ps_restore(sc);
151 if (ath_hw_pll_rx_hang_check(sc, pll_sqsum))
152 return;
153
154 ieee80211_queue_delayed_work(sc->hw, &sc->hw_pll_work,
155 msecs_to_jiffies(ATH_PLL_WORK_INTERVAL));
156}
157
158/*
159 * RX Polling - monitors baseband hangs.
160 */
161void ath_start_rx_poll(struct ath_softc *sc, u8 nbeacon)
162{
163 if (!AR_SREV_9300(sc->sc_ah))
164 return;
165
166 if (!test_bit(SC_OP_PRIM_STA_VIF, &sc->sc_flags))
167 return;
168
169 mod_timer(&sc->rx_poll_timer, jiffies + msecs_to_jiffies
170 (nbeacon * sc->cur_beacon_conf.beacon_interval));
171}
172
173void ath_rx_poll(unsigned long data)
174{
175 struct ath_softc *sc = (struct ath_softc *)data;
176
177 ieee80211_queue_work(sc->hw, &sc->hw_check_work);
178}
179
180/*
181 * PA Pre-distortion.
182 */
183static void ath_paprd_activate(struct ath_softc *sc)
184{
185 struct ath_hw *ah = sc->sc_ah;
186 struct ath9k_hw_cal_data *caldata = ah->caldata;
187 int chain;
188
189 if (!caldata || !caldata->paprd_done)
190 return;
191
192 ath9k_ps_wakeup(sc);
193 ar9003_paprd_enable(ah, false);
194 for (chain = 0; chain < AR9300_MAX_CHAINS; chain++) {
195 if (!(ah->txchainmask & BIT(chain)))
196 continue;
197
198 ar9003_paprd_populate_single_table(ah, caldata, chain);
199 }
200
201 ar9003_paprd_enable(ah, true);
202 ath9k_ps_restore(sc);
203}
204
205static bool ath_paprd_send_frame(struct ath_softc *sc, struct sk_buff *skb, int chain)
206{
207 struct ieee80211_hw *hw = sc->hw;
208 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
209 struct ath_hw *ah = sc->sc_ah;
210 struct ath_common *common = ath9k_hw_common(ah);
211 struct ath_tx_control txctl;
212 int time_left;
213
214 memset(&txctl, 0, sizeof(txctl));
215 txctl.txq = sc->tx.txq_map[WME_AC_BE];
216
217 memset(tx_info, 0, sizeof(*tx_info));
218 tx_info->band = hw->conf.channel->band;
219 tx_info->flags |= IEEE80211_TX_CTL_NO_ACK;
220 tx_info->control.rates[0].idx = 0;
221 tx_info->control.rates[0].count = 1;
222 tx_info->control.rates[0].flags = IEEE80211_TX_RC_MCS;
223 tx_info->control.rates[1].idx = -1;
224
225 init_completion(&sc->paprd_complete);
226 txctl.paprd = BIT(chain);
227
228 if (ath_tx_start(hw, skb, &txctl) != 0) {
229 ath_dbg(common, CALIBRATE, "PAPRD TX failed\n");
230 dev_kfree_skb_any(skb);
231 return false;
232 }
233
234 time_left = wait_for_completion_timeout(&sc->paprd_complete,
235 msecs_to_jiffies(ATH_PAPRD_TIMEOUT));
236
237 if (!time_left)
238 ath_dbg(common, CALIBRATE,
239 "Timeout waiting for paprd training on TX chain %d\n",
240 chain);
241
242 return !!time_left;
243}
244
245void ath_paprd_calibrate(struct work_struct *work)
246{
247 struct ath_softc *sc = container_of(work, struct ath_softc, paprd_work);
248 struct ieee80211_hw *hw = sc->hw;
249 struct ath_hw *ah = sc->sc_ah;
250 struct ieee80211_hdr *hdr;
251 struct sk_buff *skb = NULL;
252 struct ath9k_hw_cal_data *caldata = ah->caldata;
253 struct ath_common *common = ath9k_hw_common(ah);
254 int ftype;
255 int chain_ok = 0;
256 int chain;
257 int len = 1800;
258
259 if (!caldata)
260 return;
261
262 ath9k_ps_wakeup(sc);
263
264 if (ar9003_paprd_init_table(ah) < 0)
265 goto fail_paprd;
266
267 skb = alloc_skb(len, GFP_KERNEL);
268 if (!skb)
269 goto fail_paprd;
270
271 skb_put(skb, len);
272 memset(skb->data, 0, len);
273 hdr = (struct ieee80211_hdr *)skb->data;
274 ftype = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_NULLFUNC;
275 hdr->frame_control = cpu_to_le16(ftype);
276 hdr->duration_id = cpu_to_le16(10);
277 memcpy(hdr->addr1, hw->wiphy->perm_addr, ETH_ALEN);
278 memcpy(hdr->addr2, hw->wiphy->perm_addr, ETH_ALEN);
279 memcpy(hdr->addr3, hw->wiphy->perm_addr, ETH_ALEN);
280
281 for (chain = 0; chain < AR9300_MAX_CHAINS; chain++) {
282 if (!(ah->txchainmask & BIT(chain)))
283 continue;
284
285 chain_ok = 0;
286
287 ath_dbg(common, CALIBRATE,
288 "Sending PAPRD frame for thermal measurement on chain %d\n",
289 chain);
290 if (!ath_paprd_send_frame(sc, skb, chain))
291 goto fail_paprd;
292
293 ar9003_paprd_setup_gain_table(ah, chain);
294
295 ath_dbg(common, CALIBRATE,
296 "Sending PAPRD training frame on chain %d\n", chain);
297 if (!ath_paprd_send_frame(sc, skb, chain))
298 goto fail_paprd;
299
300 if (!ar9003_paprd_is_done(ah)) {
301 ath_dbg(common, CALIBRATE,
302 "PAPRD not yet done on chain %d\n", chain);
303 break;
304 }
305
306 if (ar9003_paprd_create_curve(ah, caldata, chain)) {
307 ath_dbg(common, CALIBRATE,
308 "PAPRD create curve failed on chain %d\n",
309 chain);
310 break;
311 }
312
313 chain_ok = 1;
314 }
315 kfree_skb(skb);
316
317 if (chain_ok) {
318 caldata->paprd_done = true;
319 ath_paprd_activate(sc);
320 }
321
322fail_paprd:
323 ath9k_ps_restore(sc);
324}
325
326/*
327 * ANI performs periodic noise floor calibration
328 * that is used to adjust and optimize the chip performance. This
329 * takes environmental changes (location, temperature) into account.
330 * When the task is complete, it reschedules itself depending on the
331 * appropriate interval that was calculated.
332 */
333void ath_ani_calibrate(unsigned long data)
334{
335 struct ath_softc *sc = (struct ath_softc *)data;
336 struct ath_hw *ah = sc->sc_ah;
337 struct ath_common *common = ath9k_hw_common(ah);
338 bool longcal = false;
339 bool shortcal = false;
340 bool aniflag = false;
341 unsigned int timestamp = jiffies_to_msecs(jiffies);
342 u32 cal_interval, short_cal_interval, long_cal_interval;
343 unsigned long flags;
344
345 if (ah->caldata && ah->caldata->nfcal_interference)
346 long_cal_interval = ATH_LONG_CALINTERVAL_INT;
347 else
348 long_cal_interval = ATH_LONG_CALINTERVAL;
349
350 short_cal_interval = (ah->opmode == NL80211_IFTYPE_AP) ?
351 ATH_AP_SHORT_CALINTERVAL : ATH_STA_SHORT_CALINTERVAL;
352
353 /* Only calibrate if awake */
354 if (sc->sc_ah->power_mode != ATH9K_PM_AWAKE)
355 goto set_timer;
356
357 ath9k_ps_wakeup(sc);
358
359 /* Long calibration runs independently of short calibration. */
360 if ((timestamp - common->ani.longcal_timer) >= long_cal_interval) {
361 longcal = true;
362 common->ani.longcal_timer = timestamp;
363 }
364
365 /* Short calibration applies only while caldone is false */
366 if (!common->ani.caldone) {
367 if ((timestamp - common->ani.shortcal_timer) >= short_cal_interval) {
368 shortcal = true;
369 common->ani.shortcal_timer = timestamp;
370 common->ani.resetcal_timer = timestamp;
371 }
372 } else {
373 if ((timestamp - common->ani.resetcal_timer) >=
374 ATH_RESTART_CALINTERVAL) {
375 common->ani.caldone = ath9k_hw_reset_calvalid(ah);
376 if (common->ani.caldone)
377 common->ani.resetcal_timer = timestamp;
378 }
379 }
380
381 /* Verify whether we must check ANI */
382 if (sc->sc_ah->config.enable_ani
383 && (timestamp - common->ani.checkani_timer) >=
384 ah->config.ani_poll_interval) {
385 aniflag = true;
386 common->ani.checkani_timer = timestamp;
387 }
388
389 /* Call ANI routine if necessary */
390 if (aniflag) {
391 spin_lock_irqsave(&common->cc_lock, flags);
392 ath9k_hw_ani_monitor(ah, ah->curchan);
393 ath_update_survey_stats(sc);
394 spin_unlock_irqrestore(&common->cc_lock, flags);
395 }
396
397 /* Perform calibration if necessary */
398 if (longcal || shortcal) {
399 common->ani.caldone =
400 ath9k_hw_calibrate(ah, ah->curchan,
401 ah->rxchainmask, longcal);
402 }
403
404 ath_dbg(common, ANI,
405 "Calibration @%lu finished: %s %s %s, caldone: %s\n",
406 jiffies,
407 longcal ? "long" : "", shortcal ? "short" : "",
408 aniflag ? "ani" : "", common->ani.caldone ? "true" : "false");
409
410 ath9k_debug_samp_bb_mac(sc);
411 ath9k_ps_restore(sc);
412
413set_timer:
414 /*
415 * Set timer interval based on previous results.
416 * The interval must be the shortest necessary to satisfy ANI,
417 * short calibration and long calibration.
418 */
419 cal_interval = ATH_LONG_CALINTERVAL;
420 if (sc->sc_ah->config.enable_ani)
421 cal_interval = min(cal_interval,
422 (u32)ah->config.ani_poll_interval);
423 if (!common->ani.caldone)
424 cal_interval = min(cal_interval, (u32)short_cal_interval);
425
426 mod_timer(&common->ani.timer, jiffies + msecs_to_jiffies(cal_interval));
427 if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_PAPRD) && ah->caldata) {
428 if (!ah->caldata->paprd_done)
429 ieee80211_queue_work(sc->hw, &sc->paprd_work);
430 else if (!ah->paprd_table_write_done)
431 ath_paprd_activate(sc);
432 }
433}
434
435void ath_start_ani(struct ath_common *common)
436{
437 struct ath_hw *ah = common->ah;
438 unsigned long timestamp = jiffies_to_msecs(jiffies);
439 struct ath_softc *sc = (struct ath_softc *) common->priv;
440
441 if (!test_bit(SC_OP_ANI_RUN, &sc->sc_flags))
442 return;
443
444 if (sc->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)
445 return;
446
447 common->ani.longcal_timer = timestamp;
448 common->ani.shortcal_timer = timestamp;
449 common->ani.checkani_timer = timestamp;
450
451 mod_timer(&common->ani.timer,
452 jiffies + msecs_to_jiffies((u32)ah->config.ani_poll_interval));
453}
454
455void ath_update_survey_nf(struct ath_softc *sc, int channel)
456{
457 struct ath_hw *ah = sc->sc_ah;
458 struct ath9k_channel *chan = &ah->channels[channel];
459 struct survey_info *survey = &sc->survey[channel];
460
461 if (chan->noisefloor) {
462 survey->filled |= SURVEY_INFO_NOISE_DBM;
463 survey->noise = ath9k_hw_getchan_noise(ah, chan);
464 }
465}
466
467/*
468 * Updates the survey statistics and returns the busy time since last
469 * update in %, if the measurement duration was long enough for the
470 * result to be useful, -1 otherwise.
471 */
472int ath_update_survey_stats(struct ath_softc *sc)
473{
474 struct ath_hw *ah = sc->sc_ah;
475 struct ath_common *common = ath9k_hw_common(ah);
476 int pos = ah->curchan - &ah->channels[0];
477 struct survey_info *survey = &sc->survey[pos];
478 struct ath_cycle_counters *cc = &common->cc_survey;
479 unsigned int div = common->clockrate * 1000;
480 int ret = 0;
481
482 if (!ah->curchan)
483 return -1;
484
485 if (ah->power_mode == ATH9K_PM_AWAKE)
486 ath_hw_cycle_counters_update(common);
487
488 if (cc->cycles > 0) {
489 survey->filled |= SURVEY_INFO_CHANNEL_TIME |
490 SURVEY_INFO_CHANNEL_TIME_BUSY |
491 SURVEY_INFO_CHANNEL_TIME_RX |
492 SURVEY_INFO_CHANNEL_TIME_TX;
493 survey->channel_time += cc->cycles / div;
494 survey->channel_time_busy += cc->rx_busy / div;
495 survey->channel_time_rx += cc->rx_frame / div;
496 survey->channel_time_tx += cc->tx_frame / div;
497 }
498
499 if (cc->cycles < div)
500 return -1;
501
502 if (cc->cycles > 0)
503 ret = cc->rx_busy * 100 / cc->cycles;
504
505 memset(cc, 0, sizeof(*cc));
506
507 ath_update_survey_nf(sc, pos);
508
509 return ret;
510}
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index dac1a2709e3..e4e73f061a2 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -19,7 +19,7 @@
19#include "ath9k.h" 19#include "ath9k.h"
20#include "btcoex.h" 20#include "btcoex.h"
21 21
22static u8 parse_mpdudensity(u8 mpdudensity) 22u8 ath9k_parse_mpdudensity(u8 mpdudensity)
23{ 23{
24 /* 24 /*
25 * 802.11n D2.0 defined values for "Minimum MPDU Start Spacing": 25 * 802.11n D2.0 defined values for "Minimum MPDU Start Spacing":
@@ -101,6 +101,7 @@ void ath9k_ps_wakeup(struct ath_softc *sc)
101 spin_lock(&common->cc_lock); 101 spin_lock(&common->cc_lock);
102 ath_hw_cycle_counters_update(common); 102 ath_hw_cycle_counters_update(common);
103 memset(&common->cc_survey, 0, sizeof(common->cc_survey)); 103 memset(&common->cc_survey, 0, sizeof(common->cc_survey));
104 memset(&common->cc_ani, 0, sizeof(common->cc_ani));
104 spin_unlock(&common->cc_lock); 105 spin_unlock(&common->cc_lock);
105 } 106 }
106 107
@@ -143,90 +144,17 @@ void ath9k_ps_restore(struct ath_softc *sc)
143 spin_unlock_irqrestore(&sc->sc_pm_lock, flags); 144 spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
144} 145}
145 146
146void ath_start_ani(struct ath_common *common)
147{
148 struct ath_hw *ah = common->ah;
149 unsigned long timestamp = jiffies_to_msecs(jiffies);
150 struct ath_softc *sc = (struct ath_softc *) common->priv;
151
152 if (!(sc->sc_flags & SC_OP_ANI_RUN))
153 return;
154
155 if (sc->sc_flags & SC_OP_OFFCHANNEL)
156 return;
157
158 common->ani.longcal_timer = timestamp;
159 common->ani.shortcal_timer = timestamp;
160 common->ani.checkani_timer = timestamp;
161
162 mod_timer(&common->ani.timer,
163 jiffies +
164 msecs_to_jiffies((u32)ah->config.ani_poll_interval));
165}
166
167static void ath_update_survey_nf(struct ath_softc *sc, int channel)
168{
169 struct ath_hw *ah = sc->sc_ah;
170 struct ath9k_channel *chan = &ah->channels[channel];
171 struct survey_info *survey = &sc->survey[channel];
172
173 if (chan->noisefloor) {
174 survey->filled |= SURVEY_INFO_NOISE_DBM;
175 survey->noise = ath9k_hw_getchan_noise(ah, chan);
176 }
177}
178
179/*
180 * Updates the survey statistics and returns the busy time since last
181 * update in %, if the measurement duration was long enough for the
182 * result to be useful, -1 otherwise.
183 */
184static int ath_update_survey_stats(struct ath_softc *sc)
185{
186 struct ath_hw *ah = sc->sc_ah;
187 struct ath_common *common = ath9k_hw_common(ah);
188 int pos = ah->curchan - &ah->channels[0];
189 struct survey_info *survey = &sc->survey[pos];
190 struct ath_cycle_counters *cc = &common->cc_survey;
191 unsigned int div = common->clockrate * 1000;
192 int ret = 0;
193
194 if (!ah->curchan)
195 return -1;
196
197 if (ah->power_mode == ATH9K_PM_AWAKE)
198 ath_hw_cycle_counters_update(common);
199
200 if (cc->cycles > 0) {
201 survey->filled |= SURVEY_INFO_CHANNEL_TIME |
202 SURVEY_INFO_CHANNEL_TIME_BUSY |
203 SURVEY_INFO_CHANNEL_TIME_RX |
204 SURVEY_INFO_CHANNEL_TIME_TX;
205 survey->channel_time += cc->cycles / div;
206 survey->channel_time_busy += cc->rx_busy / div;
207 survey->channel_time_rx += cc->rx_frame / div;
208 survey->channel_time_tx += cc->tx_frame / div;
209 }
210
211 if (cc->cycles < div)
212 return -1;
213
214 if (cc->cycles > 0)
215 ret = cc->rx_busy * 100 / cc->cycles;
216
217 memset(cc, 0, sizeof(*cc));
218
219 ath_update_survey_nf(sc, pos);
220
221 return ret;
222}
223
224static void __ath_cancel_work(struct ath_softc *sc) 147static void __ath_cancel_work(struct ath_softc *sc)
225{ 148{
226 cancel_work_sync(&sc->paprd_work); 149 cancel_work_sync(&sc->paprd_work);
227 cancel_work_sync(&sc->hw_check_work); 150 cancel_work_sync(&sc->hw_check_work);
228 cancel_delayed_work_sync(&sc->tx_complete_work); 151 cancel_delayed_work_sync(&sc->tx_complete_work);
229 cancel_delayed_work_sync(&sc->hw_pll_work); 152 cancel_delayed_work_sync(&sc->hw_pll_work);
153
154#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
155 if (ath9k_hw_mci_is_enabled(sc->sc_ah))
156 cancel_work_sync(&sc->mci_work);
157#endif
230} 158}
231 159
232static void ath_cancel_work(struct ath_softc *sc) 160static void ath_cancel_work(struct ath_softc *sc)
@@ -235,6 +163,22 @@ static void ath_cancel_work(struct ath_softc *sc)
235 cancel_work_sync(&sc->hw_reset_work); 163 cancel_work_sync(&sc->hw_reset_work);
236} 164}
237 165
166static void ath_restart_work(struct ath_softc *sc)
167{
168 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
169
170 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work, 0);
171
172 if (AR_SREV_9485(sc->sc_ah) || AR_SREV_9340(sc->sc_ah))
173 ieee80211_queue_delayed_work(sc->hw, &sc->hw_pll_work,
174 msecs_to_jiffies(ATH_PLL_WORK_INTERVAL));
175
176 ath_start_rx_poll(sc, 3);
177
178 if (!common->disable_ani)
179 ath_start_ani(common);
180}
181
238static bool ath_prepare_reset(struct ath_softc *sc, bool retry_tx, bool flush) 182static bool ath_prepare_reset(struct ath_softc *sc, bool retry_tx, bool flush)
239{ 183{
240 struct ath_hw *ah = sc->sc_ah; 184 struct ath_hw *ah = sc->sc_ah;
@@ -271,6 +215,7 @@ static bool ath_complete_reset(struct ath_softc *sc, bool start)
271{ 215{
272 struct ath_hw *ah = sc->sc_ah; 216 struct ath_hw *ah = sc->sc_ah;
273 struct ath_common *common = ath9k_hw_common(ah); 217 struct ath_common *common = ath9k_hw_common(ah);
218 unsigned long flags;
274 219
275 if (ath_startrecv(sc) != 0) { 220 if (ath_startrecv(sc) != 0) {
276 ath_err(common, "Unable to restart recv logic\n"); 221 ath_err(common, "Unable to restart recv logic\n");
@@ -279,36 +224,30 @@ static bool ath_complete_reset(struct ath_softc *sc, bool start)
279 224
280 ath9k_cmn_update_txpow(ah, sc->curtxpow, 225 ath9k_cmn_update_txpow(ah, sc->curtxpow,
281 sc->config.txpowlimit, &sc->curtxpow); 226 sc->config.txpowlimit, &sc->curtxpow);
227
228 clear_bit(SC_OP_HW_RESET, &sc->sc_flags);
282 ath9k_hw_set_interrupts(ah); 229 ath9k_hw_set_interrupts(ah);
283 ath9k_hw_enable_interrupts(ah); 230 ath9k_hw_enable_interrupts(ah);
284 231
285 if (!(sc->sc_flags & (SC_OP_OFFCHANNEL)) && start) { 232 if (!(sc->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL) && start) {
286 if (sc->sc_flags & SC_OP_BEACONS) 233 if (!test_bit(SC_OP_BEACONS, &sc->sc_flags))
287 ath_set_beacon(sc); 234 goto work;
288
289 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work, 0);
290 ieee80211_queue_delayed_work(sc->hw, &sc->hw_pll_work, HZ/2);
291 ath_start_rx_poll(sc, 3);
292 if (!common->disable_ani)
293 ath_start_ani(common);
294 }
295
296 if ((ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) && sc->ant_rx != 3) {
297 struct ath_hw_antcomb_conf div_ant_conf;
298 u8 lna_conf;
299 235
300 ath9k_hw_antdiv_comb_conf_get(ah, &div_ant_conf); 236 ath_set_beacon(sc);
301 237
302 if (sc->ant_rx == 1) 238 if (ah->opmode == NL80211_IFTYPE_STATION &&
303 lna_conf = ATH_ANT_DIV_COMB_LNA1; 239 test_bit(SC_OP_PRIM_STA_VIF, &sc->sc_flags)) {
304 else 240 spin_lock_irqsave(&sc->sc_pm_lock, flags);
305 lna_conf = ATH_ANT_DIV_COMB_LNA2; 241 sc->ps_flags |= PS_BEACON_SYNC | PS_WAIT_FOR_BEACON;
306 div_ant_conf.main_lna_conf = lna_conf; 242 spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
307 div_ant_conf.alt_lna_conf = lna_conf; 243 }
308 244 work:
309 ath9k_hw_antdiv_comb_conf_set(ah, &div_ant_conf); 245 ath_restart_work(sc);
310 } 246 }
311 247
248 if ((ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) && sc->ant_rx != 3)
249 ath_ant_comb_update(sc);
250
312 ieee80211_wake_queues(sc->hw); 251 ieee80211_wake_queues(sc->hw);
313 252
314 return true; 253 return true;
@@ -328,7 +267,7 @@ static int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan,
328 267
329 spin_lock_bh(&sc->sc_pcu_lock); 268 spin_lock_bh(&sc->sc_pcu_lock);
330 269
331 if (!(sc->sc_flags & SC_OP_OFFCHANNEL)) { 270 if (!(sc->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)) {
332 fastcc = false; 271 fastcc = false;
333 caldata = &sc->caldata; 272 caldata = &sc->caldata;
334 } 273 }
@@ -371,7 +310,7 @@ static int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
371{ 310{
372 int r; 311 int r;
373 312
374 if (sc->sc_flags & SC_OP_INVALID) 313 if (test_bit(SC_OP_INVALID, &sc->sc_flags))
375 return -EIO; 314 return -EIO;
376 315
377 r = ath_reset_internal(sc, hchan, false); 316 r = ath_reset_internal(sc, hchan, false);
@@ -379,262 +318,11 @@ static int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
379 return r; 318 return r;
380} 319}
381 320
382static void ath_paprd_activate(struct ath_softc *sc)
383{
384 struct ath_hw *ah = sc->sc_ah;
385 struct ath9k_hw_cal_data *caldata = ah->caldata;
386 int chain;
387
388 if (!caldata || !caldata->paprd_done)
389 return;
390
391 ath9k_ps_wakeup(sc);
392 ar9003_paprd_enable(ah, false);
393 for (chain = 0; chain < AR9300_MAX_CHAINS; chain++) {
394 if (!(ah->txchainmask & BIT(chain)))
395 continue;
396
397 ar9003_paprd_populate_single_table(ah, caldata, chain);
398 }
399
400 ar9003_paprd_enable(ah, true);
401 ath9k_ps_restore(sc);
402}
403
404static bool ath_paprd_send_frame(struct ath_softc *sc, struct sk_buff *skb, int chain)
405{
406 struct ieee80211_hw *hw = sc->hw;
407 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
408 struct ath_hw *ah = sc->sc_ah;
409 struct ath_common *common = ath9k_hw_common(ah);
410 struct ath_tx_control txctl;
411 int time_left;
412
413 memset(&txctl, 0, sizeof(txctl));
414 txctl.txq = sc->tx.txq_map[WME_AC_BE];
415
416 memset(tx_info, 0, sizeof(*tx_info));
417 tx_info->band = hw->conf.channel->band;
418 tx_info->flags |= IEEE80211_TX_CTL_NO_ACK;
419 tx_info->control.rates[0].idx = 0;
420 tx_info->control.rates[0].count = 1;
421 tx_info->control.rates[0].flags = IEEE80211_TX_RC_MCS;
422 tx_info->control.rates[1].idx = -1;
423
424 init_completion(&sc->paprd_complete);
425 txctl.paprd = BIT(chain);
426
427 if (ath_tx_start(hw, skb, &txctl) != 0) {
428 ath_dbg(common, CALIBRATE, "PAPRD TX failed\n");
429 dev_kfree_skb_any(skb);
430 return false;
431 }
432
433 time_left = wait_for_completion_timeout(&sc->paprd_complete,
434 msecs_to_jiffies(ATH_PAPRD_TIMEOUT));
435
436 if (!time_left)
437 ath_dbg(common, CALIBRATE,
438 "Timeout waiting for paprd training on TX chain %d\n",
439 chain);
440
441 return !!time_left;
442}
443
444void ath_paprd_calibrate(struct work_struct *work)
445{
446 struct ath_softc *sc = container_of(work, struct ath_softc, paprd_work);
447 struct ieee80211_hw *hw = sc->hw;
448 struct ath_hw *ah = sc->sc_ah;
449 struct ieee80211_hdr *hdr;
450 struct sk_buff *skb = NULL;
451 struct ath9k_hw_cal_data *caldata = ah->caldata;
452 struct ath_common *common = ath9k_hw_common(ah);
453 int ftype;
454 int chain_ok = 0;
455 int chain;
456 int len = 1800;
457
458 if (!caldata)
459 return;
460
461 ath9k_ps_wakeup(sc);
462
463 if (ar9003_paprd_init_table(ah) < 0)
464 goto fail_paprd;
465
466 skb = alloc_skb(len, GFP_KERNEL);
467 if (!skb)
468 goto fail_paprd;
469
470 skb_put(skb, len);
471 memset(skb->data, 0, len);
472 hdr = (struct ieee80211_hdr *)skb->data;
473 ftype = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_NULLFUNC;
474 hdr->frame_control = cpu_to_le16(ftype);
475 hdr->duration_id = cpu_to_le16(10);
476 memcpy(hdr->addr1, hw->wiphy->perm_addr, ETH_ALEN);
477 memcpy(hdr->addr2, hw->wiphy->perm_addr, ETH_ALEN);
478 memcpy(hdr->addr3, hw->wiphy->perm_addr, ETH_ALEN);
479
480 for (chain = 0; chain < AR9300_MAX_CHAINS; chain++) {
481 if (!(ah->txchainmask & BIT(chain)))
482 continue;
483
484 chain_ok = 0;
485
486 ath_dbg(common, CALIBRATE,
487 "Sending PAPRD frame for thermal measurement on chain %d\n",
488 chain);
489 if (!ath_paprd_send_frame(sc, skb, chain))
490 goto fail_paprd;
491
492 ar9003_paprd_setup_gain_table(ah, chain);
493
494 ath_dbg(common, CALIBRATE,
495 "Sending PAPRD training frame on chain %d\n", chain);
496 if (!ath_paprd_send_frame(sc, skb, chain))
497 goto fail_paprd;
498
499 if (!ar9003_paprd_is_done(ah)) {
500 ath_dbg(common, CALIBRATE,
501 "PAPRD not yet done on chain %d\n", chain);
502 break;
503 }
504
505 if (ar9003_paprd_create_curve(ah, caldata, chain)) {
506 ath_dbg(common, CALIBRATE,
507 "PAPRD create curve failed on chain %d\n",
508 chain);
509 break;
510 }
511
512 chain_ok = 1;
513 }
514 kfree_skb(skb);
515
516 if (chain_ok) {
517 caldata->paprd_done = true;
518 ath_paprd_activate(sc);
519 }
520
521fail_paprd:
522 ath9k_ps_restore(sc);
523}
524
525/*
526 * This routine performs the periodic noise floor calibration function
527 * that is used to adjust and optimize the chip performance. This
528 * takes environmental changes (location, temperature) into account.
529 * When the task is complete, it reschedules itself depending on the
530 * appropriate interval that was calculated.
531 */
532void ath_ani_calibrate(unsigned long data)
533{
534 struct ath_softc *sc = (struct ath_softc *)data;
535 struct ath_hw *ah = sc->sc_ah;
536 struct ath_common *common = ath9k_hw_common(ah);
537 bool longcal = false;
538 bool shortcal = false;
539 bool aniflag = false;
540 unsigned int timestamp = jiffies_to_msecs(jiffies);
541 u32 cal_interval, short_cal_interval, long_cal_interval;
542 unsigned long flags;
543
544 if (ah->caldata && ah->caldata->nfcal_interference)
545 long_cal_interval = ATH_LONG_CALINTERVAL_INT;
546 else
547 long_cal_interval = ATH_LONG_CALINTERVAL;
548
549 short_cal_interval = (ah->opmode == NL80211_IFTYPE_AP) ?
550 ATH_AP_SHORT_CALINTERVAL : ATH_STA_SHORT_CALINTERVAL;
551
552 /* Only calibrate if awake */
553 if (sc->sc_ah->power_mode != ATH9K_PM_AWAKE)
554 goto set_timer;
555
556 ath9k_ps_wakeup(sc);
557
558 /* Long calibration runs independently of short calibration. */
559 if ((timestamp - common->ani.longcal_timer) >= long_cal_interval) {
560 longcal = true;
561 common->ani.longcal_timer = timestamp;
562 }
563
564 /* Short calibration applies only while caldone is false */
565 if (!common->ani.caldone) {
566 if ((timestamp - common->ani.shortcal_timer) >= short_cal_interval) {
567 shortcal = true;
568 common->ani.shortcal_timer = timestamp;
569 common->ani.resetcal_timer = timestamp;
570 }
571 } else {
572 if ((timestamp - common->ani.resetcal_timer) >=
573 ATH_RESTART_CALINTERVAL) {
574 common->ani.caldone = ath9k_hw_reset_calvalid(ah);
575 if (common->ani.caldone)
576 common->ani.resetcal_timer = timestamp;
577 }
578 }
579
580 /* Verify whether we must check ANI */
581 if (sc->sc_ah->config.enable_ani
582 && (timestamp - common->ani.checkani_timer) >=
583 ah->config.ani_poll_interval) {
584 aniflag = true;
585 common->ani.checkani_timer = timestamp;
586 }
587
588 /* Call ANI routine if necessary */
589 if (aniflag) {
590 spin_lock_irqsave(&common->cc_lock, flags);
591 ath9k_hw_ani_monitor(ah, ah->curchan);
592 ath_update_survey_stats(sc);
593 spin_unlock_irqrestore(&common->cc_lock, flags);
594 }
595
596 /* Perform calibration if necessary */
597 if (longcal || shortcal) {
598 common->ani.caldone =
599 ath9k_hw_calibrate(ah, ah->curchan,
600 ah->rxchainmask, longcal);
601 }
602
603 ath_dbg(common, ANI,
604 "Calibration @%lu finished: %s %s %s, caldone: %s\n",
605 jiffies,
606 longcal ? "long" : "", shortcal ? "short" : "",
607 aniflag ? "ani" : "", common->ani.caldone ? "true" : "false");
608
609 ath9k_ps_restore(sc);
610
611set_timer:
612 /*
613 * Set timer interval based on previous results.
614 * The interval must be the shortest necessary to satisfy ANI,
615 * short calibration and long calibration.
616 */
617 ath9k_debug_samp_bb_mac(sc);
618 cal_interval = ATH_LONG_CALINTERVAL;
619 if (sc->sc_ah->config.enable_ani)
620 cal_interval = min(cal_interval,
621 (u32)ah->config.ani_poll_interval);
622 if (!common->ani.caldone)
623 cal_interval = min(cal_interval, (u32)short_cal_interval);
624
625 mod_timer(&common->ani.timer, jiffies + msecs_to_jiffies(cal_interval));
626 if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_PAPRD) && ah->caldata) {
627 if (!ah->caldata->paprd_done)
628 ieee80211_queue_work(sc->hw, &sc->paprd_work);
629 else if (!ah->paprd_table_write_done)
630 ath_paprd_activate(sc);
631 }
632}
633
634static void ath_node_attach(struct ath_softc *sc, struct ieee80211_sta *sta, 321static void ath_node_attach(struct ath_softc *sc, struct ieee80211_sta *sta,
635 struct ieee80211_vif *vif) 322 struct ieee80211_vif *vif)
636{ 323{
637 struct ath_node *an; 324 struct ath_node *an;
325 u8 density;
638 an = (struct ath_node *)sta->drv_priv; 326 an = (struct ath_node *)sta->drv_priv;
639 327
640#ifdef CONFIG_ATH9K_DEBUGFS 328#ifdef CONFIG_ATH9K_DEBUGFS
@@ -649,7 +337,8 @@ static void ath_node_attach(struct ath_softc *sc, struct ieee80211_sta *sta,
649 ath_tx_node_init(sc, an); 337 ath_tx_node_init(sc, an);
650 an->maxampdu = 1 << (IEEE80211_HT_MAX_AMPDU_FACTOR + 338 an->maxampdu = 1 << (IEEE80211_HT_MAX_AMPDU_FACTOR +
651 sta->ht_cap.ampdu_factor); 339 sta->ht_cap.ampdu_factor);
652 an->mpdudensity = parse_mpdudensity(sta->ht_cap.ampdu_density); 340 density = ath9k_parse_mpdudensity(sta->ht_cap.ampdu_density);
341 an->mpdudensity = density;
653 } 342 }
654} 343}
655 344
@@ -668,13 +357,12 @@ static void ath_node_detach(struct ath_softc *sc, struct ieee80211_sta *sta)
668 ath_tx_node_cleanup(sc, an); 357 ath_tx_node_cleanup(sc, an);
669} 358}
670 359
671
672void ath9k_tasklet(unsigned long data) 360void ath9k_tasklet(unsigned long data)
673{ 361{
674 struct ath_softc *sc = (struct ath_softc *)data; 362 struct ath_softc *sc = (struct ath_softc *)data;
675 struct ath_hw *ah = sc->sc_ah; 363 struct ath_hw *ah = sc->sc_ah;
676 struct ath_common *common = ath9k_hw_common(ah); 364 struct ath_common *common = ath9k_hw_common(ah);
677 365 unsigned long flags;
678 u32 status = sc->intrstatus; 366 u32 status = sc->intrstatus;
679 u32 rxmask; 367 u32 rxmask;
680 368
@@ -693,10 +381,12 @@ void ath9k_tasklet(unsigned long data)
693 381
694 RESET_STAT_INC(sc, type); 382 RESET_STAT_INC(sc, type);
695#endif 383#endif
384 set_bit(SC_OP_HW_RESET, &sc->sc_flags);
696 ieee80211_queue_work(sc->hw, &sc->hw_reset_work); 385 ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
697 goto out; 386 goto out;
698 } 387 }
699 388
389 spin_lock_irqsave(&sc->sc_pm_lock, flags);
700 if ((status & ATH9K_INT_TSFOOR) && sc->ps_enabled) { 390 if ((status & ATH9K_INT_TSFOOR) && sc->ps_enabled) {
701 /* 391 /*
702 * TSF sync does not look correct; remain awake to sync with 392 * TSF sync does not look correct; remain awake to sync with
@@ -705,6 +395,7 @@ void ath9k_tasklet(unsigned long data)
705 ath_dbg(common, PS, "TSFOOR - Sync with next Beacon\n"); 395 ath_dbg(common, PS, "TSFOOR - Sync with next Beacon\n");
706 sc->ps_flags |= PS_WAIT_FOR_BEACON | PS_BEACON_SYNC; 396 sc->ps_flags |= PS_WAIT_FOR_BEACON | PS_BEACON_SYNC;
707 } 397 }
398 spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
708 399
709 if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) 400 if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
710 rxmask = (ATH9K_INT_RXHP | ATH9K_INT_RXLP | ATH9K_INT_RXEOL | 401 rxmask = (ATH9K_INT_RXHP | ATH9K_INT_RXLP | ATH9K_INT_RXEOL |
@@ -766,15 +457,17 @@ irqreturn_t ath_isr(int irq, void *dev)
766 * touch anything. Note this can happen early 457 * touch anything. Note this can happen early
767 * on if the IRQ is shared. 458 * on if the IRQ is shared.
768 */ 459 */
769 if (sc->sc_flags & SC_OP_INVALID) 460 if (test_bit(SC_OP_INVALID, &sc->sc_flags))
770 return IRQ_NONE; 461 return IRQ_NONE;
771 462
772
773 /* shared irq, not for us */ 463 /* shared irq, not for us */
774 464
775 if (!ath9k_hw_intrpend(ah)) 465 if (!ath9k_hw_intrpend(ah))
776 return IRQ_NONE; 466 return IRQ_NONE;
777 467
468 if(test_bit(SC_OP_HW_RESET, &sc->sc_flags))
469 return IRQ_HANDLED;
470
778 /* 471 /*
779 * Figure out the reason(s) for the interrupt. Note 472 * Figure out the reason(s) for the interrupt. Note
780 * that the hal returns a pseudo-ISR that may include 473 * that the hal returns a pseudo-ISR that may include
@@ -827,24 +520,6 @@ irqreturn_t ath_isr(int irq, void *dev)
827 ath9k_hw_set_interrupts(ah); 520 ath9k_hw_set_interrupts(ah);
828 } 521 }
829 522
830 if (status & ATH9K_INT_MIB) {
831 /*
832 * Disable interrupts until we service the MIB
833 * interrupt; otherwise it will continue to
834 * fire.
835 */
836 ath9k_hw_disable_interrupts(ah);
837 /*
838 * Let the hal handle the event. We assume
839 * it will clear whatever condition caused
840 * the interrupt.
841 */
842 spin_lock(&common->cc_lock);
843 ath9k_hw_proc_mib_event(ah);
844 spin_unlock(&common->cc_lock);
845 ath9k_hw_enable_interrupts(ah);
846 }
847
848 if (!(ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) 523 if (!(ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
849 if (status & ATH9K_INT_TIM_TIMER) { 524 if (status & ATH9K_INT_TIM_TIMER) {
850 if (ATH_DBG_WARN_ON_ONCE(sc->ps_idle)) 525 if (ATH_DBG_WARN_ON_ONCE(sc->ps_idle))
@@ -852,8 +527,10 @@ irqreturn_t ath_isr(int irq, void *dev)
852 /* Clear RxAbort bit so that we can 527 /* Clear RxAbort bit so that we can
853 * receive frames */ 528 * receive frames */
854 ath9k_setpower(sc, ATH9K_PM_AWAKE); 529 ath9k_setpower(sc, ATH9K_PM_AWAKE);
530 spin_lock(&sc->sc_pm_lock);
855 ath9k_hw_setrxabort(sc->sc_ah, 0); 531 ath9k_hw_setrxabort(sc->sc_ah, 0);
856 sc->ps_flags |= PS_WAIT_FOR_BEACON; 532 sc->ps_flags |= PS_WAIT_FOR_BEACON;
533 spin_unlock(&sc->sc_pm_lock);
857 } 534 }
858 535
859chip_reset: 536chip_reset:
@@ -902,96 +579,6 @@ void ath_reset_work(struct work_struct *work)
902 ath_reset(sc, true); 579 ath_reset(sc, true);
903} 580}
904 581
905void ath_hw_check(struct work_struct *work)
906{
907 struct ath_softc *sc = container_of(work, struct ath_softc, hw_check_work);
908 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
909 unsigned long flags;
910 int busy;
911 u8 is_alive, nbeacon = 1;
912
913 ath9k_ps_wakeup(sc);
914 is_alive = ath9k_hw_check_alive(sc->sc_ah);
915
916 if (is_alive && !AR_SREV_9300(sc->sc_ah))
917 goto out;
918 else if (!is_alive && AR_SREV_9300(sc->sc_ah)) {
919 ath_dbg(common, RESET,
920 "DCU stuck is detected. Schedule chip reset\n");
921 RESET_STAT_INC(sc, RESET_TYPE_MAC_HANG);
922 goto sched_reset;
923 }
924
925 spin_lock_irqsave(&common->cc_lock, flags);
926 busy = ath_update_survey_stats(sc);
927 spin_unlock_irqrestore(&common->cc_lock, flags);
928
929 ath_dbg(common, RESET, "Possible baseband hang, busy=%d (try %d)\n",
930 busy, sc->hw_busy_count + 1);
931 if (busy >= 99) {
932 if (++sc->hw_busy_count >= 3) {
933 RESET_STAT_INC(sc, RESET_TYPE_BB_HANG);
934 goto sched_reset;
935 }
936 } else if (busy >= 0) {
937 sc->hw_busy_count = 0;
938 nbeacon = 3;
939 }
940
941 ath_start_rx_poll(sc, nbeacon);
942 goto out;
943
944sched_reset:
945 ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
946out:
947 ath9k_ps_restore(sc);
948}
949
950static void ath_hw_pll_rx_hang_check(struct ath_softc *sc, u32 pll_sqsum)
951{
952 static int count;
953 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
954
955 if (pll_sqsum >= 0x40000) {
956 count++;
957 if (count == 3) {
958 /* Rx is hung for more than 500ms. Reset it */
959 ath_dbg(common, RESET, "Possible RX hang, resetting\n");
960 RESET_STAT_INC(sc, RESET_TYPE_PLL_HANG);
961 ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
962 count = 0;
963 }
964 } else
965 count = 0;
966}
967
968void ath_hw_pll_work(struct work_struct *work)
969{
970 struct ath_softc *sc = container_of(work, struct ath_softc,
971 hw_pll_work.work);
972 u32 pll_sqsum;
973
974 /*
975 * ensure that the PLL WAR is executed only
976 * after the STA is associated (or) if the
977 * beaconing had started in interfaces that
978 * uses beacons.
979 */
980 if (!(sc->sc_flags & SC_OP_BEACONS))
981 return;
982
983 if (AR_SREV_9485(sc->sc_ah)) {
984
985 ath9k_ps_wakeup(sc);
986 pll_sqsum = ar9003_get_pll_sqsum_dvc(sc->sc_ah);
987 ath9k_ps_restore(sc);
988
989 ath_hw_pll_rx_hang_check(sc, pll_sqsum);
990
991 ieee80211_queue_delayed_work(sc->hw, &sc->hw_pll_work, HZ/5);
992 }
993}
994
995/**********************/ 582/**********************/
996/* mac80211 callbacks */ 583/* mac80211 callbacks */
997/**********************/ 584/**********************/
@@ -1054,10 +641,9 @@ static int ath9k_start(struct ieee80211_hw *hw)
1054 if (ah->caps.hw_caps & ATH9K_HW_CAP_HT) 641 if (ah->caps.hw_caps & ATH9K_HW_CAP_HT)
1055 ah->imask |= ATH9K_INT_CST; 642 ah->imask |= ATH9K_INT_CST;
1056 643
1057 if (ah->caps.hw_caps & ATH9K_HW_CAP_MCI) 644 ath_mci_enable(sc);
1058 ah->imask |= ATH9K_INT_MCI;
1059 645
1060 sc->sc_flags &= ~SC_OP_INVALID; 646 clear_bit(SC_OP_INVALID, &sc->sc_flags);
1061 sc->sc_ah->is_monitoring = false; 647 sc->sc_ah->is_monitoring = false;
1062 648
1063 if (!ath_complete_reset(sc, false)) { 649 if (!ath_complete_reset(sc, false)) {
@@ -1099,6 +685,7 @@ static void ath9k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
1099 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 685 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
1100 struct ath_tx_control txctl; 686 struct ath_tx_control txctl;
1101 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 687 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
688 unsigned long flags;
1102 689
1103 if (sc->ps_enabled) { 690 if (sc->ps_enabled) {
1104 /* 691 /*
@@ -1121,6 +708,7 @@ static void ath9k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
1121 * completed and if needed, also for RX of buffered frames. 708 * completed and if needed, also for RX of buffered frames.
1122 */ 709 */
1123 ath9k_ps_wakeup(sc); 710 ath9k_ps_wakeup(sc);
711 spin_lock_irqsave(&sc->sc_pm_lock, flags);
1124 if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) 712 if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
1125 ath9k_hw_setrxabort(sc->sc_ah, 0); 713 ath9k_hw_setrxabort(sc->sc_ah, 0);
1126 if (ieee80211_is_pspoll(hdr->frame_control)) { 714 if (ieee80211_is_pspoll(hdr->frame_control)) {
@@ -1136,6 +724,7 @@ static void ath9k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
1136 * the ps_flags bit is cleared. We are just dropping 724 * the ps_flags bit is cleared. We are just dropping
1137 * the ps_usecount here. 725 * the ps_usecount here.
1138 */ 726 */
727 spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
1139 ath9k_ps_restore(sc); 728 ath9k_ps_restore(sc);
1140 } 729 }
1141 730
@@ -1176,7 +765,7 @@ static void ath9k_stop(struct ieee80211_hw *hw)
1176 ath_cancel_work(sc); 765 ath_cancel_work(sc);
1177 del_timer_sync(&sc->rx_poll_timer); 766 del_timer_sync(&sc->rx_poll_timer);
1178 767
1179 if (sc->sc_flags & SC_OP_INVALID) { 768 if (test_bit(SC_OP_INVALID, &sc->sc_flags)) {
1180 ath_dbg(common, ANY, "Device not present\n"); 769 ath_dbg(common, ANY, "Device not present\n");
1181 mutex_unlock(&sc->mutex); 770 mutex_unlock(&sc->mutex);
1182 return; 771 return;
@@ -1233,7 +822,7 @@ static void ath9k_stop(struct ieee80211_hw *hw)
1233 822
1234 ath9k_ps_restore(sc); 823 ath9k_ps_restore(sc);
1235 824
1236 sc->sc_flags |= SC_OP_INVALID; 825 set_bit(SC_OP_INVALID, &sc->sc_flags);
1237 sc->ps_idle = prev_idle; 826 sc->ps_idle = prev_idle;
1238 827
1239 mutex_unlock(&sc->mutex); 828 mutex_unlock(&sc->mutex);
@@ -1337,11 +926,11 @@ static void ath9k_calculate_summary_state(struct ieee80211_hw *hw,
1337 /* Set op-mode & TSF */ 926 /* Set op-mode & TSF */
1338 if (iter_data.naps > 0) { 927 if (iter_data.naps > 0) {
1339 ath9k_hw_set_tsfadjust(ah, 1); 928 ath9k_hw_set_tsfadjust(ah, 1);
1340 sc->sc_flags |= SC_OP_TSF_RESET; 929 set_bit(SC_OP_TSF_RESET, &sc->sc_flags);
1341 ah->opmode = NL80211_IFTYPE_AP; 930 ah->opmode = NL80211_IFTYPE_AP;
1342 } else { 931 } else {
1343 ath9k_hw_set_tsfadjust(ah, 0); 932 ath9k_hw_set_tsfadjust(ah, 0);
1344 sc->sc_flags &= ~SC_OP_TSF_RESET; 933 clear_bit(SC_OP_TSF_RESET, &sc->sc_flags);
1345 934
1346 if (iter_data.nmeshes) 935 if (iter_data.nmeshes)
1347 ah->opmode = NL80211_IFTYPE_MESH_POINT; 936 ah->opmode = NL80211_IFTYPE_MESH_POINT;
@@ -1356,14 +945,10 @@ static void ath9k_calculate_summary_state(struct ieee80211_hw *hw,
1356 /* 945 /*
1357 * Enable MIB interrupts when there are hardware phy counters. 946 * Enable MIB interrupts when there are hardware phy counters.
1358 */ 947 */
1359 if ((iter_data.nstations + iter_data.nadhocs + iter_data.nmeshes) > 0) { 948 if ((iter_data.nstations + iter_data.nadhocs + iter_data.nmeshes) > 0)
1360 if (ah->config.enable_ani)
1361 ah->imask |= ATH9K_INT_MIB;
1362 ah->imask |= ATH9K_INT_TSFOOR; 949 ah->imask |= ATH9K_INT_TSFOOR;
1363 } else { 950 else
1364 ah->imask &= ~ATH9K_INT_MIB;
1365 ah->imask &= ~ATH9K_INT_TSFOOR; 951 ah->imask &= ~ATH9K_INT_TSFOOR;
1366 }
1367 952
1368 ath9k_hw_set_interrupts(ah); 953 ath9k_hw_set_interrupts(ah);
1369 954
@@ -1372,12 +957,12 @@ static void ath9k_calculate_summary_state(struct ieee80211_hw *hw,
1372 sc->sc_ah->stats.avgbrssi = ATH_RSSI_DUMMY_MARKER; 957 sc->sc_ah->stats.avgbrssi = ATH_RSSI_DUMMY_MARKER;
1373 958
1374 if (!common->disable_ani) { 959 if (!common->disable_ani) {
1375 sc->sc_flags |= SC_OP_ANI_RUN; 960 set_bit(SC_OP_ANI_RUN, &sc->sc_flags);
1376 ath_start_ani(common); 961 ath_start_ani(common);
1377 } 962 }
1378 963
1379 } else { 964 } else {
1380 sc->sc_flags &= ~SC_OP_ANI_RUN; 965 clear_bit(SC_OP_ANI_RUN, &sc->sc_flags);
1381 del_timer_sync(&common->ani.timer); 966 del_timer_sync(&common->ani.timer);
1382 } 967 }
1383} 968}
@@ -1398,25 +983,6 @@ static void ath9k_do_vif_add_setup(struct ieee80211_hw *hw,
1398 } 983 }
1399} 984}
1400 985
1401void ath_start_rx_poll(struct ath_softc *sc, u8 nbeacon)
1402{
1403 if (!AR_SREV_9300(sc->sc_ah))
1404 return;
1405
1406 if (!(sc->sc_flags & SC_OP_PRIM_STA_VIF))
1407 return;
1408
1409 mod_timer(&sc->rx_poll_timer, jiffies + msecs_to_jiffies
1410 (nbeacon * sc->cur_beacon_conf.beacon_interval));
1411}
1412
1413void ath_rx_poll(unsigned long data)
1414{
1415 struct ath_softc *sc = (struct ath_softc *)data;
1416
1417 ieee80211_queue_work(sc->hw, &sc->hw_check_work);
1418}
1419
1420static int ath9k_add_interface(struct ieee80211_hw *hw, 986static int ath9k_add_interface(struct ieee80211_hw *hw,
1421 struct ieee80211_vif *vif) 987 struct ieee80211_vif *vif)
1422{ 988{
@@ -1618,11 +1184,6 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
1618 if (ah->curchan) 1184 if (ah->curchan)
1619 old_pos = ah->curchan - &ah->channels[0]; 1185 old_pos = ah->curchan - &ah->channels[0];
1620 1186
1621 if (hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)
1622 sc->sc_flags |= SC_OP_OFFCHANNEL;
1623 else
1624 sc->sc_flags &= ~SC_OP_OFFCHANNEL;
1625
1626 ath_dbg(common, CONFIG, "Set channel: %d MHz type: %d\n", 1187 ath_dbg(common, CONFIG, "Set channel: %d MHz type: %d\n",
1627 curchan->center_freq, conf->channel_type); 1188 curchan->center_freq, conf->channel_type);
1628 1189
@@ -1664,6 +1225,7 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
1664 if (ath_set_channel(sc, hw, &sc->sc_ah->channels[pos]) < 0) { 1225 if (ath_set_channel(sc, hw, &sc->sc_ah->channels[pos]) < 0) {
1665 ath_err(common, "Unable to set channel\n"); 1226 ath_err(common, "Unable to set channel\n");
1666 mutex_unlock(&sc->mutex); 1227 mutex_unlock(&sc->mutex);
1228 ath9k_ps_restore(sc);
1667 return -EINVAL; 1229 return -EINVAL;
1668 } 1230 }
1669 1231
@@ -1902,16 +1464,16 @@ static void ath9k_bss_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
1902 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 1464 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
1903 struct ieee80211_bss_conf *bss_conf = &vif->bss_conf; 1465 struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
1904 struct ath_vif *avp = (void *)vif->drv_priv; 1466 struct ath_vif *avp = (void *)vif->drv_priv;
1905 1467 unsigned long flags;
1906 /* 1468 /*
1907 * Skip iteration if primary station vif's bss info 1469 * Skip iteration if primary station vif's bss info
1908 * was not changed 1470 * was not changed
1909 */ 1471 */
1910 if (sc->sc_flags & SC_OP_PRIM_STA_VIF) 1472 if (test_bit(SC_OP_PRIM_STA_VIF, &sc->sc_flags))
1911 return; 1473 return;
1912 1474
1913 if (bss_conf->assoc) { 1475 if (bss_conf->assoc) {
1914 sc->sc_flags |= SC_OP_PRIM_STA_VIF; 1476 set_bit(SC_OP_PRIM_STA_VIF, &sc->sc_flags);
1915 avp->primary_sta_vif = true; 1477 avp->primary_sta_vif = true;
1916 memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN); 1478 memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN);
1917 common->curaid = bss_conf->aid; 1479 common->curaid = bss_conf->aid;
@@ -1924,7 +1486,10 @@ static void ath9k_bss_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
1924 * on the receipt of the first Beacon frame (i.e., 1486 * on the receipt of the first Beacon frame (i.e.,
1925 * after time sync with the AP). 1487 * after time sync with the AP).
1926 */ 1488 */
1489 spin_lock_irqsave(&sc->sc_pm_lock, flags);
1927 sc->ps_flags |= PS_BEACON_SYNC | PS_WAIT_FOR_BEACON; 1490 sc->ps_flags |= PS_BEACON_SYNC | PS_WAIT_FOR_BEACON;
1491 spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
1492
1928 /* Reset rssi stats */ 1493 /* Reset rssi stats */
1929 sc->last_rssi = ATH_RSSI_DUMMY_MARKER; 1494 sc->last_rssi = ATH_RSSI_DUMMY_MARKER;
1930 sc->sc_ah->stats.avgbrssi = ATH_RSSI_DUMMY_MARKER; 1495 sc->sc_ah->stats.avgbrssi = ATH_RSSI_DUMMY_MARKER;
@@ -1932,7 +1497,7 @@ static void ath9k_bss_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
1932 ath_start_rx_poll(sc, 3); 1497 ath_start_rx_poll(sc, 3);
1933 1498
1934 if (!common->disable_ani) { 1499 if (!common->disable_ani) {
1935 sc->sc_flags |= SC_OP_ANI_RUN; 1500 set_bit(SC_OP_ANI_RUN, &sc->sc_flags);
1936 ath_start_ani(common); 1501 ath_start_ani(common);
1937 } 1502 }
1938 1503
@@ -1952,7 +1517,8 @@ static void ath9k_config_bss(struct ath_softc *sc, struct ieee80211_vif *vif)
1952 if (avp->primary_sta_vif && !bss_conf->assoc) { 1517 if (avp->primary_sta_vif && !bss_conf->assoc) {
1953 ath_dbg(common, CONFIG, "Bss Info DISASSOC %d, bssid %pM\n", 1518 ath_dbg(common, CONFIG, "Bss Info DISASSOC %d, bssid %pM\n",
1954 common->curaid, common->curbssid); 1519 common->curaid, common->curbssid);
1955 sc->sc_flags &= ~(SC_OP_PRIM_STA_VIF | SC_OP_BEACONS); 1520 clear_bit(SC_OP_PRIM_STA_VIF, &sc->sc_flags);
1521 clear_bit(SC_OP_BEACONS, &sc->sc_flags);
1956 avp->primary_sta_vif = false; 1522 avp->primary_sta_vif = false;
1957 memset(common->curbssid, 0, ETH_ALEN); 1523 memset(common->curbssid, 0, ETH_ALEN);
1958 common->curaid = 0; 1524 common->curaid = 0;
@@ -1965,10 +1531,9 @@ static void ath9k_config_bss(struct ath_softc *sc, struct ieee80211_vif *vif)
1965 * None of station vifs are associated. 1531 * None of station vifs are associated.
1966 * Clear bssid & aid 1532 * Clear bssid & aid
1967 */ 1533 */
1968 if (!(sc->sc_flags & SC_OP_PRIM_STA_VIF)) { 1534 if (!test_bit(SC_OP_PRIM_STA_VIF, &sc->sc_flags)) {
1969 ath9k_hw_write_associd(sc->sc_ah); 1535 ath9k_hw_write_associd(sc->sc_ah);
1970 /* Stop ANI */ 1536 clear_bit(SC_OP_ANI_RUN, &sc->sc_flags);
1971 sc->sc_flags &= ~SC_OP_ANI_RUN;
1972 del_timer_sync(&common->ani.timer); 1537 del_timer_sync(&common->ani.timer);
1973 del_timer_sync(&sc->rx_poll_timer); 1538 del_timer_sync(&sc->rx_poll_timer);
1974 memset(&sc->caldata, 0, sizeof(sc->caldata)); 1539 memset(&sc->caldata, 0, sizeof(sc->caldata));
@@ -2006,12 +1571,12 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
2006 sc->sc_ah->stats.avgbrssi = ATH_RSSI_DUMMY_MARKER; 1571 sc->sc_ah->stats.avgbrssi = ATH_RSSI_DUMMY_MARKER;
2007 1572
2008 if (!common->disable_ani) { 1573 if (!common->disable_ani) {
2009 sc->sc_flags |= SC_OP_ANI_RUN; 1574 set_bit(SC_OP_ANI_RUN, &sc->sc_flags);
2010 ath_start_ani(common); 1575 ath_start_ani(common);
2011 } 1576 }
2012 1577
2013 } else { 1578 } else {
2014 sc->sc_flags &= ~SC_OP_ANI_RUN; 1579 clear_bit(SC_OP_ANI_RUN, &sc->sc_flags);
2015 del_timer_sync(&common->ani.timer); 1580 del_timer_sync(&common->ani.timer);
2016 del_timer_sync(&sc->rx_poll_timer); 1581 del_timer_sync(&sc->rx_poll_timer);
2017 } 1582 }
@@ -2023,7 +1588,7 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
2023 */ 1588 */
2024 if ((changed & BSS_CHANGED_BEACON_INT) && 1589 if ((changed & BSS_CHANGED_BEACON_INT) &&
2025 (vif->type == NL80211_IFTYPE_AP)) 1590 (vif->type == NL80211_IFTYPE_AP))
2026 sc->sc_flags |= SC_OP_TSF_RESET; 1591 set_bit(SC_OP_TSF_RESET, &sc->sc_flags);
2027 1592
2028 /* Configure beaconing (AP, IBSS, MESH) */ 1593 /* Configure beaconing (AP, IBSS, MESH) */
2029 if (ath9k_uses_beacons(vif->type) && 1594 if (ath9k_uses_beacons(vif->type) &&
@@ -2215,7 +1780,7 @@ static void ath9k_flush(struct ieee80211_hw *hw, bool drop)
2215 return; 1780 return;
2216 } 1781 }
2217 1782
2218 if (sc->sc_flags & SC_OP_INVALID) { 1783 if (test_bit(SC_OP_INVALID, &sc->sc_flags)) {
2219 ath_dbg(common, ANY, "Device not present\n"); 1784 ath_dbg(common, ANY, "Device not present\n");
2220 mutex_unlock(&sc->mutex); 1785 mutex_unlock(&sc->mutex);
2221 return; 1786 return;
@@ -2380,6 +1945,134 @@ static int ath9k_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
2380 return 0; 1945 return 0;
2381} 1946}
2382 1947
1948#ifdef CONFIG_ATH9K_DEBUGFS
1949
1950/* Ethtool support for get-stats */
1951
1952#define AMKSTR(nm) #nm "_BE", #nm "_BK", #nm "_VI", #nm "_VO"
1953static const char ath9k_gstrings_stats[][ETH_GSTRING_LEN] = {
1954 "tx_pkts_nic",
1955 "tx_bytes_nic",
1956 "rx_pkts_nic",
1957 "rx_bytes_nic",
1958 AMKSTR(d_tx_pkts),
1959 AMKSTR(d_tx_bytes),
1960 AMKSTR(d_tx_mpdus_queued),
1961 AMKSTR(d_tx_mpdus_completed),
1962 AMKSTR(d_tx_mpdu_xretries),
1963 AMKSTR(d_tx_aggregates),
1964 AMKSTR(d_tx_ampdus_queued_hw),
1965 AMKSTR(d_tx_ampdus_queued_sw),
1966 AMKSTR(d_tx_ampdus_completed),
1967 AMKSTR(d_tx_ampdu_retries),
1968 AMKSTR(d_tx_ampdu_xretries),
1969 AMKSTR(d_tx_fifo_underrun),
1970 AMKSTR(d_tx_op_exceeded),
1971 AMKSTR(d_tx_timer_expiry),
1972 AMKSTR(d_tx_desc_cfg_err),
1973 AMKSTR(d_tx_data_underrun),
1974 AMKSTR(d_tx_delim_underrun),
1975
1976 "d_rx_decrypt_crc_err",
1977 "d_rx_phy_err",
1978 "d_rx_mic_err",
1979 "d_rx_pre_delim_crc_err",
1980 "d_rx_post_delim_crc_err",
1981 "d_rx_decrypt_busy_err",
1982
1983 "d_rx_phyerr_radar",
1984 "d_rx_phyerr_ofdm_timing",
1985 "d_rx_phyerr_cck_timing",
1986
1987};
1988#define ATH9K_SSTATS_LEN ARRAY_SIZE(ath9k_gstrings_stats)
1989
1990static void ath9k_get_et_strings(struct ieee80211_hw *hw,
1991 struct ieee80211_vif *vif,
1992 u32 sset, u8 *data)
1993{
1994 if (sset == ETH_SS_STATS)
1995 memcpy(data, *ath9k_gstrings_stats,
1996 sizeof(ath9k_gstrings_stats));
1997}
1998
1999static int ath9k_get_et_sset_count(struct ieee80211_hw *hw,
2000 struct ieee80211_vif *vif, int sset)
2001{
2002 if (sset == ETH_SS_STATS)
2003 return ATH9K_SSTATS_LEN;
2004 return 0;
2005}
2006
2007#define PR_QNUM(_n) (sc->tx.txq_map[_n]->axq_qnum)
2008#define AWDATA(elem) \
2009 do { \
2010 data[i++] = sc->debug.stats.txstats[PR_QNUM(WME_AC_BE)].elem; \
2011 data[i++] = sc->debug.stats.txstats[PR_QNUM(WME_AC_BK)].elem; \
2012 data[i++] = sc->debug.stats.txstats[PR_QNUM(WME_AC_VI)].elem; \
2013 data[i++] = sc->debug.stats.txstats[PR_QNUM(WME_AC_VO)].elem; \
2014 } while (0)
2015
2016#define AWDATA_RX(elem) \
2017 do { \
2018 data[i++] = sc->debug.stats.rxstats.elem; \
2019 } while (0)
2020
2021static void ath9k_get_et_stats(struct ieee80211_hw *hw,
2022 struct ieee80211_vif *vif,
2023 struct ethtool_stats *stats, u64 *data)
2024{
2025 struct ath_softc *sc = hw->priv;
2026 int i = 0;
2027
2028 data[i++] = (sc->debug.stats.txstats[PR_QNUM(WME_AC_BE)].tx_pkts_all +
2029 sc->debug.stats.txstats[PR_QNUM(WME_AC_BK)].tx_pkts_all +
2030 sc->debug.stats.txstats[PR_QNUM(WME_AC_VI)].tx_pkts_all +
2031 sc->debug.stats.txstats[PR_QNUM(WME_AC_VO)].tx_pkts_all);
2032 data[i++] = (sc->debug.stats.txstats[PR_QNUM(WME_AC_BE)].tx_bytes_all +
2033 sc->debug.stats.txstats[PR_QNUM(WME_AC_BK)].tx_bytes_all +
2034 sc->debug.stats.txstats[PR_QNUM(WME_AC_VI)].tx_bytes_all +
2035 sc->debug.stats.txstats[PR_QNUM(WME_AC_VO)].tx_bytes_all);
2036 AWDATA_RX(rx_pkts_all);
2037 AWDATA_RX(rx_bytes_all);
2038
2039 AWDATA(tx_pkts_all);
2040 AWDATA(tx_bytes_all);
2041 AWDATA(queued);
2042 AWDATA(completed);
2043 AWDATA(xretries);
2044 AWDATA(a_aggr);
2045 AWDATA(a_queued_hw);
2046 AWDATA(a_queued_sw);
2047 AWDATA(a_completed);
2048 AWDATA(a_retries);
2049 AWDATA(a_xretries);
2050 AWDATA(fifo_underrun);
2051 AWDATA(xtxop);
2052 AWDATA(timer_exp);
2053 AWDATA(desc_cfg_err);
2054 AWDATA(data_underrun);
2055 AWDATA(delim_underrun);
2056
2057 AWDATA_RX(decrypt_crc_err);
2058 AWDATA_RX(phy_err);
2059 AWDATA_RX(mic_err);
2060 AWDATA_RX(pre_delim_crc_err);
2061 AWDATA_RX(post_delim_crc_err);
2062 AWDATA_RX(decrypt_busy_err);
2063
2064 AWDATA_RX(phy_err_stats[ATH9K_PHYERR_RADAR]);
2065 AWDATA_RX(phy_err_stats[ATH9K_PHYERR_OFDM_TIMING]);
2066 AWDATA_RX(phy_err_stats[ATH9K_PHYERR_CCK_TIMING]);
2067
2068 WARN_ON(i != ATH9K_SSTATS_LEN);
2069}
2070
2071/* End of ethtool get-stats functions */
2072
2073#endif
2074
2075
2383struct ieee80211_ops ath9k_ops = { 2076struct ieee80211_ops ath9k_ops = {
2384 .tx = ath9k_tx, 2077 .tx = ath9k_tx,
2385 .start = ath9k_start, 2078 .start = ath9k_start,
@@ -2408,4 +2101,10 @@ struct ieee80211_ops ath9k_ops = {
2408 .get_stats = ath9k_get_stats, 2101 .get_stats = ath9k_get_stats,
2409 .set_antenna = ath9k_set_antenna, 2102 .set_antenna = ath9k_set_antenna,
2410 .get_antenna = ath9k_get_antenna, 2103 .get_antenna = ath9k_get_antenna,
2104
2105#ifdef CONFIG_ATH9K_DEBUGFS
2106 .get_et_sset_count = ath9k_get_et_sset_count,
2107 .get_et_stats = ath9k_get_et_stats,
2108 .get_et_strings = ath9k_get_et_strings,
2109#endif
2411}; 2110};
diff --git a/drivers/net/wireless/ath/ath9k/mci.c b/drivers/net/wireless/ath/ath9k/mci.c
index 29fe52d6997..64cc782587d 100644
--- a/drivers/net/wireless/ath/ath9k/mci.c
+++ b/drivers/net/wireless/ath/ath9k/mci.c
@@ -20,7 +20,7 @@
20#include "ath9k.h" 20#include "ath9k.h"
21#include "mci.h" 21#include "mci.h"
22 22
23static const u8 ath_mci_duty_cycle[] = { 0, 50, 60, 70, 80, 85, 90, 95, 98 }; 23static const u8 ath_mci_duty_cycle[] = { 55, 50, 60, 70, 80, 85, 90, 95, 98 };
24 24
25static struct ath_mci_profile_info* 25static struct ath_mci_profile_info*
26ath_mci_find_profile(struct ath_mci_profile *mci, 26ath_mci_find_profile(struct ath_mci_profile *mci,
@@ -28,11 +28,14 @@ ath_mci_find_profile(struct ath_mci_profile *mci,
28{ 28{
29 struct ath_mci_profile_info *entry; 29 struct ath_mci_profile_info *entry;
30 30
31 if (list_empty(&mci->info))
32 return NULL;
33
31 list_for_each_entry(entry, &mci->info, list) { 34 list_for_each_entry(entry, &mci->info, list) {
32 if (entry->conn_handle == info->conn_handle) 35 if (entry->conn_handle == info->conn_handle)
33 break; 36 return entry;
34 } 37 }
35 return entry; 38 return NULL;
36} 39}
37 40
38static bool ath_mci_add_profile(struct ath_common *common, 41static bool ath_mci_add_profile(struct ath_common *common,
@@ -49,31 +52,21 @@ static bool ath_mci_add_profile(struct ath_common *common,
49 (info->type != MCI_GPM_COEX_PROFILE_VOICE)) 52 (info->type != MCI_GPM_COEX_PROFILE_VOICE))
50 return false; 53 return false;
51 54
52 entry = ath_mci_find_profile(mci, info); 55 entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
53 56 if (!entry)
54 if (entry) { 57 return false;
55 memcpy(entry, info, 10);
56 } else {
57 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
58 if (!entry)
59 return false;
60 58
61 memcpy(entry, info, 10); 59 memcpy(entry, info, 10);
62 INC_PROF(mci, info); 60 INC_PROF(mci, info);
63 list_add_tail(&info->list, &mci->info); 61 list_add_tail(&entry->list, &mci->info);
64 }
65 62
66 return true; 63 return true;
67} 64}
68 65
69static void ath_mci_del_profile(struct ath_common *common, 66static void ath_mci_del_profile(struct ath_common *common,
70 struct ath_mci_profile *mci, 67 struct ath_mci_profile *mci,
71 struct ath_mci_profile_info *info) 68 struct ath_mci_profile_info *entry)
72{ 69{
73 struct ath_mci_profile_info *entry;
74
75 entry = ath_mci_find_profile(mci, info);
76
77 if (!entry) 70 if (!entry)
78 return; 71 return;
79 72
@@ -86,12 +79,16 @@ void ath_mci_flush_profile(struct ath_mci_profile *mci)
86{ 79{
87 struct ath_mci_profile_info *info, *tinfo; 80 struct ath_mci_profile_info *info, *tinfo;
88 81
82 mci->aggr_limit = 0;
83
84 if (list_empty(&mci->info))
85 return;
86
89 list_for_each_entry_safe(info, tinfo, &mci->info, list) { 87 list_for_each_entry_safe(info, tinfo, &mci->info, list) {
90 list_del(&info->list); 88 list_del(&info->list);
91 DEC_PROF(mci, info); 89 DEC_PROF(mci, info);
92 kfree(info); 90 kfree(info);
93 } 91 }
94 mci->aggr_limit = 0;
95} 92}
96 93
97static void ath_mci_adjust_aggr_limit(struct ath_btcoex *btcoex) 94static void ath_mci_adjust_aggr_limit(struct ath_btcoex *btcoex)
@@ -116,42 +113,60 @@ static void ath_mci_update_scheme(struct ath_softc *sc)
116 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 113 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
117 struct ath_btcoex *btcoex = &sc->btcoex; 114 struct ath_btcoex *btcoex = &sc->btcoex;
118 struct ath_mci_profile *mci = &btcoex->mci; 115 struct ath_mci_profile *mci = &btcoex->mci;
116 struct ath9k_hw_mci *mci_hw = &sc->sc_ah->btcoex_hw.mci;
119 struct ath_mci_profile_info *info; 117 struct ath_mci_profile_info *info;
120 u32 num_profile = NUM_PROF(mci); 118 u32 num_profile = NUM_PROF(mci);
121 119
120 if (mci_hw->config & ATH_MCI_CONFIG_DISABLE_TUNING)
121 goto skip_tuning;
122
123 btcoex->duty_cycle = ath_mci_duty_cycle[num_profile];
124
122 if (num_profile == 1) { 125 if (num_profile == 1) {
123 info = list_first_entry(&mci->info, 126 info = list_first_entry(&mci->info,
124 struct ath_mci_profile_info, 127 struct ath_mci_profile_info,
125 list); 128 list);
126 if (mci->num_sco && info->T == 12) { 129 if (mci->num_sco) {
127 mci->aggr_limit = 8; 130 if (info->T == 12)
131 mci->aggr_limit = 8;
132 else if (info->T == 6) {
133 mci->aggr_limit = 6;
134 btcoex->duty_cycle = 30;
135 }
128 ath_dbg(common, MCI, 136 ath_dbg(common, MCI,
129 "Single SCO, aggregation limit 2 ms\n"); 137 "Single SCO, aggregation limit %d 1/4 ms\n",
130 } else if ((info->type == MCI_GPM_COEX_PROFILE_BNEP) && 138 mci->aggr_limit);
131 !info->master) { 139 } else if (mci->num_pan || mci->num_other_acl) {
132 btcoex->btcoex_period = 60; 140 /*
141 * For single PAN/FTP profile, allocate 35% for BT
142 * to improve WLAN throughput.
143 */
144 btcoex->duty_cycle = 35;
145 btcoex->btcoex_period = 53;
133 ath_dbg(common, MCI, 146 ath_dbg(common, MCI,
134 "Single slave PAN/FTP, bt period 60 ms\n"); 147 "Single PAN/FTP bt period %d ms dutycycle %d\n",
135 } else if ((info->type == MCI_GPM_COEX_PROFILE_HID) && 148 btcoex->duty_cycle, btcoex->btcoex_period);
136 (info->T > 0 && info->T < 50) && 149 } else if (mci->num_hid) {
137 (info->A > 1 || info->W > 1)) {
138 btcoex->duty_cycle = 30; 150 btcoex->duty_cycle = 30;
139 mci->aggr_limit = 8; 151 mci->aggr_limit = 6;
140 ath_dbg(common, MCI, 152 ath_dbg(common, MCI,
141 "Multiple attempt/timeout single HID " 153 "Multiple attempt/timeout single HID "
142 "aggregation limit 2 ms dutycycle 30%%\n"); 154 "aggregation limit 1.5 ms dutycycle 30%%\n");
143 } 155 }
144 } else if ((num_profile == 2) && (mci->num_hid == 2)) { 156 } else if (num_profile == 2) {
145 btcoex->duty_cycle = 30; 157 if (mci->num_hid == 2)
146 mci->aggr_limit = 8; 158 btcoex->duty_cycle = 30;
147 ath_dbg(common, MCI,
148 "Two HIDs aggregation limit 2 ms dutycycle 30%%\n");
149 } else if (num_profile > 3) {
150 mci->aggr_limit = 6; 159 mci->aggr_limit = 6;
151 ath_dbg(common, MCI, 160 ath_dbg(common, MCI,
152 "Three or more profiles aggregation limit 1.5 ms\n"); 161 "Two BT profiles aggr limit 1.5 ms dutycycle %d%%\n",
162 btcoex->duty_cycle);
163 } else if (num_profile >= 3) {
164 mci->aggr_limit = 4;
165 ath_dbg(common, MCI,
166 "Three or more profiles aggregation limit 1 ms\n");
153 } 167 }
154 168
169skip_tuning:
155 if (IS_CHAN_2GHZ(sc->sc_ah->curchan)) { 170 if (IS_CHAN_2GHZ(sc->sc_ah->curchan)) {
156 if (IS_CHAN_HT(sc->sc_ah->curchan)) 171 if (IS_CHAN_HT(sc->sc_ah->curchan))
157 ath_mci_adjust_aggr_limit(btcoex); 172 ath_mci_adjust_aggr_limit(btcoex);
@@ -165,12 +180,11 @@ static void ath_mci_update_scheme(struct ath_softc *sc)
165 if (IS_CHAN_5GHZ(sc->sc_ah->curchan)) 180 if (IS_CHAN_5GHZ(sc->sc_ah->curchan))
166 return; 181 return;
167 182
168 btcoex->duty_cycle += (mci->num_bdr ? ATH_MCI_MAX_DUTY_CYCLE : 0); 183 btcoex->duty_cycle += (mci->num_bdr ? ATH_MCI_BDR_DUTY_CYCLE : 0);
169 if (btcoex->duty_cycle > ATH_MCI_MAX_DUTY_CYCLE) 184 if (btcoex->duty_cycle > ATH_MCI_MAX_DUTY_CYCLE)
170 btcoex->duty_cycle = ATH_MCI_MAX_DUTY_CYCLE; 185 btcoex->duty_cycle = ATH_MCI_MAX_DUTY_CYCLE;
171 186
172 btcoex->btcoex_period *= 1000; 187 btcoex->btcoex_no_stomp = btcoex->btcoex_period * 1000 *
173 btcoex->btcoex_no_stomp = btcoex->btcoex_period *
174 (100 - btcoex->duty_cycle) / 100; 188 (100 - btcoex->duty_cycle) / 100;
175 189
176 ath9k_hw_btcoex_enable(sc->sc_ah); 190 ath9k_hw_btcoex_enable(sc->sc_ah);
@@ -181,20 +195,16 @@ static void ath_mci_cal_msg(struct ath_softc *sc, u8 opcode, u8 *rx_payload)
181{ 195{
182 struct ath_hw *ah = sc->sc_ah; 196 struct ath_hw *ah = sc->sc_ah;
183 struct ath_common *common = ath9k_hw_common(ah); 197 struct ath_common *common = ath9k_hw_common(ah);
198 struct ath9k_hw_mci *mci_hw = &ah->btcoex_hw.mci;
184 u32 payload[4] = {0, 0, 0, 0}; 199 u32 payload[4] = {0, 0, 0, 0};
185 200
186 switch (opcode) { 201 switch (opcode) {
187 case MCI_GPM_BT_CAL_REQ: 202 case MCI_GPM_BT_CAL_REQ:
188 if (ar9003_mci_state(ah, MCI_STATE_BT, NULL) == MCI_BT_AWAKE) { 203 if (mci_hw->bt_state == MCI_BT_AWAKE) {
189 ar9003_mci_state(ah, MCI_STATE_SET_BT_CAL_START, NULL); 204 ar9003_mci_state(ah, MCI_STATE_SET_BT_CAL_START);
190 ieee80211_queue_work(sc->hw, &sc->hw_reset_work); 205 ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
191 } else {
192 ath_dbg(common, MCI, "MCI State mismatch: %d\n",
193 ar9003_mci_state(ah, MCI_STATE_BT, NULL));
194 } 206 }
195 break; 207 ath_dbg(common, MCI, "MCI State : %d\n", mci_hw->bt_state);
196 case MCI_GPM_BT_CAL_DONE:
197 ar9003_mci_state(ah, MCI_STATE_BT, NULL);
198 break; 208 break;
199 case MCI_GPM_BT_CAL_GRANT: 209 case MCI_GPM_BT_CAL_GRANT:
200 MCI_GPM_SET_CAL_TYPE(payload, MCI_GPM_WLAN_CAL_DONE); 210 MCI_GPM_SET_CAL_TYPE(payload, MCI_GPM_WLAN_CAL_DONE);
@@ -207,32 +217,55 @@ static void ath_mci_cal_msg(struct ath_softc *sc, u8 opcode, u8 *rx_payload)
207 } 217 }
208} 218}
209 219
220static void ath9k_mci_work(struct work_struct *work)
221{
222 struct ath_softc *sc = container_of(work, struct ath_softc, mci_work);
223
224 ath_mci_update_scheme(sc);
225}
226
210static void ath_mci_process_profile(struct ath_softc *sc, 227static void ath_mci_process_profile(struct ath_softc *sc,
211 struct ath_mci_profile_info *info) 228 struct ath_mci_profile_info *info)
212{ 229{
213 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 230 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
214 struct ath_btcoex *btcoex = &sc->btcoex; 231 struct ath_btcoex *btcoex = &sc->btcoex;
215 struct ath_mci_profile *mci = &btcoex->mci; 232 struct ath_mci_profile *mci = &btcoex->mci;
233 struct ath_mci_profile_info *entry = NULL;
234
235 entry = ath_mci_find_profile(mci, info);
236 if (entry) {
237 /*
238 * Two MCI interrupts are generated while connecting to
239 * headset and A2DP profile, but only one MCI interrupt
240 * is generated with last added profile type while disconnecting
241 * both profiles.
242 * So while adding second profile type decrement
243 * the first one.
244 */
245 if (entry->type != info->type) {
246 DEC_PROF(mci, entry);
247 INC_PROF(mci, info);
248 }
249 memcpy(entry, info, 10);
250 }
216 251
217 if (info->start) { 252 if (info->start) {
218 if (!ath_mci_add_profile(common, mci, info)) 253 if (!entry && !ath_mci_add_profile(common, mci, info))
219 return; 254 return;
220 } else 255 } else
221 ath_mci_del_profile(common, mci, info); 256 ath_mci_del_profile(common, mci, entry);
222 257
223 btcoex->btcoex_period = ATH_MCI_DEF_BT_PERIOD; 258 btcoex->btcoex_period = ATH_MCI_DEF_BT_PERIOD;
224 mci->aggr_limit = mci->num_sco ? 6 : 0; 259 mci->aggr_limit = mci->num_sco ? 6 : 0;
225 260
226 if (NUM_PROF(mci)) { 261 btcoex->duty_cycle = ath_mci_duty_cycle[NUM_PROF(mci)];
262 if (NUM_PROF(mci))
227 btcoex->bt_stomp_type = ATH_BTCOEX_STOMP_LOW; 263 btcoex->bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
228 btcoex->duty_cycle = ath_mci_duty_cycle[NUM_PROF(mci)]; 264 else
229 } else {
230 btcoex->bt_stomp_type = mci->num_mgmt ? ATH_BTCOEX_STOMP_ALL : 265 btcoex->bt_stomp_type = mci->num_mgmt ? ATH_BTCOEX_STOMP_ALL :
231 ATH_BTCOEX_STOMP_LOW; 266 ATH_BTCOEX_STOMP_LOW;
232 btcoex->duty_cycle = ATH_BTCOEX_DEF_DUTY_CYCLE;
233 }
234 267
235 ath_mci_update_scheme(sc); 268 ieee80211_queue_work(sc->hw, &sc->mci_work);
236} 269}
237 270
238static void ath_mci_process_status(struct ath_softc *sc, 271static void ath_mci_process_status(struct ath_softc *sc,
@@ -247,8 +280,6 @@ static void ath_mci_process_status(struct ath_softc *sc,
247 if (status->is_link) 280 if (status->is_link)
248 return; 281 return;
249 282
250 memset(&info, 0, sizeof(struct ath_mci_profile_info));
251
252 info.conn_handle = status->conn_handle; 283 info.conn_handle = status->conn_handle;
253 if (ath_mci_find_profile(mci, &info)) 284 if (ath_mci_find_profile(mci, &info))
254 return; 285 return;
@@ -268,7 +299,7 @@ static void ath_mci_process_status(struct ath_softc *sc,
268 } while (++i < ATH_MCI_MAX_PROFILE); 299 } while (++i < ATH_MCI_MAX_PROFILE);
269 300
270 if (old_num_mgmt != mci->num_mgmt) 301 if (old_num_mgmt != mci->num_mgmt)
271 ath_mci_update_scheme(sc); 302 ieee80211_queue_work(sc->hw, &sc->mci_work);
272} 303}
273 304
274static void ath_mci_msg(struct ath_softc *sc, u8 opcode, u8 *rx_payload) 305static void ath_mci_msg(struct ath_softc *sc, u8 opcode, u8 *rx_payload)
@@ -277,25 +308,20 @@ static void ath_mci_msg(struct ath_softc *sc, u8 opcode, u8 *rx_payload)
277 struct ath_mci_profile_info profile_info; 308 struct ath_mci_profile_info profile_info;
278 struct ath_mci_profile_status profile_status; 309 struct ath_mci_profile_status profile_status;
279 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 310 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
280 u32 version; 311 u8 major, minor;
281 u8 major;
282 u8 minor;
283 u32 seq_num; 312 u32 seq_num;
284 313
285 switch (opcode) { 314 switch (opcode) {
286 case MCI_GPM_COEX_VERSION_QUERY: 315 case MCI_GPM_COEX_VERSION_QUERY:
287 version = ar9003_mci_state(ah, MCI_STATE_SEND_WLAN_COEX_VERSION, 316 ar9003_mci_state(ah, MCI_STATE_SEND_WLAN_COEX_VERSION);
288 NULL);
289 break; 317 break;
290 case MCI_GPM_COEX_VERSION_RESPONSE: 318 case MCI_GPM_COEX_VERSION_RESPONSE:
291 major = *(rx_payload + MCI_GPM_COEX_B_MAJOR_VERSION); 319 major = *(rx_payload + MCI_GPM_COEX_B_MAJOR_VERSION);
292 minor = *(rx_payload + MCI_GPM_COEX_B_MINOR_VERSION); 320 minor = *(rx_payload + MCI_GPM_COEX_B_MINOR_VERSION);
293 version = (major << 8) + minor; 321 ar9003_mci_set_bt_version(ah, major, minor);
294 version = ar9003_mci_state(ah, MCI_STATE_SET_BT_COEX_VERSION,
295 &version);
296 break; 322 break;
297 case MCI_GPM_COEX_STATUS_QUERY: 323 case MCI_GPM_COEX_STATUS_QUERY:
298 ar9003_mci_state(ah, MCI_STATE_SEND_WLAN_CHANNELS, NULL); 324 ar9003_mci_send_wlan_channels(ah);
299 break; 325 break;
300 case MCI_GPM_COEX_BT_PROFILE_INFO: 326 case MCI_GPM_COEX_BT_PROFILE_INFO:
301 memcpy(&profile_info, 327 memcpy(&profile_info,
@@ -322,7 +348,7 @@ static void ath_mci_msg(struct ath_softc *sc, u8 opcode, u8 *rx_payload)
322 348
323 seq_num = *((u32 *)(rx_payload + 12)); 349 seq_num = *((u32 *)(rx_payload + 12));
324 ath_dbg(common, MCI, 350 ath_dbg(common, MCI,
325 "BT_Status_Update: is_link=%d, linkId=%d, state=%d, SEQ=%d\n", 351 "BT_Status_Update: is_link=%d, linkId=%d, state=%d, SEQ=%u\n",
326 profile_status.is_link, profile_status.conn_handle, 352 profile_status.is_link, profile_status.conn_handle,
327 profile_status.is_critical, seq_num); 353 profile_status.is_critical, seq_num);
328 354
@@ -362,6 +388,7 @@ int ath_mci_setup(struct ath_softc *sc)
362 mci->gpm_buf.bf_addr, (mci->gpm_buf.bf_len >> 4), 388 mci->gpm_buf.bf_addr, (mci->gpm_buf.bf_len >> 4),
363 mci->sched_buf.bf_paddr); 389 mci->sched_buf.bf_paddr);
364 390
391 INIT_WORK(&sc->mci_work, ath9k_mci_work);
365 ath_dbg(common, MCI, "MCI Initialized\n"); 392 ath_dbg(common, MCI, "MCI Initialized\n");
366 393
367 return 0; 394 return 0;
@@ -389,6 +416,7 @@ void ath_mci_intr(struct ath_softc *sc)
389 struct ath_mci_coex *mci = &sc->mci_coex; 416 struct ath_mci_coex *mci = &sc->mci_coex;
390 struct ath_hw *ah = sc->sc_ah; 417 struct ath_hw *ah = sc->sc_ah;
391 struct ath_common *common = ath9k_hw_common(ah); 418 struct ath_common *common = ath9k_hw_common(ah);
419 struct ath9k_hw_mci *mci_hw = &ah->btcoex_hw.mci;
392 u32 mci_int, mci_int_rxmsg; 420 u32 mci_int, mci_int_rxmsg;
393 u32 offset, subtype, opcode; 421 u32 offset, subtype, opcode;
394 u32 *pgpm; 422 u32 *pgpm;
@@ -397,8 +425,8 @@ void ath_mci_intr(struct ath_softc *sc)
397 425
398 ar9003_mci_get_interrupt(sc->sc_ah, &mci_int, &mci_int_rxmsg); 426 ar9003_mci_get_interrupt(sc->sc_ah, &mci_int, &mci_int_rxmsg);
399 427
400 if (ar9003_mci_state(ah, MCI_STATE_ENABLE, NULL) == 0) { 428 if (ar9003_mci_state(ah, MCI_STATE_ENABLE) == 0) {
401 ar9003_mci_state(ah, MCI_STATE_INIT_GPM_OFFSET, NULL); 429 ar9003_mci_get_next_gpm_offset(ah, true, NULL);
402 return; 430 return;
403 } 431 }
404 432
@@ -417,46 +445,41 @@ void ath_mci_intr(struct ath_softc *sc)
417 NULL, 0, true, false); 445 NULL, 0, true, false);
418 446
419 mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE; 447 mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE;
420 ar9003_mci_state(ah, MCI_STATE_RESET_REQ_WAKE, NULL); 448 ar9003_mci_state(ah, MCI_STATE_RESET_REQ_WAKE);
421 449
422 /* 450 /*
423 * always do this for recovery and 2G/5G toggling and LNA_TRANS 451 * always do this for recovery and 2G/5G toggling and LNA_TRANS
424 */ 452 */
425 ar9003_mci_state(ah, MCI_STATE_SET_BT_AWAKE, NULL); 453 ar9003_mci_state(ah, MCI_STATE_SET_BT_AWAKE);
426 } 454 }
427 455
428 if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING) { 456 if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING) {
429 mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING; 457 mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING;
430 458
431 if (ar9003_mci_state(ah, MCI_STATE_BT, NULL) == MCI_BT_SLEEP) { 459 if ((mci_hw->bt_state == MCI_BT_SLEEP) &&
432 if (ar9003_mci_state(ah, MCI_STATE_REMOTE_SLEEP, NULL) != 460 (ar9003_mci_state(ah, MCI_STATE_REMOTE_SLEEP) !=
433 MCI_BT_SLEEP) 461 MCI_BT_SLEEP))
434 ar9003_mci_state(ah, MCI_STATE_SET_BT_AWAKE, 462 ar9003_mci_state(ah, MCI_STATE_SET_BT_AWAKE);
435 NULL);
436 }
437 } 463 }
438 464
439 if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING) { 465 if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING) {
440 mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING; 466 mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING;
441 467
442 if (ar9003_mci_state(ah, MCI_STATE_BT, NULL) == MCI_BT_AWAKE) { 468 if ((mci_hw->bt_state == MCI_BT_AWAKE) &&
443 if (ar9003_mci_state(ah, MCI_STATE_REMOTE_SLEEP, NULL) != 469 (ar9003_mci_state(ah, MCI_STATE_REMOTE_SLEEP) !=
444 MCI_BT_AWAKE) 470 MCI_BT_AWAKE))
445 ar9003_mci_state(ah, MCI_STATE_SET_BT_SLEEP, 471 mci_hw->bt_state = MCI_BT_SLEEP;
446 NULL);
447 }
448 } 472 }
449 473
450 if ((mci_int & AR_MCI_INTERRUPT_RX_INVALID_HDR) || 474 if ((mci_int & AR_MCI_INTERRUPT_RX_INVALID_HDR) ||
451 (mci_int & AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT)) { 475 (mci_int & AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT)) {
452 ar9003_mci_state(ah, MCI_STATE_RECOVER_RX, NULL); 476 ar9003_mci_state(ah, MCI_STATE_RECOVER_RX);
453 skip_gpm = true; 477 skip_gpm = true;
454 } 478 }
455 479
456 if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO) { 480 if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO) {
457 mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO; 481 mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO;
458 offset = ar9003_mci_state(ah, MCI_STATE_LAST_SCHD_MSG_OFFSET, 482 offset = ar9003_mci_state(ah, MCI_STATE_LAST_SCHD_MSG_OFFSET);
459 NULL);
460 } 483 }
461 484
462 if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_GPM) { 485 if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_GPM) {
@@ -465,8 +488,8 @@ void ath_mci_intr(struct ath_softc *sc)
465 while (more_data == MCI_GPM_MORE) { 488 while (more_data == MCI_GPM_MORE) {
466 489
467 pgpm = mci->gpm_buf.bf_addr; 490 pgpm = mci->gpm_buf.bf_addr;
468 offset = ar9003_mci_state(ah, MCI_STATE_NEXT_GPM_OFFSET, 491 offset = ar9003_mci_get_next_gpm_offset(ah, false,
469 &more_data); 492 &more_data);
470 493
471 if (offset == MCI_GPM_INVALID) 494 if (offset == MCI_GPM_INVALID)
472 break; 495 break;
@@ -507,23 +530,17 @@ void ath_mci_intr(struct ath_softc *sc)
507 mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_LNA_INFO; 530 mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_LNA_INFO;
508 531
509 if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_CONT_INFO) { 532 if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_CONT_INFO) {
510 int value_dbm = ar9003_mci_state(ah, 533 int value_dbm = MS(mci_hw->cont_status,
511 MCI_STATE_CONT_RSSI_POWER, NULL); 534 AR_MCI_CONT_RSSI_POWER);
512 535
513 mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_CONT_INFO; 536 mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_CONT_INFO;
514 537
515 if (ar9003_mci_state(ah, MCI_STATE_CONT_TXRX, NULL)) 538 ath_dbg(common, MCI,
516 ath_dbg(common, MCI, 539 "MCI CONT_INFO: (%s) pri = %d pwr = %d dBm\n",
517 "MCI CONT_INFO: (tx) pri = %d, pwr = %d dBm\n", 540 MS(mci_hw->cont_status, AR_MCI_CONT_TXRX) ?
518 ar9003_mci_state(ah, 541 "tx" : "rx",
519 MCI_STATE_CONT_PRIORITY, NULL), 542 MS(mci_hw->cont_status, AR_MCI_CONT_PRIORITY),
520 value_dbm); 543 value_dbm);
521 else
522 ath_dbg(common, MCI,
523 "MCI CONT_INFO: (rx) pri = %d,pwr = %d dBm\n",
524 ar9003_mci_state(ah,
525 MCI_STATE_CONT_PRIORITY, NULL),
526 value_dbm);
527 } 544 }
528 545
529 if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_CONT_NACK) 546 if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_CONT_NACK)
@@ -538,3 +555,14 @@ void ath_mci_intr(struct ath_softc *sc)
538 mci_int &= ~(AR_MCI_INTERRUPT_RX_INVALID_HDR | 555 mci_int &= ~(AR_MCI_INTERRUPT_RX_INVALID_HDR |
539 AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT); 556 AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT);
540} 557}
558
559void ath_mci_enable(struct ath_softc *sc)
560{
561 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
562
563 if (!common->btcoex_enabled)
564 return;
565
566 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_MCI)
567 sc->sc_ah->imask |= ATH9K_INT_MCI;
568}
diff --git a/drivers/net/wireless/ath/ath9k/mci.h b/drivers/net/wireless/ath/ath9k/mci.h
index c841444f53c..fc14eea034e 100644
--- a/drivers/net/wireless/ath/ath9k/mci.h
+++ b/drivers/net/wireless/ath/ath9k/mci.h
@@ -130,4 +130,13 @@ void ath_mci_flush_profile(struct ath_mci_profile *mci);
130int ath_mci_setup(struct ath_softc *sc); 130int ath_mci_setup(struct ath_softc *sc);
131void ath_mci_cleanup(struct ath_softc *sc); 131void ath_mci_cleanup(struct ath_softc *sc);
132void ath_mci_intr(struct ath_softc *sc); 132void ath_mci_intr(struct ath_softc *sc);
133#endif 133
134#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
135void ath_mci_enable(struct ath_softc *sc);
136#else
137static inline void ath_mci_enable(struct ath_softc *sc)
138{
139}
140#endif /* CONFIG_ATH9K_BTCOEX_SUPPORT */
141
142#endif /* MCI_H*/
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index a856b51255f..aa0e83ac51f 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -115,6 +115,9 @@ static void ath_pci_aspm_init(struct ath_common *common)
115 int pos; 115 int pos;
116 u8 aspm; 116 u8 aspm;
117 117
118 if (!ah->is_pciexpress)
119 return;
120
118 pos = pci_pcie_cap(pdev); 121 pos = pci_pcie_cap(pdev);
119 if (!pos) 122 if (!pos)
120 return; 123 return;
@@ -138,6 +141,7 @@ static void ath_pci_aspm_init(struct ath_common *common)
138 aspm &= ~(PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1); 141 aspm &= ~(PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1);
139 pci_write_config_byte(parent, pos + PCI_EXP_LNKCTL, aspm); 142 pci_write_config_byte(parent, pos + PCI_EXP_LNKCTL, aspm);
140 143
144 ath_info(common, "Disabling ASPM since BTCOEX is enabled\n");
141 return; 145 return;
142 } 146 }
143 147
@@ -147,6 +151,7 @@ static void ath_pci_aspm_init(struct ath_common *common)
147 ah->aspm_enabled = true; 151 ah->aspm_enabled = true;
148 /* Initialize PCIe PM and SERDES registers. */ 152 /* Initialize PCIe PM and SERDES registers. */
149 ath9k_hw_configpcipowersave(ah, false); 153 ath9k_hw_configpcipowersave(ah, false);
154 ath_info(common, "ASPM enabled: 0x%x\n", aspm);
150 } 155 }
151} 156}
152 157
@@ -246,7 +251,7 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
246 sc->mem = mem; 251 sc->mem = mem;
247 252
248 /* Will be cleared in ath9k_start() */ 253 /* Will be cleared in ath9k_start() */
249 sc->sc_flags |= SC_OP_INVALID; 254 set_bit(SC_OP_INVALID, &sc->sc_flags);
250 255
251 ret = request_irq(pdev->irq, ath_isr, IRQF_SHARED, "ath9k", sc); 256 ret = request_irq(pdev->irq, ath_isr, IRQF_SHARED, "ath9k", sc);
252 if (ret) { 257 if (ret) {
diff --git a/drivers/net/wireless/ath/ath9k/rc.c b/drivers/net/wireless/ath/ath9k/rc.c
index 92a6c0a87f8..e034add9cd5 100644
--- a/drivers/net/wireless/ath/ath9k/rc.c
+++ b/drivers/net/wireless/ath/ath9k/rc.c
@@ -770,7 +770,7 @@ static void ath_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
770 struct ieee80211_tx_rate *rates = tx_info->control.rates; 770 struct ieee80211_tx_rate *rates = tx_info->control.rates;
771 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 771 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
772 __le16 fc = hdr->frame_control; 772 __le16 fc = hdr->frame_control;
773 u8 try_per_rate, i = 0, rix, high_rix; 773 u8 try_per_rate, i = 0, rix;
774 int is_probe = 0; 774 int is_probe = 0;
775 775
776 if (rate_control_send_low(sta, priv_sta, txrc)) 776 if (rate_control_send_low(sta, priv_sta, txrc))
@@ -791,7 +791,6 @@ static void ath_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
791 rate_table = ath_rc_priv->rate_table; 791 rate_table = ath_rc_priv->rate_table;
792 rix = ath_rc_get_highest_rix(sc, ath_rc_priv, rate_table, 792 rix = ath_rc_get_highest_rix(sc, ath_rc_priv, rate_table,
793 &is_probe, false); 793 &is_probe, false);
794 high_rix = rix;
795 794
796 /* 795 /*
797 * If we're in HT mode and both us and our peer supports LDPC. 796 * If we're in HT mode and both us and our peer supports LDPC.
@@ -839,16 +838,16 @@ static void ath_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
839 try_per_rate = 8; 838 try_per_rate = 8;
840 839
841 /* 840 /*
842 * Use a legacy rate as last retry to ensure that the frame 841 * If the last rate in the rate series is MCS and has
843 * is tried in both MCS and legacy rates. 842 * more than 80% of per thresh, then use a legacy rate
843 * as last retry to ensure that the frame is tried in both
844 * MCS and legacy rate.
844 */ 845 */
845 if ((rates[2].flags & IEEE80211_TX_RC_MCS) && 846 ath_rc_get_lower_rix(rate_table, ath_rc_priv, rix, &rix);
846 (!(tx_info->flags & IEEE80211_TX_CTL_AMPDU) || 847 if (WLAN_RC_PHY_HT(rate_table->info[rix].phy) &&
847 (ath_rc_priv->per[high_rix] > 45))) 848 (ath_rc_priv->per[rix] > 45))
848 rix = ath_rc_get_highest_rix(sc, ath_rc_priv, rate_table, 849 rix = ath_rc_get_highest_rix(sc, ath_rc_priv, rate_table,
849 &is_probe, true); 850 &is_probe, true);
850 else
851 ath_rc_get_lower_rix(rate_table, ath_rc_priv, rix, &rix);
852 851
853 /* All other rates in the series have RTS enabled */ 852 /* All other rates in the series have RTS enabled */
854 ath_rc_rate_set_series(rate_table, &rates[i], txrc, 853 ath_rc_rate_set_series(rate_table, &rates[i], txrc,
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index 0735aeb3b26..6a7dd26f2a1 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -20,43 +20,6 @@
20 20
21#define SKB_CB_ATHBUF(__skb) (*((struct ath_buf **)__skb->cb)) 21#define SKB_CB_ATHBUF(__skb) (*((struct ath_buf **)__skb->cb))
22 22
23static inline bool ath_is_alt_ant_ratio_better(int alt_ratio, int maxdelta,
24 int mindelta, int main_rssi_avg,
25 int alt_rssi_avg, int pkt_count)
26{
27 return (((alt_ratio >= ATH_ANT_DIV_COMB_ALT_ANT_RATIO2) &&
28 (alt_rssi_avg > main_rssi_avg + maxdelta)) ||
29 (alt_rssi_avg > main_rssi_avg + mindelta)) && (pkt_count > 50);
30}
31
32static inline bool ath_ant_div_comb_alt_check(u8 div_group, int alt_ratio,
33 int curr_main_set, int curr_alt_set,
34 int alt_rssi_avg, int main_rssi_avg)
35{
36 bool result = false;
37 switch (div_group) {
38 case 0:
39 if (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO)
40 result = true;
41 break;
42 case 1:
43 case 2:
44 if ((((curr_main_set == ATH_ANT_DIV_COMB_LNA2) &&
45 (curr_alt_set == ATH_ANT_DIV_COMB_LNA1) &&
46 (alt_rssi_avg >= (main_rssi_avg - 5))) ||
47 ((curr_main_set == ATH_ANT_DIV_COMB_LNA1) &&
48 (curr_alt_set == ATH_ANT_DIV_COMB_LNA2) &&
49 (alt_rssi_avg >= (main_rssi_avg - 2)))) &&
50 (alt_rssi_avg >= 4))
51 result = true;
52 else
53 result = false;
54 break;
55 }
56
57 return result;
58}
59
60static inline bool ath9k_check_auto_sleep(struct ath_softc *sc) 23static inline bool ath9k_check_auto_sleep(struct ath_softc *sc)
61{ 24{
62 return sc->ps_enabled && 25 return sc->ps_enabled &&
@@ -303,7 +266,7 @@ static void ath_edma_start_recv(struct ath_softc *sc)
303 266
304 ath_opmode_init(sc); 267 ath_opmode_init(sc);
305 268
306 ath9k_hw_startpcureceive(sc->sc_ah, (sc->sc_flags & SC_OP_OFFCHANNEL)); 269 ath9k_hw_startpcureceive(sc->sc_ah, !!(sc->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL));
307 270
308 spin_unlock_bh(&sc->rx.rxbuflock); 271 spin_unlock_bh(&sc->rx.rxbuflock);
309} 272}
@@ -322,8 +285,8 @@ int ath_rx_init(struct ath_softc *sc, int nbufs)
322 int error = 0; 285 int error = 0;
323 286
324 spin_lock_init(&sc->sc_pcu_lock); 287 spin_lock_init(&sc->sc_pcu_lock);
325 sc->sc_flags &= ~SC_OP_RXFLUSH;
326 spin_lock_init(&sc->rx.rxbuflock); 288 spin_lock_init(&sc->rx.rxbuflock);
289 clear_bit(SC_OP_RXFLUSH, &sc->sc_flags);
327 290
328 common->rx_bufsize = IEEE80211_MAX_MPDU_LEN / 2 + 291 common->rx_bufsize = IEEE80211_MAX_MPDU_LEN / 2 +
329 sc->sc_ah->caps.rx_status_len; 292 sc->sc_ah->caps.rx_status_len;
@@ -500,7 +463,7 @@ int ath_startrecv(struct ath_softc *sc)
500 463
501start_recv: 464start_recv:
502 ath_opmode_init(sc); 465 ath_opmode_init(sc);
503 ath9k_hw_startpcureceive(ah, (sc->sc_flags & SC_OP_OFFCHANNEL)); 466 ath9k_hw_startpcureceive(ah, !!(sc->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL));
504 467
505 spin_unlock_bh(&sc->rx.rxbuflock); 468 spin_unlock_bh(&sc->rx.rxbuflock);
506 469
@@ -535,11 +498,11 @@ bool ath_stoprecv(struct ath_softc *sc)
535 498
536void ath_flushrecv(struct ath_softc *sc) 499void ath_flushrecv(struct ath_softc *sc)
537{ 500{
538 sc->sc_flags |= SC_OP_RXFLUSH; 501 set_bit(SC_OP_RXFLUSH, &sc->sc_flags);
539 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) 502 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
540 ath_rx_tasklet(sc, 1, true); 503 ath_rx_tasklet(sc, 1, true);
541 ath_rx_tasklet(sc, 1, false); 504 ath_rx_tasklet(sc, 1, false);
542 sc->sc_flags &= ~SC_OP_RXFLUSH; 505 clear_bit(SC_OP_RXFLUSH, &sc->sc_flags);
543} 506}
544 507
545static bool ath_beacon_dtim_pending_cab(struct sk_buff *skb) 508static bool ath_beacon_dtim_pending_cab(struct sk_buff *skb)
@@ -624,13 +587,13 @@ static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb, bool mybeacon)
624 587
625 /* Process Beacon and CAB receive in PS state */ 588 /* Process Beacon and CAB receive in PS state */
626 if (((sc->ps_flags & PS_WAIT_FOR_BEACON) || ath9k_check_auto_sleep(sc)) 589 if (((sc->ps_flags & PS_WAIT_FOR_BEACON) || ath9k_check_auto_sleep(sc))
627 && mybeacon) 590 && mybeacon) {
628 ath_rx_ps_beacon(sc, skb); 591 ath_rx_ps_beacon(sc, skb);
629 else if ((sc->ps_flags & PS_WAIT_FOR_CAB) && 592 } else if ((sc->ps_flags & PS_WAIT_FOR_CAB) &&
630 (ieee80211_is_data(hdr->frame_control) || 593 (ieee80211_is_data(hdr->frame_control) ||
631 ieee80211_is_action(hdr->frame_control)) && 594 ieee80211_is_action(hdr->frame_control)) &&
632 is_multicast_ether_addr(hdr->addr1) && 595 is_multicast_ether_addr(hdr->addr1) &&
633 !ieee80211_has_moredata(hdr->frame_control)) { 596 !ieee80211_has_moredata(hdr->frame_control)) {
634 /* 597 /*
635 * No more broadcast/multicast frames to be received at this 598 * No more broadcast/multicast frames to be received at this
636 * point. 599 * point.
@@ -1068,709 +1031,6 @@ static void ath9k_rx_skb_postprocess(struct ath_common *common,
1068 rxs->flag &= ~RX_FLAG_DECRYPTED; 1031 rxs->flag &= ~RX_FLAG_DECRYPTED;
1069} 1032}
1070 1033
1071static void ath_lnaconf_alt_good_scan(struct ath_ant_comb *antcomb,
1072 struct ath_hw_antcomb_conf ant_conf,
1073 int main_rssi_avg)
1074{
1075 antcomb->quick_scan_cnt = 0;
1076
1077 if (ant_conf.main_lna_conf == ATH_ANT_DIV_COMB_LNA2)
1078 antcomb->rssi_lna2 = main_rssi_avg;
1079 else if (ant_conf.main_lna_conf == ATH_ANT_DIV_COMB_LNA1)
1080 antcomb->rssi_lna1 = main_rssi_avg;
1081
1082 switch ((ant_conf.main_lna_conf << 4) | ant_conf.alt_lna_conf) {
1083 case 0x10: /* LNA2 A-B */
1084 antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
1085 antcomb->first_quick_scan_conf =
1086 ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
1087 antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA1;
1088 break;
1089 case 0x20: /* LNA1 A-B */
1090 antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
1091 antcomb->first_quick_scan_conf =
1092 ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
1093 antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA2;
1094 break;
1095 case 0x21: /* LNA1 LNA2 */
1096 antcomb->main_conf = ATH_ANT_DIV_COMB_LNA2;
1097 antcomb->first_quick_scan_conf =
1098 ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
1099 antcomb->second_quick_scan_conf =
1100 ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
1101 break;
1102 case 0x12: /* LNA2 LNA1 */
1103 antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1;
1104 antcomb->first_quick_scan_conf =
1105 ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
1106 antcomb->second_quick_scan_conf =
1107 ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
1108 break;
1109 case 0x13: /* LNA2 A+B */
1110 antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
1111 antcomb->first_quick_scan_conf =
1112 ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
1113 antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA1;
1114 break;
1115 case 0x23: /* LNA1 A+B */
1116 antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
1117 antcomb->first_quick_scan_conf =
1118 ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
1119 antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA2;
1120 break;
1121 default:
1122 break;
1123 }
1124}
1125
1126static void ath_select_ant_div_from_quick_scan(struct ath_ant_comb *antcomb,
1127 struct ath_hw_antcomb_conf *div_ant_conf,
1128 int main_rssi_avg, int alt_rssi_avg,
1129 int alt_ratio)
1130{
1131 /* alt_good */
1132 switch (antcomb->quick_scan_cnt) {
1133 case 0:
1134 /* set alt to main, and alt to first conf */
1135 div_ant_conf->main_lna_conf = antcomb->main_conf;
1136 div_ant_conf->alt_lna_conf = antcomb->first_quick_scan_conf;
1137 break;
1138 case 1:
1139 /* set alt to main, and alt to first conf */
1140 div_ant_conf->main_lna_conf = antcomb->main_conf;
1141 div_ant_conf->alt_lna_conf = antcomb->second_quick_scan_conf;
1142 antcomb->rssi_first = main_rssi_avg;
1143 antcomb->rssi_second = alt_rssi_avg;
1144
1145 if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) {
1146 /* main is LNA1 */
1147 if (ath_is_alt_ant_ratio_better(alt_ratio,
1148 ATH_ANT_DIV_COMB_LNA1_DELTA_HI,
1149 ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
1150 main_rssi_avg, alt_rssi_avg,
1151 antcomb->total_pkt_count))
1152 antcomb->first_ratio = true;
1153 else
1154 antcomb->first_ratio = false;
1155 } else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2) {
1156 if (ath_is_alt_ant_ratio_better(alt_ratio,
1157 ATH_ANT_DIV_COMB_LNA1_DELTA_MID,
1158 ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
1159 main_rssi_avg, alt_rssi_avg,
1160 antcomb->total_pkt_count))
1161 antcomb->first_ratio = true;
1162 else
1163 antcomb->first_ratio = false;
1164 } else {
1165 if ((((alt_ratio >= ATH_ANT_DIV_COMB_ALT_ANT_RATIO2) &&
1166 (alt_rssi_avg > main_rssi_avg +
1167 ATH_ANT_DIV_COMB_LNA1_DELTA_HI)) ||
1168 (alt_rssi_avg > main_rssi_avg)) &&
1169 (antcomb->total_pkt_count > 50))
1170 antcomb->first_ratio = true;
1171 else
1172 antcomb->first_ratio = false;
1173 }
1174 break;
1175 case 2:
1176 antcomb->alt_good = false;
1177 antcomb->scan_not_start = false;
1178 antcomb->scan = false;
1179 antcomb->rssi_first = main_rssi_avg;
1180 antcomb->rssi_third = alt_rssi_avg;
1181
1182 if (antcomb->second_quick_scan_conf == ATH_ANT_DIV_COMB_LNA1)
1183 antcomb->rssi_lna1 = alt_rssi_avg;
1184 else if (antcomb->second_quick_scan_conf ==
1185 ATH_ANT_DIV_COMB_LNA2)
1186 antcomb->rssi_lna2 = alt_rssi_avg;
1187 else if (antcomb->second_quick_scan_conf ==
1188 ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2) {
1189 if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2)
1190 antcomb->rssi_lna2 = main_rssi_avg;
1191 else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1)
1192 antcomb->rssi_lna1 = main_rssi_avg;
1193 }
1194
1195 if (antcomb->rssi_lna2 > antcomb->rssi_lna1 +
1196 ATH_ANT_DIV_COMB_LNA1_LNA2_SWITCH_DELTA)
1197 div_ant_conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA2;
1198 else
1199 div_ant_conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA1;
1200
1201 if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) {
1202 if (ath_is_alt_ant_ratio_better(alt_ratio,
1203 ATH_ANT_DIV_COMB_LNA1_DELTA_HI,
1204 ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
1205 main_rssi_avg, alt_rssi_avg,
1206 antcomb->total_pkt_count))
1207 antcomb->second_ratio = true;
1208 else
1209 antcomb->second_ratio = false;
1210 } else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2) {
1211 if (ath_is_alt_ant_ratio_better(alt_ratio,
1212 ATH_ANT_DIV_COMB_LNA1_DELTA_MID,
1213 ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
1214 main_rssi_avg, alt_rssi_avg,
1215 antcomb->total_pkt_count))
1216 antcomb->second_ratio = true;
1217 else
1218 antcomb->second_ratio = false;
1219 } else {
1220 if ((((alt_ratio >= ATH_ANT_DIV_COMB_ALT_ANT_RATIO2) &&
1221 (alt_rssi_avg > main_rssi_avg +
1222 ATH_ANT_DIV_COMB_LNA1_DELTA_HI)) ||
1223 (alt_rssi_avg > main_rssi_avg)) &&
1224 (antcomb->total_pkt_count > 50))
1225 antcomb->second_ratio = true;
1226 else
1227 antcomb->second_ratio = false;
1228 }
1229
1230 /* set alt to the conf with maximun ratio */
1231 if (antcomb->first_ratio && antcomb->second_ratio) {
1232 if (antcomb->rssi_second > antcomb->rssi_third) {
1233 /* first alt*/
1234 if ((antcomb->first_quick_scan_conf ==
1235 ATH_ANT_DIV_COMB_LNA1) ||
1236 (antcomb->first_quick_scan_conf ==
1237 ATH_ANT_DIV_COMB_LNA2))
1238 /* Set alt LNA1 or LNA2*/
1239 if (div_ant_conf->main_lna_conf ==
1240 ATH_ANT_DIV_COMB_LNA2)
1241 div_ant_conf->alt_lna_conf =
1242 ATH_ANT_DIV_COMB_LNA1;
1243 else
1244 div_ant_conf->alt_lna_conf =
1245 ATH_ANT_DIV_COMB_LNA2;
1246 else
1247 /* Set alt to A+B or A-B */
1248 div_ant_conf->alt_lna_conf =
1249 antcomb->first_quick_scan_conf;
1250 } else if ((antcomb->second_quick_scan_conf ==
1251 ATH_ANT_DIV_COMB_LNA1) ||
1252 (antcomb->second_quick_scan_conf ==
1253 ATH_ANT_DIV_COMB_LNA2)) {
1254 /* Set alt LNA1 or LNA2 */
1255 if (div_ant_conf->main_lna_conf ==
1256 ATH_ANT_DIV_COMB_LNA2)
1257 div_ant_conf->alt_lna_conf =
1258 ATH_ANT_DIV_COMB_LNA1;
1259 else
1260 div_ant_conf->alt_lna_conf =
1261 ATH_ANT_DIV_COMB_LNA2;
1262 } else {
1263 /* Set alt to A+B or A-B */
1264 div_ant_conf->alt_lna_conf =
1265 antcomb->second_quick_scan_conf;
1266 }
1267 } else if (antcomb->first_ratio) {
1268 /* first alt */
1269 if ((antcomb->first_quick_scan_conf ==
1270 ATH_ANT_DIV_COMB_LNA1) ||
1271 (antcomb->first_quick_scan_conf ==
1272 ATH_ANT_DIV_COMB_LNA2))
1273 /* Set alt LNA1 or LNA2 */
1274 if (div_ant_conf->main_lna_conf ==
1275 ATH_ANT_DIV_COMB_LNA2)
1276 div_ant_conf->alt_lna_conf =
1277 ATH_ANT_DIV_COMB_LNA1;
1278 else
1279 div_ant_conf->alt_lna_conf =
1280 ATH_ANT_DIV_COMB_LNA2;
1281 else
1282 /* Set alt to A+B or A-B */
1283 div_ant_conf->alt_lna_conf =
1284 antcomb->first_quick_scan_conf;
1285 } else if (antcomb->second_ratio) {
1286 /* second alt */
1287 if ((antcomb->second_quick_scan_conf ==
1288 ATH_ANT_DIV_COMB_LNA1) ||
1289 (antcomb->second_quick_scan_conf ==
1290 ATH_ANT_DIV_COMB_LNA2))
1291 /* Set alt LNA1 or LNA2 */
1292 if (div_ant_conf->main_lna_conf ==
1293 ATH_ANT_DIV_COMB_LNA2)
1294 div_ant_conf->alt_lna_conf =
1295 ATH_ANT_DIV_COMB_LNA1;
1296 else
1297 div_ant_conf->alt_lna_conf =
1298 ATH_ANT_DIV_COMB_LNA2;
1299 else
1300 /* Set alt to A+B or A-B */
1301 div_ant_conf->alt_lna_conf =
1302 antcomb->second_quick_scan_conf;
1303 } else {
1304 /* main is largest */
1305 if ((antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) ||
1306 (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2))
1307 /* Set alt LNA1 or LNA2 */
1308 if (div_ant_conf->main_lna_conf ==
1309 ATH_ANT_DIV_COMB_LNA2)
1310 div_ant_conf->alt_lna_conf =
1311 ATH_ANT_DIV_COMB_LNA1;
1312 else
1313 div_ant_conf->alt_lna_conf =
1314 ATH_ANT_DIV_COMB_LNA2;
1315 else
1316 /* Set alt to A+B or A-B */
1317 div_ant_conf->alt_lna_conf = antcomb->main_conf;
1318 }
1319 break;
1320 default:
1321 break;
1322 }
1323}
1324
1325static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf,
1326 struct ath_ant_comb *antcomb, int alt_ratio)
1327{
1328 if (ant_conf->div_group == 0) {
1329 /* Adjust the fast_div_bias based on main and alt lna conf */
1330 switch ((ant_conf->main_lna_conf << 4) |
1331 ant_conf->alt_lna_conf) {
1332 case 0x01: /* A-B LNA2 */
1333 ant_conf->fast_div_bias = 0x3b;
1334 break;
1335 case 0x02: /* A-B LNA1 */
1336 ant_conf->fast_div_bias = 0x3d;
1337 break;
1338 case 0x03: /* A-B A+B */
1339 ant_conf->fast_div_bias = 0x1;
1340 break;
1341 case 0x10: /* LNA2 A-B */
1342 ant_conf->fast_div_bias = 0x7;
1343 break;
1344 case 0x12: /* LNA2 LNA1 */
1345 ant_conf->fast_div_bias = 0x2;
1346 break;
1347 case 0x13: /* LNA2 A+B */
1348 ant_conf->fast_div_bias = 0x7;
1349 break;
1350 case 0x20: /* LNA1 A-B */
1351 ant_conf->fast_div_bias = 0x6;
1352 break;
1353 case 0x21: /* LNA1 LNA2 */
1354 ant_conf->fast_div_bias = 0x0;
1355 break;
1356 case 0x23: /* LNA1 A+B */
1357 ant_conf->fast_div_bias = 0x6;
1358 break;
1359 case 0x30: /* A+B A-B */
1360 ant_conf->fast_div_bias = 0x1;
1361 break;
1362 case 0x31: /* A+B LNA2 */
1363 ant_conf->fast_div_bias = 0x3b;
1364 break;
1365 case 0x32: /* A+B LNA1 */
1366 ant_conf->fast_div_bias = 0x3d;
1367 break;
1368 default:
1369 break;
1370 }
1371 } else if (ant_conf->div_group == 1) {
1372 /* Adjust the fast_div_bias based on main and alt_lna_conf */
1373 switch ((ant_conf->main_lna_conf << 4) |
1374 ant_conf->alt_lna_conf) {
1375 case 0x01: /* A-B LNA2 */
1376 ant_conf->fast_div_bias = 0x1;
1377 ant_conf->main_gaintb = 0;
1378 ant_conf->alt_gaintb = 0;
1379 break;
1380 case 0x02: /* A-B LNA1 */
1381 ant_conf->fast_div_bias = 0x1;
1382 ant_conf->main_gaintb = 0;
1383 ant_conf->alt_gaintb = 0;
1384 break;
1385 case 0x03: /* A-B A+B */
1386 ant_conf->fast_div_bias = 0x1;
1387 ant_conf->main_gaintb = 0;
1388 ant_conf->alt_gaintb = 0;
1389 break;
1390 case 0x10: /* LNA2 A-B */
1391 if (!(antcomb->scan) &&
1392 (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
1393 ant_conf->fast_div_bias = 0x3f;
1394 else
1395 ant_conf->fast_div_bias = 0x1;
1396 ant_conf->main_gaintb = 0;
1397 ant_conf->alt_gaintb = 0;
1398 break;
1399 case 0x12: /* LNA2 LNA1 */
1400 ant_conf->fast_div_bias = 0x1;
1401 ant_conf->main_gaintb = 0;
1402 ant_conf->alt_gaintb = 0;
1403 break;
1404 case 0x13: /* LNA2 A+B */
1405 if (!(antcomb->scan) &&
1406 (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
1407 ant_conf->fast_div_bias = 0x3f;
1408 else
1409 ant_conf->fast_div_bias = 0x1;
1410 ant_conf->main_gaintb = 0;
1411 ant_conf->alt_gaintb = 0;
1412 break;
1413 case 0x20: /* LNA1 A-B */
1414 if (!(antcomb->scan) &&
1415 (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
1416 ant_conf->fast_div_bias = 0x3f;
1417 else
1418 ant_conf->fast_div_bias = 0x1;
1419 ant_conf->main_gaintb = 0;
1420 ant_conf->alt_gaintb = 0;
1421 break;
1422 case 0x21: /* LNA1 LNA2 */
1423 ant_conf->fast_div_bias = 0x1;
1424 ant_conf->main_gaintb = 0;
1425 ant_conf->alt_gaintb = 0;
1426 break;
1427 case 0x23: /* LNA1 A+B */
1428 if (!(antcomb->scan) &&
1429 (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
1430 ant_conf->fast_div_bias = 0x3f;
1431 else
1432 ant_conf->fast_div_bias = 0x1;
1433 ant_conf->main_gaintb = 0;
1434 ant_conf->alt_gaintb = 0;
1435 break;
1436 case 0x30: /* A+B A-B */
1437 ant_conf->fast_div_bias = 0x1;
1438 ant_conf->main_gaintb = 0;
1439 ant_conf->alt_gaintb = 0;
1440 break;
1441 case 0x31: /* A+B LNA2 */
1442 ant_conf->fast_div_bias = 0x1;
1443 ant_conf->main_gaintb = 0;
1444 ant_conf->alt_gaintb = 0;
1445 break;
1446 case 0x32: /* A+B LNA1 */
1447 ant_conf->fast_div_bias = 0x1;
1448 ant_conf->main_gaintb = 0;
1449 ant_conf->alt_gaintb = 0;
1450 break;
1451 default:
1452 break;
1453 }
1454 } else if (ant_conf->div_group == 2) {
1455 /* Adjust the fast_div_bias based on main and alt_lna_conf */
1456 switch ((ant_conf->main_lna_conf << 4) |
1457 ant_conf->alt_lna_conf) {
1458 case 0x01: /* A-B LNA2 */
1459 ant_conf->fast_div_bias = 0x1;
1460 ant_conf->main_gaintb = 0;
1461 ant_conf->alt_gaintb = 0;
1462 break;
1463 case 0x02: /* A-B LNA1 */
1464 ant_conf->fast_div_bias = 0x1;
1465 ant_conf->main_gaintb = 0;
1466 ant_conf->alt_gaintb = 0;
1467 break;
1468 case 0x03: /* A-B A+B */
1469 ant_conf->fast_div_bias = 0x1;
1470 ant_conf->main_gaintb = 0;
1471 ant_conf->alt_gaintb = 0;
1472 break;
1473 case 0x10: /* LNA2 A-B */
1474 if (!(antcomb->scan) &&
1475 (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
1476 ant_conf->fast_div_bias = 0x1;
1477 else
1478 ant_conf->fast_div_bias = 0x2;
1479 ant_conf->main_gaintb = 0;
1480 ant_conf->alt_gaintb = 0;
1481 break;
1482 case 0x12: /* LNA2 LNA1 */
1483 ant_conf->fast_div_bias = 0x1;
1484 ant_conf->main_gaintb = 0;
1485 ant_conf->alt_gaintb = 0;
1486 break;
1487 case 0x13: /* LNA2 A+B */
1488 if (!(antcomb->scan) &&
1489 (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
1490 ant_conf->fast_div_bias = 0x1;
1491 else
1492 ant_conf->fast_div_bias = 0x2;
1493 ant_conf->main_gaintb = 0;
1494 ant_conf->alt_gaintb = 0;
1495 break;
1496 case 0x20: /* LNA1 A-B */
1497 if (!(antcomb->scan) &&
1498 (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
1499 ant_conf->fast_div_bias = 0x1;
1500 else
1501 ant_conf->fast_div_bias = 0x2;
1502 ant_conf->main_gaintb = 0;
1503 ant_conf->alt_gaintb = 0;
1504 break;
1505 case 0x21: /* LNA1 LNA2 */
1506 ant_conf->fast_div_bias = 0x1;
1507 ant_conf->main_gaintb = 0;
1508 ant_conf->alt_gaintb = 0;
1509 break;
1510 case 0x23: /* LNA1 A+B */
1511 if (!(antcomb->scan) &&
1512 (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
1513 ant_conf->fast_div_bias = 0x1;
1514 else
1515 ant_conf->fast_div_bias = 0x2;
1516 ant_conf->main_gaintb = 0;
1517 ant_conf->alt_gaintb = 0;
1518 break;
1519 case 0x30: /* A+B A-B */
1520 ant_conf->fast_div_bias = 0x1;
1521 ant_conf->main_gaintb = 0;
1522 ant_conf->alt_gaintb = 0;
1523 break;
1524 case 0x31: /* A+B LNA2 */
1525 ant_conf->fast_div_bias = 0x1;
1526 ant_conf->main_gaintb = 0;
1527 ant_conf->alt_gaintb = 0;
1528 break;
1529 case 0x32: /* A+B LNA1 */
1530 ant_conf->fast_div_bias = 0x1;
1531 ant_conf->main_gaintb = 0;
1532 ant_conf->alt_gaintb = 0;
1533 break;
1534 default:
1535 break;
1536 }
1537 }
1538}
1539
1540/* Antenna diversity and combining */
1541static void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs)
1542{
1543 struct ath_hw_antcomb_conf div_ant_conf;
1544 struct ath_ant_comb *antcomb = &sc->ant_comb;
1545 int alt_ratio = 0, alt_rssi_avg = 0, main_rssi_avg = 0, curr_alt_set;
1546 int curr_main_set;
1547 int main_rssi = rs->rs_rssi_ctl0;
1548 int alt_rssi = rs->rs_rssi_ctl1;
1549 int rx_ant_conf, main_ant_conf;
1550 bool short_scan = false;
1551
1552 rx_ant_conf = (rs->rs_rssi_ctl2 >> ATH_ANT_RX_CURRENT_SHIFT) &
1553 ATH_ANT_RX_MASK;
1554 main_ant_conf = (rs->rs_rssi_ctl2 >> ATH_ANT_RX_MAIN_SHIFT) &
1555 ATH_ANT_RX_MASK;
1556
1557 /* Record packet only when both main_rssi and alt_rssi is positive */
1558 if (main_rssi > 0 && alt_rssi > 0) {
1559 antcomb->total_pkt_count++;
1560 antcomb->main_total_rssi += main_rssi;
1561 antcomb->alt_total_rssi += alt_rssi;
1562 if (main_ant_conf == rx_ant_conf)
1563 antcomb->main_recv_cnt++;
1564 else
1565 antcomb->alt_recv_cnt++;
1566 }
1567
1568 /* Short scan check */
1569 if (antcomb->scan && antcomb->alt_good) {
1570 if (time_after(jiffies, antcomb->scan_start_time +
1571 msecs_to_jiffies(ATH_ANT_DIV_COMB_SHORT_SCAN_INTR)))
1572 short_scan = true;
1573 else
1574 if (antcomb->total_pkt_count ==
1575 ATH_ANT_DIV_COMB_SHORT_SCAN_PKTCOUNT) {
1576 alt_ratio = ((antcomb->alt_recv_cnt * 100) /
1577 antcomb->total_pkt_count);
1578 if (alt_ratio < ATH_ANT_DIV_COMB_ALT_ANT_RATIO)
1579 short_scan = true;
1580 }
1581 }
1582
1583 if (((antcomb->total_pkt_count < ATH_ANT_DIV_COMB_MAX_PKTCOUNT) ||
1584 rs->rs_moreaggr) && !short_scan)
1585 return;
1586
1587 if (antcomb->total_pkt_count) {
1588 alt_ratio = ((antcomb->alt_recv_cnt * 100) /
1589 antcomb->total_pkt_count);
1590 main_rssi_avg = (antcomb->main_total_rssi /
1591 antcomb->total_pkt_count);
1592 alt_rssi_avg = (antcomb->alt_total_rssi /
1593 antcomb->total_pkt_count);
1594 }
1595
1596
1597 ath9k_hw_antdiv_comb_conf_get(sc->sc_ah, &div_ant_conf);
1598 curr_alt_set = div_ant_conf.alt_lna_conf;
1599 curr_main_set = div_ant_conf.main_lna_conf;
1600
1601 antcomb->count++;
1602
1603 if (antcomb->count == ATH_ANT_DIV_COMB_MAX_COUNT) {
1604 if (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO) {
1605 ath_lnaconf_alt_good_scan(antcomb, div_ant_conf,
1606 main_rssi_avg);
1607 antcomb->alt_good = true;
1608 } else {
1609 antcomb->alt_good = false;
1610 }
1611
1612 antcomb->count = 0;
1613 antcomb->scan = true;
1614 antcomb->scan_not_start = true;
1615 }
1616
1617 if (!antcomb->scan) {
1618 if (ath_ant_div_comb_alt_check(div_ant_conf.div_group,
1619 alt_ratio, curr_main_set, curr_alt_set,
1620 alt_rssi_avg, main_rssi_avg)) {
1621 if (curr_alt_set == ATH_ANT_DIV_COMB_LNA2) {
1622 /* Switch main and alt LNA */
1623 div_ant_conf.main_lna_conf =
1624 ATH_ANT_DIV_COMB_LNA2;
1625 div_ant_conf.alt_lna_conf =
1626 ATH_ANT_DIV_COMB_LNA1;
1627 } else if (curr_alt_set == ATH_ANT_DIV_COMB_LNA1) {
1628 div_ant_conf.main_lna_conf =
1629 ATH_ANT_DIV_COMB_LNA1;
1630 div_ant_conf.alt_lna_conf =
1631 ATH_ANT_DIV_COMB_LNA2;
1632 }
1633
1634 goto div_comb_done;
1635 } else if ((curr_alt_set != ATH_ANT_DIV_COMB_LNA1) &&
1636 (curr_alt_set != ATH_ANT_DIV_COMB_LNA2)) {
1637 /* Set alt to another LNA */
1638 if (curr_main_set == ATH_ANT_DIV_COMB_LNA2)
1639 div_ant_conf.alt_lna_conf =
1640 ATH_ANT_DIV_COMB_LNA1;
1641 else if (curr_main_set == ATH_ANT_DIV_COMB_LNA1)
1642 div_ant_conf.alt_lna_conf =
1643 ATH_ANT_DIV_COMB_LNA2;
1644
1645 goto div_comb_done;
1646 }
1647
1648 if ((alt_rssi_avg < (main_rssi_avg +
1649 div_ant_conf.lna1_lna2_delta)))
1650 goto div_comb_done;
1651 }
1652
1653 if (!antcomb->scan_not_start) {
1654 switch (curr_alt_set) {
1655 case ATH_ANT_DIV_COMB_LNA2:
1656 antcomb->rssi_lna2 = alt_rssi_avg;
1657 antcomb->rssi_lna1 = main_rssi_avg;
1658 antcomb->scan = true;
1659 /* set to A+B */
1660 div_ant_conf.main_lna_conf =
1661 ATH_ANT_DIV_COMB_LNA1;
1662 div_ant_conf.alt_lna_conf =
1663 ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
1664 break;
1665 case ATH_ANT_DIV_COMB_LNA1:
1666 antcomb->rssi_lna1 = alt_rssi_avg;
1667 antcomb->rssi_lna2 = main_rssi_avg;
1668 antcomb->scan = true;
1669 /* set to A+B */
1670 div_ant_conf.main_lna_conf = ATH_ANT_DIV_COMB_LNA2;
1671 div_ant_conf.alt_lna_conf =
1672 ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
1673 break;
1674 case ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2:
1675 antcomb->rssi_add = alt_rssi_avg;
1676 antcomb->scan = true;
1677 /* set to A-B */
1678 div_ant_conf.alt_lna_conf =
1679 ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
1680 break;
1681 case ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2:
1682 antcomb->rssi_sub = alt_rssi_avg;
1683 antcomb->scan = false;
1684 if (antcomb->rssi_lna2 >
1685 (antcomb->rssi_lna1 +
1686 ATH_ANT_DIV_COMB_LNA1_LNA2_SWITCH_DELTA)) {
1687 /* use LNA2 as main LNA */
1688 if ((antcomb->rssi_add > antcomb->rssi_lna1) &&
1689 (antcomb->rssi_add > antcomb->rssi_sub)) {
1690 /* set to A+B */
1691 div_ant_conf.main_lna_conf =
1692 ATH_ANT_DIV_COMB_LNA2;
1693 div_ant_conf.alt_lna_conf =
1694 ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
1695 } else if (antcomb->rssi_sub >
1696 antcomb->rssi_lna1) {
1697 /* set to A-B */
1698 div_ant_conf.main_lna_conf =
1699 ATH_ANT_DIV_COMB_LNA2;
1700 div_ant_conf.alt_lna_conf =
1701 ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
1702 } else {
1703 /* set to LNA1 */
1704 div_ant_conf.main_lna_conf =
1705 ATH_ANT_DIV_COMB_LNA2;
1706 div_ant_conf.alt_lna_conf =
1707 ATH_ANT_DIV_COMB_LNA1;
1708 }
1709 } else {
1710 /* use LNA1 as main LNA */
1711 if ((antcomb->rssi_add > antcomb->rssi_lna2) &&
1712 (antcomb->rssi_add > antcomb->rssi_sub)) {
1713 /* set to A+B */
1714 div_ant_conf.main_lna_conf =
1715 ATH_ANT_DIV_COMB_LNA1;
1716 div_ant_conf.alt_lna_conf =
1717 ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
1718 } else if (antcomb->rssi_sub >
1719 antcomb->rssi_lna1) {
1720 /* set to A-B */
1721 div_ant_conf.main_lna_conf =
1722 ATH_ANT_DIV_COMB_LNA1;
1723 div_ant_conf.alt_lna_conf =
1724 ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
1725 } else {
1726 /* set to LNA2 */
1727 div_ant_conf.main_lna_conf =
1728 ATH_ANT_DIV_COMB_LNA1;
1729 div_ant_conf.alt_lna_conf =
1730 ATH_ANT_DIV_COMB_LNA2;
1731 }
1732 }
1733 break;
1734 default:
1735 break;
1736 }
1737 } else {
1738 if (!antcomb->alt_good) {
1739 antcomb->scan_not_start = false;
1740 /* Set alt to another LNA */
1741 if (curr_main_set == ATH_ANT_DIV_COMB_LNA2) {
1742 div_ant_conf.main_lna_conf =
1743 ATH_ANT_DIV_COMB_LNA2;
1744 div_ant_conf.alt_lna_conf =
1745 ATH_ANT_DIV_COMB_LNA1;
1746 } else if (curr_main_set == ATH_ANT_DIV_COMB_LNA1) {
1747 div_ant_conf.main_lna_conf =
1748 ATH_ANT_DIV_COMB_LNA1;
1749 div_ant_conf.alt_lna_conf =
1750 ATH_ANT_DIV_COMB_LNA2;
1751 }
1752 goto div_comb_done;
1753 }
1754 }
1755
1756 ath_select_ant_div_from_quick_scan(antcomb, &div_ant_conf,
1757 main_rssi_avg, alt_rssi_avg,
1758 alt_ratio);
1759
1760 antcomb->quick_scan_cnt++;
1761
1762div_comb_done:
1763 ath_ant_div_conf_fast_divbias(&div_ant_conf, antcomb, alt_ratio);
1764 ath9k_hw_antdiv_comb_conf_set(sc->sc_ah, &div_ant_conf);
1765
1766 antcomb->scan_start_time = jiffies;
1767 antcomb->total_pkt_count = 0;
1768 antcomb->main_total_rssi = 0;
1769 antcomb->alt_total_rssi = 0;
1770 antcomb->main_recv_cnt = 0;
1771 antcomb->alt_recv_cnt = 0;
1772}
1773
1774int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp) 1034int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
1775{ 1035{
1776 struct ath_buf *bf; 1036 struct ath_buf *bf;
@@ -1804,7 +1064,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
1804 1064
1805 do { 1065 do {
1806 /* If handling rx interrupt and flush is in progress => exit */ 1066 /* If handling rx interrupt and flush is in progress => exit */
1807 if ((sc->sc_flags & SC_OP_RXFLUSH) && (flush == 0)) 1067 if (test_bit(SC_OP_RXFLUSH, &sc->sc_flags) && (flush == 0))
1808 break; 1068 break;
1809 1069
1810 memset(&rs, 0, sizeof(rs)); 1070 memset(&rs, 0, sizeof(rs));
@@ -1842,13 +1102,14 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
1842 else 1102 else
1843 rs.is_mybeacon = false; 1103 rs.is_mybeacon = false;
1844 1104
1105 sc->rx.num_pkts++;
1845 ath_debug_stat_rx(sc, &rs); 1106 ath_debug_stat_rx(sc, &rs);
1846 1107
1847 /* 1108 /*
1848 * If we're asked to flush receive queue, directly 1109 * If we're asked to flush receive queue, directly
1849 * chain it back at the queue without processing it. 1110 * chain it back at the queue without processing it.
1850 */ 1111 */
1851 if (sc->sc_flags & SC_OP_RXFLUSH) { 1112 if (test_bit(SC_OP_RXFLUSH, &sc->sc_flags)) {
1852 RX_STAT_INC(rx_drop_rxflush); 1113 RX_STAT_INC(rx_drop_rxflush);
1853 goto requeue_drop_frag; 1114 goto requeue_drop_frag;
1854 } 1115 }
@@ -1969,7 +1230,6 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
1969 skb_trim(skb, skb->len - 8); 1230 skb_trim(skb, skb->len - 8);
1970 1231
1971 spin_lock_irqsave(&sc->sc_pm_lock, flags); 1232 spin_lock_irqsave(&sc->sc_pm_lock, flags);
1972
1973 if ((sc->ps_flags & (PS_WAIT_FOR_BEACON | 1233 if ((sc->ps_flags & (PS_WAIT_FOR_BEACON |
1974 PS_WAIT_FOR_CAB | 1234 PS_WAIT_FOR_CAB |
1975 PS_WAIT_FOR_PSPOLL_DATA)) || 1235 PS_WAIT_FOR_PSPOLL_DATA)) ||
diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h
index 458f81b4a7c..5046b282a93 100644
--- a/drivers/net/wireless/ath/ath9k/reg.h
+++ b/drivers/net/wireless/ath/ath9k/reg.h
@@ -2098,8 +2098,8 @@ enum {
2098#define AR_MCI_CONT_STATUS 0x1848 2098#define AR_MCI_CONT_STATUS 0x1848
2099#define AR_MCI_CONT_RSSI_POWER 0x000000FF 2099#define AR_MCI_CONT_RSSI_POWER 0x000000FF
2100#define AR_MCI_CONT_RSSI_POWER_S 0 2100#define AR_MCI_CONT_RSSI_POWER_S 0
2101#define AR_MCI_CONT_RRIORITY 0x0000FF00 2101#define AR_MCI_CONT_PRIORITY 0x0000FF00
2102#define AR_MCI_CONT_RRIORITY_S 8 2102#define AR_MCI_CONT_PRIORITY_S 8
2103#define AR_MCI_CONT_TXRX 0x00010000 2103#define AR_MCI_CONT_TXRX 0x00010000
2104#define AR_MCI_CONT_TXRX_S 16 2104#define AR_MCI_CONT_TXRX_S 16
2105 2105
@@ -2162,10 +2162,6 @@ enum {
2162#define AR_BTCOEX_CTRL_SPDT_POLARITY 0x80000000 2162#define AR_BTCOEX_CTRL_SPDT_POLARITY 0x80000000
2163#define AR_BTCOEX_CTRL_SPDT_POLARITY_S 31 2163#define AR_BTCOEX_CTRL_SPDT_POLARITY_S 31
2164 2164
2165#define AR_BTCOEX_WL_WEIGHTS0 0x18b0
2166#define AR_BTCOEX_WL_WEIGHTS1 0x18b4
2167#define AR_BTCOEX_WL_WEIGHTS2 0x18b8
2168#define AR_BTCOEX_WL_WEIGHTS3 0x18bc
2169#define AR_BTCOEX_MAX_TXPWR(_x) (0x18c0 + ((_x) << 2)) 2165#define AR_BTCOEX_MAX_TXPWR(_x) (0x18c0 + ((_x) << 2))
2170#define AR_BTCOEX_WL_LNA 0x1940 2166#define AR_BTCOEX_WL_LNA 0x1940
2171#define AR_BTCOEX_RFGAIN_CTRL 0x1944 2167#define AR_BTCOEX_RFGAIN_CTRL 0x1944
@@ -2211,5 +2207,7 @@ enum {
2211#define AR_BTCOEX_CTRL3_CONT_INFO_TIMEOUT 0x00000fff 2207#define AR_BTCOEX_CTRL3_CONT_INFO_TIMEOUT 0x00000fff
2212#define AR_BTCOEX_CTRL3_CONT_INFO_TIMEOUT_S 0 2208#define AR_BTCOEX_CTRL3_CONT_INFO_TIMEOUT_S 0
2213 2209
2210#define AR_GLB_SWREG_DISCONT_MODE 0x2002c
2211#define AR_GLB_SWREG_DISCONT_EN_BT_WLAN 0x3
2214 2212
2215#endif 2213#endif
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 4d571394c7a..cafb4a09729 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -105,19 +105,19 @@ static int ath_max_4ms_framelen[4][32] = {
105/* Aggregation logic */ 105/* Aggregation logic */
106/*********************/ 106/*********************/
107 107
108static void ath_txq_lock(struct ath_softc *sc, struct ath_txq *txq) 108void ath_txq_lock(struct ath_softc *sc, struct ath_txq *txq)
109 __acquires(&txq->axq_lock) 109 __acquires(&txq->axq_lock)
110{ 110{
111 spin_lock_bh(&txq->axq_lock); 111 spin_lock_bh(&txq->axq_lock);
112} 112}
113 113
114static void ath_txq_unlock(struct ath_softc *sc, struct ath_txq *txq) 114void ath_txq_unlock(struct ath_softc *sc, struct ath_txq *txq)
115 __releases(&txq->axq_lock) 115 __releases(&txq->axq_lock)
116{ 116{
117 spin_unlock_bh(&txq->axq_lock); 117 spin_unlock_bh(&txq->axq_lock);
118} 118}
119 119
120static void ath_txq_unlock_complete(struct ath_softc *sc, struct ath_txq *txq) 120void ath_txq_unlock_complete(struct ath_softc *sc, struct ath_txq *txq)
121 __releases(&txq->axq_lock) 121 __releases(&txq->axq_lock)
122{ 122{
123 struct sk_buff_head q; 123 struct sk_buff_head q;
@@ -1165,6 +1165,7 @@ int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
1165{ 1165{
1166 struct ath_atx_tid *txtid; 1166 struct ath_atx_tid *txtid;
1167 struct ath_node *an; 1167 struct ath_node *an;
1168 u8 density;
1168 1169
1169 an = (struct ath_node *)sta->drv_priv; 1170 an = (struct ath_node *)sta->drv_priv;
1170 txtid = ATH_AN_2_TID(an, tid); 1171 txtid = ATH_AN_2_TID(an, tid);
@@ -1172,6 +1173,17 @@ int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
1172 if (txtid->state & (AGGR_CLEANUP | AGGR_ADDBA_COMPLETE)) 1173 if (txtid->state & (AGGR_CLEANUP | AGGR_ADDBA_COMPLETE))
1173 return -EAGAIN; 1174 return -EAGAIN;
1174 1175
1176 /* update ampdu factor/density, they may have changed. This may happen
1177 * in HT IBSS when a beacon with HT-info is received after the station
1178 * has already been added.
1179 */
1180 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
1181 an->maxampdu = 1 << (IEEE80211_HT_MAX_AMPDU_FACTOR +
1182 sta->ht_cap.ampdu_factor);
1183 density = ath9k_parse_mpdudensity(sta->ht_cap.ampdu_density);
1184 an->mpdudensity = density;
1185 }
1186
1175 txtid->state |= AGGR_ADDBA_PROGRESS; 1187 txtid->state |= AGGR_ADDBA_PROGRESS;
1176 txtid->paused = true; 1188 txtid->paused = true;
1177 *ssn = txtid->seq_start = txtid->seq_next; 1189 *ssn = txtid->seq_start = txtid->seq_next;
@@ -1526,7 +1538,7 @@ bool ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
1526 int i; 1538 int i;
1527 u32 npend = 0; 1539 u32 npend = 0;
1528 1540
1529 if (sc->sc_flags & SC_OP_INVALID) 1541 if (test_bit(SC_OP_INVALID, &sc->sc_flags))
1530 return true; 1542 return true;
1531 1543
1532 ath9k_hw_abort_tx_dma(ah); 1544 ath9k_hw_abort_tx_dma(ah);
@@ -1999,6 +2011,7 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
1999 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 2011 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2000 struct ieee80211_hdr * hdr = (struct ieee80211_hdr *)skb->data; 2012 struct ieee80211_hdr * hdr = (struct ieee80211_hdr *)skb->data;
2001 int q, padpos, padsize; 2013 int q, padpos, padsize;
2014 unsigned long flags;
2002 2015
2003 ath_dbg(common, XMIT, "TX complete: skb: %p\n", skb); 2016 ath_dbg(common, XMIT, "TX complete: skb: %p\n", skb);
2004 2017
@@ -2017,6 +2030,7 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
2017 skb_pull(skb, padsize); 2030 skb_pull(skb, padsize);
2018 } 2031 }
2019 2032
2033 spin_lock_irqsave(&sc->sc_pm_lock, flags);
2020 if ((sc->ps_flags & PS_WAIT_FOR_TX_ACK) && !txq->axq_depth) { 2034 if ((sc->ps_flags & PS_WAIT_FOR_TX_ACK) && !txq->axq_depth) {
2021 sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK; 2035 sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
2022 ath_dbg(common, PS, 2036 ath_dbg(common, PS,
@@ -2026,6 +2040,7 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
2026 PS_WAIT_FOR_PSPOLL_DATA | 2040 PS_WAIT_FOR_PSPOLL_DATA |
2027 PS_WAIT_FOR_TX_ACK)); 2041 PS_WAIT_FOR_TX_ACK));
2028 } 2042 }
2043 spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
2029 2044
2030 q = skb_get_queue_mapping(skb); 2045 q = skb_get_queue_mapping(skb);
2031 if (txq == sc->tx.txq_map[q]) { 2046 if (txq == sc->tx.txq_map[q]) {
@@ -2236,46 +2251,6 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
2236 ath_txq_unlock_complete(sc, txq); 2251 ath_txq_unlock_complete(sc, txq);
2237} 2252}
2238 2253
2239static void ath_tx_complete_poll_work(struct work_struct *work)
2240{
2241 struct ath_softc *sc = container_of(work, struct ath_softc,
2242 tx_complete_work.work);
2243 struct ath_txq *txq;
2244 int i;
2245 bool needreset = false;
2246#ifdef CONFIG_ATH9K_DEBUGFS
2247 sc->tx_complete_poll_work_seen++;
2248#endif
2249
2250 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
2251 if (ATH_TXQ_SETUP(sc, i)) {
2252 txq = &sc->tx.txq[i];
2253 ath_txq_lock(sc, txq);
2254 if (txq->axq_depth) {
2255 if (txq->axq_tx_inprogress) {
2256 needreset = true;
2257 ath_txq_unlock(sc, txq);
2258 break;
2259 } else {
2260 txq->axq_tx_inprogress = true;
2261 }
2262 }
2263 ath_txq_unlock_complete(sc, txq);
2264 }
2265
2266 if (needreset) {
2267 ath_dbg(ath9k_hw_common(sc->sc_ah), RESET,
2268 "tx hung, resetting the chip\n");
2269 RESET_STAT_INC(sc, RESET_TYPE_TX_HANG);
2270 ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
2271 }
2272
2273 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
2274 msecs_to_jiffies(ATH_TX_COMPLETE_POLL_INT));
2275}
2276
2277
2278
2279void ath_tx_tasklet(struct ath_softc *sc) 2254void ath_tx_tasklet(struct ath_softc *sc)
2280{ 2255{
2281 struct ath_hw *ah = sc->sc_ah; 2256 struct ath_hw *ah = sc->sc_ah;
diff --git a/drivers/net/wireless/ath/carl9170/cmd.c b/drivers/net/wireless/ath/carl9170/cmd.c
index 195dc653811..39a63874b27 100644
--- a/drivers/net/wireless/ath/carl9170/cmd.c
+++ b/drivers/net/wireless/ath/carl9170/cmd.c
@@ -138,7 +138,7 @@ int carl9170_reboot(struct ar9170 *ar)
138 if (!cmd) 138 if (!cmd)
139 return -ENOMEM; 139 return -ENOMEM;
140 140
141 err = __carl9170_exec_cmd(ar, (struct carl9170_cmd *)cmd, true); 141 err = __carl9170_exec_cmd(ar, cmd, true);
142 return err; 142 return err;
143} 143}
144 144
diff --git a/drivers/net/wireless/ath/carl9170/rx.c b/drivers/net/wireless/ath/carl9170/rx.c
index 84b22eec7ab..7a8e90eaad8 100644
--- a/drivers/net/wireless/ath/carl9170/rx.c
+++ b/drivers/net/wireless/ath/carl9170/rx.c
@@ -161,7 +161,7 @@ static void carl9170_cmd_callback(struct ar9170 *ar, u32 len, void *buffer)
161 161
162void carl9170_handle_command_response(struct ar9170 *ar, void *buf, u32 len) 162void carl9170_handle_command_response(struct ar9170 *ar, void *buf, u32 len)
163{ 163{
164 struct carl9170_rsp *cmd = (void *) buf; 164 struct carl9170_rsp *cmd = buf;
165 struct ieee80211_vif *vif; 165 struct ieee80211_vif *vif;
166 166
167 if (carl9170_check_sequence(ar, cmd->hdr.seq)) 167 if (carl9170_check_sequence(ar, cmd->hdr.seq))
@@ -520,7 +520,7 @@ static u8 *carl9170_find_ie(u8 *data, unsigned int len, u8 ie)
520 */ 520 */
521static void carl9170_ps_beacon(struct ar9170 *ar, void *data, unsigned int len) 521static void carl9170_ps_beacon(struct ar9170 *ar, void *data, unsigned int len)
522{ 522{
523 struct ieee80211_hdr *hdr = (void *) data; 523 struct ieee80211_hdr *hdr = data;
524 struct ieee80211_tim_ie *tim_ie; 524 struct ieee80211_tim_ie *tim_ie;
525 u8 *tim; 525 u8 *tim;
526 u8 tim_len; 526 u8 tim_len;
diff --git a/drivers/net/wireless/atmel.c b/drivers/net/wireless/atmel.c
index d07c0301da6..4a4e98f7180 100644
--- a/drivers/net/wireless/atmel.c
+++ b/drivers/net/wireless/atmel.c
@@ -2952,10 +2952,10 @@ static void send_association_request(struct atmel_private *priv, int is_reassoc)
2952 /* current AP address - only in reassoc frame */ 2952 /* current AP address - only in reassoc frame */
2953 if (is_reassoc) { 2953 if (is_reassoc) {
2954 memcpy(body.ap, priv->CurrentBSSID, 6); 2954 memcpy(body.ap, priv->CurrentBSSID, 6);
2955 ssid_el_p = (u8 *)&body.ssid_el_id; 2955 ssid_el_p = &body.ssid_el_id;
2956 bodysize = 18 + priv->SSID_size; 2956 bodysize = 18 + priv->SSID_size;
2957 } else { 2957 } else {
2958 ssid_el_p = (u8 *)&body.ap[0]; 2958 ssid_el_p = &body.ap[0];
2959 bodysize = 12 + priv->SSID_size; 2959 bodysize = 12 + priv->SSID_size;
2960 } 2960 }
2961 2961
diff --git a/drivers/net/wireless/b43legacy/dma.c b/drivers/net/wireless/b43legacy/dma.c
index c8baf020c20..2d3c6644f82 100644
--- a/drivers/net/wireless/b43legacy/dma.c
+++ b/drivers/net/wireless/b43legacy/dma.c
@@ -52,7 +52,7 @@ struct b43legacy_dmadesc32 *op32_idx2desc(struct b43legacy_dmaring *ring,
52 desc = ring->descbase; 52 desc = ring->descbase;
53 desc = &(desc[slot]); 53 desc = &(desc[slot]);
54 54
55 return (struct b43legacy_dmadesc32 *)desc; 55 return desc;
56} 56}
57 57
58static void op32_fill_descriptor(struct b43legacy_dmaring *ring, 58static void op32_fill_descriptor(struct b43legacy_dmaring *ring,
diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c
index eae691e2f7d..8156135a059 100644
--- a/drivers/net/wireless/b43legacy/main.c
+++ b/drivers/net/wireless/b43legacy/main.c
@@ -1508,7 +1508,7 @@ static void b43legacy_release_firmware(struct b43legacy_wldev *dev)
1508 1508
1509static void b43legacy_print_fw_helptext(struct b43legacy_wl *wl) 1509static void b43legacy_print_fw_helptext(struct b43legacy_wl *wl)
1510{ 1510{
1511 b43legacyerr(wl, "You must go to http://linuxwireless.org/en/users/" 1511 b43legacyerr(wl, "You must go to http://wireless.kernel.org/en/users/"
1512 "Drivers/b43#devicefirmware " 1512 "Drivers/b43#devicefirmware "
1513 "and download the correct firmware (version 3).\n"); 1513 "and download the correct firmware (version 3).\n");
1514} 1514}
diff --git a/drivers/net/wireless/b43legacy/xmit.c b/drivers/net/wireless/b43legacy/xmit.c
index a8012f2749e..b8ffea6f5c6 100644
--- a/drivers/net/wireless/b43legacy/xmit.c
+++ b/drivers/net/wireless/b43legacy/xmit.c
@@ -269,8 +269,7 @@ static int generate_txhdr_fw3(struct b43legacy_wldev *dev,
269 b43legacy_generate_plcp_hdr((struct b43legacy_plcp_hdr4 *) 269 b43legacy_generate_plcp_hdr((struct b43legacy_plcp_hdr4 *)
270 (&txhdr->plcp), plcp_fragment_len, 270 (&txhdr->plcp), plcp_fragment_len,
271 rate); 271 rate);
272 b43legacy_generate_plcp_hdr((struct b43legacy_plcp_hdr4 *) 272 b43legacy_generate_plcp_hdr(&txhdr->plcp_fb, plcp_fragment_len,
273 (&txhdr->plcp_fb), plcp_fragment_len,
274 rate_fb->hw_value); 273 rate_fb->hw_value);
275 274
276 /* PHY TX Control word */ 275 /* PHY TX Control word */
@@ -340,8 +339,7 @@ static int generate_txhdr_fw3(struct b43legacy_wldev *dev,
340 b43legacy_generate_plcp_hdr((struct b43legacy_plcp_hdr4 *) 339 b43legacy_generate_plcp_hdr((struct b43legacy_plcp_hdr4 *)
341 (&txhdr->rts_plcp), 340 (&txhdr->rts_plcp),
342 len, rts_rate); 341 len, rts_rate);
343 b43legacy_generate_plcp_hdr((struct b43legacy_plcp_hdr4 *) 342 b43legacy_generate_plcp_hdr(&txhdr->rts_plcp_fb,
344 (&txhdr->rts_plcp_fb),
345 len, rts_rate_fb); 343 len, rts_rate_fb);
346 hdr = (struct ieee80211_hdr *)(&txhdr->rts_frame); 344 hdr = (struct ieee80211_hdr *)(&txhdr->rts_frame);
347 txhdr->rts_dur_fb = hdr->duration_id; 345 txhdr->rts_dur_fb = hdr->duration_id;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/Makefile b/drivers/net/wireless/brcm80211/brcmfmac/Makefile
index abb48032753..9d5170b6df5 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/Makefile
+++ b/drivers/net/wireless/brcm80211/brcmfmac/Makefile
@@ -34,3 +34,5 @@ brcmfmac-$(CONFIG_BRCMFMAC_SDIO) += \
34 sdio_chip.o 34 sdio_chip.o
35brcmfmac-$(CONFIG_BRCMFMAC_USB) += \ 35brcmfmac-$(CONFIG_BRCMFMAC_USB) += \
36 usb.o 36 usb.o
37brcmfmac-$(CONFIG_BRCMDBG) += \
38 dhd_dbg.o \ No newline at end of file
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
index 82f51dbd0d6..49765d34b4e 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
@@ -44,6 +44,7 @@
44 44
45#define SDIO_DEVICE_ID_BROADCOM_4329 0x4329 45#define SDIO_DEVICE_ID_BROADCOM_4329 0x4329
46#define SDIO_DEVICE_ID_BROADCOM_4330 0x4330 46#define SDIO_DEVICE_ID_BROADCOM_4330 0x4330
47#define SDIO_DEVICE_ID_BROADCOM_4334 0x4334
47 48
48#define SDIO_FUNC1_BLOCKSIZE 64 49#define SDIO_FUNC1_BLOCKSIZE 64
49#define SDIO_FUNC2_BLOCKSIZE 512 50#define SDIO_FUNC2_BLOCKSIZE 512
@@ -52,6 +53,7 @@
52static const struct sdio_device_id brcmf_sdmmc_ids[] = { 53static const struct sdio_device_id brcmf_sdmmc_ids[] = {
53 {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4329)}, 54 {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4329)},
54 {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4330)}, 55 {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4330)},
56 {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4334)},
55 { /* end: all zeroes */ }, 57 { /* end: all zeroes */ },
56}; 58};
57MODULE_DEVICE_TABLE(sdio, brcmf_sdmmc_ids); 59MODULE_DEVICE_TABLE(sdio, brcmf_sdmmc_ids);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
index 9f637014486..a11fe54f595 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
@@ -613,6 +613,9 @@ struct brcmf_pub {
613 struct work_struct multicast_work; 613 struct work_struct multicast_work;
614 u8 macvalue[ETH_ALEN]; 614 u8 macvalue[ETH_ALEN];
615 atomic_t pend_8021x_cnt; 615 atomic_t pend_8021x_cnt;
616#ifdef DEBUG
617 struct dentry *dbgfs_dir;
618#endif
616}; 619};
617 620
618struct brcmf_if_event { 621struct brcmf_if_event {
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
index 366916494be..537f499cc5d 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
@@ -36,6 +36,13 @@ struct dngl_stats {
36 unsigned long multicast; /* multicast packets received */ 36 unsigned long multicast; /* multicast packets received */
37}; 37};
38 38
39struct brcmf_bus_dcmd {
40 char *name;
41 char *param;
42 int param_len;
43 struct list_head list;
44};
45
39/* interface structure between common and bus layer */ 46/* interface structure between common and bus layer */
40struct brcmf_bus { 47struct brcmf_bus {
41 u8 type; /* bus type */ 48 u8 type; /* bus type */
@@ -50,6 +57,7 @@ struct brcmf_bus {
50 unsigned long tx_realloc; /* Tx packets realloced for headroom */ 57 unsigned long tx_realloc; /* Tx packets realloced for headroom */
51 struct dngl_stats dstats; /* Stats for dongle-based data */ 58 struct dngl_stats dstats; /* Stats for dongle-based data */
52 u8 align; /* bus alignment requirement */ 59 u8 align; /* bus alignment requirement */
60 struct list_head dcmd_list;
53 61
54 /* interface functions pointers */ 62 /* interface functions pointers */
55 /* Stop bus module: clear pending frames, disable data flow */ 63 /* Stop bus module: clear pending frames, disable data flow */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c
index 236cb9fa460..2621dd3d7dc 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c
@@ -800,13 +800,13 @@ int brcmf_c_preinit_dcmds(struct brcmf_pub *drvr)
800 char iovbuf[BRCMF_EVENTING_MASK_LEN + 12]; /* Room for 800 char iovbuf[BRCMF_EVENTING_MASK_LEN + 12]; /* Room for
801 "event_msgs" + '\0' + bitvec */ 801 "event_msgs" + '\0' + bitvec */
802 char buf[128], *ptr; 802 char buf[128], *ptr;
803 u32 dongle_align = drvr->bus_if->align;
804 u32 glom = 0;
805 u32 roaming = 1; 803 u32 roaming = 1;
806 uint bcn_timeout = 3; 804 uint bcn_timeout = 3;
807 int scan_assoc_time = 40; 805 int scan_assoc_time = 40;
808 int scan_unassoc_time = 40; 806 int scan_unassoc_time = 40;
809 int i; 807 int i;
808 struct brcmf_bus_dcmd *cmdlst;
809 struct list_head *cur, *q;
810 810
811 mutex_lock(&drvr->proto_block); 811 mutex_lock(&drvr->proto_block);
812 812
@@ -827,17 +827,6 @@ int brcmf_c_preinit_dcmds(struct brcmf_pub *drvr)
827 /* Print fw version info */ 827 /* Print fw version info */
828 brcmf_dbg(ERROR, "Firmware version = %s\n", buf); 828 brcmf_dbg(ERROR, "Firmware version = %s\n", buf);
829 829
830 /* Match Host and Dongle rx alignment */
831 brcmf_c_mkiovar("bus:txglomalign", (char *)&dongle_align, 4, iovbuf,
832 sizeof(iovbuf));
833 brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_SET_VAR, iovbuf,
834 sizeof(iovbuf));
835
836 /* disable glom option per default */
837 brcmf_c_mkiovar("bus:txglom", (char *)&glom, 4, iovbuf, sizeof(iovbuf));
838 brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_SET_VAR, iovbuf,
839 sizeof(iovbuf));
840
841 /* Setup timeout if Beacons are lost and roam is off to report 830 /* Setup timeout if Beacons are lost and roam is off to report
842 link down */ 831 link down */
843 brcmf_c_mkiovar("bcn_timeout", (char *)&bcn_timeout, 4, iovbuf, 832 brcmf_c_mkiovar("bcn_timeout", (char *)&bcn_timeout, 4, iovbuf,
@@ -874,6 +863,20 @@ int brcmf_c_preinit_dcmds(struct brcmf_pub *drvr)
874 0, true); 863 0, true);
875 } 864 }
876 865
866 /* set bus specific command if there is any */
867 list_for_each_safe(cur, q, &drvr->bus_if->dcmd_list) {
868 cmdlst = list_entry(cur, struct brcmf_bus_dcmd, list);
869 if (cmdlst->name && cmdlst->param && cmdlst->param_len) {
870 brcmf_c_mkiovar(cmdlst->name, cmdlst->param,
871 cmdlst->param_len, iovbuf,
872 sizeof(iovbuf));
873 brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_SET_VAR,
874 iovbuf, sizeof(iovbuf));
875 }
876 list_del(cur);
877 kfree(cmdlst);
878 }
879
877 mutex_unlock(&drvr->proto_block); 880 mutex_unlock(&drvr->proto_block);
878 881
879 return 0; 882 return 0;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.c
new file mode 100644
index 00000000000..7f89540b56d
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.c
@@ -0,0 +1,126 @@
1/*
2 * Copyright (c) 2012 Broadcom Corporation
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
11 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
13 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
14 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16#include <linux/debugfs.h>
17#include <linux/if_ether.h>
18#include <linux/if.h>
19#include <linux/ieee80211.h>
20#include <linux/module.h>
21
22#include <defs.h>
23#include <brcmu_wifi.h>
24#include <brcmu_utils.h>
25#include "dhd.h"
26#include "dhd_bus.h"
27#include "dhd_dbg.h"
28
29static struct dentry *root_folder;
30
31void brcmf_debugfs_init(void)
32{
33 root_folder = debugfs_create_dir(KBUILD_MODNAME, NULL);
34 if (IS_ERR(root_folder))
35 root_folder = NULL;
36}
37
38void brcmf_debugfs_exit(void)
39{
40 if (!root_folder)
41 return;
42
43 debugfs_remove_recursive(root_folder);
44 root_folder = NULL;
45}
46
47int brcmf_debugfs_attach(struct brcmf_pub *drvr)
48{
49 if (!root_folder)
50 return -ENODEV;
51
52 drvr->dbgfs_dir = debugfs_create_dir(dev_name(drvr->dev), root_folder);
53 return PTR_RET(drvr->dbgfs_dir);
54}
55
56void brcmf_debugfs_detach(struct brcmf_pub *drvr)
57{
58 if (!IS_ERR_OR_NULL(drvr->dbgfs_dir))
59 debugfs_remove_recursive(drvr->dbgfs_dir);
60}
61
62struct dentry *brcmf_debugfs_get_devdir(struct brcmf_pub *drvr)
63{
64 return drvr->dbgfs_dir;
65}
66
67static
68ssize_t brcmf_debugfs_sdio_counter_read(struct file *f, char __user *data,
69 size_t count, loff_t *ppos)
70{
71 struct brcmf_sdio_count *sdcnt = f->private_data;
72 char buf[750];
73 int res;
74
75 /* only allow read from start */
76 if (*ppos > 0)
77 return 0;
78
79 res = scnprintf(buf, sizeof(buf),
80 "intrcount: %u\nlastintrs: %u\n"
81 "pollcnt: %u\nregfails: %u\n"
82 "tx_sderrs: %u\nfcqueued: %u\n"
83 "rxrtx: %u\nrx_toolong: %u\n"
84 "rxc_errors: %u\nrx_hdrfail: %u\n"
85 "rx_badhdr: %u\nrx_badseq: %u\n"
86 "fc_rcvd: %u\nfc_xoff: %u\n"
87 "fc_xon: %u\nrxglomfail: %u\n"
88 "rxglomframes: %u\nrxglompkts: %u\n"
89 "f2rxhdrs: %u\nf2rxdata: %u\n"
90 "f2txdata: %u\nf1regdata: %u\n"
91 "tickcnt: %u\ntx_ctlerrs: %lu\n"
92 "tx_ctlpkts: %lu\nrx_ctlerrs: %lu\n"
93 "rx_ctlpkts: %lu\nrx_readahead: %lu\n",
94 sdcnt->intrcount, sdcnt->lastintrs,
95 sdcnt->pollcnt, sdcnt->regfails,
96 sdcnt->tx_sderrs, sdcnt->fcqueued,
97 sdcnt->rxrtx, sdcnt->rx_toolong,
98 sdcnt->rxc_errors, sdcnt->rx_hdrfail,
99 sdcnt->rx_badhdr, sdcnt->rx_badseq,
100 sdcnt->fc_rcvd, sdcnt->fc_xoff,
101 sdcnt->fc_xon, sdcnt->rxglomfail,
102 sdcnt->rxglomframes, sdcnt->rxglompkts,
103 sdcnt->f2rxhdrs, sdcnt->f2rxdata,
104 sdcnt->f2txdata, sdcnt->f1regdata,
105 sdcnt->tickcnt, sdcnt->tx_ctlerrs,
106 sdcnt->tx_ctlpkts, sdcnt->rx_ctlerrs,
107 sdcnt->rx_ctlpkts, sdcnt->rx_readahead_cnt);
108
109 return simple_read_from_buffer(data, count, ppos, buf, res);
110}
111
112static const struct file_operations brcmf_debugfs_sdio_counter_ops = {
113 .owner = THIS_MODULE,
114 .open = simple_open,
115 .read = brcmf_debugfs_sdio_counter_read
116};
117
118void brcmf_debugfs_create_sdio_count(struct brcmf_pub *drvr,
119 struct brcmf_sdio_count *sdcnt)
120{
121 struct dentry *dentry = drvr->dbgfs_dir;
122
123 if (!IS_ERR_OR_NULL(dentry))
124 debugfs_create_file("counters", S_IRUGO, dentry,
125 sdcnt, &brcmf_debugfs_sdio_counter_ops);
126}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h
index a2c4576cf9f..b784920532d 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h
@@ -76,4 +76,63 @@ do { \
76 76
77extern int brcmf_msg_level; 77extern int brcmf_msg_level;
78 78
79/*
80 * hold counter variables used in brcmfmac sdio driver.
81 */
82struct brcmf_sdio_count {
83 uint intrcount; /* Count of device interrupt callbacks */
84 uint lastintrs; /* Count as of last watchdog timer */
85 uint pollcnt; /* Count of active polls */
86 uint regfails; /* Count of R_REG failures */
87 uint tx_sderrs; /* Count of tx attempts with sd errors */
88 uint fcqueued; /* Tx packets that got queued */
89 uint rxrtx; /* Count of rtx requests (NAK to dongle) */
90 uint rx_toolong; /* Receive frames too long to receive */
91 uint rxc_errors; /* SDIO errors when reading control frames */
92 uint rx_hdrfail; /* SDIO errors on header reads */
93 uint rx_badhdr; /* Bad received headers (roosync?) */
94 uint rx_badseq; /* Mismatched rx sequence number */
95 uint fc_rcvd; /* Number of flow-control events received */
96 uint fc_xoff; /* Number which turned on flow-control */
97 uint fc_xon; /* Number which turned off flow-control */
98 uint rxglomfail; /* Failed deglom attempts */
99 uint rxglomframes; /* Number of glom frames (superframes) */
100 uint rxglompkts; /* Number of packets from glom frames */
101 uint f2rxhdrs; /* Number of header reads */
102 uint f2rxdata; /* Number of frame data reads */
103 uint f2txdata; /* Number of f2 frame writes */
104 uint f1regdata; /* Number of f1 register accesses */
105 uint tickcnt; /* Number of watchdog been schedule */
106 ulong tx_ctlerrs; /* Err of sending ctrl frames */
107 ulong tx_ctlpkts; /* Ctrl frames sent to dongle */
108 ulong rx_ctlerrs; /* Err of processing rx ctrl frames */
109 ulong rx_ctlpkts; /* Ctrl frames processed from dongle */
110 ulong rx_readahead_cnt; /* packets where header read-ahead was used */
111};
112
113struct brcmf_pub;
114#ifdef DEBUG
115void brcmf_debugfs_init(void);
116void brcmf_debugfs_exit(void);
117int brcmf_debugfs_attach(struct brcmf_pub *drvr);
118void brcmf_debugfs_detach(struct brcmf_pub *drvr);
119struct dentry *brcmf_debugfs_get_devdir(struct brcmf_pub *drvr);
120void brcmf_debugfs_create_sdio_count(struct brcmf_pub *drvr,
121 struct brcmf_sdio_count *sdcnt);
122#else
123static inline void brcmf_debugfs_init(void)
124{
125}
126static inline void brcmf_debugfs_exit(void)
127{
128}
129static inline int brcmf_debugfs_attach(struct brcmf_pub *drvr)
130{
131 return 0;
132}
133static inline void brcmf_debugfs_detach(struct brcmf_pub *drvr)
134{
135}
136#endif
137
79#endif /* _BRCMF_DBG_H_ */ 138#endif /* _BRCMF_DBG_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
index 8933f9b31a9..57bf1d7ee80 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
@@ -1007,6 +1007,9 @@ int brcmf_attach(uint bus_hdrlen, struct device *dev)
1007 drvr->bus_if->drvr = drvr; 1007 drvr->bus_if->drvr = drvr;
1008 drvr->dev = dev; 1008 drvr->dev = dev;
1009 1009
1010 /* create device debugfs folder */
1011 brcmf_debugfs_attach(drvr);
1012
1010 /* Attach and link in the protocol */ 1013 /* Attach and link in the protocol */
1011 ret = brcmf_proto_attach(drvr); 1014 ret = brcmf_proto_attach(drvr);
1012 if (ret != 0) { 1015 if (ret != 0) {
@@ -1017,6 +1020,8 @@ int brcmf_attach(uint bus_hdrlen, struct device *dev)
1017 INIT_WORK(&drvr->setmacaddr_work, _brcmf_set_mac_address); 1020 INIT_WORK(&drvr->setmacaddr_work, _brcmf_set_mac_address);
1018 INIT_WORK(&drvr->multicast_work, _brcmf_set_multicast_list); 1021 INIT_WORK(&drvr->multicast_work, _brcmf_set_multicast_list);
1019 1022
1023 INIT_LIST_HEAD(&drvr->bus_if->dcmd_list);
1024
1020 return ret; 1025 return ret;
1021 1026
1022fail: 1027fail:
@@ -1123,6 +1128,7 @@ void brcmf_detach(struct device *dev)
1123 brcmf_proto_detach(drvr); 1128 brcmf_proto_detach(drvr);
1124 } 1129 }
1125 1130
1131 brcmf_debugfs_detach(drvr);
1126 bus_if->drvr = NULL; 1132 bus_if->drvr = NULL;
1127 kfree(drvr); 1133 kfree(drvr);
1128} 1134}
@@ -1192,6 +1198,8 @@ exit:
1192 1198
1193static void brcmf_driver_init(struct work_struct *work) 1199static void brcmf_driver_init(struct work_struct *work)
1194{ 1200{
1201 brcmf_debugfs_init();
1202
1195#ifdef CONFIG_BRCMFMAC_SDIO 1203#ifdef CONFIG_BRCMFMAC_SDIO
1196 brcmf_sdio_init(); 1204 brcmf_sdio_init();
1197#endif 1205#endif
@@ -1219,6 +1227,7 @@ static void __exit brcmfmac_module_exit(void)
1219#ifdef CONFIG_BRCMFMAC_USB 1227#ifdef CONFIG_BRCMFMAC_USB
1220 brcmf_usb_exit(); 1228 brcmf_usb_exit();
1221#endif 1229#endif
1230 brcmf_debugfs_exit();
1222} 1231}
1223 1232
1224module_init(brcmfmac_module_init); 1233module_init(brcmfmac_module_init);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
index 1dbf2be478c..472f2ef5c65 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
@@ -31,6 +31,8 @@
31#include <linux/firmware.h> 31#include <linux/firmware.h>
32#include <linux/module.h> 32#include <linux/module.h>
33#include <linux/bcma/bcma.h> 33#include <linux/bcma/bcma.h>
34#include <linux/debugfs.h>
35#include <linux/vmalloc.h>
34#include <asm/unaligned.h> 36#include <asm/unaligned.h>
35#include <defs.h> 37#include <defs.h>
36#include <brcmu_wifi.h> 38#include <brcmu_wifi.h>
@@ -48,6 +50,9 @@
48 50
49#define CBUF_LEN (128) 51#define CBUF_LEN (128)
50 52
53/* Device console log buffer state */
54#define CONSOLE_BUFFER_MAX 2024
55
51struct rte_log_le { 56struct rte_log_le {
52 __le32 buf; /* Can't be pointer on (64-bit) hosts */ 57 __le32 buf; /* Can't be pointer on (64-bit) hosts */
53 __le32 buf_size; 58 __le32 buf_size;
@@ -281,7 +286,7 @@ struct rte_console {
281 * Shared structure between dongle and the host. 286 * Shared structure between dongle and the host.
282 * The structure contains pointers to trap or assert information. 287 * The structure contains pointers to trap or assert information.
283 */ 288 */
284#define SDPCM_SHARED_VERSION 0x0002 289#define SDPCM_SHARED_VERSION 0x0003
285#define SDPCM_SHARED_VERSION_MASK 0x00FF 290#define SDPCM_SHARED_VERSION_MASK 0x00FF
286#define SDPCM_SHARED_ASSERT_BUILT 0x0100 291#define SDPCM_SHARED_ASSERT_BUILT 0x0100
287#define SDPCM_SHARED_ASSERT 0x0200 292#define SDPCM_SHARED_ASSERT 0x0200
@@ -428,6 +433,29 @@ struct brcmf_console {
428 u8 *buf; /* Log buffer (host copy) */ 433 u8 *buf; /* Log buffer (host copy) */
429 uint last; /* Last buffer read index */ 434 uint last; /* Last buffer read index */
430}; 435};
436
437struct brcmf_trap_info {
438 __le32 type;
439 __le32 epc;
440 __le32 cpsr;
441 __le32 spsr;
442 __le32 r0; /* a1 */
443 __le32 r1; /* a2 */
444 __le32 r2; /* a3 */
445 __le32 r3; /* a4 */
446 __le32 r4; /* v1 */
447 __le32 r5; /* v2 */
448 __le32 r6; /* v3 */
449 __le32 r7; /* v4 */
450 __le32 r8; /* v5 */
451 __le32 r9; /* sb/v6 */
452 __le32 r10; /* sl/v7 */
453 __le32 r11; /* fp/v8 */
454 __le32 r12; /* ip */
455 __le32 r13; /* sp */
456 __le32 r14; /* lr */
457 __le32 pc; /* r15 */
458};
431#endif /* DEBUG */ 459#endif /* DEBUG */
432 460
433struct sdpcm_shared { 461struct sdpcm_shared {
@@ -439,6 +467,7 @@ struct sdpcm_shared {
439 u32 console_addr; /* Address of struct rte_console */ 467 u32 console_addr; /* Address of struct rte_console */
440 u32 msgtrace_addr; 468 u32 msgtrace_addr;
441 u8 tag[32]; 469 u8 tag[32];
470 u32 brpt_addr;
442}; 471};
443 472
444struct sdpcm_shared_le { 473struct sdpcm_shared_le {
@@ -450,6 +479,7 @@ struct sdpcm_shared_le {
450 __le32 console_addr; /* Address of struct rte_console */ 479 __le32 console_addr; /* Address of struct rte_console */
451 __le32 msgtrace_addr; 480 __le32 msgtrace_addr;
452 u8 tag[32]; 481 u8 tag[32];
482 __le32 brpt_addr;
453}; 483};
454 484
455 485
@@ -502,12 +532,9 @@ struct brcmf_sdio {
502 bool intr; /* Use interrupts */ 532 bool intr; /* Use interrupts */
503 bool poll; /* Use polling */ 533 bool poll; /* Use polling */
504 bool ipend; /* Device interrupt is pending */ 534 bool ipend; /* Device interrupt is pending */
505 uint intrcount; /* Count of device interrupt callbacks */
506 uint lastintrs; /* Count as of last watchdog timer */
507 uint spurious; /* Count of spurious interrupts */ 535 uint spurious; /* Count of spurious interrupts */
508 uint pollrate; /* Ticks between device polls */ 536 uint pollrate; /* Ticks between device polls */
509 uint polltick; /* Tick counter */ 537 uint polltick; /* Tick counter */
510 uint pollcnt; /* Count of active polls */
511 538
512#ifdef DEBUG 539#ifdef DEBUG
513 uint console_interval; 540 uint console_interval;
@@ -515,8 +542,6 @@ struct brcmf_sdio {
515 uint console_addr; /* Console address from shared struct */ 542 uint console_addr; /* Console address from shared struct */
516#endif /* DEBUG */ 543#endif /* DEBUG */
517 544
518 uint regfails; /* Count of R_REG failures */
519
520 uint clkstate; /* State of sd and backplane clock(s) */ 545 uint clkstate; /* State of sd and backplane clock(s) */
521 bool activity; /* Activity flag for clock down */ 546 bool activity; /* Activity flag for clock down */
522 s32 idletime; /* Control for activity timeout */ 547 s32 idletime; /* Control for activity timeout */
@@ -531,33 +556,6 @@ struct brcmf_sdio {
531/* Field to decide if rx of control frames happen in rxbuf or lb-pool */ 556/* Field to decide if rx of control frames happen in rxbuf or lb-pool */
532 bool usebufpool; 557 bool usebufpool;
533 558
534 /* Some additional counters */
535 uint tx_sderrs; /* Count of tx attempts with sd errors */
536 uint fcqueued; /* Tx packets that got queued */
537 uint rxrtx; /* Count of rtx requests (NAK to dongle) */
538 uint rx_toolong; /* Receive frames too long to receive */
539 uint rxc_errors; /* SDIO errors when reading control frames */
540 uint rx_hdrfail; /* SDIO errors on header reads */
541 uint rx_badhdr; /* Bad received headers (roosync?) */
542 uint rx_badseq; /* Mismatched rx sequence number */
543 uint fc_rcvd; /* Number of flow-control events received */
544 uint fc_xoff; /* Number which turned on flow-control */
545 uint fc_xon; /* Number which turned off flow-control */
546 uint rxglomfail; /* Failed deglom attempts */
547 uint rxglomframes; /* Number of glom frames (superframes) */
548 uint rxglompkts; /* Number of packets from glom frames */
549 uint f2rxhdrs; /* Number of header reads */
550 uint f2rxdata; /* Number of frame data reads */
551 uint f2txdata; /* Number of f2 frame writes */
552 uint f1regdata; /* Number of f1 register accesses */
553 uint tickcnt; /* Number of watchdog been schedule */
554 unsigned long tx_ctlerrs; /* Err of sending ctrl frames */
555 unsigned long tx_ctlpkts; /* Ctrl frames sent to dongle */
556 unsigned long rx_ctlerrs; /* Err of processing rx ctrl frames */
557 unsigned long rx_ctlpkts; /* Ctrl frames processed from dongle */
558 unsigned long rx_readahead_cnt; /* Number of packets where header
559 * read-ahead was used. */
560
561 u8 *ctrl_frame_buf; 559 u8 *ctrl_frame_buf;
562 u32 ctrl_frame_len; 560 u32 ctrl_frame_len;
563 bool ctrl_frame_stat; 561 bool ctrl_frame_stat;
@@ -583,6 +581,7 @@ struct brcmf_sdio {
583 u32 fw_ptr; 581 u32 fw_ptr;
584 582
585 bool txoff; /* Transmit flow-controlled */ 583 bool txoff; /* Transmit flow-controlled */
584 struct brcmf_sdio_count sdcnt;
586}; 585};
587 586
588/* clkstate */ 587/* clkstate */
@@ -945,7 +944,7 @@ static u32 brcmf_sdbrcm_hostmail(struct brcmf_sdio *bus)
945 if (ret == 0) 944 if (ret == 0)
946 w_sdreg32(bus, SMB_INT_ACK, 945 w_sdreg32(bus, SMB_INT_ACK,
947 offsetof(struct sdpcmd_regs, tosbmailbox)); 946 offsetof(struct sdpcmd_regs, tosbmailbox));
948 bus->f1regdata += 2; 947 bus->sdcnt.f1regdata += 2;
949 948
950 /* Dongle recomposed rx frames, accept them again */ 949 /* Dongle recomposed rx frames, accept them again */
951 if (hmb_data & HMB_DATA_NAKHANDLED) { 950 if (hmb_data & HMB_DATA_NAKHANDLED) {
@@ -984,12 +983,12 @@ static u32 brcmf_sdbrcm_hostmail(struct brcmf_sdio *bus)
984 HMB_DATA_FCDATA_SHIFT; 983 HMB_DATA_FCDATA_SHIFT;
985 984
986 if (fcbits & ~bus->flowcontrol) 985 if (fcbits & ~bus->flowcontrol)
987 bus->fc_xoff++; 986 bus->sdcnt.fc_xoff++;
988 987
989 if (bus->flowcontrol & ~fcbits) 988 if (bus->flowcontrol & ~fcbits)
990 bus->fc_xon++; 989 bus->sdcnt.fc_xon++;
991 990
992 bus->fc_rcvd++; 991 bus->sdcnt.fc_rcvd++;
993 bus->flowcontrol = fcbits; 992 bus->flowcontrol = fcbits;
994 } 993 }
995 994
@@ -1021,7 +1020,7 @@ static void brcmf_sdbrcm_rxfail(struct brcmf_sdio *bus, bool abort, bool rtx)
1021 1020
1022 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL, 1021 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL,
1023 SFC_RF_TERM, &err); 1022 SFC_RF_TERM, &err);
1024 bus->f1regdata++; 1023 bus->sdcnt.f1regdata++;
1025 1024
1026 /* Wait until the packet has been flushed (device/FIFO stable) */ 1025 /* Wait until the packet has been flushed (device/FIFO stable) */
1027 for (lastrbc = retries = 0xffff; retries > 0; retries--) { 1026 for (lastrbc = retries = 0xffff; retries > 0; retries--) {
@@ -1029,7 +1028,7 @@ static void brcmf_sdbrcm_rxfail(struct brcmf_sdio *bus, bool abort, bool rtx)
1029 SBSDIO_FUNC1_RFRAMEBCHI, &err); 1028 SBSDIO_FUNC1_RFRAMEBCHI, &err);
1030 lo = brcmf_sdio_regrb(bus->sdiodev, 1029 lo = brcmf_sdio_regrb(bus->sdiodev,
1031 SBSDIO_FUNC1_RFRAMEBCLO, &err); 1030 SBSDIO_FUNC1_RFRAMEBCLO, &err);
1032 bus->f1regdata += 2; 1031 bus->sdcnt.f1regdata += 2;
1033 1032
1034 if ((hi == 0) && (lo == 0)) 1033 if ((hi == 0) && (lo == 0))
1035 break; 1034 break;
@@ -1047,11 +1046,11 @@ static void brcmf_sdbrcm_rxfail(struct brcmf_sdio *bus, bool abort, bool rtx)
1047 brcmf_dbg(INFO, "flush took %d iterations\n", 0xffff - retries); 1046 brcmf_dbg(INFO, "flush took %d iterations\n", 0xffff - retries);
1048 1047
1049 if (rtx) { 1048 if (rtx) {
1050 bus->rxrtx++; 1049 bus->sdcnt.rxrtx++;
1051 err = w_sdreg32(bus, SMB_NAK, 1050 err = w_sdreg32(bus, SMB_NAK,
1052 offsetof(struct sdpcmd_regs, tosbmailbox)); 1051 offsetof(struct sdpcmd_regs, tosbmailbox));
1053 1052
1054 bus->f1regdata++; 1053 bus->sdcnt.f1regdata++;
1055 if (err == 0) 1054 if (err == 0)
1056 bus->rxskip = true; 1055 bus->rxskip = true;
1057 } 1056 }
@@ -1243,7 +1242,7 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
1243 dlen); 1242 dlen);
1244 errcode = -1; 1243 errcode = -1;
1245 } 1244 }
1246 bus->f2rxdata++; 1245 bus->sdcnt.f2rxdata++;
1247 1246
1248 /* On failure, kill the superframe, allow a couple retries */ 1247 /* On failure, kill the superframe, allow a couple retries */
1249 if (errcode < 0) { 1248 if (errcode < 0) {
@@ -1256,7 +1255,7 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
1256 } else { 1255 } else {
1257 bus->glomerr = 0; 1256 bus->glomerr = 0;
1258 brcmf_sdbrcm_rxfail(bus, true, false); 1257 brcmf_sdbrcm_rxfail(bus, true, false);
1259 bus->rxglomfail++; 1258 bus->sdcnt.rxglomfail++;
1260 brcmf_sdbrcm_free_glom(bus); 1259 brcmf_sdbrcm_free_glom(bus);
1261 } 1260 }
1262 return 0; 1261 return 0;
@@ -1312,7 +1311,7 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
1312 if (rxseq != seq) { 1311 if (rxseq != seq) {
1313 brcmf_dbg(INFO, "(superframe) rx_seq %d, expected %d\n", 1312 brcmf_dbg(INFO, "(superframe) rx_seq %d, expected %d\n",
1314 seq, rxseq); 1313 seq, rxseq);
1315 bus->rx_badseq++; 1314 bus->sdcnt.rx_badseq++;
1316 rxseq = seq; 1315 rxseq = seq;
1317 } 1316 }
1318 1317
@@ -1376,7 +1375,7 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
1376 } else { 1375 } else {
1377 bus->glomerr = 0; 1376 bus->glomerr = 0;
1378 brcmf_sdbrcm_rxfail(bus, true, false); 1377 brcmf_sdbrcm_rxfail(bus, true, false);
1379 bus->rxglomfail++; 1378 bus->sdcnt.rxglomfail++;
1380 brcmf_sdbrcm_free_glom(bus); 1379 brcmf_sdbrcm_free_glom(bus);
1381 } 1380 }
1382 bus->nextlen = 0; 1381 bus->nextlen = 0;
@@ -1402,7 +1401,7 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
1402 if (rxseq != seq) { 1401 if (rxseq != seq) {
1403 brcmf_dbg(GLOM, "rx_seq %d, expected %d\n", 1402 brcmf_dbg(GLOM, "rx_seq %d, expected %d\n",
1404 seq, rxseq); 1403 seq, rxseq);
1405 bus->rx_badseq++; 1404 bus->sdcnt.rx_badseq++;
1406 rxseq = seq; 1405 rxseq = seq;
1407 } 1406 }
1408 rxseq++; 1407 rxseq++;
@@ -1441,8 +1440,8 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
1441 down(&bus->sdsem); 1440 down(&bus->sdsem);
1442 } 1441 }
1443 1442
1444 bus->rxglomframes++; 1443 bus->sdcnt.rxglomframes++;
1445 bus->rxglompkts += bus->glom.qlen; 1444 bus->sdcnt.rxglompkts += bus->glom.qlen;
1446 } 1445 }
1447 return num; 1446 return num;
1448} 1447}
@@ -1526,7 +1525,7 @@ brcmf_sdbrcm_read_control(struct brcmf_sdio *bus, u8 *hdr, uint len, uint doff)
1526 brcmf_dbg(ERROR, "%d-byte ctl frame (%d-byte ctl data) exceeds %d-byte limit\n", 1525 brcmf_dbg(ERROR, "%d-byte ctl frame (%d-byte ctl data) exceeds %d-byte limit\n",
1527 len, len - doff, bus->sdiodev->bus_if->maxctl); 1526 len, len - doff, bus->sdiodev->bus_if->maxctl);
1528 bus->sdiodev->bus_if->dstats.rx_errors++; 1527 bus->sdiodev->bus_if->dstats.rx_errors++;
1529 bus->rx_toolong++; 1528 bus->sdcnt.rx_toolong++;
1530 brcmf_sdbrcm_rxfail(bus, false, false); 1529 brcmf_sdbrcm_rxfail(bus, false, false);
1531 goto done; 1530 goto done;
1532 } 1531 }
@@ -1536,13 +1535,13 @@ brcmf_sdbrcm_read_control(struct brcmf_sdio *bus, u8 *hdr, uint len, uint doff)
1536 bus->sdiodev->sbwad, 1535 bus->sdiodev->sbwad,
1537 SDIO_FUNC_2, 1536 SDIO_FUNC_2,
1538 F2SYNC, (bus->rxctl + BRCMF_FIRSTREAD), rdlen); 1537 F2SYNC, (bus->rxctl + BRCMF_FIRSTREAD), rdlen);
1539 bus->f2rxdata++; 1538 bus->sdcnt.f2rxdata++;
1540 1539
1541 /* Control frame failures need retransmission */ 1540 /* Control frame failures need retransmission */
1542 if (sdret < 0) { 1541 if (sdret < 0) {
1543 brcmf_dbg(ERROR, "read %d control bytes failed: %d\n", 1542 brcmf_dbg(ERROR, "read %d control bytes failed: %d\n",
1544 rdlen, sdret); 1543 rdlen, sdret);
1545 bus->rxc_errors++; 1544 bus->sdcnt.rxc_errors++;
1546 brcmf_sdbrcm_rxfail(bus, true, true); 1545 brcmf_sdbrcm_rxfail(bus, true, true);
1547 goto done; 1546 goto done;
1548 } 1547 }
@@ -1589,7 +1588,7 @@ brcmf_alloc_pkt_and_read(struct brcmf_sdio *bus, u16 rdlen,
1589 /* Read the entire frame */ 1588 /* Read the entire frame */
1590 sdret = brcmf_sdcard_recv_pkt(bus->sdiodev, bus->sdiodev->sbwad, 1589 sdret = brcmf_sdcard_recv_pkt(bus->sdiodev, bus->sdiodev->sbwad,
1591 SDIO_FUNC_2, F2SYNC, *pkt); 1590 SDIO_FUNC_2, F2SYNC, *pkt);
1592 bus->f2rxdata++; 1591 bus->sdcnt.f2rxdata++;
1593 1592
1594 if (sdret < 0) { 1593 if (sdret < 0) {
1595 brcmf_dbg(ERROR, "(nextlen): read %d bytes failed: %d\n", 1594 brcmf_dbg(ERROR, "(nextlen): read %d bytes failed: %d\n",
@@ -1630,7 +1629,7 @@ brcmf_check_rxbuf(struct brcmf_sdio *bus, struct sk_buff *pkt, u8 *rxbuf,
1630 if ((u16)~(*len ^ check)) { 1629 if ((u16)~(*len ^ check)) {
1631 brcmf_dbg(ERROR, "(nextlen): HW hdr error: nextlen/len/check 0x%04x/0x%04x/0x%04x\n", 1630 brcmf_dbg(ERROR, "(nextlen): HW hdr error: nextlen/len/check 0x%04x/0x%04x/0x%04x\n",
1632 nextlen, *len, check); 1631 nextlen, *len, check);
1633 bus->rx_badhdr++; 1632 bus->sdcnt.rx_badhdr++;
1634 brcmf_sdbrcm_rxfail(bus, false, false); 1633 brcmf_sdbrcm_rxfail(bus, false, false);
1635 goto fail; 1634 goto fail;
1636 } 1635 }
@@ -1746,7 +1745,7 @@ brcmf_sdbrcm_readframes(struct brcmf_sdio *bus, uint maxframes, bool *finished)
1746 bus->nextlen = 0; 1745 bus->nextlen = 0;
1747 } 1746 }
1748 1747
1749 bus->rx_readahead_cnt++; 1748 bus->sdcnt.rx_readahead_cnt++;
1750 1749
1751 /* Handle Flow Control */ 1750 /* Handle Flow Control */
1752 fcbits = SDPCM_FCMASK_VALUE( 1751 fcbits = SDPCM_FCMASK_VALUE(
@@ -1754,12 +1753,12 @@ brcmf_sdbrcm_readframes(struct brcmf_sdio *bus, uint maxframes, bool *finished)
1754 1753
1755 if (bus->flowcontrol != fcbits) { 1754 if (bus->flowcontrol != fcbits) {
1756 if (~bus->flowcontrol & fcbits) 1755 if (~bus->flowcontrol & fcbits)
1757 bus->fc_xoff++; 1756 bus->sdcnt.fc_xoff++;
1758 1757
1759 if (bus->flowcontrol & ~fcbits) 1758 if (bus->flowcontrol & ~fcbits)
1760 bus->fc_xon++; 1759 bus->sdcnt.fc_xon++;
1761 1760
1762 bus->fc_rcvd++; 1761 bus->sdcnt.fc_rcvd++;
1763 bus->flowcontrol = fcbits; 1762 bus->flowcontrol = fcbits;
1764 } 1763 }
1765 1764
@@ -1767,7 +1766,7 @@ brcmf_sdbrcm_readframes(struct brcmf_sdio *bus, uint maxframes, bool *finished)
1767 if (rxseq != seq) { 1766 if (rxseq != seq) {
1768 brcmf_dbg(INFO, "(nextlen): rx_seq %d, expected %d\n", 1767 brcmf_dbg(INFO, "(nextlen): rx_seq %d, expected %d\n",
1769 seq, rxseq); 1768 seq, rxseq);
1770 bus->rx_badseq++; 1769 bus->sdcnt.rx_badseq++;
1771 rxseq = seq; 1770 rxseq = seq;
1772 } 1771 }
1773 1772
@@ -1814,11 +1813,11 @@ brcmf_sdbrcm_readframes(struct brcmf_sdio *bus, uint maxframes, bool *finished)
1814 sdret = brcmf_sdcard_recv_buf(bus->sdiodev, bus->sdiodev->sbwad, 1813 sdret = brcmf_sdcard_recv_buf(bus->sdiodev, bus->sdiodev->sbwad,
1815 SDIO_FUNC_2, F2SYNC, bus->rxhdr, 1814 SDIO_FUNC_2, F2SYNC, bus->rxhdr,
1816 BRCMF_FIRSTREAD); 1815 BRCMF_FIRSTREAD);
1817 bus->f2rxhdrs++; 1816 bus->sdcnt.f2rxhdrs++;
1818 1817
1819 if (sdret < 0) { 1818 if (sdret < 0) {
1820 brcmf_dbg(ERROR, "RXHEADER FAILED: %d\n", sdret); 1819 brcmf_dbg(ERROR, "RXHEADER FAILED: %d\n", sdret);
1821 bus->rx_hdrfail++; 1820 bus->sdcnt.rx_hdrfail++;
1822 brcmf_sdbrcm_rxfail(bus, true, true); 1821 brcmf_sdbrcm_rxfail(bus, true, true);
1823 continue; 1822 continue;
1824 } 1823 }
@@ -1840,7 +1839,7 @@ brcmf_sdbrcm_readframes(struct brcmf_sdio *bus, uint maxframes, bool *finished)
1840 if ((u16) ~(len ^ check)) { 1839 if ((u16) ~(len ^ check)) {
1841 brcmf_dbg(ERROR, "HW hdr err: len/check 0x%04x/0x%04x\n", 1840 brcmf_dbg(ERROR, "HW hdr err: len/check 0x%04x/0x%04x\n",
1842 len, check); 1841 len, check);
1843 bus->rx_badhdr++; 1842 bus->sdcnt.rx_badhdr++;
1844 brcmf_sdbrcm_rxfail(bus, false, false); 1843 brcmf_sdbrcm_rxfail(bus, false, false);
1845 continue; 1844 continue;
1846 } 1845 }
@@ -1861,7 +1860,7 @@ brcmf_sdbrcm_readframes(struct brcmf_sdio *bus, uint maxframes, bool *finished)
1861 if ((doff < SDPCM_HDRLEN) || (doff > len)) { 1860 if ((doff < SDPCM_HDRLEN) || (doff > len)) {
1862 brcmf_dbg(ERROR, "Bad data offset %d: HW len %d, min %d seq %d\n", 1861 brcmf_dbg(ERROR, "Bad data offset %d: HW len %d, min %d seq %d\n",
1863 doff, len, SDPCM_HDRLEN, seq); 1862 doff, len, SDPCM_HDRLEN, seq);
1864 bus->rx_badhdr++; 1863 bus->sdcnt.rx_badhdr++;
1865 brcmf_sdbrcm_rxfail(bus, false, false); 1864 brcmf_sdbrcm_rxfail(bus, false, false);
1866 continue; 1865 continue;
1867 } 1866 }
@@ -1880,19 +1879,19 @@ brcmf_sdbrcm_readframes(struct brcmf_sdio *bus, uint maxframes, bool *finished)
1880 1879
1881 if (bus->flowcontrol != fcbits) { 1880 if (bus->flowcontrol != fcbits) {
1882 if (~bus->flowcontrol & fcbits) 1881 if (~bus->flowcontrol & fcbits)
1883 bus->fc_xoff++; 1882 bus->sdcnt.fc_xoff++;
1884 1883
1885 if (bus->flowcontrol & ~fcbits) 1884 if (bus->flowcontrol & ~fcbits)
1886 bus->fc_xon++; 1885 bus->sdcnt.fc_xon++;
1887 1886
1888 bus->fc_rcvd++; 1887 bus->sdcnt.fc_rcvd++;
1889 bus->flowcontrol = fcbits; 1888 bus->flowcontrol = fcbits;
1890 } 1889 }
1891 1890
1892 /* Check and update sequence number */ 1891 /* Check and update sequence number */
1893 if (rxseq != seq) { 1892 if (rxseq != seq) {
1894 brcmf_dbg(INFO, "rx_seq %d, expected %d\n", seq, rxseq); 1893 brcmf_dbg(INFO, "rx_seq %d, expected %d\n", seq, rxseq);
1895 bus->rx_badseq++; 1894 bus->sdcnt.rx_badseq++;
1896 rxseq = seq; 1895 rxseq = seq;
1897 } 1896 }
1898 1897
@@ -1937,7 +1936,7 @@ brcmf_sdbrcm_readframes(struct brcmf_sdio *bus, uint maxframes, bool *finished)
1937 brcmf_dbg(ERROR, "too long: len %d rdlen %d\n", 1936 brcmf_dbg(ERROR, "too long: len %d rdlen %d\n",
1938 len, rdlen); 1937 len, rdlen);
1939 bus->sdiodev->bus_if->dstats.rx_errors++; 1938 bus->sdiodev->bus_if->dstats.rx_errors++;
1940 bus->rx_toolong++; 1939 bus->sdcnt.rx_toolong++;
1941 brcmf_sdbrcm_rxfail(bus, false, false); 1940 brcmf_sdbrcm_rxfail(bus, false, false);
1942 continue; 1941 continue;
1943 } 1942 }
@@ -1960,7 +1959,7 @@ brcmf_sdbrcm_readframes(struct brcmf_sdio *bus, uint maxframes, bool *finished)
1960 /* Read the remaining frame data */ 1959 /* Read the remaining frame data */
1961 sdret = brcmf_sdcard_recv_pkt(bus->sdiodev, bus->sdiodev->sbwad, 1960 sdret = brcmf_sdcard_recv_pkt(bus->sdiodev, bus->sdiodev->sbwad,
1962 SDIO_FUNC_2, F2SYNC, pkt); 1961 SDIO_FUNC_2, F2SYNC, pkt);
1963 bus->f2rxdata++; 1962 bus->sdcnt.f2rxdata++;
1964 1963
1965 if (sdret < 0) { 1964 if (sdret < 0) {
1966 brcmf_dbg(ERROR, "read %d %s bytes failed: %d\n", rdlen, 1965 brcmf_dbg(ERROR, "read %d %s bytes failed: %d\n", rdlen,
@@ -2147,18 +2146,18 @@ static int brcmf_sdbrcm_txpkt(struct brcmf_sdio *bus, struct sk_buff *pkt,
2147 2146
2148 ret = brcmf_sdcard_send_pkt(bus->sdiodev, bus->sdiodev->sbwad, 2147 ret = brcmf_sdcard_send_pkt(bus->sdiodev, bus->sdiodev->sbwad,
2149 SDIO_FUNC_2, F2SYNC, pkt); 2148 SDIO_FUNC_2, F2SYNC, pkt);
2150 bus->f2txdata++; 2149 bus->sdcnt.f2txdata++;
2151 2150
2152 if (ret < 0) { 2151 if (ret < 0) {
2153 /* On failure, abort the command and terminate the frame */ 2152 /* On failure, abort the command and terminate the frame */
2154 brcmf_dbg(INFO, "sdio error %d, abort command and terminate frame\n", 2153 brcmf_dbg(INFO, "sdio error %d, abort command and terminate frame\n",
2155 ret); 2154 ret);
2156 bus->tx_sderrs++; 2155 bus->sdcnt.tx_sderrs++;
2157 2156
2158 brcmf_sdcard_abort(bus->sdiodev, SDIO_FUNC_2); 2157 brcmf_sdcard_abort(bus->sdiodev, SDIO_FUNC_2);
2159 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL, 2158 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL,
2160 SFC_WF_TERM, NULL); 2159 SFC_WF_TERM, NULL);
2161 bus->f1regdata++; 2160 bus->sdcnt.f1regdata++;
2162 2161
2163 for (i = 0; i < 3; i++) { 2162 for (i = 0; i < 3; i++) {
2164 u8 hi, lo; 2163 u8 hi, lo;
@@ -2166,7 +2165,7 @@ static int brcmf_sdbrcm_txpkt(struct brcmf_sdio *bus, struct sk_buff *pkt,
2166 SBSDIO_FUNC1_WFRAMEBCHI, NULL); 2165 SBSDIO_FUNC1_WFRAMEBCHI, NULL);
2167 lo = brcmf_sdio_regrb(bus->sdiodev, 2166 lo = brcmf_sdio_regrb(bus->sdiodev,
2168 SBSDIO_FUNC1_WFRAMEBCLO, NULL); 2167 SBSDIO_FUNC1_WFRAMEBCLO, NULL);
2169 bus->f1regdata += 2; 2168 bus->sdcnt.f1regdata += 2;
2170 if ((hi == 0) && (lo == 0)) 2169 if ((hi == 0) && (lo == 0))
2171 break; 2170 break;
2172 } 2171 }
@@ -2224,7 +2223,7 @@ static uint brcmf_sdbrcm_sendfromq(struct brcmf_sdio *bus, uint maxframes)
2224 ret = r_sdreg32(bus, &intstatus, 2223 ret = r_sdreg32(bus, &intstatus,
2225 offsetof(struct sdpcmd_regs, 2224 offsetof(struct sdpcmd_regs,
2226 intstatus)); 2225 intstatus));
2227 bus->f2txdata++; 2226 bus->sdcnt.f2txdata++;
2228 if (ret != 0) 2227 if (ret != 0)
2229 break; 2228 break;
2230 if (intstatus & bus->hostintmask) 2229 if (intstatus & bus->hostintmask)
@@ -2417,7 +2416,7 @@ static bool brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
2417 bus->ipend = false; 2416 bus->ipend = false;
2418 err = r_sdreg32(bus, &newstatus, 2417 err = r_sdreg32(bus, &newstatus,
2419 offsetof(struct sdpcmd_regs, intstatus)); 2418 offsetof(struct sdpcmd_regs, intstatus));
2420 bus->f1regdata++; 2419 bus->sdcnt.f1regdata++;
2421 if (err != 0) 2420 if (err != 0)
2422 newstatus = 0; 2421 newstatus = 0;
2423 newstatus &= bus->hostintmask; 2422 newstatus &= bus->hostintmask;
@@ -2426,7 +2425,7 @@ static bool brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
2426 err = w_sdreg32(bus, newstatus, 2425 err = w_sdreg32(bus, newstatus,
2427 offsetof(struct sdpcmd_regs, 2426 offsetof(struct sdpcmd_regs,
2428 intstatus)); 2427 intstatus));
2429 bus->f1regdata++; 2428 bus->sdcnt.f1regdata++;
2430 } 2429 }
2431 } 2430 }
2432 2431
@@ -2445,7 +2444,7 @@ static bool brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
2445 2444
2446 err = r_sdreg32(bus, &newstatus, 2445 err = r_sdreg32(bus, &newstatus,
2447 offsetof(struct sdpcmd_regs, intstatus)); 2446 offsetof(struct sdpcmd_regs, intstatus));
2448 bus->f1regdata += 2; 2447 bus->sdcnt.f1regdata += 2;
2449 bus->fcstate = 2448 bus->fcstate =
2450 !!(newstatus & (I_HMB_FC_STATE | I_HMB_FC_CHANGE)); 2449 !!(newstatus & (I_HMB_FC_STATE | I_HMB_FC_CHANGE));
2451 intstatus |= (newstatus & bus->hostintmask); 2450 intstatus |= (newstatus & bus->hostintmask);
@@ -2502,7 +2501,7 @@ clkwait:
2502 int ret, i; 2501 int ret, i;
2503 2502
2504 ret = brcmf_sdcard_send_buf(bus->sdiodev, bus->sdiodev->sbwad, 2503 ret = brcmf_sdcard_send_buf(bus->sdiodev, bus->sdiodev->sbwad,
2505 SDIO_FUNC_2, F2SYNC, (u8 *) bus->ctrl_frame_buf, 2504 SDIO_FUNC_2, F2SYNC, bus->ctrl_frame_buf,
2506 (u32) bus->ctrl_frame_len); 2505 (u32) bus->ctrl_frame_len);
2507 2506
2508 if (ret < 0) { 2507 if (ret < 0) {
@@ -2510,13 +2509,13 @@ clkwait:
2510 terminate the frame */ 2509 terminate the frame */
2511 brcmf_dbg(INFO, "sdio error %d, abort command and terminate frame\n", 2510 brcmf_dbg(INFO, "sdio error %d, abort command and terminate frame\n",
2512 ret); 2511 ret);
2513 bus->tx_sderrs++; 2512 bus->sdcnt.tx_sderrs++;
2514 2513
2515 brcmf_sdcard_abort(bus->sdiodev, SDIO_FUNC_2); 2514 brcmf_sdcard_abort(bus->sdiodev, SDIO_FUNC_2);
2516 2515
2517 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL, 2516 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL,
2518 SFC_WF_TERM, &err); 2517 SFC_WF_TERM, &err);
2519 bus->f1regdata++; 2518 bus->sdcnt.f1regdata++;
2520 2519
2521 for (i = 0; i < 3; i++) { 2520 for (i = 0; i < 3; i++) {
2522 u8 hi, lo; 2521 u8 hi, lo;
@@ -2526,7 +2525,7 @@ clkwait:
2526 lo = brcmf_sdio_regrb(bus->sdiodev, 2525 lo = brcmf_sdio_regrb(bus->sdiodev,
2527 SBSDIO_FUNC1_WFRAMEBCLO, 2526 SBSDIO_FUNC1_WFRAMEBCLO,
2528 &err); 2527 &err);
2529 bus->f1regdata += 2; 2528 bus->sdcnt.f1regdata += 2;
2530 if ((hi == 0) && (lo == 0)) 2529 if ((hi == 0) && (lo == 0))
2531 break; 2530 break;
2532 } 2531 }
@@ -2657,7 +2656,7 @@ static int brcmf_sdbrcm_bus_txdata(struct device *dev, struct sk_buff *pkt)
2657 /* Check for existing queue, current flow-control, 2656 /* Check for existing queue, current flow-control,
2658 pending event, or pending clock */ 2657 pending event, or pending clock */
2659 brcmf_dbg(TRACE, "deferring pktq len %d\n", pktq_len(&bus->txq)); 2658 brcmf_dbg(TRACE, "deferring pktq len %d\n", pktq_len(&bus->txq));
2660 bus->fcqueued++; 2659 bus->sdcnt.fcqueued++;
2661 2660
2662 /* Priority based enq */ 2661 /* Priority based enq */
2663 spin_lock_bh(&bus->txqlock); 2662 spin_lock_bh(&bus->txqlock);
@@ -2845,13 +2844,13 @@ static int brcmf_tx_frame(struct brcmf_sdio *bus, u8 *frame, u16 len)
2845 /* On failure, abort the command and terminate the frame */ 2844 /* On failure, abort the command and terminate the frame */
2846 brcmf_dbg(INFO, "sdio error %d, abort command and terminate frame\n", 2845 brcmf_dbg(INFO, "sdio error %d, abort command and terminate frame\n",
2847 ret); 2846 ret);
2848 bus->tx_sderrs++; 2847 bus->sdcnt.tx_sderrs++;
2849 2848
2850 brcmf_sdcard_abort(bus->sdiodev, SDIO_FUNC_2); 2849 brcmf_sdcard_abort(bus->sdiodev, SDIO_FUNC_2);
2851 2850
2852 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL, 2851 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL,
2853 SFC_WF_TERM, NULL); 2852 SFC_WF_TERM, NULL);
2854 bus->f1regdata++; 2853 bus->sdcnt.f1regdata++;
2855 2854
2856 for (i = 0; i < 3; i++) { 2855 for (i = 0; i < 3; i++) {
2857 u8 hi, lo; 2856 u8 hi, lo;
@@ -2859,7 +2858,7 @@ static int brcmf_tx_frame(struct brcmf_sdio *bus, u8 *frame, u16 len)
2859 SBSDIO_FUNC1_WFRAMEBCHI, NULL); 2858 SBSDIO_FUNC1_WFRAMEBCHI, NULL);
2860 lo = brcmf_sdio_regrb(bus->sdiodev, 2859 lo = brcmf_sdio_regrb(bus->sdiodev,
2861 SBSDIO_FUNC1_WFRAMEBCLO, NULL); 2860 SBSDIO_FUNC1_WFRAMEBCLO, NULL);
2862 bus->f1regdata += 2; 2861 bus->sdcnt.f1regdata += 2;
2863 if (hi == 0 && lo == 0) 2862 if (hi == 0 && lo == 0)
2864 break; 2863 break;
2865 } 2864 }
@@ -2976,13 +2975,324 @@ brcmf_sdbrcm_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
2976 up(&bus->sdsem); 2975 up(&bus->sdsem);
2977 2976
2978 if (ret) 2977 if (ret)
2979 bus->tx_ctlerrs++; 2978 bus->sdcnt.tx_ctlerrs++;
2980 else 2979 else
2981 bus->tx_ctlpkts++; 2980 bus->sdcnt.tx_ctlpkts++;
2982 2981
2983 return ret ? -EIO : 0; 2982 return ret ? -EIO : 0;
2984} 2983}
2985 2984
2985#ifdef DEBUG
2986static inline bool brcmf_sdio_valid_shared_address(u32 addr)
2987{
2988 return !(addr == 0 || ((~addr >> 16) & 0xffff) == (addr & 0xffff));
2989}
2990
2991static int brcmf_sdio_readshared(struct brcmf_sdio *bus,
2992 struct sdpcm_shared *sh)
2993{
2994 u32 addr;
2995 int rv;
2996 u32 shaddr = 0;
2997 struct sdpcm_shared_le sh_le;
2998 __le32 addr_le;
2999
3000 shaddr = bus->ramsize - 4;
3001
3002 /*
3003 * Read last word in socram to determine
3004 * address of sdpcm_shared structure
3005 */
3006 rv = brcmf_sdbrcm_membytes(bus, false, shaddr,
3007 (u8 *)&addr_le, 4);
3008 if (rv < 0)
3009 return rv;
3010
3011 addr = le32_to_cpu(addr_le);
3012
3013 brcmf_dbg(INFO, "sdpcm_shared address 0x%08X\n", addr);
3014
3015 /*
3016 * Check if addr is valid.
3017 * NVRAM length at the end of memory should have been overwritten.
3018 */
3019 if (!brcmf_sdio_valid_shared_address(addr)) {
3020 brcmf_dbg(ERROR, "invalid sdpcm_shared address 0x%08X\n",
3021 addr);
3022 return -EINVAL;
3023 }
3024
3025 /* Read hndrte_shared structure */
3026 rv = brcmf_sdbrcm_membytes(bus, false, addr, (u8 *)&sh_le,
3027 sizeof(struct sdpcm_shared_le));
3028 if (rv < 0)
3029 return rv;
3030
3031 /* Endianness */
3032 sh->flags = le32_to_cpu(sh_le.flags);
3033 sh->trap_addr = le32_to_cpu(sh_le.trap_addr);
3034 sh->assert_exp_addr = le32_to_cpu(sh_le.assert_exp_addr);
3035 sh->assert_file_addr = le32_to_cpu(sh_le.assert_file_addr);
3036 sh->assert_line = le32_to_cpu(sh_le.assert_line);
3037 sh->console_addr = le32_to_cpu(sh_le.console_addr);
3038 sh->msgtrace_addr = le32_to_cpu(sh_le.msgtrace_addr);
3039
3040 if ((sh->flags & SDPCM_SHARED_VERSION_MASK) != SDPCM_SHARED_VERSION) {
3041 brcmf_dbg(ERROR,
3042 "sdpcm_shared version mismatch: dhd %d dongle %d\n",
3043 SDPCM_SHARED_VERSION,
3044 sh->flags & SDPCM_SHARED_VERSION_MASK);
3045 return -EPROTO;
3046 }
3047
3048 return 0;
3049}
3050
3051static int brcmf_sdio_dump_console(struct brcmf_sdio *bus,
3052 struct sdpcm_shared *sh, char __user *data,
3053 size_t count)
3054{
3055 u32 addr, console_ptr, console_size, console_index;
3056 char *conbuf = NULL;
3057 __le32 sh_val;
3058 int rv;
3059 loff_t pos = 0;
3060 int nbytes = 0;
3061
3062 /* obtain console information from device memory */
3063 addr = sh->console_addr + offsetof(struct rte_console, log_le);
3064 rv = brcmf_sdbrcm_membytes(bus, false, addr,
3065 (u8 *)&sh_val, sizeof(u32));
3066 if (rv < 0)
3067 return rv;
3068 console_ptr = le32_to_cpu(sh_val);
3069
3070 addr = sh->console_addr + offsetof(struct rte_console, log_le.buf_size);
3071 rv = brcmf_sdbrcm_membytes(bus, false, addr,
3072 (u8 *)&sh_val, sizeof(u32));
3073 if (rv < 0)
3074 return rv;
3075 console_size = le32_to_cpu(sh_val);
3076
3077 addr = sh->console_addr + offsetof(struct rte_console, log_le.idx);
3078 rv = brcmf_sdbrcm_membytes(bus, false, addr,
3079 (u8 *)&sh_val, sizeof(u32));
3080 if (rv < 0)
3081 return rv;
3082 console_index = le32_to_cpu(sh_val);
3083
3084 /* allocate buffer for console data */
3085 if (console_size <= CONSOLE_BUFFER_MAX)
3086 conbuf = vzalloc(console_size+1);
3087
3088 if (!conbuf)
3089 return -ENOMEM;
3090
3091 /* obtain the console data from device */
3092 conbuf[console_size] = '\0';
3093 rv = brcmf_sdbrcm_membytes(bus, false, console_ptr, (u8 *)conbuf,
3094 console_size);
3095 if (rv < 0)
3096 goto done;
3097
3098 rv = simple_read_from_buffer(data, count, &pos,
3099 conbuf + console_index,
3100 console_size - console_index);
3101 if (rv < 0)
3102 goto done;
3103
3104 nbytes = rv;
3105 if (console_index > 0) {
3106 pos = 0;
3107 rv = simple_read_from_buffer(data+nbytes, count, &pos,
3108 conbuf, console_index - 1);
3109 if (rv < 0)
3110 goto done;
3111 rv += nbytes;
3112 }
3113done:
3114 vfree(conbuf);
3115 return rv;
3116}
3117
3118static int brcmf_sdio_trap_info(struct brcmf_sdio *bus, struct sdpcm_shared *sh,
3119 char __user *data, size_t count)
3120{
3121 int error, res;
3122 char buf[350];
3123 struct brcmf_trap_info tr;
3124 int nbytes;
3125 loff_t pos = 0;
3126
3127 if ((sh->flags & SDPCM_SHARED_TRAP) == 0)
3128 return 0;
3129
3130 error = brcmf_sdbrcm_membytes(bus, false, sh->trap_addr, (u8 *)&tr,
3131 sizeof(struct brcmf_trap_info));
3132 if (error < 0)
3133 return error;
3134
3135 nbytes = brcmf_sdio_dump_console(bus, sh, data, count);
3136 if (nbytes < 0)
3137 return nbytes;
3138
3139 res = scnprintf(buf, sizeof(buf),
3140 "dongle trap info: type 0x%x @ epc 0x%08x\n"
3141 " cpsr 0x%08x spsr 0x%08x sp 0x%08x\n"
3142 " lr 0x%08x pc 0x%08x offset 0x%x\n"
3143 " r0 0x%08x r1 0x%08x r2 0x%08x r3 0x%08x\n"
3144 " r4 0x%08x r5 0x%08x r6 0x%08x r7 0x%08x\n",
3145 le32_to_cpu(tr.type), le32_to_cpu(tr.epc),
3146 le32_to_cpu(tr.cpsr), le32_to_cpu(tr.spsr),
3147 le32_to_cpu(tr.r13), le32_to_cpu(tr.r14),
3148 le32_to_cpu(tr.pc), sh->trap_addr,
3149 le32_to_cpu(tr.r0), le32_to_cpu(tr.r1),
3150 le32_to_cpu(tr.r2), le32_to_cpu(tr.r3),
3151 le32_to_cpu(tr.r4), le32_to_cpu(tr.r5),
3152 le32_to_cpu(tr.r6), le32_to_cpu(tr.r7));
3153
3154 error = simple_read_from_buffer(data+nbytes, count, &pos, buf, res);
3155 if (error < 0)
3156 return error;
3157
3158 nbytes += error;
3159 return nbytes;
3160}
3161
3162static int brcmf_sdio_assert_info(struct brcmf_sdio *bus,
3163 struct sdpcm_shared *sh, char __user *data,
3164 size_t count)
3165{
3166 int error = 0;
3167 char buf[200];
3168 char file[80] = "?";
3169 char expr[80] = "<???>";
3170 int res;
3171 loff_t pos = 0;
3172
3173 if ((sh->flags & SDPCM_SHARED_ASSERT_BUILT) == 0) {
3174 brcmf_dbg(INFO, "firmware not built with -assert\n");
3175 return 0;
3176 } else if ((sh->flags & SDPCM_SHARED_ASSERT) == 0) {
3177 brcmf_dbg(INFO, "no assert in dongle\n");
3178 return 0;
3179 }
3180
3181 if (sh->assert_file_addr != 0) {
3182 error = brcmf_sdbrcm_membytes(bus, false, sh->assert_file_addr,
3183 (u8 *)file, 80);
3184 if (error < 0)
3185 return error;
3186 }
3187 if (sh->assert_exp_addr != 0) {
3188 error = brcmf_sdbrcm_membytes(bus, false, sh->assert_exp_addr,
3189 (u8 *)expr, 80);
3190 if (error < 0)
3191 return error;
3192 }
3193
3194 res = scnprintf(buf, sizeof(buf),
3195 "dongle assert: %s:%d: assert(%s)\n",
3196 file, sh->assert_line, expr);
3197 return simple_read_from_buffer(data, count, &pos, buf, res);
3198}
3199
3200static int brcmf_sdbrcm_checkdied(struct brcmf_sdio *bus)
3201{
3202 int error;
3203 struct sdpcm_shared sh;
3204
3205 down(&bus->sdsem);
3206 error = brcmf_sdio_readshared(bus, &sh);
3207 up(&bus->sdsem);
3208
3209 if (error < 0)
3210 return error;
3211
3212 if ((sh.flags & SDPCM_SHARED_ASSERT_BUILT) == 0)
3213 brcmf_dbg(INFO, "firmware not built with -assert\n");
3214 else if (sh.flags & SDPCM_SHARED_ASSERT)
3215 brcmf_dbg(ERROR, "assertion in dongle\n");
3216
3217 if (sh.flags & SDPCM_SHARED_TRAP)
3218 brcmf_dbg(ERROR, "firmware trap in dongle\n");
3219
3220 return 0;
3221}
3222
3223static int brcmf_sdbrcm_died_dump(struct brcmf_sdio *bus, char __user *data,
3224 size_t count, loff_t *ppos)
3225{
3226 int error = 0;
3227 struct sdpcm_shared sh;
3228 int nbytes = 0;
3229 loff_t pos = *ppos;
3230
3231 if (pos != 0)
3232 return 0;
3233
3234 down(&bus->sdsem);
3235 error = brcmf_sdio_readshared(bus, &sh);
3236 if (error < 0)
3237 goto done;
3238
3239 error = brcmf_sdio_assert_info(bus, &sh, data, count);
3240 if (error < 0)
3241 goto done;
3242
3243 nbytes = error;
3244 error = brcmf_sdio_trap_info(bus, &sh, data, count);
3245 if (error < 0)
3246 goto done;
3247
3248 error += nbytes;
3249 *ppos += error;
3250done:
3251 up(&bus->sdsem);
3252 return error;
3253}
3254
3255static ssize_t brcmf_sdio_forensic_read(struct file *f, char __user *data,
3256 size_t count, loff_t *ppos)
3257{
3258 struct brcmf_sdio *bus = f->private_data;
3259 int res;
3260
3261 res = brcmf_sdbrcm_died_dump(bus, data, count, ppos);
3262 if (res > 0)
3263 *ppos += res;
3264 return (ssize_t)res;
3265}
3266
3267static const struct file_operations brcmf_sdio_forensic_ops = {
3268 .owner = THIS_MODULE,
3269 .open = simple_open,
3270 .read = brcmf_sdio_forensic_read
3271};
3272
3273static void brcmf_sdio_debugfs_create(struct brcmf_sdio *bus)
3274{
3275 struct brcmf_pub *drvr = bus->sdiodev->bus_if->drvr;
3276 struct dentry *dentry = brcmf_debugfs_get_devdir(drvr);
3277
3278 if (IS_ERR_OR_NULL(dentry))
3279 return;
3280
3281 debugfs_create_file("forensics", S_IRUGO, dentry, bus,
3282 &brcmf_sdio_forensic_ops);
3283 brcmf_debugfs_create_sdio_count(drvr, &bus->sdcnt);
3284}
3285#else
3286static int brcmf_sdbrcm_checkdied(struct brcmf_sdio *bus)
3287{
3288 return 0;
3289}
3290
3291static void brcmf_sdio_debugfs_create(struct brcmf_sdio *bus)
3292{
3293}
3294#endif /* DEBUG */
3295
2986static int 3296static int
2987brcmf_sdbrcm_bus_rxctl(struct device *dev, unsigned char *msg, uint msglen) 3297brcmf_sdbrcm_bus_rxctl(struct device *dev, unsigned char *msg, uint msglen)
2988{ 3298{
@@ -3009,60 +3319,27 @@ brcmf_sdbrcm_bus_rxctl(struct device *dev, unsigned char *msg, uint msglen)
3009 rxlen, msglen); 3319 rxlen, msglen);
3010 } else if (timeleft == 0) { 3320 } else if (timeleft == 0) {
3011 brcmf_dbg(ERROR, "resumed on timeout\n"); 3321 brcmf_dbg(ERROR, "resumed on timeout\n");
3322 brcmf_sdbrcm_checkdied(bus);
3012 } else if (pending) { 3323 } else if (pending) {
3013 brcmf_dbg(CTL, "cancelled\n"); 3324 brcmf_dbg(CTL, "cancelled\n");
3014 return -ERESTARTSYS; 3325 return -ERESTARTSYS;
3015 } else { 3326 } else {
3016 brcmf_dbg(CTL, "resumed for unknown reason?\n"); 3327 brcmf_dbg(CTL, "resumed for unknown reason?\n");
3328 brcmf_sdbrcm_checkdied(bus);
3017 } 3329 }
3018 3330
3019 if (rxlen) 3331 if (rxlen)
3020 bus->rx_ctlpkts++; 3332 bus->sdcnt.rx_ctlpkts++;
3021 else 3333 else
3022 bus->rx_ctlerrs++; 3334 bus->sdcnt.rx_ctlerrs++;
3023 3335
3024 return rxlen ? (int)rxlen : -ETIMEDOUT; 3336 return rxlen ? (int)rxlen : -ETIMEDOUT;
3025} 3337}
3026 3338
3027static int brcmf_sdbrcm_downloadvars(struct brcmf_sdio *bus, void *arg, int len)
3028{
3029 int bcmerror = 0;
3030
3031 brcmf_dbg(TRACE, "Enter\n");
3032
3033 /* Basic sanity checks */
3034 if (bus->sdiodev->bus_if->drvr_up) {
3035 bcmerror = -EISCONN;
3036 goto err;
3037 }
3038 if (!len) {
3039 bcmerror = -EOVERFLOW;
3040 goto err;
3041 }
3042
3043 /* Free the old ones and replace with passed variables */
3044 kfree(bus->vars);
3045
3046 bus->vars = kmalloc(len, GFP_ATOMIC);
3047 bus->varsz = bus->vars ? len : 0;
3048 if (bus->vars == NULL) {
3049 bcmerror = -ENOMEM;
3050 goto err;
3051 }
3052
3053 /* Copy the passed variables, which should include the
3054 terminating double-null */
3055 memcpy(bus->vars, arg, bus->varsz);
3056err:
3057 return bcmerror;
3058}
3059
3060static int brcmf_sdbrcm_write_vars(struct brcmf_sdio *bus) 3339static int brcmf_sdbrcm_write_vars(struct brcmf_sdio *bus)
3061{ 3340{
3062 int bcmerror = 0; 3341 int bcmerror = 0;
3063 u32 varsize;
3064 u32 varaddr; 3342 u32 varaddr;
3065 u8 *vbuffer;
3066 u32 varsizew; 3343 u32 varsizew;
3067 __le32 varsizew_le; 3344 __le32 varsizew_le;
3068#ifdef DEBUG 3345#ifdef DEBUG
@@ -3071,56 +3348,44 @@ static int brcmf_sdbrcm_write_vars(struct brcmf_sdio *bus)
3071 3348
3072 /* Even if there are no vars are to be written, we still 3349 /* Even if there are no vars are to be written, we still
3073 need to set the ramsize. */ 3350 need to set the ramsize. */
3074 varsize = bus->varsz ? roundup(bus->varsz, 4) : 0; 3351 varaddr = (bus->ramsize - 4) - bus->varsz;
3075 varaddr = (bus->ramsize - 4) - varsize;
3076 3352
3077 if (bus->vars) { 3353 if (bus->vars) {
3078 vbuffer = kzalloc(varsize, GFP_ATOMIC);
3079 if (!vbuffer)
3080 return -ENOMEM;
3081
3082 memcpy(vbuffer, bus->vars, bus->varsz);
3083
3084 /* Write the vars list */ 3354 /* Write the vars list */
3085 bcmerror = 3355 bcmerror = brcmf_sdbrcm_membytes(bus, true, varaddr,
3086 brcmf_sdbrcm_membytes(bus, true, varaddr, vbuffer, varsize); 3356 bus->vars, bus->varsz);
3087#ifdef DEBUG 3357#ifdef DEBUG
3088 /* Verify NVRAM bytes */ 3358 /* Verify NVRAM bytes */
3089 brcmf_dbg(INFO, "Compare NVRAM dl & ul; varsize=%d\n", varsize); 3359 brcmf_dbg(INFO, "Compare NVRAM dl & ul; varsize=%d\n",
3090 nvram_ularray = kmalloc(varsize, GFP_ATOMIC); 3360 bus->varsz);
3091 if (!nvram_ularray) { 3361 nvram_ularray = kmalloc(bus->varsz, GFP_ATOMIC);
3092 kfree(vbuffer); 3362 if (!nvram_ularray)
3093 return -ENOMEM; 3363 return -ENOMEM;
3094 }
3095 3364
3096 /* Upload image to verify downloaded contents. */ 3365 /* Upload image to verify downloaded contents. */
3097 memset(nvram_ularray, 0xaa, varsize); 3366 memset(nvram_ularray, 0xaa, bus->varsz);
3098 3367
3099 /* Read the vars list to temp buffer for comparison */ 3368 /* Read the vars list to temp buffer for comparison */
3100 bcmerror = 3369 bcmerror = brcmf_sdbrcm_membytes(bus, false, varaddr,
3101 brcmf_sdbrcm_membytes(bus, false, varaddr, nvram_ularray, 3370 nvram_ularray, bus->varsz);
3102 varsize);
3103 if (bcmerror) { 3371 if (bcmerror) {
3104 brcmf_dbg(ERROR, "error %d on reading %d nvram bytes at 0x%08x\n", 3372 brcmf_dbg(ERROR, "error %d on reading %d nvram bytes at 0x%08x\n",
3105 bcmerror, varsize, varaddr); 3373 bcmerror, bus->varsz, varaddr);
3106 } 3374 }
3107 /* Compare the org NVRAM with the one read from RAM */ 3375 /* Compare the org NVRAM with the one read from RAM */
3108 if (memcmp(vbuffer, nvram_ularray, varsize)) 3376 if (memcmp(bus->vars, nvram_ularray, bus->varsz))
3109 brcmf_dbg(ERROR, "Downloaded NVRAM image is corrupted\n"); 3377 brcmf_dbg(ERROR, "Downloaded NVRAM image is corrupted\n");
3110 else 3378 else
3111 brcmf_dbg(ERROR, "Download/Upload/Compare of NVRAM ok\n"); 3379 brcmf_dbg(ERROR, "Download/Upload/Compare of NVRAM ok\n");
3112 3380
3113 kfree(nvram_ularray); 3381 kfree(nvram_ularray);
3114#endif /* DEBUG */ 3382#endif /* DEBUG */
3115
3116 kfree(vbuffer);
3117 } 3383 }
3118 3384
3119 /* adjust to the user specified RAM */ 3385 /* adjust to the user specified RAM */
3120 brcmf_dbg(INFO, "Physical memory size: %d\n", bus->ramsize); 3386 brcmf_dbg(INFO, "Physical memory size: %d\n", bus->ramsize);
3121 brcmf_dbg(INFO, "Vars are at %d, orig varsize is %d\n", 3387 brcmf_dbg(INFO, "Vars are at %d, orig varsize is %d\n",
3122 varaddr, varsize); 3388 varaddr, bus->varsz);
3123 varsize = ((bus->ramsize - 4) - varaddr);
3124 3389
3125 /* 3390 /*
3126 * Determine the length token: 3391 * Determine the length token:
@@ -3131,13 +3396,13 @@ static int brcmf_sdbrcm_write_vars(struct brcmf_sdio *bus)
3131 varsizew = 0; 3396 varsizew = 0;
3132 varsizew_le = cpu_to_le32(0); 3397 varsizew_le = cpu_to_le32(0);
3133 } else { 3398 } else {
3134 varsizew = varsize / 4; 3399 varsizew = bus->varsz / 4;
3135 varsizew = (~varsizew << 16) | (varsizew & 0x0000FFFF); 3400 varsizew = (~varsizew << 16) | (varsizew & 0x0000FFFF);
3136 varsizew_le = cpu_to_le32(varsizew); 3401 varsizew_le = cpu_to_le32(varsizew);
3137 } 3402 }
3138 3403
3139 brcmf_dbg(INFO, "New varsize is %d, length token=0x%08x\n", 3404 brcmf_dbg(INFO, "New varsize is %d, length token=0x%08x\n",
3140 varsize, varsizew); 3405 bus->varsz, varsizew);
3141 3406
3142 /* Write the length token to the last word */ 3407 /* Write the length token to the last word */
3143 bcmerror = brcmf_sdbrcm_membytes(bus, true, (bus->ramsize - 4), 3408 bcmerror = brcmf_sdbrcm_membytes(bus, true, (bus->ramsize - 4),
@@ -3261,13 +3526,21 @@ err:
3261 * by two NULs. 3526 * by two NULs.
3262*/ 3527*/
3263 3528
3264static uint brcmf_process_nvram_vars(char *varbuf, uint len) 3529static int brcmf_process_nvram_vars(struct brcmf_sdio *bus)
3265{ 3530{
3531 char *varbuf;
3266 char *dp; 3532 char *dp;
3267 bool findNewline; 3533 bool findNewline;
3268 int column; 3534 int column;
3269 uint buf_len, n; 3535 int ret = 0;
3536 uint buf_len, n, len;
3270 3537
3538 len = bus->firmware->size;
3539 varbuf = vmalloc(len);
3540 if (!varbuf)
3541 return -ENOMEM;
3542
3543 memcpy(varbuf, bus->firmware->data, len);
3271 dp = varbuf; 3544 dp = varbuf;
3272 3545
3273 findNewline = false; 3546 findNewline = false;
@@ -3296,56 +3569,44 @@ static uint brcmf_process_nvram_vars(char *varbuf, uint len)
3296 column++; 3569 column++;
3297 } 3570 }
3298 buf_len = dp - varbuf; 3571 buf_len = dp - varbuf;
3299
3300 while (dp < varbuf + n) 3572 while (dp < varbuf + n)
3301 *dp++ = 0; 3573 *dp++ = 0;
3302 3574
3303 return buf_len; 3575 kfree(bus->vars);
3576 /* roundup needed for download to device */
3577 bus->varsz = roundup(buf_len + 1, 4);
3578 bus->vars = kmalloc(bus->varsz, GFP_KERNEL);
3579 if (bus->vars == NULL) {
3580 bus->varsz = 0;
3581 ret = -ENOMEM;
3582 goto err;
3583 }
3584
3585 /* copy the processed variables and add null termination */
3586 memcpy(bus->vars, varbuf, buf_len);
3587 bus->vars[buf_len] = 0;
3588err:
3589 vfree(varbuf);
3590 return ret;
3304} 3591}
3305 3592
3306static int brcmf_sdbrcm_download_nvram(struct brcmf_sdio *bus) 3593static int brcmf_sdbrcm_download_nvram(struct brcmf_sdio *bus)
3307{ 3594{
3308 uint len;
3309 char *memblock = NULL;
3310 char *bufp;
3311 int ret; 3595 int ret;
3312 3596
3597 if (bus->sdiodev->bus_if->drvr_up)
3598 return -EISCONN;
3599
3313 ret = request_firmware(&bus->firmware, BRCMF_SDIO_NV_NAME, 3600 ret = request_firmware(&bus->firmware, BRCMF_SDIO_NV_NAME,
3314 &bus->sdiodev->func[2]->dev); 3601 &bus->sdiodev->func[2]->dev);
3315 if (ret) { 3602 if (ret) {
3316 brcmf_dbg(ERROR, "Fail to request nvram %d\n", ret); 3603 brcmf_dbg(ERROR, "Fail to request nvram %d\n", ret);
3317 return ret; 3604 return ret;
3318 } 3605 }
3319 bus->fw_ptr = 0;
3320
3321 memblock = kmalloc(MEMBLOCK, GFP_ATOMIC);
3322 if (memblock == NULL) {
3323 ret = -ENOMEM;
3324 goto err;
3325 }
3326
3327 len = brcmf_sdbrcm_get_image(memblock, MEMBLOCK, bus);
3328
3329 if (len > 0 && len < MEMBLOCK) {
3330 bufp = (char *)memblock;
3331 bufp[len] = 0;
3332 len = brcmf_process_nvram_vars(bufp, len);
3333 bufp += len;
3334 *bufp++ = 0;
3335 if (len)
3336 ret = brcmf_sdbrcm_downloadvars(bus, memblock, len + 1);
3337 if (ret)
3338 brcmf_dbg(ERROR, "error downloading vars: %d\n", ret);
3339 } else {
3340 brcmf_dbg(ERROR, "error reading nvram file: %d\n", len);
3341 ret = -EIO;
3342 }
3343 3606
3344err: 3607 ret = brcmf_process_nvram_vars(bus);
3345 kfree(memblock);
3346 3608
3347 release_firmware(bus->firmware); 3609 release_firmware(bus->firmware);
3348 bus->fw_ptr = 0;
3349 3610
3350 return ret; 3611 return ret;
3351} 3612}
@@ -3419,7 +3680,7 @@ static int brcmf_sdbrcm_bus_init(struct device *dev)
3419 return 0; 3680 return 0;
3420 3681
3421 /* Start the watchdog timer */ 3682 /* Start the watchdog timer */
3422 bus->tickcnt = 0; 3683 bus->sdcnt.tickcnt = 0;
3423 brcmf_sdbrcm_wd_timer(bus, BRCMF_WD_POLL_MS); 3684 brcmf_sdbrcm_wd_timer(bus, BRCMF_WD_POLL_MS);
3424 3685
3425 down(&bus->sdsem); 3686 down(&bus->sdsem);
@@ -3512,7 +3773,7 @@ void brcmf_sdbrcm_isr(void *arg)
3512 return; 3773 return;
3513 } 3774 }
3514 /* Count the interrupt call */ 3775 /* Count the interrupt call */
3515 bus->intrcount++; 3776 bus->sdcnt.intrcount++;
3516 bus->ipend = true; 3777 bus->ipend = true;
3517 3778
3518 /* Shouldn't get this interrupt if we're sleeping? */ 3779 /* Shouldn't get this interrupt if we're sleeping? */
@@ -3554,7 +3815,8 @@ static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus)
3554 bus->polltick = 0; 3815 bus->polltick = 0;
3555 3816
3556 /* Check device if no interrupts */ 3817 /* Check device if no interrupts */
3557 if (!bus->intr || (bus->intrcount == bus->lastintrs)) { 3818 if (!bus->intr ||
3819 (bus->sdcnt.intrcount == bus->sdcnt.lastintrs)) {
3558 3820
3559 if (!bus->dpc_sched) { 3821 if (!bus->dpc_sched) {
3560 u8 devpend; 3822 u8 devpend;
@@ -3569,7 +3831,7 @@ static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus)
3569 /* If there is something, make like the ISR and 3831 /* If there is something, make like the ISR and
3570 schedule the DPC */ 3832 schedule the DPC */
3571 if (intstatus) { 3833 if (intstatus) {
3572 bus->pollcnt++; 3834 bus->sdcnt.pollcnt++;
3573 bus->ipend = true; 3835 bus->ipend = true;
3574 3836
3575 bus->dpc_sched = true; 3837 bus->dpc_sched = true;
@@ -3581,7 +3843,7 @@ static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus)
3581 } 3843 }
3582 3844
3583 /* Update interrupt tracking */ 3845 /* Update interrupt tracking */
3584 bus->lastintrs = bus->intrcount; 3846 bus->sdcnt.lastintrs = bus->sdcnt.intrcount;
3585 } 3847 }
3586#ifdef DEBUG 3848#ifdef DEBUG
3587 /* Poll for console output periodically */ 3849 /* Poll for console output periodically */
@@ -3623,6 +3885,8 @@ static bool brcmf_sdbrcm_chipmatch(u16 chipid)
3623 return true; 3885 return true;
3624 if (chipid == BCM4330_CHIP_ID) 3886 if (chipid == BCM4330_CHIP_ID)
3625 return true; 3887 return true;
3888 if (chipid == BCM4334_CHIP_ID)
3889 return true;
3626 return false; 3890 return false;
3627} 3891}
3628 3892
@@ -3793,7 +4057,7 @@ brcmf_sdbrcm_watchdog_thread(void *data)
3793 if (!wait_for_completion_interruptible(&bus->watchdog_wait)) { 4057 if (!wait_for_completion_interruptible(&bus->watchdog_wait)) {
3794 brcmf_sdbrcm_bus_watchdog(bus); 4058 brcmf_sdbrcm_bus_watchdog(bus);
3795 /* Count the tick for reference */ 4059 /* Count the tick for reference */
3796 bus->tickcnt++; 4060 bus->sdcnt.tickcnt++;
3797 } else 4061 } else
3798 break; 4062 break;
3799 } 4063 }
@@ -3856,6 +4120,10 @@ void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev)
3856{ 4120{
3857 int ret; 4121 int ret;
3858 struct brcmf_sdio *bus; 4122 struct brcmf_sdio *bus;
4123 struct brcmf_bus_dcmd *dlst;
4124 u32 dngl_txglom;
4125 u32 dngl_txglomalign;
4126 u8 idx;
3859 4127
3860 brcmf_dbg(TRACE, "Enter\n"); 4128 brcmf_dbg(TRACE, "Enter\n");
3861 4129
@@ -3938,8 +4206,29 @@ void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev)
3938 goto fail; 4206 goto fail;
3939 } 4207 }
3940 4208
4209 brcmf_sdio_debugfs_create(bus);
3941 brcmf_dbg(INFO, "completed!!\n"); 4210 brcmf_dbg(INFO, "completed!!\n");
3942 4211
4212 /* sdio bus core specific dcmd */
4213 idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_SDIO_DEV);
4214 dlst = kzalloc(sizeof(struct brcmf_bus_dcmd), GFP_KERNEL);
4215 if (dlst) {
4216 if (bus->ci->c_inf[idx].rev < 12) {
4217 /* for sdio core rev < 12, disable txgloming */
4218 dngl_txglom = 0;
4219 dlst->name = "bus:txglom";
4220 dlst->param = (char *)&dngl_txglom;
4221 dlst->param_len = sizeof(u32);
4222 } else {
4223 /* otherwise, set txglomalign */
4224 dngl_txglomalign = bus->sdiodev->bus_if->align;
4225 dlst->name = "bus:txglomalign";
4226 dlst->param = (char *)&dngl_txglomalign;
4227 dlst->param_len = sizeof(u32);
4228 }
4229 list_add(&dlst->list, &bus->sdiodev->bus_if->dcmd_list);
4230 }
4231
3943 /* if firmware path present try to download and bring up bus */ 4232 /* if firmware path present try to download and bring up bus */
3944 ret = brcmf_bus_start(bus->sdiodev->dev); 4233 ret = brcmf_bus_start(bus->sdiodev->dev);
3945 if (ret != 0) { 4234 if (ret != 0) {
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c b/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c
index f8e1f1c84d0..58155e23d22 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c
@@ -403,6 +403,23 @@ static int brcmf_sdio_chip_recognition(struct brcmf_sdio_dev *sdiodev,
403 ci->c_inf[3].cib = 0x03004211; 403 ci->c_inf[3].cib = 0x03004211;
404 ci->ramsize = 0x48000; 404 ci->ramsize = 0x48000;
405 break; 405 break;
406 case BCM4334_CHIP_ID:
407 ci->c_inf[0].wrapbase = 0x18100000;
408 ci->c_inf[0].cib = 0x29004211;
409 ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
410 ci->c_inf[1].base = 0x18002000;
411 ci->c_inf[1].wrapbase = 0x18102000;
412 ci->c_inf[1].cib = 0x0d004211;
413 ci->c_inf[2].id = BCMA_CORE_INTERNAL_MEM;
414 ci->c_inf[2].base = 0x18004000;
415 ci->c_inf[2].wrapbase = 0x18104000;
416 ci->c_inf[2].cib = 0x13080401;
417 ci->c_inf[3].id = BCMA_CORE_ARM_CM3;
418 ci->c_inf[3].base = 0x18003000;
419 ci->c_inf[3].wrapbase = 0x18103000;
420 ci->c_inf[3].cib = 0x07004211;
421 ci->ramsize = 0x80000;
422 break;
406 default: 423 default:
407 brcmf_dbg(ERROR, "chipid 0x%x is not supported\n", ci->chip); 424 brcmf_dbg(ERROR, "chipid 0x%x is not supported\n", ci->chip);
408 return -ENODEV; 425 return -ENODEV;
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/aiutils.c b/drivers/net/wireless/brcm80211/brcmsmac/aiutils.c
index 6d8b7213643..3c6f9b1e8d0 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/aiutils.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/aiutils.c
@@ -631,9 +631,8 @@ uint ai_cc_reg(struct si_pub *sih, uint regoff, u32 mask, u32 val)
631 cc = sii->icbus->drv_cc.core; 631 cc = sii->icbus->drv_cc.core;
632 632
633 /* mask and set */ 633 /* mask and set */
634 if (mask || val) { 634 if (mask || val)
635 bcma_maskset32(cc, regoff, ~mask, val); 635 bcma_maskset32(cc, regoff, ~mask, val);
636 }
637 636
638 /* readback */ 637 /* readback */
639 w = bcma_read32(cc, regoff); 638 w = bcma_read32(cc, regoff);
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/aiutils.h b/drivers/net/wireless/brcm80211/brcmsmac/aiutils.h
index d9f04a683bd..d6fa9829af9 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/aiutils.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/aiutils.h
@@ -193,7 +193,7 @@ extern void ai_detach(struct si_pub *sih);
193extern uint ai_cc_reg(struct si_pub *sih, uint regoff, u32 mask, u32 val); 193extern uint ai_cc_reg(struct si_pub *sih, uint regoff, u32 mask, u32 val);
194extern void ai_clkctl_init(struct si_pub *sih); 194extern void ai_clkctl_init(struct si_pub *sih);
195extern u16 ai_clkctl_fast_pwrup_delay(struct si_pub *sih); 195extern u16 ai_clkctl_fast_pwrup_delay(struct si_pub *sih);
196extern bool ai_clkctl_cc(struct si_pub *sih, uint mode); 196extern bool ai_clkctl_cc(struct si_pub *sih, enum bcma_clkmode mode);
197extern bool ai_deviceremoved(struct si_pub *sih); 197extern bool ai_deviceremoved(struct si_pub *sih);
198 198
199extern void ai_pci_down(struct si_pub *sih); 199extern void ai_pci_down(struct si_pub *sih);
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c b/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c
index 95b5902bc4b..01b190a25d9 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c
@@ -735,10 +735,8 @@ brcms_c_sendampdu(struct ampdu_info *ampdu, struct brcms_txq_info *qi,
735 * a candidate for aggregation 735 * a candidate for aggregation
736 */ 736 */
737 p = pktq_ppeek(&qi->q, prec); 737 p = pktq_ppeek(&qi->q, prec);
738 /* tx_info must be checked with current p */
739 tx_info = IEEE80211_SKB_CB(p);
740
741 if (p) { 738 if (p) {
739 tx_info = IEEE80211_SKB_CB(p);
742 if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && 740 if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) &&
743 ((u8) (p->priority) == tid)) { 741 ((u8) (p->priority) == tid)) {
744 plen = p->len + AMPDU_MAX_MPDU_OVERHEAD; 742 plen = p->len + AMPDU_MAX_MPDU_OVERHEAD;
@@ -759,6 +757,7 @@ brcms_c_sendampdu(struct ampdu_info *ampdu, struct brcms_txq_info *qi,
759 p = NULL; 757 p = NULL;
760 continue; 758 continue;
761 } 759 }
760 /* next packet fit for aggregation so dequeue */
762 p = brcmu_pktq_pdeq(&qi->q, prec); 761 p = brcmu_pktq_pdeq(&qi->q, prec);
763 } else { 762 } else {
764 p = NULL; 763 p = NULL;
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/channel.c b/drivers/net/wireless/brcm80211/brcmsmac/channel.c
index eb77ac3cfb6..2d365d3486d 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/channel.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/channel.c
@@ -15,7 +15,9 @@
15 */ 15 */
16 16
17#include <linux/types.h> 17#include <linux/types.h>
18#include <net/cfg80211.h>
18#include <net/mac80211.h> 19#include <net/mac80211.h>
20#include <net/regulatory.h>
19 21
20#include <defs.h> 22#include <defs.h>
21#include "pub.h" 23#include "pub.h"
@@ -23,73 +25,17 @@
23#include "main.h" 25#include "main.h"
24#include "stf.h" 26#include "stf.h"
25#include "channel.h" 27#include "channel.h"
28#include "mac80211_if.h"
26 29
27/* QDB() macro takes a dB value and converts to a quarter dB value */ 30/* QDB() macro takes a dB value and converts to a quarter dB value */
28#define QDB(n) ((n) * BRCMS_TXPWR_DB_FACTOR) 31#define QDB(n) ((n) * BRCMS_TXPWR_DB_FACTOR)
29 32
30#define LOCALE_CHAN_01_11 (1<<0)
31#define LOCALE_CHAN_12_13 (1<<1)
32#define LOCALE_CHAN_14 (1<<2)
33#define LOCALE_SET_5G_LOW_JP1 (1<<3) /* 34-48, step 2 */
34#define LOCALE_SET_5G_LOW_JP2 (1<<4) /* 34-46, step 4 */
35#define LOCALE_SET_5G_LOW1 (1<<5) /* 36-48, step 4 */
36#define LOCALE_SET_5G_LOW2 (1<<6) /* 52 */
37#define LOCALE_SET_5G_LOW3 (1<<7) /* 56-64, step 4 */
38#define LOCALE_SET_5G_MID1 (1<<8) /* 100-116, step 4 */
39#define LOCALE_SET_5G_MID2 (1<<9) /* 120-124, step 4 */
40#define LOCALE_SET_5G_MID3 (1<<10) /* 128 */
41#define LOCALE_SET_5G_HIGH1 (1<<11) /* 132-140, step 4 */
42#define LOCALE_SET_5G_HIGH2 (1<<12) /* 149-161, step 4 */
43#define LOCALE_SET_5G_HIGH3 (1<<13) /* 165 */
44#define LOCALE_CHAN_52_140_ALL (1<<14)
45#define LOCALE_SET_5G_HIGH4 (1<<15) /* 184-216 */
46
47#define LOCALE_CHAN_36_64 (LOCALE_SET_5G_LOW1 | \
48 LOCALE_SET_5G_LOW2 | \
49 LOCALE_SET_5G_LOW3)
50#define LOCALE_CHAN_52_64 (LOCALE_SET_5G_LOW2 | LOCALE_SET_5G_LOW3)
51#define LOCALE_CHAN_100_124 (LOCALE_SET_5G_MID1 | LOCALE_SET_5G_MID2)
52#define LOCALE_CHAN_100_140 (LOCALE_SET_5G_MID1 | LOCALE_SET_5G_MID2 | \
53 LOCALE_SET_5G_MID3 | LOCALE_SET_5G_HIGH1)
54#define LOCALE_CHAN_149_165 (LOCALE_SET_5G_HIGH2 | LOCALE_SET_5G_HIGH3)
55#define LOCALE_CHAN_184_216 LOCALE_SET_5G_HIGH4
56
57#define LOCALE_CHAN_01_14 (LOCALE_CHAN_01_11 | \
58 LOCALE_CHAN_12_13 | \
59 LOCALE_CHAN_14)
60
61#define LOCALE_RADAR_SET_NONE 0
62#define LOCALE_RADAR_SET_1 1
63
64#define LOCALE_RESTRICTED_NONE 0
65#define LOCALE_RESTRICTED_SET_2G_SHORT 1
66#define LOCALE_RESTRICTED_CHAN_165 2
67#define LOCALE_CHAN_ALL_5G 3
68#define LOCALE_RESTRICTED_JAPAN_LEGACY 4
69#define LOCALE_RESTRICTED_11D_2G 5
70#define LOCALE_RESTRICTED_11D_5G 6
71#define LOCALE_RESTRICTED_LOW_HI 7
72#define LOCALE_RESTRICTED_12_13_14 8
73
74#define LOCALE_2G_IDX_i 0
75#define LOCALE_5G_IDX_11 0
76#define LOCALE_MIMO_IDX_bn 0 33#define LOCALE_MIMO_IDX_bn 0
77#define LOCALE_MIMO_IDX_11n 0 34#define LOCALE_MIMO_IDX_11n 0
78 35
79/* max of BAND_5G_PWR_LVLS and 6 for 2.4 GHz */
80#define BRCMS_MAXPWR_TBL_SIZE 6
81/* max of BAND_5G_PWR_LVLS and 14 for 2.4 GHz */ 36/* max of BAND_5G_PWR_LVLS and 14 for 2.4 GHz */
82#define BRCMS_MAXPWR_MIMO_TBL_SIZE 14 37#define BRCMS_MAXPWR_MIMO_TBL_SIZE 14
83 38
84/* power level in group of 2.4GHz band channels:
85 * maxpwr[0] - CCK channels [1]
86 * maxpwr[1] - CCK channels [2-10]
87 * maxpwr[2] - CCK channels [11-14]
88 * maxpwr[3] - OFDM channels [1]
89 * maxpwr[4] - OFDM channels [2-10]
90 * maxpwr[5] - OFDM channels [11-14]
91 */
92
93/* maxpwr mapping to 5GHz band channels: 39/* maxpwr mapping to 5GHz band channels:
94 * maxpwr[0] - channels [34-48] 40 * maxpwr[0] - channels [34-48]
95 * maxpwr[1] - channels [52-60] 41 * maxpwr[1] - channels [52-60]
@@ -101,16 +47,8 @@
101 47
102#define LC(id) LOCALE_MIMO_IDX_ ## id 48#define LC(id) LOCALE_MIMO_IDX_ ## id
103 49
104#define LC_2G(id) LOCALE_2G_IDX_ ## id 50#define LOCALES(mimo2, mimo5) \
105 51 {LC(mimo2), LC(mimo5)}
106#define LC_5G(id) LOCALE_5G_IDX_ ## id
107
108#define LOCALES(band2, band5, mimo2, mimo5) \
109 {LC_2G(band2), LC_5G(band5), LC(mimo2), LC(mimo5)}
110
111/* macro to get 2.4 GHz channel group index for tx power */
112#define CHANNEL_POWER_IDX_2G_CCK(c) (((c) < 2) ? 0 : (((c) < 11) ? 1 : 2))
113#define CHANNEL_POWER_IDX_2G_OFDM(c) (((c) < 2) ? 3 : (((c) < 11) ? 4 : 5))
114 52
115/* macro to get 5 GHz channel group index for tx power */ 53/* macro to get 5 GHz channel group index for tx power */
116#define CHANNEL_POWER_IDX_5G(c) (((c) < 52) ? 0 : \ 54#define CHANNEL_POWER_IDX_5G(c) (((c) < 52) ? 0 : \
@@ -118,18 +56,37 @@
118 (((c) < 100) ? 2 : \ 56 (((c) < 100) ? 2 : \
119 (((c) < 149) ? 3 : 4)))) 57 (((c) < 149) ? 3 : 4))))
120 58
121#define ISDFS_EU(fl) (((fl) & BRCMS_DFS_EU) == BRCMS_DFS_EU) 59#define BRCM_2GHZ_2412_2462 REG_RULE(2412-10, 2462+10, 40, 0, 19, 0)
122 60#define BRCM_2GHZ_2467_2472 REG_RULE(2467-10, 2472+10, 20, 0, 19, \
123struct brcms_cm_band { 61 NL80211_RRF_PASSIVE_SCAN | \
124 /* struct locale_info flags */ 62 NL80211_RRF_NO_IBSS)
125 u8 locale_flags; 63
126 /* List of valid channels in the country */ 64#define BRCM_5GHZ_5180_5240 REG_RULE(5180-10, 5240+10, 40, 0, 21, \
127 struct brcms_chanvec valid_channels; 65 NL80211_RRF_PASSIVE_SCAN | \
128 /* List of restricted use channels */ 66 NL80211_RRF_NO_IBSS)
129 const struct brcms_chanvec *restricted_channels; 67#define BRCM_5GHZ_5260_5320 REG_RULE(5260-10, 5320+10, 40, 0, 21, \
130 /* List of radar sensitive channels */ 68 NL80211_RRF_PASSIVE_SCAN | \
131 const struct brcms_chanvec *radar_channels; 69 NL80211_RRF_DFS | \
132 u8 PAD[8]; 70 NL80211_RRF_NO_IBSS)
71#define BRCM_5GHZ_5500_5700 REG_RULE(5500-10, 5700+10, 40, 0, 21, \
72 NL80211_RRF_PASSIVE_SCAN | \
73 NL80211_RRF_DFS | \
74 NL80211_RRF_NO_IBSS)
75#define BRCM_5GHZ_5745_5825 REG_RULE(5745-10, 5825+10, 40, 0, 21, \
76 NL80211_RRF_PASSIVE_SCAN | \
77 NL80211_RRF_NO_IBSS)
78
79static const struct ieee80211_regdomain brcms_regdom_x2 = {
80 .n_reg_rules = 7,
81 .alpha2 = "X2",
82 .reg_rules = {
83 BRCM_2GHZ_2412_2462,
84 BRCM_2GHZ_2467_2472,
85 BRCM_5GHZ_5180_5240,
86 BRCM_5GHZ_5260_5320,
87 BRCM_5GHZ_5500_5700,
88 BRCM_5GHZ_5745_5825,
89 }
133}; 90};
134 91
135 /* locale per-channel tx power limits for MIMO frames 92 /* locale per-channel tx power limits for MIMO frames
@@ -141,337 +98,23 @@ struct locale_mimo_info {
141 s8 maxpwr20[BRCMS_MAXPWR_MIMO_TBL_SIZE]; 98 s8 maxpwr20[BRCMS_MAXPWR_MIMO_TBL_SIZE];
142 /* tx 40 MHz power limits, qdBm units */ 99 /* tx 40 MHz power limits, qdBm units */
143 s8 maxpwr40[BRCMS_MAXPWR_MIMO_TBL_SIZE]; 100 s8 maxpwr40[BRCMS_MAXPWR_MIMO_TBL_SIZE];
144 u8 flags;
145}; 101};
146 102
147/* Country names and abbreviations with locale defined from ISO 3166 */ 103/* Country names and abbreviations with locale defined from ISO 3166 */
148struct country_info { 104struct country_info {
149 const u8 locale_2G; /* 2.4G band locale */
150 const u8 locale_5G; /* 5G band locale */
151 const u8 locale_mimo_2G; /* 2.4G mimo info */ 105 const u8 locale_mimo_2G; /* 2.4G mimo info */
152 const u8 locale_mimo_5G; /* 5G mimo info */ 106 const u8 locale_mimo_5G; /* 5G mimo info */
153}; 107};
154 108
109struct brcms_regd {
110 struct country_info country;
111 const struct ieee80211_regdomain *regdomain;
112};
113
155struct brcms_cm_info { 114struct brcms_cm_info {
156 struct brcms_pub *pub; 115 struct brcms_pub *pub;
157 struct brcms_c_info *wlc; 116 struct brcms_c_info *wlc;
158 char srom_ccode[BRCM_CNTRY_BUF_SZ]; /* Country Code in SROM */ 117 const struct brcms_regd *world_regd;
159 uint srom_regrev; /* Regulatory Rev for the SROM ccode */
160 const struct country_info *country; /* current country def */
161 char ccode[BRCM_CNTRY_BUF_SZ]; /* current internal Country Code */
162 uint regrev; /* current Regulatory Revision */
163 char country_abbrev[BRCM_CNTRY_BUF_SZ]; /* current advertised ccode */
164 /* per-band state (one per phy/radio) */
165 struct brcms_cm_band bandstate[MAXBANDS];
166 /* quiet channels currently for radar sensitivity or 11h support */
167 /* channels on which we cannot transmit */
168 struct brcms_chanvec quiet_channels;
169};
170
171/* locale channel and power info. */
172struct locale_info {
173 u32 valid_channels;
174 /* List of radar sensitive channels */
175 u8 radar_channels;
176 /* List of channels used only if APs are detected */
177 u8 restricted_channels;
178 /* Max tx pwr in qdBm for each sub-band */
179 s8 maxpwr[BRCMS_MAXPWR_TBL_SIZE];
180 /* Country IE advertised max tx pwr in dBm per sub-band */
181 s8 pub_maxpwr[BAND_5G_PWR_LVLS];
182 u8 flags;
183};
184
185/* Regulatory Matrix Spreadsheet (CLM) MIMO v3.7.9 */
186
187/*
188 * Some common channel sets
189 */
190
191/* No channels */
192static const struct brcms_chanvec chanvec_none = {
193 {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
194 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
195 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
196 0x00, 0x00, 0x00, 0x00}
197};
198
199/* All 2.4 GHz HW channels */
200static const struct brcms_chanvec chanvec_all_2G = {
201 {0xfe, 0x7f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
202 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
203 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
204 0x00, 0x00, 0x00, 0x00}
205};
206
207/* All 5 GHz HW channels */
208static const struct brcms_chanvec chanvec_all_5G = {
209 {0x00, 0x00, 0x00, 0x00, 0x54, 0x55, 0x11, 0x11,
210 0x01, 0x00, 0x00, 0x00, 0x10, 0x11, 0x11, 0x11,
211 0x11, 0x11, 0x20, 0x22, 0x22, 0x00, 0x00, 0x11,
212 0x11, 0x11, 0x11, 0x01}
213};
214
215/*
216 * Radar channel sets
217 */
218
219/* Channels 52 - 64, 100 - 140 */
220static const struct brcms_chanvec radar_set1 = {
221 {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x11, /* 52 - 60 */
222 0x01, 0x00, 0x00, 0x00, 0x10, 0x11, 0x11, 0x11, /* 64, 100 - 124 */
223 0x11, 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 128 - 140 */
224 0x00, 0x00, 0x00, 0x00}
225};
226
227/*
228 * Restricted channel sets
229 */
230
231/* Channels 34, 38, 42, 46 */
232static const struct brcms_chanvec restricted_set_japan_legacy = {
233 {0x00, 0x00, 0x00, 0x00, 0x44, 0x44, 0x00, 0x00,
234 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
235 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
236 0x00, 0x00, 0x00, 0x00}
237};
238
239/* Channels 12, 13 */
240static const struct brcms_chanvec restricted_set_2g_short = {
241 {0x00, 0x30, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
242 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
243 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
244 0x00, 0x00, 0x00, 0x00}
245};
246
247/* Channel 165 */
248static const struct brcms_chanvec restricted_chan_165 = {
249 {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
250 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
251 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00,
252 0x00, 0x00, 0x00, 0x00}
253};
254
255/* Channels 36 - 48 & 149 - 165 */
256static const struct brcms_chanvec restricted_low_hi = {
257 {0x00, 0x00, 0x00, 0x00, 0x10, 0x11, 0x01, 0x00,
258 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
259 0x00, 0x00, 0x20, 0x22, 0x22, 0x00, 0x00, 0x00,
260 0x00, 0x00, 0x00, 0x00}
261};
262
263/* Channels 12 - 14 */
264static const struct brcms_chanvec restricted_set_12_13_14 = {
265 {0x00, 0x70, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
266 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
267 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
268 0x00, 0x00, 0x00, 0x00}
269};
270
271/* global memory to provide working buffer for expanded locale */
272
273static const struct brcms_chanvec *g_table_radar_set[] = {
274 &chanvec_none,
275 &radar_set1
276};
277
278static const struct brcms_chanvec *g_table_restricted_chan[] = {
279 &chanvec_none, /* restricted_set_none */
280 &restricted_set_2g_short,
281 &restricted_chan_165,
282 &chanvec_all_5G,
283 &restricted_set_japan_legacy,
284 &chanvec_all_2G, /* restricted_set_11d_2G */
285 &chanvec_all_5G, /* restricted_set_11d_5G */
286 &restricted_low_hi,
287 &restricted_set_12_13_14
288};
289
290static const struct brcms_chanvec locale_2g_01_11 = {
291 {0xfe, 0x0f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
292 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
293 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
294 0x00, 0x00, 0x00, 0x00}
295};
296
297static const struct brcms_chanvec locale_2g_12_13 = {
298 {0x00, 0x30, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
299 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
300 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
301 0x00, 0x00, 0x00, 0x00}
302};
303
304static const struct brcms_chanvec locale_2g_14 = {
305 {0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
306 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
307 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
308 0x00, 0x00, 0x00, 0x00}
309};
310
311static const struct brcms_chanvec locale_5g_LOW_JP1 = {
312 {0x00, 0x00, 0x00, 0x00, 0x54, 0x55, 0x01, 0x00,
313 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
314 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
315 0x00, 0x00, 0x00, 0x00}
316};
317
318static const struct brcms_chanvec locale_5g_LOW_JP2 = {
319 {0x00, 0x00, 0x00, 0x00, 0x44, 0x44, 0x00, 0x00,
320 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
321 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
322 0x00, 0x00, 0x00, 0x00}
323};
324
325static const struct brcms_chanvec locale_5g_LOW1 = {
326 {0x00, 0x00, 0x00, 0x00, 0x10, 0x11, 0x01, 0x00,
327 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
328 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
329 0x00, 0x00, 0x00, 0x00}
330};
331
332static const struct brcms_chanvec locale_5g_LOW2 = {
333 {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00,
334 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
335 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
336 0x00, 0x00, 0x00, 0x00}
337};
338
339static const struct brcms_chanvec locale_5g_LOW3 = {
340 {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11,
341 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
342 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
343 0x00, 0x00, 0x00, 0x00}
344};
345
346static const struct brcms_chanvec locale_5g_MID1 = {
347 {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
348 0x00, 0x00, 0x00, 0x00, 0x10, 0x11, 0x11, 0x00,
349 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
350 0x00, 0x00, 0x00, 0x00}
351};
352
353static const struct brcms_chanvec locale_5g_MID2 = {
354 {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
355 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11,
356 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
357 0x00, 0x00, 0x00, 0x00}
358};
359
360static const struct brcms_chanvec locale_5g_MID3 = {
361 {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
362 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
363 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
364 0x00, 0x00, 0x00, 0x00}
365};
366
367static const struct brcms_chanvec locale_5g_HIGH1 = {
368 {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
369 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
370 0x10, 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
371 0x00, 0x00, 0x00, 0x00}
372};
373
374static const struct brcms_chanvec locale_5g_HIGH2 = {
375 {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
376 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
377 0x00, 0x00, 0x20, 0x22, 0x02, 0x00, 0x00, 0x00,
378 0x00, 0x00, 0x00, 0x00}
379};
380
381static const struct brcms_chanvec locale_5g_HIGH3 = {
382 {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
383 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
384 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00,
385 0x00, 0x00, 0x00, 0x00}
386};
387
388static const struct brcms_chanvec locale_5g_52_140_ALL = {
389 {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x11,
390 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11,
391 0x11, 0x11, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00,
392 0x00, 0x00, 0x00, 0x00}
393};
394
395static const struct brcms_chanvec locale_5g_HIGH4 = {
396 {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
397 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
398 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11,
399 0x11, 0x11, 0x11, 0x11}
400};
401
402static const struct brcms_chanvec *g_table_locale_base[] = {
403 &locale_2g_01_11,
404 &locale_2g_12_13,
405 &locale_2g_14,
406 &locale_5g_LOW_JP1,
407 &locale_5g_LOW_JP2,
408 &locale_5g_LOW1,
409 &locale_5g_LOW2,
410 &locale_5g_LOW3,
411 &locale_5g_MID1,
412 &locale_5g_MID2,
413 &locale_5g_MID3,
414 &locale_5g_HIGH1,
415 &locale_5g_HIGH2,
416 &locale_5g_HIGH3,
417 &locale_5g_52_140_ALL,
418 &locale_5g_HIGH4
419};
420
421static void brcms_c_locale_add_channels(struct brcms_chanvec *target,
422 const struct brcms_chanvec *channels)
423{
424 u8 i;
425 for (i = 0; i < sizeof(struct brcms_chanvec); i++)
426 target->vec[i] |= channels->vec[i];
427}
428
429static void brcms_c_locale_get_channels(const struct locale_info *locale,
430 struct brcms_chanvec *channels)
431{
432 u8 i;
433
434 memset(channels, 0, sizeof(struct brcms_chanvec));
435
436 for (i = 0; i < ARRAY_SIZE(g_table_locale_base); i++) {
437 if (locale->valid_channels & (1 << i))
438 brcms_c_locale_add_channels(channels,
439 g_table_locale_base[i]);
440 }
441}
442
443/*
444 * Locale Definitions - 2.4 GHz
445 */
446static const struct locale_info locale_i = { /* locale i. channel 1 - 13 */
447 LOCALE_CHAN_01_11 | LOCALE_CHAN_12_13,
448 LOCALE_RADAR_SET_NONE,
449 LOCALE_RESTRICTED_SET_2G_SHORT,
450 {QDB(19), QDB(19), QDB(19),
451 QDB(19), QDB(19), QDB(19)},
452 {20, 20, 20, 0},
453 BRCMS_EIRP
454};
455
456/*
457 * Locale Definitions - 5 GHz
458 */
459static const struct locale_info locale_11 = {
460 /* locale 11. channel 36 - 48, 52 - 64, 100 - 140, 149 - 165 */
461 LOCALE_CHAN_36_64 | LOCALE_CHAN_100_140 | LOCALE_CHAN_149_165,
462 LOCALE_RADAR_SET_1,
463 LOCALE_RESTRICTED_NONE,
464 {QDB(21), QDB(21), QDB(21), QDB(21), QDB(21)},
465 {23, 23, 23, 30, 30},
466 BRCMS_EIRP | BRCMS_DFS_EU
467};
468
469static const struct locale_info *g_locale_2g_table[] = {
470 &locale_i
471};
472
473static const struct locale_info *g_locale_5g_table[] = {
474 &locale_11
475}; 118};
476 119
477/* 120/*
@@ -484,7 +127,6 @@ static const struct locale_mimo_info locale_bn = {
484 {0, 0, QDB(13), QDB(13), QDB(13), 127 {0, 0, QDB(13), QDB(13), QDB(13),
485 QDB(13), QDB(13), QDB(13), QDB(13), QDB(13), 128 QDB(13), QDB(13), QDB(13), QDB(13), QDB(13),
486 QDB(13), 0, 0}, 129 QDB(13), 0, 0},
487 0
488}; 130};
489 131
490static const struct locale_mimo_info *g_mimo_2g_table[] = { 132static const struct locale_mimo_info *g_mimo_2g_table[] = {
@@ -497,114 +139,20 @@ static const struct locale_mimo_info *g_mimo_2g_table[] = {
497static const struct locale_mimo_info locale_11n = { 139static const struct locale_mimo_info locale_11n = {
498 { /* 12.5 dBm */ 50, 50, 50, QDB(15), QDB(15)}, 140 { /* 12.5 dBm */ 50, 50, 50, QDB(15), QDB(15)},
499 {QDB(14), QDB(15), QDB(15), QDB(15), QDB(15)}, 141 {QDB(14), QDB(15), QDB(15), QDB(15), QDB(15)},
500 0
501}; 142};
502 143
503static const struct locale_mimo_info *g_mimo_5g_table[] = { 144static const struct locale_mimo_info *g_mimo_5g_table[] = {
504 &locale_11n 145 &locale_11n
505}; 146};
506 147
507static const struct { 148static const struct brcms_regd cntry_locales[] = {
508 char abbrev[BRCM_CNTRY_BUF_SZ]; /* country abbreviation */ 149 /* Worldwide RoW 2, must always be at index 0 */
509 struct country_info country;
510} cntry_locales[] = {
511 { 150 {
512 "X2", LOCALES(i, 11, bn, 11n)}, /* Worldwide RoW 2 */ 151 .country = LOCALES(bn, 11n),
513}; 152 .regdomain = &brcms_regdom_x2,
514 153 },
515#ifdef SUPPORT_40MHZ
516/* 20MHz channel info for 40MHz pairing support */
517struct chan20_info {
518 u8 sb;
519 u8 adj_sbs;
520}; 154};
521 155
522/* indicates adjacent channels that are allowed for a 40 Mhz channel and
523 * those that permitted by the HT
524 */
525struct chan20_info chan20_info[] = {
526 /* 11b/11g */
527/* 0 */ {1, (CH_UPPER_SB | CH_EWA_VALID)},
528/* 1 */ {2, (CH_UPPER_SB | CH_EWA_VALID)},
529/* 2 */ {3, (CH_UPPER_SB | CH_EWA_VALID)},
530/* 3 */ {4, (CH_UPPER_SB | CH_EWA_VALID)},
531/* 4 */ {5, (CH_UPPER_SB | CH_LOWER_SB | CH_EWA_VALID)},
532/* 5 */ {6, (CH_UPPER_SB | CH_LOWER_SB | CH_EWA_VALID)},
533/* 6 */ {7, (CH_UPPER_SB | CH_LOWER_SB | CH_EWA_VALID)},
534/* 7 */ {8, (CH_UPPER_SB | CH_LOWER_SB | CH_EWA_VALID)},
535/* 8 */ {9, (CH_UPPER_SB | CH_LOWER_SB | CH_EWA_VALID)},
536/* 9 */ {10, (CH_LOWER_SB | CH_EWA_VALID)},
537/* 10 */ {11, (CH_LOWER_SB | CH_EWA_VALID)},
538/* 11 */ {12, (CH_LOWER_SB)},
539/* 12 */ {13, (CH_LOWER_SB)},
540/* 13 */ {14, (CH_LOWER_SB)},
541
542/* 11a japan high */
543/* 14 */ {34, (CH_UPPER_SB)},
544/* 15 */ {38, (CH_LOWER_SB)},
545/* 16 */ {42, (CH_LOWER_SB)},
546/* 17 */ {46, (CH_LOWER_SB)},
547
548/* 11a usa low */
549/* 18 */ {36, (CH_UPPER_SB | CH_EWA_VALID)},
550/* 19 */ {40, (CH_LOWER_SB | CH_EWA_VALID)},
551/* 20 */ {44, (CH_UPPER_SB | CH_EWA_VALID)},
552/* 21 */ {48, (CH_LOWER_SB | CH_EWA_VALID)},
553/* 22 */ {52, (CH_UPPER_SB | CH_EWA_VALID)},
554/* 23 */ {56, (CH_LOWER_SB | CH_EWA_VALID)},
555/* 24 */ {60, (CH_UPPER_SB | CH_EWA_VALID)},
556/* 25 */ {64, (CH_LOWER_SB | CH_EWA_VALID)},
557
558/* 11a Europe */
559/* 26 */ {100, (CH_UPPER_SB | CH_EWA_VALID)},
560/* 27 */ {104, (CH_LOWER_SB | CH_EWA_VALID)},
561/* 28 */ {108, (CH_UPPER_SB | CH_EWA_VALID)},
562/* 29 */ {112, (CH_LOWER_SB | CH_EWA_VALID)},
563/* 30 */ {116, (CH_UPPER_SB | CH_EWA_VALID)},
564/* 31 */ {120, (CH_LOWER_SB | CH_EWA_VALID)},
565/* 32 */ {124, (CH_UPPER_SB | CH_EWA_VALID)},
566/* 33 */ {128, (CH_LOWER_SB | CH_EWA_VALID)},
567/* 34 */ {132, (CH_UPPER_SB | CH_EWA_VALID)},
568/* 35 */ {136, (CH_LOWER_SB | CH_EWA_VALID)},
569/* 36 */ {140, (CH_LOWER_SB)},
570
571/* 11a usa high, ref5 only */
572/* The 0x80 bit in pdiv means these are REF5, other entries are REF20 */
573/* 37 */ {149, (CH_UPPER_SB | CH_EWA_VALID)},
574/* 38 */ {153, (CH_LOWER_SB | CH_EWA_VALID)},
575/* 39 */ {157, (CH_UPPER_SB | CH_EWA_VALID)},
576/* 40 */ {161, (CH_LOWER_SB | CH_EWA_VALID)},
577/* 41 */ {165, (CH_LOWER_SB)},
578
579/* 11a japan */
580/* 42 */ {184, (CH_UPPER_SB)},
581/* 43 */ {188, (CH_LOWER_SB)},
582/* 44 */ {192, (CH_UPPER_SB)},
583/* 45 */ {196, (CH_LOWER_SB)},
584/* 46 */ {200, (CH_UPPER_SB)},
585/* 47 */ {204, (CH_LOWER_SB)},
586/* 48 */ {208, (CH_UPPER_SB)},
587/* 49 */ {212, (CH_LOWER_SB)},
588/* 50 */ {216, (CH_LOWER_SB)}
589};
590#endif /* SUPPORT_40MHZ */
591
592static const struct locale_info *brcms_c_get_locale_2g(u8 locale_idx)
593{
594 if (locale_idx >= ARRAY_SIZE(g_locale_2g_table))
595 return NULL; /* error condition */
596
597 return g_locale_2g_table[locale_idx];
598}
599
600static const struct locale_info *brcms_c_get_locale_5g(u8 locale_idx)
601{
602 if (locale_idx >= ARRAY_SIZE(g_locale_5g_table))
603 return NULL; /* error condition */
604
605 return g_locale_5g_table[locale_idx];
606}
607
608static const struct locale_mimo_info *brcms_c_get_mimo_2g(u8 locale_idx) 156static const struct locale_mimo_info *brcms_c_get_mimo_2g(u8 locale_idx)
609{ 157{
610 if (locale_idx >= ARRAY_SIZE(g_mimo_2g_table)) 158 if (locale_idx >= ARRAY_SIZE(g_mimo_2g_table))
@@ -621,13 +169,6 @@ static const struct locale_mimo_info *brcms_c_get_mimo_5g(u8 locale_idx)
621 return g_mimo_5g_table[locale_idx]; 169 return g_mimo_5g_table[locale_idx];
622} 170}
623 171
624static int
625brcms_c_country_aggregate_map(struct brcms_cm_info *wlc_cm, const char *ccode,
626 char *mapped_ccode, uint *mapped_regrev)
627{
628 return false;
629}
630
631/* 172/*
632 * Indicates whether the country provided is valid to pass 173 * Indicates whether the country provided is valid to pass
633 * to cfg80211 or not. 174 * to cfg80211 or not.
@@ -662,155 +203,24 @@ static bool brcms_c_country_valid(const char *ccode)
662 return true; 203 return true;
663} 204}
664 205
665/* Lookup a country info structure from a null terminated country 206static const struct brcms_regd *brcms_world_regd(const char *regdom, int len)
666 * abbreviation and regrev directly with no translation.
667 */
668static const struct country_info *
669brcms_c_country_lookup_direct(const char *ccode, uint regrev)
670{ 207{
671 uint size, i; 208 const struct brcms_regd *regd = NULL;
672 209 int i;
673 /* Should just return 0 for single locale driver. */
674 /* Keep it this way in case we add more locales. (for now anyway) */
675
676 /*
677 * all other country def arrays are for regrev == 0, so if
678 * regrev is non-zero, fail
679 */
680 if (regrev > 0)
681 return NULL;
682
683 /* find matched table entry from country code */
684 size = ARRAY_SIZE(cntry_locales);
685 for (i = 0; i < size; i++) {
686 if (strcmp(ccode, cntry_locales[i].abbrev) == 0)
687 return &cntry_locales[i].country;
688 }
689 return NULL;
690}
691
692static const struct country_info *
693brcms_c_countrycode_map(struct brcms_cm_info *wlc_cm, const char *ccode,
694 char *mapped_ccode, uint *mapped_regrev)
695{
696 struct brcms_c_info *wlc = wlc_cm->wlc;
697 const struct country_info *country;
698 uint srom_regrev = wlc_cm->srom_regrev;
699 const char *srom_ccode = wlc_cm->srom_ccode;
700 int mapped;
701
702 /* check for currently supported ccode size */
703 if (strlen(ccode) > (BRCM_CNTRY_BUF_SZ - 1)) {
704 wiphy_err(wlc->wiphy, "wl%d: %s: ccode \"%s\" too long for "
705 "match\n", wlc->pub->unit, __func__, ccode);
706 return NULL;
707 }
708
709 /* default mapping is the given ccode and regrev 0 */
710 strncpy(mapped_ccode, ccode, BRCM_CNTRY_BUF_SZ);
711 *mapped_regrev = 0;
712
713 /* If the desired country code matches the srom country code,
714 * then the mapped country is the srom regulatory rev.
715 * Otherwise look for an aggregate mapping.
716 */
717 if (!strcmp(srom_ccode, ccode)) {
718 *mapped_regrev = srom_regrev;
719 mapped = 0;
720 wiphy_err(wlc->wiphy, "srom_code == ccode %s\n", __func__);
721 } else {
722 mapped =
723 brcms_c_country_aggregate_map(wlc_cm, ccode, mapped_ccode,
724 mapped_regrev);
725 }
726
727 /* find the matching built-in country definition */
728 country = brcms_c_country_lookup_direct(mapped_ccode, *mapped_regrev);
729
730 /* if there is not an exact rev match, default to rev zero */
731 if (country == NULL && *mapped_regrev != 0) {
732 *mapped_regrev = 0;
733 country =
734 brcms_c_country_lookup_direct(mapped_ccode, *mapped_regrev);
735 }
736
737 return country;
738}
739
740/* Lookup a country info structure from a null terminated country code
741 * The lookup is case sensitive.
742 */
743static const struct country_info *
744brcms_c_country_lookup(struct brcms_c_info *wlc, const char *ccode)
745{
746 const struct country_info *country;
747 char mapped_ccode[BRCM_CNTRY_BUF_SZ];
748 uint mapped_regrev;
749
750 /*
751 * map the country code to a built-in country code, regrev, and
752 * country_info struct
753 */
754 country = brcms_c_countrycode_map(wlc->cmi, ccode, mapped_ccode,
755 &mapped_regrev);
756
757 return country;
758}
759
760/*
761 * reset the quiet channels vector to the union
762 * of the restricted and radar channel sets
763 */
764static void brcms_c_quiet_channels_reset(struct brcms_cm_info *wlc_cm)
765{
766 struct brcms_c_info *wlc = wlc_cm->wlc;
767 uint i, j;
768 struct brcms_band *band;
769 const struct brcms_chanvec *chanvec;
770
771 memset(&wlc_cm->quiet_channels, 0, sizeof(struct brcms_chanvec));
772
773 band = wlc->band;
774 for (i = 0; i < wlc->pub->_nbands;
775 i++, band = wlc->bandstate[OTHERBANDUNIT(wlc)]) {
776
777 /* initialize quiet channels for restricted channels */
778 chanvec = wlc_cm->bandstate[band->bandunit].restricted_channels;
779 for (j = 0; j < sizeof(struct brcms_chanvec); j++)
780 wlc_cm->quiet_channels.vec[j] |= chanvec->vec[j];
781 210
211 for (i = 0; i < ARRAY_SIZE(cntry_locales); i++) {
212 if (!strncmp(regdom, cntry_locales[i].regdomain->alpha2, len)) {
213 regd = &cntry_locales[i];
214 break;
215 }
782 } 216 }
783}
784
785/* Is the channel valid for the current locale and current band? */
786static bool brcms_c_valid_channel20(struct brcms_cm_info *wlc_cm, uint val)
787{
788 struct brcms_c_info *wlc = wlc_cm->wlc;
789 217
790 return ((val < MAXCHANNEL) && 218 return regd;
791 isset(wlc_cm->bandstate[wlc->band->bandunit].valid_channels.vec,
792 val));
793} 219}
794 220
795/* Is the channel valid for the current locale and specified band? */ 221static const struct brcms_regd *brcms_default_world_regd(void)
796static bool brcms_c_valid_channel20_in_band(struct brcms_cm_info *wlc_cm,
797 uint bandunit, uint val)
798{
799 return ((val < MAXCHANNEL)
800 && isset(wlc_cm->bandstate[bandunit].valid_channels.vec, val));
801}
802
803/* Is the channel valid for the current locale? (but don't consider channels not
804 * available due to bandlocking)
805 */
806static bool brcms_c_valid_channel20_db(struct brcms_cm_info *wlc_cm, uint val)
807{ 222{
808 struct brcms_c_info *wlc = wlc_cm->wlc; 223 return &cntry_locales[0];
809
810 return brcms_c_valid_channel20(wlc->cmi, val) ||
811 (!wlc->bandlocked
812 && brcms_c_valid_channel20_in_band(wlc->cmi,
813 OTHERBANDUNIT(wlc), val));
814} 224}
815 225
816/* JP, J1 - J10 are Japan ccodes */ 226/* JP, J1 - J10 are Japan ccodes */
@@ -820,12 +230,6 @@ static bool brcms_c_japan_ccode(const char *ccode)
820 (ccode[1] == 'P' || (ccode[1] >= '1' && ccode[1] <= '9'))); 230 (ccode[1] == 'P' || (ccode[1] >= '1' && ccode[1] <= '9')));
821} 231}
822 232
823/* Returns true if currently set country is Japan or variant */
824static bool brcms_c_japan(struct brcms_c_info *wlc)
825{
826 return brcms_c_japan_ccode(wlc->cmi->country_abbrev);
827}
828
829static void 233static void
830brcms_c_channel_min_txpower_limits_with_local_constraint( 234brcms_c_channel_min_txpower_limits_with_local_constraint(
831 struct brcms_cm_info *wlc_cm, struct txpwr_limits *txpwr, 235 struct brcms_cm_info *wlc_cm, struct txpwr_limits *txpwr,
@@ -901,140 +305,16 @@ brcms_c_channel_min_txpower_limits_with_local_constraint(
901 305
902} 306}
903 307
904/* Update the radio state (enable/disable) and tx power targets
905 * based on a new set of channel/regulatory information
906 */
907static void brcms_c_channels_commit(struct brcms_cm_info *wlc_cm)
908{
909 struct brcms_c_info *wlc = wlc_cm->wlc;
910 uint chan;
911 struct txpwr_limits txpwr;
912
913 /* search for the existence of any valid channel */
914 for (chan = 0; chan < MAXCHANNEL; chan++) {
915 if (brcms_c_valid_channel20_db(wlc->cmi, chan))
916 break;
917 }
918 if (chan == MAXCHANNEL)
919 chan = INVCHANNEL;
920
921 /*
922 * based on the channel search above, set or
923 * clear WL_RADIO_COUNTRY_DISABLE.
924 */
925 if (chan == INVCHANNEL) {
926 /*
927 * country/locale with no valid channels, set
928 * the radio disable bit
929 */
930 mboolset(wlc->pub->radio_disabled, WL_RADIO_COUNTRY_DISABLE);
931 wiphy_err(wlc->wiphy, "wl%d: %s: no valid channel for \"%s\" "
932 "nbands %d bandlocked %d\n", wlc->pub->unit,
933 __func__, wlc_cm->country_abbrev, wlc->pub->_nbands,
934 wlc->bandlocked);
935 } else if (mboolisset(wlc->pub->radio_disabled,
936 WL_RADIO_COUNTRY_DISABLE)) {
937 /*
938 * country/locale with valid channel, clear
939 * the radio disable bit
940 */
941 mboolclr(wlc->pub->radio_disabled, WL_RADIO_COUNTRY_DISABLE);
942 }
943
944 /*
945 * Now that the country abbreviation is set, if the radio supports 2G,
946 * then set channel 14 restrictions based on the new locale.
947 */
948 if (wlc->pub->_nbands > 1 || wlc->band->bandtype == BRCM_BAND_2G)
949 wlc_phy_chanspec_ch14_widefilter_set(wlc->band->pi,
950 brcms_c_japan(wlc) ? true :
951 false);
952
953 if (wlc->pub->up && chan != INVCHANNEL) {
954 brcms_c_channel_reg_limits(wlc_cm, wlc->chanspec, &txpwr);
955 brcms_c_channel_min_txpower_limits_with_local_constraint(wlc_cm,
956 &txpwr, BRCMS_TXPWR_MAX);
957 wlc_phy_txpower_limit_set(wlc->band->pi, &txpwr, wlc->chanspec);
958 }
959}
960
961static int
962brcms_c_channels_init(struct brcms_cm_info *wlc_cm,
963 const struct country_info *country)
964{
965 struct brcms_c_info *wlc = wlc_cm->wlc;
966 uint i, j;
967 struct brcms_band *band;
968 const struct locale_info *li;
969 struct brcms_chanvec sup_chan;
970 const struct locale_mimo_info *li_mimo;
971
972 band = wlc->band;
973 for (i = 0; i < wlc->pub->_nbands;
974 i++, band = wlc->bandstate[OTHERBANDUNIT(wlc)]) {
975
976 li = (band->bandtype == BRCM_BAND_5G) ?
977 brcms_c_get_locale_5g(country->locale_5G) :
978 brcms_c_get_locale_2g(country->locale_2G);
979 wlc_cm->bandstate[band->bandunit].locale_flags = li->flags;
980 li_mimo = (band->bandtype == BRCM_BAND_5G) ?
981 brcms_c_get_mimo_5g(country->locale_mimo_5G) :
982 brcms_c_get_mimo_2g(country->locale_mimo_2G);
983
984 /* merge the mimo non-mimo locale flags */
985 wlc_cm->bandstate[band->bandunit].locale_flags |=
986 li_mimo->flags;
987
988 wlc_cm->bandstate[band->bandunit].restricted_channels =
989 g_table_restricted_chan[li->restricted_channels];
990 wlc_cm->bandstate[band->bandunit].radar_channels =
991 g_table_radar_set[li->radar_channels];
992
993 /*
994 * set the channel availability, masking out the channels
995 * that may not be supported on this phy.
996 */
997 wlc_phy_chanspec_band_validch(band->pi, band->bandtype,
998 &sup_chan);
999 brcms_c_locale_get_channels(li,
1000 &wlc_cm->bandstate[band->bandunit].
1001 valid_channels);
1002 for (j = 0; j < sizeof(struct brcms_chanvec); j++)
1003 wlc_cm->bandstate[band->bandunit].valid_channels.
1004 vec[j] &= sup_chan.vec[j];
1005 }
1006
1007 brcms_c_quiet_channels_reset(wlc_cm);
1008 brcms_c_channels_commit(wlc_cm);
1009
1010 return 0;
1011}
1012
1013/* 308/*
1014 * set the driver's current country and regulatory information 309 * set the driver's current country and regulatory information
1015 * using a country code as the source. Look up built in country 310 * using a country code as the source. Look up built in country
1016 * information found with the country code. 311 * information found with the country code.
1017 */ 312 */
1018static void 313static void
1019brcms_c_set_country_common(struct brcms_cm_info *wlc_cm, 314brcms_c_set_country(struct brcms_cm_info *wlc_cm,
1020 const char *country_abbrev, 315 const struct brcms_regd *regd)
1021 const char *ccode, uint regrev,
1022 const struct country_info *country)
1023{ 316{
1024 const struct locale_info *locale;
1025 struct brcms_c_info *wlc = wlc_cm->wlc; 317 struct brcms_c_info *wlc = wlc_cm->wlc;
1026 char prev_country_abbrev[BRCM_CNTRY_BUF_SZ];
1027
1028 /* save current country state */
1029 wlc_cm->country = country;
1030
1031 memset(&prev_country_abbrev, 0, BRCM_CNTRY_BUF_SZ);
1032 strncpy(prev_country_abbrev, wlc_cm->country_abbrev,
1033 BRCM_CNTRY_BUF_SZ - 1);
1034
1035 strncpy(wlc_cm->country_abbrev, country_abbrev, BRCM_CNTRY_BUF_SZ - 1);
1036 strncpy(wlc_cm->ccode, ccode, BRCM_CNTRY_BUF_SZ - 1);
1037 wlc_cm->regrev = regrev;
1038 318
1039 if ((wlc->pub->_n_enab & SUPPORT_11N) != 319 if ((wlc->pub->_n_enab & SUPPORT_11N) !=
1040 wlc->protection->nmode_user) 320 wlc->protection->nmode_user)
@@ -1042,75 +322,19 @@ brcms_c_set_country_common(struct brcms_cm_info *wlc_cm,
1042 322
1043 brcms_c_stf_ss_update(wlc, wlc->bandstate[BAND_2G_INDEX]); 323 brcms_c_stf_ss_update(wlc, wlc->bandstate[BAND_2G_INDEX]);
1044 brcms_c_stf_ss_update(wlc, wlc->bandstate[BAND_5G_INDEX]); 324 brcms_c_stf_ss_update(wlc, wlc->bandstate[BAND_5G_INDEX]);
1045 /* set or restore gmode as required by regulatory */
1046 locale = brcms_c_get_locale_2g(country->locale_2G);
1047 if (locale && (locale->flags & BRCMS_NO_OFDM))
1048 brcms_c_set_gmode(wlc, GMODE_LEGACY_B, false);
1049 else
1050 brcms_c_set_gmode(wlc, wlc->protection->gmode_user, false);
1051 325
1052 brcms_c_channels_init(wlc_cm, country); 326 brcms_c_set_gmode(wlc, wlc->protection->gmode_user, false);
1053 327
1054 return; 328 return;
1055} 329}
1056 330
1057static int
1058brcms_c_set_countrycode_rev(struct brcms_cm_info *wlc_cm,
1059 const char *country_abbrev,
1060 const char *ccode, int regrev)
1061{
1062 const struct country_info *country;
1063 char mapped_ccode[BRCM_CNTRY_BUF_SZ];
1064 uint mapped_regrev;
1065
1066 /* if regrev is -1, lookup the mapped country code,
1067 * otherwise use the ccode and regrev directly
1068 */
1069 if (regrev == -1) {
1070 /*
1071 * map the country code to a built-in country
1072 * code, regrev, and country_info
1073 */
1074 country =
1075 brcms_c_countrycode_map(wlc_cm, ccode, mapped_ccode,
1076 &mapped_regrev);
1077 } else {
1078 /* find the matching built-in country definition */
1079 country = brcms_c_country_lookup_direct(ccode, regrev);
1080 strncpy(mapped_ccode, ccode, BRCM_CNTRY_BUF_SZ);
1081 mapped_regrev = regrev;
1082 }
1083
1084 if (country == NULL)
1085 return -EINVAL;
1086
1087 /* set the driver state for the country */
1088 brcms_c_set_country_common(wlc_cm, country_abbrev, mapped_ccode,
1089 mapped_regrev, country);
1090
1091 return 0;
1092}
1093
1094/*
1095 * set the driver's current country and regulatory information using
1096 * a country code as the source. Lookup built in country information
1097 * found with the country code.
1098 */
1099static int
1100brcms_c_set_countrycode(struct brcms_cm_info *wlc_cm, const char *ccode)
1101{
1102 char country_abbrev[BRCM_CNTRY_BUF_SZ];
1103 strncpy(country_abbrev, ccode, BRCM_CNTRY_BUF_SZ);
1104 return brcms_c_set_countrycode_rev(wlc_cm, country_abbrev, ccode, -1);
1105}
1106
1107struct brcms_cm_info *brcms_c_channel_mgr_attach(struct brcms_c_info *wlc) 331struct brcms_cm_info *brcms_c_channel_mgr_attach(struct brcms_c_info *wlc)
1108{ 332{
1109 struct brcms_cm_info *wlc_cm; 333 struct brcms_cm_info *wlc_cm;
1110 char country_abbrev[BRCM_CNTRY_BUF_SZ];
1111 const struct country_info *country;
1112 struct brcms_pub *pub = wlc->pub; 334 struct brcms_pub *pub = wlc->pub;
1113 struct ssb_sprom *sprom = &wlc->hw->d11core->bus->sprom; 335 struct ssb_sprom *sprom = &wlc->hw->d11core->bus->sprom;
336 const char *ccode = sprom->alpha2;
337 int ccode_len = sizeof(sprom->alpha2);
1114 338
1115 BCMMSG(wlc->wiphy, "wl%d\n", wlc->pub->unit); 339 BCMMSG(wlc->wiphy, "wl%d\n", wlc->pub->unit);
1116 340
@@ -1122,24 +346,27 @@ struct brcms_cm_info *brcms_c_channel_mgr_attach(struct brcms_c_info *wlc)
1122 wlc->cmi = wlc_cm; 346 wlc->cmi = wlc_cm;
1123 347
1124 /* store the country code for passing up as a regulatory hint */ 348 /* store the country code for passing up as a regulatory hint */
1125 if (sprom->alpha2 && brcms_c_country_valid(sprom->alpha2)) 349 wlc_cm->world_regd = brcms_world_regd(ccode, ccode_len);
1126 strncpy(wlc->pub->srom_ccode, sprom->alpha2, sizeof(sprom->alpha2)); 350 if (brcms_c_country_valid(ccode))
351 strncpy(wlc->pub->srom_ccode, ccode, ccode_len);
1127 352
1128 /* 353 /*
1129 * internal country information which must match 354 * If no custom world domain is found in the SROM, use the
1130 * regulatory constraints in firmware 355 * default "X2" domain.
1131 */ 356 */
1132 memset(country_abbrev, 0, BRCM_CNTRY_BUF_SZ); 357 if (!wlc_cm->world_regd) {
1133 strncpy(country_abbrev, "X2", sizeof(country_abbrev) - 1); 358 wlc_cm->world_regd = brcms_default_world_regd();
1134 country = brcms_c_country_lookup(wlc, country_abbrev); 359 ccode = wlc_cm->world_regd->regdomain->alpha2;
360 ccode_len = BRCM_CNTRY_BUF_SZ - 1;
361 }
1135 362
1136 /* save default country for exiting 11d regulatory mode */ 363 /* save default country for exiting 11d regulatory mode */
1137 strncpy(wlc->country_default, country_abbrev, BRCM_CNTRY_BUF_SZ - 1); 364 strncpy(wlc->country_default, ccode, ccode_len);
1138 365
1139 /* initialize autocountry_default to driver default */ 366 /* initialize autocountry_default to driver default */
1140 strncpy(wlc->autocountry_default, "X2", BRCM_CNTRY_BUF_SZ - 1); 367 strncpy(wlc->autocountry_default, ccode, ccode_len);
1141 368
1142 brcms_c_set_countrycode(wlc_cm, country_abbrev); 369 brcms_c_set_country(wlc_cm, wlc_cm->world_regd);
1143 370
1144 return wlc_cm; 371 return wlc_cm;
1145} 372}
@@ -1149,31 +376,15 @@ void brcms_c_channel_mgr_detach(struct brcms_cm_info *wlc_cm)
1149 kfree(wlc_cm); 376 kfree(wlc_cm);
1150} 377}
1151 378
1152u8
1153brcms_c_channel_locale_flags_in_band(struct brcms_cm_info *wlc_cm,
1154 uint bandunit)
1155{
1156 return wlc_cm->bandstate[bandunit].locale_flags;
1157}
1158
1159static bool
1160brcms_c_quiet_chanspec(struct brcms_cm_info *wlc_cm, u16 chspec)
1161{
1162 return (wlc_cm->wlc->pub->_n_enab & SUPPORT_11N) &&
1163 CHSPEC_IS40(chspec) ?
1164 (isset(wlc_cm->quiet_channels.vec,
1165 lower_20_sb(CHSPEC_CHANNEL(chspec))) ||
1166 isset(wlc_cm->quiet_channels.vec,
1167 upper_20_sb(CHSPEC_CHANNEL(chspec)))) :
1168 isset(wlc_cm->quiet_channels.vec, CHSPEC_CHANNEL(chspec));
1169}
1170
1171void 379void
1172brcms_c_channel_set_chanspec(struct brcms_cm_info *wlc_cm, u16 chanspec, 380brcms_c_channel_set_chanspec(struct brcms_cm_info *wlc_cm, u16 chanspec,
1173 u8 local_constraint_qdbm) 381 u8 local_constraint_qdbm)
1174{ 382{
1175 struct brcms_c_info *wlc = wlc_cm->wlc; 383 struct brcms_c_info *wlc = wlc_cm->wlc;
384 struct ieee80211_channel *ch = wlc->pub->ieee_hw->conf.channel;
385 const struct ieee80211_reg_rule *reg_rule;
1176 struct txpwr_limits txpwr; 386 struct txpwr_limits txpwr;
387 int ret;
1177 388
1178 brcms_c_channel_reg_limits(wlc_cm, chanspec, &txpwr); 389 brcms_c_channel_reg_limits(wlc_cm, chanspec, &txpwr);
1179 390
@@ -1181,8 +392,15 @@ brcms_c_channel_set_chanspec(struct brcms_cm_info *wlc_cm, u16 chanspec,
1181 wlc_cm, &txpwr, local_constraint_qdbm 392 wlc_cm, &txpwr, local_constraint_qdbm
1182 ); 393 );
1183 394
395 /* set or restore gmode as required by regulatory */
396 ret = freq_reg_info(wlc->wiphy, ch->center_freq, 0, &reg_rule);
397 if (!ret && (reg_rule->flags & NL80211_RRF_NO_OFDM))
398 brcms_c_set_gmode(wlc, GMODE_LEGACY_B, false);
399 else
400 brcms_c_set_gmode(wlc, wlc->protection->gmode_user, false);
401
1184 brcms_b_set_chanspec(wlc->hw, chanspec, 402 brcms_b_set_chanspec(wlc->hw, chanspec,
1185 (brcms_c_quiet_chanspec(wlc_cm, chanspec) != 0), 403 !!(ch->flags & IEEE80211_CHAN_PASSIVE_SCAN),
1186 &txpwr); 404 &txpwr);
1187} 405}
1188 406
@@ -1191,15 +409,14 @@ brcms_c_channel_reg_limits(struct brcms_cm_info *wlc_cm, u16 chanspec,
1191 struct txpwr_limits *txpwr) 409 struct txpwr_limits *txpwr)
1192{ 410{
1193 struct brcms_c_info *wlc = wlc_cm->wlc; 411 struct brcms_c_info *wlc = wlc_cm->wlc;
412 struct ieee80211_channel *ch = wlc->pub->ieee_hw->conf.channel;
1194 uint i; 413 uint i;
1195 uint chan; 414 uint chan;
1196 int maxpwr; 415 int maxpwr;
1197 int delta; 416 int delta;
1198 const struct country_info *country; 417 const struct country_info *country;
1199 struct brcms_band *band; 418 struct brcms_band *band;
1200 const struct locale_info *li;
1201 int conducted_max = BRCMS_TXPWR_MAX; 419 int conducted_max = BRCMS_TXPWR_MAX;
1202 int conducted_ofdm_max = BRCMS_TXPWR_MAX;
1203 const struct locale_mimo_info *li_mimo; 420 const struct locale_mimo_info *li_mimo;
1204 int maxpwr20, maxpwr40; 421 int maxpwr20, maxpwr40;
1205 int maxpwr_idx; 422 int maxpwr_idx;
@@ -1207,67 +424,35 @@ brcms_c_channel_reg_limits(struct brcms_cm_info *wlc_cm, u16 chanspec,
1207 424
1208 memset(txpwr, 0, sizeof(struct txpwr_limits)); 425 memset(txpwr, 0, sizeof(struct txpwr_limits));
1209 426
1210 if (!brcms_c_valid_chanspec_db(wlc_cm, chanspec)) { 427 if (WARN_ON(!ch))
1211 country = brcms_c_country_lookup(wlc, wlc->autocountry_default); 428 return;
1212 if (country == NULL) 429
1213 return; 430 country = &wlc_cm->world_regd->country;
1214 } else {
1215 country = wlc_cm->country;
1216 }
1217 431
1218 chan = CHSPEC_CHANNEL(chanspec); 432 chan = CHSPEC_CHANNEL(chanspec);
1219 band = wlc->bandstate[chspec_bandunit(chanspec)]; 433 band = wlc->bandstate[chspec_bandunit(chanspec)];
1220 li = (band->bandtype == BRCM_BAND_5G) ?
1221 brcms_c_get_locale_5g(country->locale_5G) :
1222 brcms_c_get_locale_2g(country->locale_2G);
1223
1224 li_mimo = (band->bandtype == BRCM_BAND_5G) ? 434 li_mimo = (band->bandtype == BRCM_BAND_5G) ?
1225 brcms_c_get_mimo_5g(country->locale_mimo_5G) : 435 brcms_c_get_mimo_5g(country->locale_mimo_5G) :
1226 brcms_c_get_mimo_2g(country->locale_mimo_2G); 436 brcms_c_get_mimo_2g(country->locale_mimo_2G);
1227 437
1228 if (li->flags & BRCMS_EIRP) { 438 delta = band->antgain;
1229 delta = band->antgain;
1230 } else {
1231 delta = 0;
1232 if (band->antgain > QDB(6))
1233 delta = band->antgain - QDB(6); /* Excess over 6 dB */
1234 }
1235 439
1236 if (li == &locale_i) { 440 if (band->bandtype == BRCM_BAND_2G)
1237 conducted_max = QDB(22); 441 conducted_max = QDB(22);
1238 conducted_ofdm_max = QDB(22); 442
1239 } 443 maxpwr = QDB(ch->max_power) - delta;
444 maxpwr = max(maxpwr, 0);
445 maxpwr = min(maxpwr, conducted_max);
1240 446
1241 /* CCK txpwr limits for 2.4G band */ 447 /* CCK txpwr limits for 2.4G band */
1242 if (band->bandtype == BRCM_BAND_2G) { 448 if (band->bandtype == BRCM_BAND_2G) {
1243 maxpwr = li->maxpwr[CHANNEL_POWER_IDX_2G_CCK(chan)];
1244
1245 maxpwr = maxpwr - delta;
1246 maxpwr = max(maxpwr, 0);
1247 maxpwr = min(maxpwr, conducted_max);
1248
1249 for (i = 0; i < BRCMS_NUM_RATES_CCK; i++) 449 for (i = 0; i < BRCMS_NUM_RATES_CCK; i++)
1250 txpwr->cck[i] = (u8) maxpwr; 450 txpwr->cck[i] = (u8) maxpwr;
1251 } 451 }
1252 452
1253 /* OFDM txpwr limits for 2.4G or 5G bands */ 453 for (i = 0; i < BRCMS_NUM_RATES_OFDM; i++) {
1254 if (band->bandtype == BRCM_BAND_2G)
1255 maxpwr = li->maxpwr[CHANNEL_POWER_IDX_2G_OFDM(chan)];
1256 else
1257 maxpwr = li->maxpwr[CHANNEL_POWER_IDX_5G(chan)];
1258
1259 maxpwr = maxpwr - delta;
1260 maxpwr = max(maxpwr, 0);
1261 maxpwr = min(maxpwr, conducted_ofdm_max);
1262
1263 /* Keep OFDM lmit below CCK limit */
1264 if (band->bandtype == BRCM_BAND_2G)
1265 maxpwr = min_t(int, maxpwr, txpwr->cck[0]);
1266
1267 for (i = 0; i < BRCMS_NUM_RATES_OFDM; i++)
1268 txpwr->ofdm[i] = (u8) maxpwr; 454 txpwr->ofdm[i] = (u8) maxpwr;
1269 455
1270 for (i = 0; i < BRCMS_NUM_RATES_OFDM; i++) {
1271 /* 456 /*
1272 * OFDM 40 MHz SISO has the same power as the corresponding 457 * OFDM 40 MHz SISO has the same power as the corresponding
1273 * MCS0-7 rate unless overriden by the locale specific code. 458 * MCS0-7 rate unless overriden by the locale specific code.
@@ -1282,14 +467,9 @@ brcms_c_channel_reg_limits(struct brcms_cm_info *wlc_cm, u16 chanspec,
1282 txpwr->ofdm_40_cdd[i] = 0; 467 txpwr->ofdm_40_cdd[i] = 0;
1283 } 468 }
1284 469
1285 /* MIMO/HT specific limits */ 470 delta = 0;
1286 if (li_mimo->flags & BRCMS_EIRP) { 471 if (band->antgain > QDB(6))
1287 delta = band->antgain; 472 delta = band->antgain - QDB(6); /* Excess over 6 dB */
1288 } else {
1289 delta = 0;
1290 if (band->antgain > QDB(6))
1291 delta = band->antgain - QDB(6); /* Excess over 6 dB */
1292 }
1293 473
1294 if (band->bandtype == BRCM_BAND_2G) 474 if (band->bandtype == BRCM_BAND_2G)
1295 maxpwr_idx = (chan - 1); 475 maxpwr_idx = (chan - 1);
@@ -1431,8 +611,7 @@ static bool brcms_c_chspec_malformed(u16 chanspec)
1431 * and they are also a legal HT combination 611 * and they are also a legal HT combination
1432 */ 612 */
1433static bool 613static bool
1434brcms_c_valid_chanspec_ext(struct brcms_cm_info *wlc_cm, u16 chspec, 614brcms_c_valid_chanspec_ext(struct brcms_cm_info *wlc_cm, u16 chspec)
1435 bool dualband)
1436{ 615{
1437 struct brcms_c_info *wlc = wlc_cm->wlc; 616 struct brcms_c_info *wlc = wlc_cm->wlc;
1438 u8 channel = CHSPEC_CHANNEL(chspec); 617 u8 channel = CHSPEC_CHANNEL(chspec);
@@ -1448,59 +627,166 @@ brcms_c_valid_chanspec_ext(struct brcms_cm_info *wlc_cm, u16 chspec,
1448 chspec_bandunit(chspec)) 627 chspec_bandunit(chspec))
1449 return false; 628 return false;
1450 629
1451 /* Check a 20Mhz channel */ 630 return true;
1452 if (CHSPEC_IS20(chspec)) { 631}
1453 if (dualband) 632
1454 return brcms_c_valid_channel20_db(wlc_cm->wlc->cmi, 633bool brcms_c_valid_chanspec_db(struct brcms_cm_info *wlc_cm, u16 chspec)
1455 channel); 634{
1456 else 635 return brcms_c_valid_chanspec_ext(wlc_cm, chspec);
1457 return brcms_c_valid_channel20(wlc_cm->wlc->cmi, 636}
1458 channel); 637
638static bool brcms_is_radar_freq(u16 center_freq)
639{
640 return center_freq >= 5260 && center_freq <= 5700;
641}
642
643static void brcms_reg_apply_radar_flags(struct wiphy *wiphy)
644{
645 struct ieee80211_supported_band *sband;
646 struct ieee80211_channel *ch;
647 int i;
648
649 sband = wiphy->bands[IEEE80211_BAND_5GHZ];
650 if (!sband)
651 return;
652
653 for (i = 0; i < sband->n_channels; i++) {
654 ch = &sband->channels[i];
655
656 if (!brcms_is_radar_freq(ch->center_freq))
657 continue;
658
659 /*
660 * All channels in this range should be passive and have
661 * DFS enabled.
662 */
663 if (!(ch->flags & IEEE80211_CHAN_DISABLED))
664 ch->flags |= IEEE80211_CHAN_RADAR |
665 IEEE80211_CHAN_NO_IBSS |
666 IEEE80211_CHAN_PASSIVE_SCAN;
1459 } 667 }
1460#ifdef SUPPORT_40MHZ 668}
1461 /* 669
1462 * We know we are now checking a 40MHZ channel, so we should 670static void
1463 * only be here for NPHYS 671brcms_reg_apply_beaconing_flags(struct wiphy *wiphy,
1464 */ 672 enum nl80211_reg_initiator initiator)
1465 if (BRCMS_ISNPHY(wlc->band) || BRCMS_ISSSLPNPHY(wlc->band)) { 673{
1466 u8 upper_sideband = 0, idx; 674 struct ieee80211_supported_band *sband;
1467 u8 num_ch20_entries = 675 struct ieee80211_channel *ch;
1468 sizeof(chan20_info) / sizeof(struct chan20_info); 676 const struct ieee80211_reg_rule *rule;
1469 677 int band, i, ret;
1470 if (!VALID_40CHANSPEC_IN_BAND(wlc, chspec_bandunit(chspec))) 678
1471 return false; 679 for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
1472 680 sband = wiphy->bands[band];
1473 if (dualband) { 681 if (!sband)
1474 if (!brcms_c_valid_channel20_db(wlc->cmi, 682 continue;
1475 lower_20_sb(channel)) || 683
1476 !brcms_c_valid_channel20_db(wlc->cmi, 684 for (i = 0; i < sband->n_channels; i++) {
1477 upper_20_sb(channel))) 685 ch = &sband->channels[i];
1478 return false; 686
1479 } else { 687 if (ch->flags &
1480 if (!brcms_c_valid_channel20(wlc->cmi, 688 (IEEE80211_CHAN_DISABLED | IEEE80211_CHAN_RADAR))
1481 lower_20_sb(channel)) || 689 continue;
1482 !brcms_c_valid_channel20(wlc->cmi, 690
1483 upper_20_sb(channel))) 691 if (initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE) {
1484 return false; 692 ret = freq_reg_info(wiphy, ch->center_freq,
693 0, &rule);
694 if (ret)
695 continue;
696
697 if (!(rule->flags & NL80211_RRF_NO_IBSS))
698 ch->flags &= ~IEEE80211_CHAN_NO_IBSS;
699 if (!(rule->flags & NL80211_RRF_PASSIVE_SCAN))
700 ch->flags &=
701 ~IEEE80211_CHAN_PASSIVE_SCAN;
702 } else if (ch->beacon_found) {
703 ch->flags &= ~(IEEE80211_CHAN_NO_IBSS |
704 IEEE80211_CHAN_PASSIVE_SCAN);
705 }
1485 } 706 }
707 }
708}
1486 709
1487 /* find the lower sideband info in the sideband array */ 710static int brcms_reg_notifier(struct wiphy *wiphy,
1488 for (idx = 0; idx < num_ch20_entries; idx++) { 711 struct regulatory_request *request)
1489 if (chan20_info[idx].sb == lower_20_sb(channel)) 712{
1490 upper_sideband = chan20_info[idx].adj_sbs; 713 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
714 struct brcms_info *wl = hw->priv;
715 struct brcms_c_info *wlc = wl->wlc;
716 struct ieee80211_supported_band *sband;
717 struct ieee80211_channel *ch;
718 int band, i;
719 bool ch_found = false;
720
721 brcms_reg_apply_radar_flags(wiphy);
722
723 if (request->initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE)
724 brcms_reg_apply_beaconing_flags(wiphy, request->initiator);
725
726 /* Disable radio if all channels disallowed by regulatory */
727 for (band = 0; !ch_found && band < IEEE80211_NUM_BANDS; band++) {
728 sband = wiphy->bands[band];
729 if (!sband)
730 continue;
731
732 for (i = 0; !ch_found && i < sband->n_channels; i++) {
733 ch = &sband->channels[i];
734
735 if (!(ch->flags & IEEE80211_CHAN_DISABLED))
736 ch_found = true;
1491 } 737 }
1492 /* check that the lower sideband allows an upper sideband */
1493 if ((upper_sideband & (CH_UPPER_SB | CH_EWA_VALID)) ==
1494 (CH_UPPER_SB | CH_EWA_VALID))
1495 return true;
1496 return false;
1497 } 738 }
1498#endif /* 40 MHZ */
1499 739
1500 return false; 740 if (ch_found) {
741 mboolclr(wlc->pub->radio_disabled, WL_RADIO_COUNTRY_DISABLE);
742 } else {
743 mboolset(wlc->pub->radio_disabled, WL_RADIO_COUNTRY_DISABLE);
744 wiphy_err(wlc->wiphy, "wl%d: %s: no valid channel for \"%s\"\n",
745 wlc->pub->unit, __func__, request->alpha2);
746 }
747
748 if (wlc->pub->_nbands > 1 || wlc->band->bandtype == BRCM_BAND_2G)
749 wlc_phy_chanspec_ch14_widefilter_set(wlc->band->pi,
750 brcms_c_japan_ccode(request->alpha2));
751
752 return 0;
1501} 753}
1502 754
1503bool brcms_c_valid_chanspec_db(struct brcms_cm_info *wlc_cm, u16 chspec) 755void brcms_c_regd_init(struct brcms_c_info *wlc)
1504{ 756{
1505 return brcms_c_valid_chanspec_ext(wlc_cm, chspec, true); 757 struct wiphy *wiphy = wlc->wiphy;
758 const struct brcms_regd *regd = wlc->cmi->world_regd;
759 struct ieee80211_supported_band *sband;
760 struct ieee80211_channel *ch;
761 struct brcms_chanvec sup_chan;
762 struct brcms_band *band;
763 int band_idx, i;
764
765 /* Disable any channels not supported by the phy */
766 for (band_idx = 0; band_idx < IEEE80211_NUM_BANDS; band_idx++) {
767 if (band_idx == IEEE80211_BAND_2GHZ)
768 band = wlc->bandstate[BAND_2G_INDEX];
769 else
770 band = wlc->bandstate[BAND_5G_INDEX];
771
772 /* skip if band not initialized */
773 if (band->pi == NULL)
774 continue;
775
776 wlc_phy_chanspec_band_validch(band->pi, band->bandtype,
777 &sup_chan);
778
779 sband = wiphy->bands[band_idx];
780 for (i = 0; i < sband->n_channels; i++) {
781 ch = &sband->channels[i];
782 if (!isset(sup_chan.vec, ch->hw_value))
783 ch->flags |= IEEE80211_CHAN_DISABLED;
784 }
785 }
786
787 wlc->wiphy->reg_notifier = brcms_reg_notifier;
788 wlc->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
789 WIPHY_FLAG_STRICT_REGULATORY;
790 wiphy_apply_custom_regulatory(wlc->wiphy, regd->regdomain);
791 brcms_reg_apply_beaconing_flags(wiphy, NL80211_REGDOM_SET_BY_DRIVER);
1506} 792}
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/channel.h b/drivers/net/wireless/brcm80211/brcmsmac/channel.h
index 808cb4fbfbe..006483a0abe 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/channel.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/channel.h
@@ -37,9 +37,6 @@ brcms_c_channel_mgr_attach(struct brcms_c_info *wlc);
37 37
38extern void brcms_c_channel_mgr_detach(struct brcms_cm_info *wlc_cm); 38extern void brcms_c_channel_mgr_detach(struct brcms_cm_info *wlc_cm);
39 39
40extern u8 brcms_c_channel_locale_flags_in_band(struct brcms_cm_info *wlc_cm,
41 uint bandunit);
42
43extern bool brcms_c_valid_chanspec_db(struct brcms_cm_info *wlc_cm, 40extern bool brcms_c_valid_chanspec_db(struct brcms_cm_info *wlc_cm,
44 u16 chspec); 41 u16 chspec);
45 42
@@ -49,5 +46,6 @@ extern void brcms_c_channel_reg_limits(struct brcms_cm_info *wlc_cm,
49extern void brcms_c_channel_set_chanspec(struct brcms_cm_info *wlc_cm, 46extern void brcms_c_channel_set_chanspec(struct brcms_cm_info *wlc_cm,
50 u16 chanspec, 47 u16 chanspec,
51 u8 local_constraint_qdbm); 48 u8 local_constraint_qdbm);
49extern void brcms_c_regd_init(struct brcms_c_info *wlc);
52 50
53#endif /* _WLC_CHANNEL_H */ 51#endif /* _WLC_CHANNEL_H */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/dma.c b/drivers/net/wireless/brcm80211/brcmsmac/dma.c
index 11054ae9d4f..7516639412e 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/dma.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/dma.c
@@ -1433,7 +1433,7 @@ void dma_walk_packets(struct dma_pub *dmah, void (*callback_fnc)
1433 struct ieee80211_tx_info *tx_info; 1433 struct ieee80211_tx_info *tx_info;
1434 1434
1435 while (i != end) { 1435 while (i != end) {
1436 skb = (struct sk_buff *)di->txp[i]; 1436 skb = di->txp[i];
1437 if (skb != NULL) { 1437 if (skb != NULL) {
1438 tx_info = (struct ieee80211_tx_info *)skb->cb; 1438 tx_info = (struct ieee80211_tx_info *)skb->cb;
1439 (callback_fnc)(tx_info, arg_a); 1439 (callback_fnc)(tx_info, arg_a);
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
index 50f92a0b7c4..2d5a4041269 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
@@ -721,14 +721,6 @@ static const struct ieee80211_ops brcms_ops = {
721 .flush = brcms_ops_flush, 721 .flush = brcms_ops_flush,
722}; 722};
723 723
724/*
725 * is called in brcms_bcma_probe() context, therefore no locking required.
726 */
727static int brcms_set_hint(struct brcms_info *wl, char *abbrev)
728{
729 return regulatory_hint(wl->pub->ieee_hw->wiphy, abbrev);
730}
731
732void brcms_dpc(unsigned long data) 724void brcms_dpc(unsigned long data)
733{ 725{
734 struct brcms_info *wl; 726 struct brcms_info *wl;
@@ -1058,6 +1050,8 @@ static struct brcms_info *brcms_attach(struct bcma_device *pdev)
1058 goto fail; 1050 goto fail;
1059 } 1051 }
1060 1052
1053 brcms_c_regd_init(wl->wlc);
1054
1061 memcpy(perm, &wl->pub->cur_etheraddr, ETH_ALEN); 1055 memcpy(perm, &wl->pub->cur_etheraddr, ETH_ALEN);
1062 if (WARN_ON(!is_valid_ether_addr(perm))) 1056 if (WARN_ON(!is_valid_ether_addr(perm)))
1063 goto fail; 1057 goto fail;
@@ -1068,9 +1062,9 @@ static struct brcms_info *brcms_attach(struct bcma_device *pdev)
1068 wiphy_err(wl->wiphy, "%s: ieee80211_register_hw failed, status" 1062 wiphy_err(wl->wiphy, "%s: ieee80211_register_hw failed, status"
1069 "%d\n", __func__, err); 1063 "%d\n", __func__, err);
1070 1064
1071 if (wl->pub->srom_ccode[0] && brcms_set_hint(wl, wl->pub->srom_ccode)) 1065 if (wl->pub->srom_ccode[0] &&
1072 wiphy_err(wl->wiphy, "%s: regulatory_hint failed, status %d\n", 1066 regulatory_hint(wl->wiphy, wl->pub->srom_ccode))
1073 __func__, err); 1067 wiphy_err(wl->wiphy, "%s: regulatory hint failed\n", __func__);
1074 1068
1075 n_adapters_found++; 1069 n_adapters_found++;
1076 return wl; 1070 return wl;
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/main.c b/drivers/net/wireless/brcm80211/brcmsmac/main.c
index 19db4052c44..bb00b6528d8 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/main.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/main.c
@@ -18,6 +18,7 @@
18 18
19#include <linux/pci_ids.h> 19#include <linux/pci_ids.h>
20#include <linux/if_ether.h> 20#include <linux/if_ether.h>
21#include <net/cfg80211.h>
21#include <net/mac80211.h> 22#include <net/mac80211.h>
22#include <brcm_hw_ids.h> 23#include <brcm_hw_ids.h>
23#include <aiutils.h> 24#include <aiutils.h>
@@ -3139,20 +3140,6 @@ void brcms_c_reset(struct brcms_c_info *wlc)
3139 brcms_b_reset(wlc->hw); 3140 brcms_b_reset(wlc->hw);
3140} 3141}
3141 3142
3142/* Return the channel the driver should initialize during brcms_c_init.
3143 * the channel may have to be changed from the currently configured channel
3144 * if other configurations are in conflict (bandlocked, 11n mode disabled,
3145 * invalid channel for current country, etc.)
3146 */
3147static u16 brcms_c_init_chanspec(struct brcms_c_info *wlc)
3148{
3149 u16 chanspec =
3150 1 | WL_CHANSPEC_BW_20 | WL_CHANSPEC_CTL_SB_NONE |
3151 WL_CHANSPEC_BAND_2G;
3152
3153 return chanspec;
3154}
3155
3156void brcms_c_init_scb(struct scb *scb) 3143void brcms_c_init_scb(struct scb *scb)
3157{ 3144{
3158 int i; 3145 int i;
@@ -5129,6 +5116,8 @@ static void brcms_c_wme_retries_write(struct brcms_c_info *wlc)
5129/* make interface operational */ 5116/* make interface operational */
5130int brcms_c_up(struct brcms_c_info *wlc) 5117int brcms_c_up(struct brcms_c_info *wlc)
5131{ 5118{
5119 struct ieee80211_channel *ch;
5120
5132 BCMMSG(wlc->wiphy, "wl%d\n", wlc->pub->unit); 5121 BCMMSG(wlc->wiphy, "wl%d\n", wlc->pub->unit);
5133 5122
5134 /* HW is turned off so don't try to access it */ 5123 /* HW is turned off so don't try to access it */
@@ -5195,8 +5184,9 @@ int brcms_c_up(struct brcms_c_info *wlc)
5195 wlc->pub->up = true; 5184 wlc->pub->up = true;
5196 5185
5197 if (wlc->bandinit_pending) { 5186 if (wlc->bandinit_pending) {
5187 ch = wlc->pub->ieee_hw->conf.channel;
5198 brcms_c_suspend_mac_and_wait(wlc); 5188 brcms_c_suspend_mac_and_wait(wlc);
5199 brcms_c_set_chanspec(wlc, wlc->default_bss->chanspec); 5189 brcms_c_set_chanspec(wlc, ch20mhz_chspec(ch->hw_value));
5200 wlc->bandinit_pending = false; 5190 wlc->bandinit_pending = false;
5201 brcms_c_enable_mac(wlc); 5191 brcms_c_enable_mac(wlc);
5202 } 5192 }
@@ -5397,11 +5387,6 @@ int brcms_c_set_gmode(struct brcms_c_info *wlc, u8 gmode, bool config)
5397 else 5387 else
5398 return -EINVAL; 5388 return -EINVAL;
5399 5389
5400 /* Legacy or bust when no OFDM is supported by regulatory */
5401 if ((brcms_c_channel_locale_flags_in_band(wlc->cmi, band->bandunit) &
5402 BRCMS_NO_OFDM) && (gmode != GMODE_LEGACY_B))
5403 return -EINVAL;
5404
5405 /* update configuration value */ 5390 /* update configuration value */
5406 if (config) 5391 if (config)
5407 brcms_c_protection_upd(wlc, BRCMS_PROT_G_USER, gmode); 5392 brcms_c_protection_upd(wlc, BRCMS_PROT_G_USER, gmode);
@@ -8201,19 +8186,12 @@ bool brcms_c_dpc(struct brcms_c_info *wlc, bool bounded)
8201void brcms_c_init(struct brcms_c_info *wlc, bool mute_tx) 8186void brcms_c_init(struct brcms_c_info *wlc, bool mute_tx)
8202{ 8187{
8203 struct bcma_device *core = wlc->hw->d11core; 8188 struct bcma_device *core = wlc->hw->d11core;
8189 struct ieee80211_channel *ch = wlc->pub->ieee_hw->conf.channel;
8204 u16 chanspec; 8190 u16 chanspec;
8205 8191
8206 BCMMSG(wlc->wiphy, "wl%d\n", wlc->pub->unit); 8192 BCMMSG(wlc->wiphy, "wl%d\n", wlc->pub->unit);
8207 8193
8208 /* 8194 chanspec = ch20mhz_chspec(ch->hw_value);
8209 * This will happen if a big-hammer was executed. In
8210 * that case, we want to go back to the channel that
8211 * we were on and not new channel
8212 */
8213 if (wlc->pub->associated)
8214 chanspec = wlc->home_chanspec;
8215 else
8216 chanspec = brcms_c_init_chanspec(wlc);
8217 8195
8218 brcms_b_init(wlc->hw, chanspec); 8196 brcms_b_init(wlc->hw, chanspec);
8219 8197
@@ -8318,7 +8296,7 @@ brcms_c_attach(struct brcms_info *wl, struct bcma_device *core, uint unit,
8318 struct brcms_pub *pub; 8296 struct brcms_pub *pub;
8319 8297
8320 /* allocate struct brcms_c_info state and its substructures */ 8298 /* allocate struct brcms_c_info state and its substructures */
8321 wlc = (struct brcms_c_info *) brcms_c_attach_malloc(unit, &err, 0); 8299 wlc = brcms_c_attach_malloc(unit, &err, 0);
8322 if (wlc == NULL) 8300 if (wlc == NULL)
8323 goto fail; 8301 goto fail;
8324 wlc->wiphy = wl->wiphy; 8302 wlc->wiphy = wl->wiphy;
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c
index 13b261517cc..36671814641 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c
@@ -14358,7 +14358,7 @@ void wlc_phy_nphy_tkip_rifs_war(struct brcms_phy *pi, u8 rifs)
14358 14358
14359 wlc_phy_write_txmacreg_nphy(pi, holdoff, delay); 14359 wlc_phy_write_txmacreg_nphy(pi, holdoff, delay);
14360 14360
14361 if (pi && pi->sh && (pi->sh->_rifs_phy != rifs)) 14361 if (pi->sh && (pi->sh->_rifs_phy != rifs))
14362 pi->sh->_rifs_phy = rifs; 14362 pi->sh->_rifs_phy = rifs;
14363} 14363}
14364 14364
diff --git a/drivers/net/wireless/brcm80211/brcmutil/utils.c b/drivers/net/wireless/brcm80211/brcmutil/utils.c
index b45ab34cdfd..3e6405e06ac 100644
--- a/drivers/net/wireless/brcm80211/brcmutil/utils.c
+++ b/drivers/net/wireless/brcm80211/brcmutil/utils.c
@@ -43,6 +43,8 @@ EXPORT_SYMBOL(brcmu_pkt_buf_get_skb);
43/* Free the driver packet. Free the tag if present */ 43/* Free the driver packet. Free the tag if present */
44void brcmu_pkt_buf_free_skb(struct sk_buff *skb) 44void brcmu_pkt_buf_free_skb(struct sk_buff *skb)
45{ 45{
46 if (!skb)
47 return;
46 WARN_ON(skb->next); 48 WARN_ON(skb->next);
47 if (skb->destructor) 49 if (skb->destructor)
48 /* cannot kfree_skb() on hard IRQ (net/core/skbuff.c) if 50 /* cannot kfree_skb() on hard IRQ (net/core/skbuff.c) if
diff --git a/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h b/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h
index 333193f20e1..bcc79b4e326 100644
--- a/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h
+++ b/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h
@@ -37,5 +37,6 @@
37#define BCM4329_CHIP_ID 0x4329 37#define BCM4329_CHIP_ID 0x4329
38#define BCM4330_CHIP_ID 0x4330 38#define BCM4330_CHIP_ID 0x4330
39#define BCM4331_CHIP_ID 0x4331 39#define BCM4331_CHIP_ID 0x4331
40#define BCM4334_CHIP_ID 0x4334
40 41
41#endif /* _BRCM_HW_IDS_H_ */ 42#endif /* _BRCM_HW_IDS_H_ */
diff --git a/drivers/net/wireless/hostap/hostap_proc.c b/drivers/net/wireless/hostap/hostap_proc.c
index 75ef8f04aab..dc447c1b5ab 100644
--- a/drivers/net/wireless/hostap/hostap_proc.c
+++ b/drivers/net/wireless/hostap/hostap_proc.c
@@ -58,8 +58,7 @@ static int prism2_stats_proc_read(char *page, char **start, off_t off,
58{ 58{
59 char *p = page; 59 char *p = page;
60 local_info_t *local = (local_info_t *) data; 60 local_info_t *local = (local_info_t *) data;
61 struct comm_tallies_sums *sums = (struct comm_tallies_sums *) 61 struct comm_tallies_sums *sums = &local->comm_tallies;
62 &local->comm_tallies;
63 62
64 if (off != 0) { 63 if (off != 0) {
65 *eof = 1; 64 *eof = 1;
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c
index 0036737fe8e..0df45914739 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/ipw2x00/ipw2200.c
@@ -2701,6 +2701,20 @@ static void eeprom_parse_mac(struct ipw_priv *priv, u8 * mac)
2701 memcpy(mac, &priv->eeprom[EEPROM_MAC_ADDRESS], 6); 2701 memcpy(mac, &priv->eeprom[EEPROM_MAC_ADDRESS], 6);
2702} 2702}
2703 2703
2704static void ipw_read_eeprom(struct ipw_priv *priv)
2705{
2706 int i;
2707 __le16 *eeprom = (__le16 *) priv->eeprom;
2708
2709 IPW_DEBUG_TRACE(">>\n");
2710
2711 /* read entire contents of eeprom into private buffer */
2712 for (i = 0; i < 128; i++)
2713 eeprom[i] = cpu_to_le16(eeprom_read_u16(priv, (u8) i));
2714
2715 IPW_DEBUG_TRACE("<<\n");
2716}
2717
2704/* 2718/*
2705 * Either the device driver (i.e. the host) or the firmware can 2719 * Either the device driver (i.e. the host) or the firmware can
2706 * load eeprom data into the designated region in SRAM. If neither 2720 * load eeprom data into the designated region in SRAM. If neither
@@ -2712,14 +2726,9 @@ static void eeprom_parse_mac(struct ipw_priv *priv, u8 * mac)
2712static void ipw_eeprom_init_sram(struct ipw_priv *priv) 2726static void ipw_eeprom_init_sram(struct ipw_priv *priv)
2713{ 2727{
2714 int i; 2728 int i;
2715 __le16 *eeprom = (__le16 *) priv->eeprom;
2716 2729
2717 IPW_DEBUG_TRACE(">>\n"); 2730 IPW_DEBUG_TRACE(">>\n");
2718 2731
2719 /* read entire contents of eeprom into private buffer */
2720 for (i = 0; i < 128; i++)
2721 eeprom[i] = cpu_to_le16(eeprom_read_u16(priv, (u8) i));
2722
2723 /* 2732 /*
2724 If the data looks correct, then copy it to our private 2733 If the data looks correct, then copy it to our private
2725 copy. Otherwise let the firmware know to perform the operation 2734 copy. Otherwise let the firmware know to perform the operation
@@ -3643,8 +3652,10 @@ static int ipw_load(struct ipw_priv *priv)
3643 /* ack fw init done interrupt */ 3652 /* ack fw init done interrupt */
3644 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_BIT_FW_INITIALIZATION_DONE); 3653 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_BIT_FW_INITIALIZATION_DONE);
3645 3654
3646 /* read eeprom data and initialize the eeprom region of sram */ 3655 /* read eeprom data */
3647 priv->eeprom_delay = 1; 3656 priv->eeprom_delay = 1;
3657 ipw_read_eeprom(priv);
3658 /* initialize the eeprom region of sram */
3648 ipw_eeprom_init_sram(priv); 3659 ipw_eeprom_init_sram(priv);
3649 3660
3650 /* enable interrupts */ 3661 /* enable interrupts */
@@ -7069,9 +7080,7 @@ static int ipw_qos_activate(struct ipw_priv *priv,
7069 } 7080 }
7070 7081
7071 IPW_DEBUG_QOS("QoS sending IPW_CMD_QOS_PARAMETERS\n"); 7082 IPW_DEBUG_QOS("QoS sending IPW_CMD_QOS_PARAMETERS\n");
7072 err = ipw_send_qos_params_command(priv, 7083 err = ipw_send_qos_params_command(priv, &qos_parameters[0]);
7073 (struct libipw_qos_parameters *)
7074 &(qos_parameters[0]));
7075 if (err) 7084 if (err)
7076 IPW_DEBUG_QOS("QoS IPW_CMD_QOS_PARAMETERS failed\n"); 7085 IPW_DEBUG_QOS("QoS IPW_CMD_QOS_PARAMETERS failed\n");
7077 7086
diff --git a/drivers/net/wireless/iwlegacy/4965-mac.c b/drivers/net/wireless/iwlegacy/4965-mac.c
index ff5d689e13f..34f61a0581a 100644
--- a/drivers/net/wireless/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/iwlegacy/4965-mac.c
@@ -5724,7 +5724,8 @@ il4965_mac_setup_register(struct il_priv *il, u32 max_probe_length)
5724 BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_ADHOC); 5724 BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_ADHOC);
5725 5725
5726 hw->wiphy->flags |= 5726 hw->wiphy->flags |=
5727 WIPHY_FLAG_CUSTOM_REGULATORY | WIPHY_FLAG_DISABLE_BEACON_HINTS; 5727 WIPHY_FLAG_CUSTOM_REGULATORY | WIPHY_FLAG_DISABLE_BEACON_HINTS |
5728 WIPHY_FLAG_IBSS_RSN;
5728 5729
5729 /* 5730 /*
5730 * For now, disable PS by default because it affects 5731 * For now, disable PS by default because it affects
@@ -5873,6 +5874,16 @@ il4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
5873 return -EOPNOTSUPP; 5874 return -EOPNOTSUPP;
5874 } 5875 }
5875 5876
5877 /*
5878 * To support IBSS RSN, don't program group keys in IBSS, the
5879 * hardware will then not attempt to decrypt the frames.
5880 */
5881 if (vif->type == NL80211_IFTYPE_ADHOC &&
5882 !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
5883 D_MAC80211("leave - ad-hoc group key\n");
5884 return -EOPNOTSUPP;
5885 }
5886
5876 sta_id = il_sta_id_or_broadcast(il, sta); 5887 sta_id = il_sta_id_or_broadcast(il, sta);
5877 if (sta_id == IL_INVALID_STATION) 5888 if (sta_id == IL_INVALID_STATION)
5878 return -EINVAL; 5889 return -EINVAL;
diff --git a/drivers/net/wireless/iwlegacy/common.c b/drivers/net/wireless/iwlegacy/common.c
index 5d4807c2b56..0f8a7703eea 100644
--- a/drivers/net/wireless/iwlegacy/common.c
+++ b/drivers/net/wireless/iwlegacy/common.c
@@ -4717,10 +4717,11 @@ il_check_stuck_queue(struct il_priv *il, int cnt)
4717 struct il_tx_queue *txq = &il->txq[cnt]; 4717 struct il_tx_queue *txq = &il->txq[cnt];
4718 struct il_queue *q = &txq->q; 4718 struct il_queue *q = &txq->q;
4719 unsigned long timeout; 4719 unsigned long timeout;
4720 unsigned long now = jiffies;
4720 int ret; 4721 int ret;
4721 4722
4722 if (q->read_ptr == q->write_ptr) { 4723 if (q->read_ptr == q->write_ptr) {
4723 txq->time_stamp = jiffies; 4724 txq->time_stamp = now;
4724 return 0; 4725 return 0;
4725 } 4726 }
4726 4727
@@ -4728,9 +4729,9 @@ il_check_stuck_queue(struct il_priv *il, int cnt)
4728 txq->time_stamp + 4729 txq->time_stamp +
4729 msecs_to_jiffies(il->cfg->wd_timeout); 4730 msecs_to_jiffies(il->cfg->wd_timeout);
4730 4731
4731 if (time_after(jiffies, timeout)) { 4732 if (time_after(now, timeout)) {
4732 IL_ERR("Queue %d stuck for %u ms.\n", q->id, 4733 IL_ERR("Queue %d stuck for %u ms.\n", q->id,
4733 il->cfg->wd_timeout); 4734 jiffies_to_msecs(now - txq->time_stamp));
4734 ret = il_force_reset(il, false); 4735 ret = il_force_reset(il, false);
4735 return (ret == -EAGAIN) ? 0 : 1; 4736 return (ret == -EAGAIN) ? 0 : 1;
4736 } 4737 }
diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig
index 2463c062643..727fbb5db9d 100644
--- a/drivers/net/wireless/iwlwifi/Kconfig
+++ b/drivers/net/wireless/iwlwifi/Kconfig
@@ -6,6 +6,7 @@ config IWLWIFI
6 select LEDS_CLASS 6 select LEDS_CLASS
7 select LEDS_TRIGGERS 7 select LEDS_TRIGGERS
8 select MAC80211_LEDS 8 select MAC80211_LEDS
9 select IWLDVM
9 ---help--- 10 ---help---
10 Select to build the driver supporting the: 11 Select to build the driver supporting the:
11 12
@@ -41,6 +42,10 @@ config IWLWIFI
41 say M here and read <file:Documentation/kbuild/modules.txt>. The 42 say M here and read <file:Documentation/kbuild/modules.txt>. The
42 module will be called iwlwifi. 43 module will be called iwlwifi.
43 44
45config IWLDVM
46 tristate "Intel Wireless WiFi"
47 depends on IWLWIFI
48
44menu "Debugging Options" 49menu "Debugging Options"
45 depends on IWLWIFI 50 depends on IWLWIFI
46 51
diff --git a/drivers/net/wireless/iwlwifi/Makefile b/drivers/net/wireless/iwlwifi/Makefile
index d615eacbf05..170ec330d2a 100644
--- a/drivers/net/wireless/iwlwifi/Makefile
+++ b/drivers/net/wireless/iwlwifi/Makefile
@@ -1,27 +1,19 @@
1# WIFI 1# common
2obj-$(CONFIG_IWLWIFI) += iwlwifi.o 2obj-$(CONFIG_IWLWIFI) += iwlwifi.o
3iwlwifi-objs := iwl-agn.o iwl-agn-rs.o iwl-mac80211.o 3iwlwifi-objs += iwl-io.o
4iwlwifi-objs += iwl-ucode.o iwl-agn-tx.o iwl-debug.o
5iwlwifi-objs += iwl-agn-lib.o iwl-agn-calib.o iwl-io.o
6iwlwifi-objs += iwl-agn-tt.o iwl-agn-sta.o iwl-agn-rx.o
7
8iwlwifi-objs += iwl-eeprom.o iwl-power.o
9iwlwifi-objs += iwl-scan.o iwl-led.o
10iwlwifi-objs += iwl-agn-rxon.o iwl-agn-devices.o
11iwlwifi-objs += iwl-5000.o
12iwlwifi-objs += iwl-6000.o
13iwlwifi-objs += iwl-1000.o
14iwlwifi-objs += iwl-2000.o
15iwlwifi-objs += iwl-pci.o
16iwlwifi-objs += iwl-drv.o 4iwlwifi-objs += iwl-drv.o
5iwlwifi-objs += iwl-debug.o
17iwlwifi-objs += iwl-notif-wait.o 6iwlwifi-objs += iwl-notif-wait.o
18iwlwifi-objs += iwl-trans-pcie.o iwl-trans-pcie-rx.o iwl-trans-pcie-tx.o 7iwlwifi-objs += iwl-eeprom-read.o iwl-eeprom-parse.o
19 8iwlwifi-objs += pcie/drv.o pcie/rx.o pcie/tx.o pcie/trans.o
9iwlwifi-objs += pcie/1000.o pcie/2000.o pcie/5000.o pcie/6000.o
20 10
21iwlwifi-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-debugfs.o
22iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TRACING) += iwl-devtrace.o 11iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TRACING) += iwl-devtrace.o
23iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TESTMODE) += iwl-testmode.o 12iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TESTMODE) += iwl-test.o
24 13
25CFLAGS_iwl-devtrace.o := -I$(src) 14ccflags-y += -D__CHECK_ENDIAN__ -I$(src)
26 15
27ccflags-y += -D__CHECK_ENDIAN__ 16
17obj-$(CONFIG_IWLDVM) += dvm/
18
19CFLAGS_iwl-devtrace.o := -I$(src)
diff --git a/drivers/net/wireless/iwlwifi/dvm/Makefile b/drivers/net/wireless/iwlwifi/dvm/Makefile
new file mode 100644
index 00000000000..5ff76b20414
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/dvm/Makefile
@@ -0,0 +1,13 @@
1# DVM
2obj-$(CONFIG_IWLDVM) += iwldvm.o
3iwldvm-objs += main.o rs.o mac80211.o ucode.o tx.o
4iwldvm-objs += lib.o calib.o tt.o sta.o rx.o
5
6iwldvm-objs += power.o
7iwldvm-objs += scan.o led.o
8iwldvm-objs += rxon.o devices.o
9
10iwldvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o
11iwldvm-$(CONFIG_IWLWIFI_DEVICE_TESTMODE) += testmode.o
12
13ccflags-y += -D__CHECK_ENDIAN__ -I$(src)/../
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.h b/drivers/net/wireless/iwlwifi/dvm/agn.h
index 79c0fe06f4d..9bb16bdf6d2 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.h
+++ b/drivers/net/wireless/iwlwifi/dvm/agn.h
@@ -63,9 +63,10 @@
63#ifndef __iwl_agn_h__ 63#ifndef __iwl_agn_h__
64#define __iwl_agn_h__ 64#define __iwl_agn_h__
65 65
66#include "iwl-dev.h"
67#include "iwl-config.h" 66#include "iwl-config.h"
68 67
68#include "dev.h"
69
69/* The first 11 queues (0-10) are used otherwise */ 70/* The first 11 queues (0-10) are used otherwise */
70#define IWLAGN_FIRST_AMPDU_QUEUE 11 71#define IWLAGN_FIRST_AMPDU_QUEUE 11
71 72
@@ -91,7 +92,6 @@ extern struct iwl_lib_ops iwl6030_lib;
91#define STATUS_CT_KILL 1 92#define STATUS_CT_KILL 1
92#define STATUS_ALIVE 2 93#define STATUS_ALIVE 2
93#define STATUS_READY 3 94#define STATUS_READY 3
94#define STATUS_GEO_CONFIGURED 4
95#define STATUS_EXIT_PENDING 5 95#define STATUS_EXIT_PENDING 5
96#define STATUS_STATISTICS 6 96#define STATUS_STATISTICS 6
97#define STATUS_SCANNING 7 97#define STATUS_SCANNING 7
@@ -101,6 +101,7 @@ extern struct iwl_lib_ops iwl6030_lib;
101#define STATUS_CHANNEL_SWITCH_PENDING 11 101#define STATUS_CHANNEL_SWITCH_PENDING 11
102#define STATUS_SCAN_COMPLETE 12 102#define STATUS_SCAN_COMPLETE 12
103#define STATUS_POWER_PMI 13 103#define STATUS_POWER_PMI 13
104#define STATUS_SCAN_ROC_EXPIRED 14
104 105
105struct iwl_ucode_capabilities; 106struct iwl_ucode_capabilities;
106 107
@@ -255,6 +256,10 @@ int __must_check iwl_scan_initiate(struct iwl_priv *priv,
255 enum iwl_scan_type scan_type, 256 enum iwl_scan_type scan_type,
256 enum ieee80211_band band); 257 enum ieee80211_band band);
257 258
259void iwl_scan_roc_expired(struct iwl_priv *priv);
260void iwl_scan_offchannel_skb(struct iwl_priv *priv);
261void iwl_scan_offchannel_skb_status(struct iwl_priv *priv);
262
258/* For faster active scanning, scan will move to the next channel if fewer than 263/* For faster active scanning, scan will move to the next channel if fewer than
259 * PLCP_QUIET_THRESH packets are heard on this channel within 264 * PLCP_QUIET_THRESH packets are heard on this channel within
260 * ACTIVE_QUIET_TIME after sending probe request. This shortens the dwell 265 * ACTIVE_QUIET_TIME after sending probe request. This shortens the dwell
@@ -264,7 +269,7 @@ int __must_check iwl_scan_initiate(struct iwl_priv *priv,
264#define IWL_ACTIVE_QUIET_TIME cpu_to_le16(10) /* msec */ 269#define IWL_ACTIVE_QUIET_TIME cpu_to_le16(10) /* msec */
265#define IWL_PLCP_QUIET_THRESH cpu_to_le16(1) /* packets */ 270#define IWL_PLCP_QUIET_THRESH cpu_to_le16(1) /* packets */
266 271
267#define IWL_SCAN_CHECK_WATCHDOG (HZ * 7) 272#define IWL_SCAN_CHECK_WATCHDOG (HZ * 15)
268 273
269 274
270/* bt coex */ 275/* bt coex */
@@ -390,8 +395,10 @@ static inline __le32 iwl_hw_set_rate_n_flags(u8 rate, u32 flags)
390} 395}
391 396
392extern int iwl_alive_start(struct iwl_priv *priv); 397extern int iwl_alive_start(struct iwl_priv *priv);
393/* svtool */ 398
399/* testmode support */
394#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE 400#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
401
395extern int iwlagn_mac_testmode_cmd(struct ieee80211_hw *hw, void *data, 402extern int iwlagn_mac_testmode_cmd(struct ieee80211_hw *hw, void *data,
396 int len); 403 int len);
397extern int iwlagn_mac_testmode_dump(struct ieee80211_hw *hw, 404extern int iwlagn_mac_testmode_dump(struct ieee80211_hw *hw,
@@ -399,13 +406,16 @@ extern int iwlagn_mac_testmode_dump(struct ieee80211_hw *hw,
399 struct netlink_callback *cb, 406 struct netlink_callback *cb,
400 void *data, int len); 407 void *data, int len);
401extern void iwl_testmode_init(struct iwl_priv *priv); 408extern void iwl_testmode_init(struct iwl_priv *priv);
402extern void iwl_testmode_cleanup(struct iwl_priv *priv); 409extern void iwl_testmode_free(struct iwl_priv *priv);
410
403#else 411#else
412
404static inline 413static inline
405int iwlagn_mac_testmode_cmd(struct ieee80211_hw *hw, void *data, int len) 414int iwlagn_mac_testmode_cmd(struct ieee80211_hw *hw, void *data, int len)
406{ 415{
407 return -ENOSYS; 416 return -ENOSYS;
408} 417}
418
409static inline 419static inline
410int iwlagn_mac_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *skb, 420int iwlagn_mac_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *skb,
411 struct netlink_callback *cb, 421 struct netlink_callback *cb,
@@ -413,12 +423,12 @@ int iwlagn_mac_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *skb,
413{ 423{
414 return -ENOSYS; 424 return -ENOSYS;
415} 425}
416static inline 426
417void iwl_testmode_init(struct iwl_priv *priv) 427static inline void iwl_testmode_init(struct iwl_priv *priv)
418{ 428{
419} 429}
420static inline 430
421void iwl_testmode_cleanup(struct iwl_priv *priv) 431static inline void iwl_testmode_free(struct iwl_priv *priv)
422{ 432{
423} 433}
424#endif 434#endif
@@ -437,10 +447,8 @@ static inline void iwl_print_rx_config_cmd(struct iwl_priv *priv,
437 447
438static inline int iwl_is_ready(struct iwl_priv *priv) 448static inline int iwl_is_ready(struct iwl_priv *priv)
439{ 449{
440 /* The adapter is 'ready' if READY and GEO_CONFIGURED bits are 450 /* The adapter is 'ready' if READY EXIT_PENDING is not set */
441 * set but EXIT_PENDING is not */
442 return test_bit(STATUS_READY, &priv->status) && 451 return test_bit(STATUS_READY, &priv->status) &&
443 test_bit(STATUS_GEO_CONFIGURED, &priv->status) &&
444 !test_bit(STATUS_EXIT_PENDING, &priv->status); 452 !test_bit(STATUS_EXIT_PENDING, &priv->status);
445} 453}
446 454
@@ -518,85 +526,4 @@ static inline const char *iwl_dvm_get_cmd_string(u8 cmd)
518 return s; 526 return s;
519 return "UNKNOWN"; 527 return "UNKNOWN";
520} 528}
521
522/* API method exported for mvm hybrid state */
523void iwl_setup_deferred_work(struct iwl_priv *priv);
524int iwl_send_wimax_coex(struct iwl_priv *priv);
525int iwl_send_bt_env(struct iwl_priv *priv, u8 action, u8 type);
526void iwl_option_config(struct iwl_priv *priv);
527void iwl_set_hw_params(struct iwl_priv *priv);
528void iwl_init_context(struct iwl_priv *priv, u32 ucode_flags);
529int iwl_init_drv(struct iwl_priv *priv);
530void iwl_uninit_drv(struct iwl_priv *priv);
531void iwl_send_bt_config(struct iwl_priv *priv);
532void iwl_rf_kill_ct_config(struct iwl_priv *priv);
533int iwl_setup_interface(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
534void iwl_teardown_interface(struct iwl_priv *priv,
535 struct ieee80211_vif *vif,
536 bool mode_change);
537int iwl_full_rxon_required(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
538void iwlagn_update_qos(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
539void iwlagn_check_needed_chains(struct iwl_priv *priv,
540 struct iwl_rxon_context *ctx,
541 struct ieee80211_bss_conf *bss_conf);
542void iwlagn_chain_noise_reset(struct iwl_priv *priv);
543int iwlagn_update_beacon(struct iwl_priv *priv,
544 struct ieee80211_vif *vif);
545void iwl_tt_handler(struct iwl_priv *priv);
546void iwl_op_mode_dvm_stop(struct iwl_op_mode *op_mode);
547void iwl_stop_sw_queue(struct iwl_op_mode *op_mode, int queue);
548void iwl_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state);
549void iwl_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb);
550void iwl_nic_error(struct iwl_op_mode *op_mode);
551void iwl_cmd_queue_full(struct iwl_op_mode *op_mode);
552void iwl_nic_config(struct iwl_op_mode *op_mode);
553int iwlagn_mac_set_tim(struct ieee80211_hw *hw,
554 struct ieee80211_sta *sta, bool set);
555void iwlagn_mac_rssi_callback(struct ieee80211_hw *hw,
556 enum ieee80211_rssi_event rssi_event);
557int iwlagn_mac_cancel_remain_on_channel(struct ieee80211_hw *hw);
558int iwlagn_mac_tx_last_beacon(struct ieee80211_hw *hw);
559void iwlagn_mac_flush(struct ieee80211_hw *hw, bool drop);
560void iwl_wake_sw_queue(struct iwl_op_mode *op_mode, int queue);
561void iwlagn_mac_channel_switch(struct ieee80211_hw *hw,
562 struct ieee80211_channel_switch *ch_switch);
563int iwlagn_mac_sta_state(struct ieee80211_hw *hw,
564 struct ieee80211_vif *vif,
565 struct ieee80211_sta *sta,
566 enum ieee80211_sta_state old_state,
567 enum ieee80211_sta_state new_state);
568int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
569 struct ieee80211_vif *vif,
570 enum ieee80211_ampdu_mlme_action action,
571 struct ieee80211_sta *sta, u16 tid, u16 *ssn,
572 u8 buf_size);
573int iwlagn_mac_hw_scan(struct ieee80211_hw *hw,
574 struct ieee80211_vif *vif,
575 struct cfg80211_scan_request *req);
576void iwlagn_mac_sta_notify(struct ieee80211_hw *hw,
577 struct ieee80211_vif *vif,
578 enum sta_notify_cmd cmd,
579 struct ieee80211_sta *sta);
580void iwlagn_configure_filter(struct ieee80211_hw *hw,
581 unsigned int changed_flags,
582 unsigned int *total_flags,
583 u64 multicast);
584int iwlagn_mac_conf_tx(struct ieee80211_hw *hw,
585 struct ieee80211_vif *vif, u16 queue,
586 const struct ieee80211_tx_queue_params *params);
587void iwlagn_mac_set_rekey_data(struct ieee80211_hw *hw,
588 struct ieee80211_vif *vif,
589 struct cfg80211_gtk_rekey_data *data);
590void iwlagn_mac_update_tkip_key(struct ieee80211_hw *hw,
591 struct ieee80211_vif *vif,
592 struct ieee80211_key_conf *keyconf,
593 struct ieee80211_sta *sta,
594 u32 iv32, u16 *phase1key);
595int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
596 struct ieee80211_vif *vif,
597 struct ieee80211_sta *sta,
598 struct ieee80211_key_conf *key);
599void iwlagn_mac_stop(struct ieee80211_hw *hw);
600void iwlagn_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
601int iwlagn_mac_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan);
602#endif /* __iwl_agn_h__ */ 529#endif /* __iwl_agn_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-calib.c b/drivers/net/wireless/iwlwifi/dvm/calib.c
index 95f27f1a423..f2dd671d7dc 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-calib.c
+++ b/drivers/net/wireless/iwlwifi/dvm/calib.c
@@ -63,10 +63,11 @@
63#include <linux/slab.h> 63#include <linux/slab.h>
64#include <net/mac80211.h> 64#include <net/mac80211.h>
65 65
66#include "iwl-dev.h"
67#include "iwl-agn-calib.h"
68#include "iwl-trans.h" 66#include "iwl-trans.h"
69#include "iwl-agn.h" 67
68#include "dev.h"
69#include "calib.h"
70#include "agn.h"
70 71
71/***************************************************************************** 72/*****************************************************************************
72 * INIT calibrations framework 73 * INIT calibrations framework
@@ -832,14 +833,14 @@ static void iwl_find_disconn_antenna(struct iwl_priv *priv, u32* average_sig,
832 * To be safe, simply mask out any chains that we know 833 * To be safe, simply mask out any chains that we know
833 * are not on the device. 834 * are not on the device.
834 */ 835 */
835 active_chains &= priv->hw_params.valid_rx_ant; 836 active_chains &= priv->eeprom_data->valid_rx_ant;
836 837
837 num_tx_chains = 0; 838 num_tx_chains = 0;
838 for (i = 0; i < NUM_RX_CHAINS; i++) { 839 for (i = 0; i < NUM_RX_CHAINS; i++) {
839 /* loops on all the bits of 840 /* loops on all the bits of
840 * priv->hw_setting.valid_tx_ant */ 841 * priv->hw_setting.valid_tx_ant */
841 u8 ant_msk = (1 << i); 842 u8 ant_msk = (1 << i);
842 if (!(priv->hw_params.valid_tx_ant & ant_msk)) 843 if (!(priv->eeprom_data->valid_tx_ant & ant_msk))
843 continue; 844 continue;
844 845
845 num_tx_chains++; 846 num_tx_chains++;
@@ -853,7 +854,7 @@ static void iwl_find_disconn_antenna(struct iwl_priv *priv, u32* average_sig,
853 * connect the first valid tx chain 854 * connect the first valid tx chain
854 */ 855 */
855 first_chain = 856 first_chain =
856 find_first_chain(priv->hw_params.valid_tx_ant); 857 find_first_chain(priv->eeprom_data->valid_tx_ant);
857 data->disconn_array[first_chain] = 0; 858 data->disconn_array[first_chain] = 0;
858 active_chains |= BIT(first_chain); 859 active_chains |= BIT(first_chain);
859 IWL_DEBUG_CALIB(priv, 860 IWL_DEBUG_CALIB(priv,
@@ -863,13 +864,13 @@ static void iwl_find_disconn_antenna(struct iwl_priv *priv, u32* average_sig,
863 } 864 }
864 } 865 }
865 866
866 if (active_chains != priv->hw_params.valid_rx_ant && 867 if (active_chains != priv->eeprom_data->valid_rx_ant &&
867 active_chains != priv->chain_noise_data.active_chains) 868 active_chains != priv->chain_noise_data.active_chains)
868 IWL_DEBUG_CALIB(priv, 869 IWL_DEBUG_CALIB(priv,
869 "Detected that not all antennas are connected! " 870 "Detected that not all antennas are connected! "
870 "Connected: %#x, valid: %#x.\n", 871 "Connected: %#x, valid: %#x.\n",
871 active_chains, 872 active_chains,
872 priv->hw_params.valid_rx_ant); 873 priv->eeprom_data->valid_rx_ant);
873 874
874 /* Save for use within RXON, TX, SCAN commands, etc. */ 875 /* Save for use within RXON, TX, SCAN commands, etc. */
875 data->active_chains = active_chains; 876 data->active_chains = active_chains;
@@ -1054,7 +1055,7 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv)
1054 priv->cfg->bt_params->advanced_bt_coexist) { 1055 priv->cfg->bt_params->advanced_bt_coexist) {
1055 /* Disable disconnected antenna algorithm for advanced 1056 /* Disable disconnected antenna algorithm for advanced
1056 bt coex, assuming valid antennas are connected */ 1057 bt coex, assuming valid antennas are connected */
1057 data->active_chains = priv->hw_params.valid_rx_ant; 1058 data->active_chains = priv->eeprom_data->valid_rx_ant;
1058 for (i = 0; i < NUM_RX_CHAINS; i++) 1059 for (i = 0; i < NUM_RX_CHAINS; i++)
1059 if (!(data->active_chains & (1<<i))) 1060 if (!(data->active_chains & (1<<i)))
1060 data->disconn_array[i] = 1; 1061 data->disconn_array[i] = 1;
@@ -1083,8 +1084,9 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv)
1083 IWL_DEBUG_CALIB(priv, "min_average_noise = %d, antenna %d\n", 1084 IWL_DEBUG_CALIB(priv, "min_average_noise = %d, antenna %d\n",
1084 min_average_noise, min_average_noise_antenna_i); 1085 min_average_noise, min_average_noise_antenna_i);
1085 1086
1086 iwlagn_gain_computation(priv, average_noise, 1087 iwlagn_gain_computation(
1087 find_first_chain(priv->hw_params.valid_rx_ant)); 1088 priv, average_noise,
1089 find_first_chain(priv->eeprom_data->valid_rx_ant));
1088 1090
1089 /* Some power changes may have been made during the calibration. 1091 /* Some power changes may have been made during the calibration.
1090 * Update and commit the RXON 1092 * Update and commit the RXON
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-calib.h b/drivers/net/wireless/iwlwifi/dvm/calib.h
index dbe13787f27..2349f393cc4 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-calib.h
+++ b/drivers/net/wireless/iwlwifi/dvm/calib.h
@@ -62,8 +62,8 @@
62#ifndef __iwl_calib_h__ 62#ifndef __iwl_calib_h__
63#define __iwl_calib_h__ 63#define __iwl_calib_h__
64 64
65#include "iwl-dev.h" 65#include "dev.h"
66#include "iwl-commands.h" 66#include "commands.h"
67 67
68void iwl_chain_noise_calibration(struct iwl_priv *priv); 68void iwl_chain_noise_calibration(struct iwl_priv *priv);
69void iwl_sensitivity_calibration(struct iwl_priv *priv); 69void iwl_sensitivity_calibration(struct iwl_priv *priv);
diff --git a/drivers/net/wireless/iwlwifi/iwl-commands.h b/drivers/net/wireless/iwlwifi/dvm/commands.h
index 9af6a239b38..64811cd9163 100644
--- a/drivers/net/wireless/iwlwifi/iwl-commands.h
+++ b/drivers/net/wireless/iwlwifi/dvm/commands.h
@@ -61,9 +61,9 @@
61 * 61 *
62 *****************************************************************************/ 62 *****************************************************************************/
63/* 63/*
64 * Please use this file (iwl-commands.h) only for uCode API definitions. 64 * Please use this file (commands.h) only for uCode API definitions.
65 * Please use iwl-xxxx-hw.h for hardware-related definitions. 65 * Please use iwl-xxxx-hw.h for hardware-related definitions.
66 * Please use iwl-dev.h for driver implementation definitions. 66 * Please use dev.h for driver implementation definitions.
67 */ 67 */
68 68
69#ifndef __iwl_commands_h__ 69#ifndef __iwl_commands_h__
@@ -197,9 +197,6 @@ enum {
197 * 197 *
198 *****************************************************************************/ 198 *****************************************************************************/
199 199
200/* iwl_cmd_header flags value */
201#define IWL_CMD_FAILED_MSK 0x40
202
203/** 200/**
204 * iwlagn rate_n_flags bit fields 201 * iwlagn rate_n_flags bit fields
205 * 202 *
diff --git a/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
index 7f97dec8534..b0eff1c340c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debugfs.c
+++ b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
@@ -30,16 +30,12 @@
30#include <linux/kernel.h> 30#include <linux/kernel.h>
31#include <linux/module.h> 31#include <linux/module.h>
32#include <linux/debugfs.h> 32#include <linux/debugfs.h>
33
34#include <linux/ieee80211.h> 33#include <linux/ieee80211.h>
35#include <net/mac80211.h> 34#include <net/mac80211.h>
36
37
38#include "iwl-dev.h"
39#include "iwl-debug.h" 35#include "iwl-debug.h"
40#include "iwl-io.h" 36#include "iwl-io.h"
41#include "iwl-agn.h" 37#include "dev.h"
42#include "iwl-modparams.h" 38#include "agn.h"
43 39
44/* create and remove of files */ 40/* create and remove of files */
45#define DEBUGFS_ADD_FILE(name, parent, mode) do { \ 41#define DEBUGFS_ADD_FILE(name, parent, mode) do { \
@@ -307,13 +303,13 @@ static ssize_t iwl_dbgfs_nvm_read(struct file *file,
307 const u8 *ptr; 303 const u8 *ptr;
308 char *buf; 304 char *buf;
309 u16 eeprom_ver; 305 u16 eeprom_ver;
310 size_t eeprom_len = priv->cfg->base_params->eeprom_size; 306 size_t eeprom_len = priv->eeprom_blob_size;
311 buf_size = 4 * eeprom_len + 256; 307 buf_size = 4 * eeprom_len + 256;
312 308
313 if (eeprom_len % 16) 309 if (eeprom_len % 16)
314 return -ENODATA; 310 return -ENODATA;
315 311
316 ptr = priv->eeprom; 312 ptr = priv->eeprom_blob;
317 if (!ptr) 313 if (!ptr)
318 return -ENOMEM; 314 return -ENOMEM;
319 315
@@ -322,11 +318,9 @@ static ssize_t iwl_dbgfs_nvm_read(struct file *file,
322 if (!buf) 318 if (!buf)
323 return -ENOMEM; 319 return -ENOMEM;
324 320
325 eeprom_ver = iwl_eeprom_query16(priv, EEPROM_VERSION); 321 eeprom_ver = priv->eeprom_data->eeprom_version;
326 pos += scnprintf(buf + pos, buf_size - pos, "NVM Type: %s, " 322 pos += scnprintf(buf + pos, buf_size - pos,
327 "version: 0x%x\n", 323 "NVM version: 0x%x\n", eeprom_ver);
328 (priv->nvm_device_type == NVM_DEVICE_TYPE_OTP)
329 ? "OTP" : "EEPROM", eeprom_ver);
330 for (ofs = 0 ; ofs < eeprom_len ; ofs += 16) { 324 for (ofs = 0 ; ofs < eeprom_len ; ofs += 16) {
331 pos += scnprintf(buf + pos, buf_size - pos, "0x%.4x ", ofs); 325 pos += scnprintf(buf + pos, buf_size - pos, "0x%.4x ", ofs);
332 hex_dump_to_buffer(ptr + ofs, 16 , 16, 2, buf + pos, 326 hex_dump_to_buffer(ptr + ofs, 16 , 16, 2, buf + pos,
@@ -351,9 +345,6 @@ static ssize_t iwl_dbgfs_channels_read(struct file *file, char __user *user_buf,
351 char *buf; 345 char *buf;
352 ssize_t ret; 346 ssize_t ret;
353 347
354 if (!test_bit(STATUS_GEO_CONFIGURED, &priv->status))
355 return -EAGAIN;
356
357 buf = kzalloc(bufsz, GFP_KERNEL); 348 buf = kzalloc(bufsz, GFP_KERNEL);
358 if (!buf) 349 if (!buf)
359 return -ENOMEM; 350 return -ENOMEM;
@@ -426,8 +417,6 @@ static ssize_t iwl_dbgfs_status_read(struct file *file,
426 test_bit(STATUS_ALIVE, &priv->status)); 417 test_bit(STATUS_ALIVE, &priv->status));
427 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_READY:\t\t %d\n", 418 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_READY:\t\t %d\n",
428 test_bit(STATUS_READY, &priv->status)); 419 test_bit(STATUS_READY, &priv->status));
429 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_GEO_CONFIGURED:\t %d\n",
430 test_bit(STATUS_GEO_CONFIGURED, &priv->status));
431 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_EXIT_PENDING:\t %d\n", 420 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_EXIT_PENDING:\t %d\n",
432 test_bit(STATUS_EXIT_PENDING, &priv->status)); 421 test_bit(STATUS_EXIT_PENDING, &priv->status));
433 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_STATISTICS:\t %d\n", 422 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_STATISTICS:\t %d\n",
@@ -1341,17 +1330,17 @@ static ssize_t iwl_dbgfs_ucode_tx_stats_read(struct file *file,
1341 if (tx->tx_power.ant_a || tx->tx_power.ant_b || tx->tx_power.ant_c) { 1330 if (tx->tx_power.ant_a || tx->tx_power.ant_b || tx->tx_power.ant_c) {
1342 pos += scnprintf(buf + pos, bufsz - pos, 1331 pos += scnprintf(buf + pos, bufsz - pos,
1343 "tx power: (1/2 dB step)\n"); 1332 "tx power: (1/2 dB step)\n");
1344 if ((priv->hw_params.valid_tx_ant & ANT_A) && 1333 if ((priv->eeprom_data->valid_tx_ant & ANT_A) &&
1345 tx->tx_power.ant_a) 1334 tx->tx_power.ant_a)
1346 pos += scnprintf(buf + pos, bufsz - pos, 1335 pos += scnprintf(buf + pos, bufsz - pos,
1347 fmt_hex, "antenna A:", 1336 fmt_hex, "antenna A:",
1348 tx->tx_power.ant_a); 1337 tx->tx_power.ant_a);
1349 if ((priv->hw_params.valid_tx_ant & ANT_B) && 1338 if ((priv->eeprom_data->valid_tx_ant & ANT_B) &&
1350 tx->tx_power.ant_b) 1339 tx->tx_power.ant_b)
1351 pos += scnprintf(buf + pos, bufsz - pos, 1340 pos += scnprintf(buf + pos, bufsz - pos,
1352 fmt_hex, "antenna B:", 1341 fmt_hex, "antenna B:",
1353 tx->tx_power.ant_b); 1342 tx->tx_power.ant_b);
1354 if ((priv->hw_params.valid_tx_ant & ANT_C) && 1343 if ((priv->eeprom_data->valid_tx_ant & ANT_C) &&
1355 tx->tx_power.ant_c) 1344 tx->tx_power.ant_c)
1356 pos += scnprintf(buf + pos, bufsz - pos, 1345 pos += scnprintf(buf + pos, bufsz - pos,
1357 fmt_hex, "antenna C:", 1346 fmt_hex, "antenna C:",
diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/dvm/dev.h
index 70062379d0e..54cf085ddc8 100644
--- a/drivers/net/wireless/iwlwifi/iwl-dev.h
+++ b/drivers/net/wireless/iwlwifi/dvm/dev.h
@@ -24,8 +24,8 @@
24 * 24 *
25 *****************************************************************************/ 25 *****************************************************************************/
26/* 26/*
27 * Please use this file (iwl-dev.h) for driver implementation definitions. 27 * Please use this file (dev.h) for driver implementation definitions.
28 * Please use iwl-commands.h for uCode API definitions. 28 * Please use commands.h for uCode API definitions.
29 */ 29 */
30 30
31#ifndef __iwl_dev_h__ 31#ifndef __iwl_dev_h__
@@ -39,17 +39,20 @@
39#include <linux/mutex.h> 39#include <linux/mutex.h>
40 40
41#include "iwl-fw.h" 41#include "iwl-fw.h"
42#include "iwl-eeprom.h" 42#include "iwl-eeprom-parse.h"
43#include "iwl-csr.h" 43#include "iwl-csr.h"
44#include "iwl-debug.h" 44#include "iwl-debug.h"
45#include "iwl-agn-hw.h" 45#include "iwl-agn-hw.h"
46#include "iwl-led.h"
47#include "iwl-power.h"
48#include "iwl-agn-rs.h"
49#include "iwl-agn-tt.h"
50#include "iwl-trans.h"
51#include "iwl-op-mode.h" 46#include "iwl-op-mode.h"
52#include "iwl-notif-wait.h" 47#include "iwl-notif-wait.h"
48#include "iwl-trans.h"
49
50#include "led.h"
51#include "power.h"
52#include "rs.h"
53#include "tt.h"
54
55#include "iwl-test.h"
53 56
54/* CT-KILL constants */ 57/* CT-KILL constants */
55#define CT_KILL_THRESHOLD_LEGACY 110 /* in Celsius */ 58#define CT_KILL_THRESHOLD_LEGACY 110 /* in Celsius */
@@ -88,33 +91,6 @@
88#define IWL_NUM_SCAN_RATES (2) 91#define IWL_NUM_SCAN_RATES (2)
89 92
90/* 93/*
91 * One for each channel, holds all channel setup data
92 * Some of the fields (e.g. eeprom and flags/max_power_avg) are redundant
93 * with one another!
94 */
95struct iwl_channel_info {
96 struct iwl_eeprom_channel eeprom; /* EEPROM regulatory limit */
97 struct iwl_eeprom_channel ht40_eeprom; /* EEPROM regulatory limit for
98 * HT40 channel */
99
100 u8 channel; /* channel number */
101 u8 flags; /* flags copied from EEPROM */
102 s8 max_power_avg; /* (dBm) regul. eeprom, normal Tx, any rate */
103 s8 curr_txpow; /* (dBm) regulatory/spectrum/user (not h/w) limit */
104 s8 min_power; /* always 0 */
105 s8 scan_power; /* (dBm) regul. eeprom, direct scans, any rate */
106
107 u8 group_index; /* 0-4, maps channel to group1/2/3/4/5 */
108 u8 band_index; /* 0-4, maps channel to band1/2/3/4/5 */
109 enum ieee80211_band band;
110
111 /* HT40 channel info */
112 s8 ht40_max_power_avg; /* (dBm) regul. eeprom, normal Tx, any rate */
113 u8 ht40_flags; /* flags copied from EEPROM */
114 u8 ht40_extension_channel; /* HT_IE_EXT_CHANNEL_* */
115};
116
117/*
118 * Minimum number of queues. MAX_NUM is defined in hw specific files. 94 * Minimum number of queues. MAX_NUM is defined in hw specific files.
119 * Set the minimum to accommodate 95 * Set the minimum to accommodate
120 * - 4 standard TX queues 96 * - 4 standard TX queues
@@ -153,29 +129,6 @@ union iwl_ht_rate_supp {
153 }; 129 };
154}; 130};
155 131
156#define CFG_HT_RX_AMPDU_FACTOR_8K (0x0)
157#define CFG_HT_RX_AMPDU_FACTOR_16K (0x1)
158#define CFG_HT_RX_AMPDU_FACTOR_32K (0x2)
159#define CFG_HT_RX_AMPDU_FACTOR_64K (0x3)
160#define CFG_HT_RX_AMPDU_FACTOR_DEF CFG_HT_RX_AMPDU_FACTOR_64K
161#define CFG_HT_RX_AMPDU_FACTOR_MAX CFG_HT_RX_AMPDU_FACTOR_64K
162#define CFG_HT_RX_AMPDU_FACTOR_MIN CFG_HT_RX_AMPDU_FACTOR_8K
163
164/*
165 * Maximal MPDU density for TX aggregation
166 * 4 - 2us density
167 * 5 - 4us density
168 * 6 - 8us density
169 * 7 - 16us density
170 */
171#define CFG_HT_MPDU_DENSITY_2USEC (0x4)
172#define CFG_HT_MPDU_DENSITY_4USEC (0x5)
173#define CFG_HT_MPDU_DENSITY_8USEC (0x6)
174#define CFG_HT_MPDU_DENSITY_16USEC (0x7)
175#define CFG_HT_MPDU_DENSITY_DEF CFG_HT_MPDU_DENSITY_4USEC
176#define CFG_HT_MPDU_DENSITY_MAX CFG_HT_MPDU_DENSITY_16USEC
177#define CFG_HT_MPDU_DENSITY_MIN (0x1)
178
179struct iwl_ht_config { 132struct iwl_ht_config {
180 bool single_chain_sufficient; 133 bool single_chain_sufficient;
181 enum ieee80211_smps_mode smps; /* current smps mode */ 134 enum ieee80211_smps_mode smps; /* current smps mode */
@@ -445,23 +398,6 @@ enum {
445 MEASUREMENT_ACTIVE = (1 << 1), 398 MEASUREMENT_ACTIVE = (1 << 1),
446}; 399};
447 400
448enum iwl_nvm_type {
449 NVM_DEVICE_TYPE_EEPROM = 0,
450 NVM_DEVICE_TYPE_OTP,
451};
452
453/*
454 * Two types of OTP memory access modes
455 * IWL_OTP_ACCESS_ABSOLUTE - absolute address mode,
456 * based on physical memory addressing
457 * IWL_OTP_ACCESS_RELATIVE - relative address mode,
458 * based on logical memory addressing
459 */
460enum iwl_access_mode {
461 IWL_OTP_ACCESS_ABSOLUTE,
462 IWL_OTP_ACCESS_RELATIVE,
463};
464
465/* reply_tx_statistics (for _agn devices) */ 401/* reply_tx_statistics (for _agn devices) */
466struct reply_tx_error_statistics { 402struct reply_tx_error_statistics {
467 u32 pp_delay; 403 u32 pp_delay;
@@ -632,10 +568,6 @@ enum iwl_scan_type {
632 * 568 *
633 * @tx_chains_num: Number of TX chains 569 * @tx_chains_num: Number of TX chains
634 * @rx_chains_num: Number of RX chains 570 * @rx_chains_num: Number of RX chains
635 * @valid_tx_ant: usable antennas for TX
636 * @valid_rx_ant: usable antennas for RX
637 * @ht40_channel: is 40MHz width possible: BIT(IEEE80211_BAND_XXX)
638 * @sku: sku read from EEPROM
639 * @ct_kill_threshold: temperature threshold - in hw dependent unit 571 * @ct_kill_threshold: temperature threshold - in hw dependent unit
640 * @ct_kill_exit_threshold: when to reeable the device - in hw dependent unit 572 * @ct_kill_exit_threshold: when to reeable the device - in hw dependent unit
641 * relevant for 1000, 6000 and up 573 * relevant for 1000, 6000 and up
@@ -645,11 +577,7 @@ enum iwl_scan_type {
645struct iwl_hw_params { 577struct iwl_hw_params {
646 u8 tx_chains_num; 578 u8 tx_chains_num;
647 u8 rx_chains_num; 579 u8 rx_chains_num;
648 u8 valid_tx_ant;
649 u8 valid_rx_ant;
650 u8 ht40_channel;
651 bool use_rts_for_aggregation; 580 bool use_rts_for_aggregation;
652 u16 sku;
653 u32 ct_kill_threshold; 581 u32 ct_kill_threshold;
654 u32 ct_kill_exit_threshold; 582 u32 ct_kill_exit_threshold;
655 583
@@ -664,31 +592,10 @@ struct iwl_lib_ops {
664 /* device specific configuration */ 592 /* device specific configuration */
665 void (*nic_config)(struct iwl_priv *priv); 593 void (*nic_config)(struct iwl_priv *priv);
666 594
667 /* eeprom operations (as defined in iwl-eeprom.h) */
668 struct iwl_eeprom_ops eeprom_ops;
669
670 /* temperature */ 595 /* temperature */
671 void (*temperature)(struct iwl_priv *priv); 596 void (*temperature)(struct iwl_priv *priv);
672}; 597};
673 598
674#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
675struct iwl_testmode_trace {
676 u32 buff_size;
677 u32 total_size;
678 u32 num_chunks;
679 u8 *cpu_addr;
680 u8 *trace_addr;
681 dma_addr_t dma_addr;
682 bool trace_enabled;
683};
684struct iwl_testmode_mem {
685 u32 buff_size;
686 u32 num_chunks;
687 u8 *buff_addr;
688 bool read_in_progress;
689};
690#endif
691
692struct iwl_wipan_noa_data { 599struct iwl_wipan_noa_data {
693 struct rcu_head rcu_head; 600 struct rcu_head rcu_head;
694 u32 length; 601 u32 length;
@@ -735,8 +642,6 @@ struct iwl_priv {
735 642
736 /* ieee device used by generic ieee processing code */ 643 /* ieee device used by generic ieee processing code */
737 struct ieee80211_hw *hw; 644 struct ieee80211_hw *hw;
738 struct ieee80211_channel *ieee_channels;
739 struct ieee80211_rate *ieee_rates;
740 645
741 struct list_head calib_results; 646 struct list_head calib_results;
742 647
@@ -747,16 +652,12 @@ struct iwl_priv {
747 enum ieee80211_band band; 652 enum ieee80211_band band;
748 u8 valid_contexts; 653 u8 valid_contexts;
749 654
750 void (*pre_rx_handler)(struct iwl_priv *priv,
751 struct iwl_rx_cmd_buffer *rxb);
752 int (*rx_handlers[REPLY_MAX])(struct iwl_priv *priv, 655 int (*rx_handlers[REPLY_MAX])(struct iwl_priv *priv,
753 struct iwl_rx_cmd_buffer *rxb, 656 struct iwl_rx_cmd_buffer *rxb,
754 struct iwl_device_cmd *cmd); 657 struct iwl_device_cmd *cmd);
755 658
756 struct iwl_notif_wait_data notif_wait; 659 struct iwl_notif_wait_data notif_wait;
757 660
758 struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS];
759
760 /* spectrum measurement report caching */ 661 /* spectrum measurement report caching */
761 struct iwl_spectrum_notification measure_report; 662 struct iwl_spectrum_notification measure_report;
762 u8 measurement_status; 663 u8 measurement_status;
@@ -787,11 +688,6 @@ struct iwl_priv {
787 bool ucode_loaded; 688 bool ucode_loaded;
788 bool init_ucode_run; /* Don't run init uCode again */ 689 bool init_ucode_run; /* Don't run init uCode again */
789 690
790 /* we allocate array of iwl_channel_info for NIC's valid channels.
791 * Access via channel # using indirect index array */
792 struct iwl_channel_info *channel_info; /* channel info array */
793 u8 channel_count; /* # of channels */
794
795 u8 plcp_delta_threshold; 691 u8 plcp_delta_threshold;
796 692
797 /* thermal calibration */ 693 /* thermal calibration */
@@ -846,6 +742,7 @@ struct iwl_priv {
846 struct iwl_station_entry stations[IWLAGN_STATION_COUNT]; 742 struct iwl_station_entry stations[IWLAGN_STATION_COUNT];
847 unsigned long ucode_key_table; 743 unsigned long ucode_key_table;
848 struct iwl_tid_data tid_data[IWLAGN_STATION_COUNT][IWL_MAX_TID_COUNT]; 744 struct iwl_tid_data tid_data[IWLAGN_STATION_COUNT][IWL_MAX_TID_COUNT];
745 atomic_t num_aux_in_flight;
849 746
850 u8 mac80211_registered; 747 u8 mac80211_registered;
851 748
@@ -950,10 +847,8 @@ struct iwl_priv {
950 847
951 struct delayed_work scan_check; 848 struct delayed_work scan_check;
952 849
953 /* TX Power */ 850 /* TX Power settings */
954 s8 tx_power_user_lmt; 851 s8 tx_power_user_lmt;
955 s8 tx_power_device_lmt;
956 s8 tx_power_lmt_in_half_dbm; /* max tx power in half-dBm format */
957 s8 tx_power_next; 852 s8 tx_power_next;
958 853
959#ifdef CONFIG_IWLWIFI_DEBUGFS 854#ifdef CONFIG_IWLWIFI_DEBUGFS
@@ -964,9 +859,10 @@ struct iwl_priv {
964 void *wowlan_sram; 859 void *wowlan_sram;
965#endif /* CONFIG_IWLWIFI_DEBUGFS */ 860#endif /* CONFIG_IWLWIFI_DEBUGFS */
966 861
967 /* eeprom -- this is in the card's little endian byte order */ 862 struct iwl_eeprom_data *eeprom_data;
968 u8 *eeprom; 863 /* eeprom blob for debugfs/testmode */
969 enum iwl_nvm_type nvm_device_type; 864 u8 *eeprom_blob;
865 size_t eeprom_blob_size;
970 866
971 struct work_struct txpower_work; 867 struct work_struct txpower_work;
972 u32 calib_disabled; 868 u32 calib_disabled;
@@ -979,9 +875,9 @@ struct iwl_priv {
979 struct led_classdev led; 875 struct led_classdev led;
980 unsigned long blink_on, blink_off; 876 unsigned long blink_on, blink_off;
981 bool led_registered; 877 bool led_registered;
878
982#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE 879#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
983 struct iwl_testmode_trace testmode_trace; 880 struct iwl_test tst;
984 struct iwl_testmode_mem testmode_mem;
985 u32 tm_fixed_rate; 881 u32 tm_fixed_rate;
986#endif 882#endif
987 883
@@ -1001,8 +897,6 @@ struct iwl_priv {
1001 enum iwl_ucode_type cur_ucode; 897 enum iwl_ucode_type cur_ucode;
1002}; /*iwl_priv */ 898}; /*iwl_priv */
1003 899
1004extern struct kmem_cache *iwl_tx_cmd_pool;
1005
1006static inline struct iwl_rxon_context * 900static inline struct iwl_rxon_context *
1007iwl_rxon_ctx_from_vif(struct ieee80211_vif *vif) 901iwl_rxon_ctx_from_vif(struct ieee80211_vif *vif)
1008{ 902{
@@ -1036,36 +930,4 @@ static inline int iwl_is_any_associated(struct iwl_priv *priv)
1036 return false; 930 return false;
1037} 931}
1038 932
1039static inline int is_channel_valid(const struct iwl_channel_info *ch_info)
1040{
1041 if (ch_info == NULL)
1042 return 0;
1043 return (ch_info->flags & EEPROM_CHANNEL_VALID) ? 1 : 0;
1044}
1045
1046static inline int is_channel_radar(const struct iwl_channel_info *ch_info)
1047{
1048 return (ch_info->flags & EEPROM_CHANNEL_RADAR) ? 1 : 0;
1049}
1050
1051static inline u8 is_channel_a_band(const struct iwl_channel_info *ch_info)
1052{
1053 return ch_info->band == IEEE80211_BAND_5GHZ;
1054}
1055
1056static inline u8 is_channel_bg_band(const struct iwl_channel_info *ch_info)
1057{
1058 return ch_info->band == IEEE80211_BAND_2GHZ;
1059}
1060
1061static inline int is_channel_passive(const struct iwl_channel_info *ch)
1062{
1063 return (!(ch->flags & EEPROM_CHANNEL_ACTIVE)) ? 1 : 0;
1064}
1065
1066static inline int is_channel_ibss(const struct iwl_channel_info *ch)
1067{
1068 return ((ch->flags & EEPROM_CHANNEL_IBSS)) ? 1 : 0;
1069}
1070
1071#endif /* __iwl_dev_h__ */ 933#endif /* __iwl_dev_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-devices.c b/drivers/net/wireless/iwlwifi/dvm/devices.c
index 48533b3a0f9..349c205d5f6 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-devices.c
+++ b/drivers/net/wireless/iwlwifi/dvm/devices.c
@@ -27,11 +27,14 @@
27/* 27/*
28 * DVM device-specific data & functions 28 * DVM device-specific data & functions
29 */ 29 */
30#include "iwl-agn.h"
31#include "iwl-dev.h"
32#include "iwl-commands.h"
33#include "iwl-io.h" 30#include "iwl-io.h"
34#include "iwl-prph.h" 31#include "iwl-prph.h"
32#include "iwl-eeprom-parse.h"
33
34#include "agn.h"
35#include "dev.h"
36#include "commands.h"
37
35 38
36/* 39/*
37 * 1000 series 40 * 1000 series
@@ -58,11 +61,6 @@ static void iwl1000_set_ct_threshold(struct iwl_priv *priv)
58/* NIC configuration for 1000 series */ 61/* NIC configuration for 1000 series */
59static void iwl1000_nic_config(struct iwl_priv *priv) 62static void iwl1000_nic_config(struct iwl_priv *priv)
60{ 63{
61 /* set CSR_HW_CONFIG_REG for uCode use */
62 iwl_set_bit(priv->trans, CSR_HW_IF_CONFIG_REG,
63 CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
64 CSR_HW_IF_CONFIG_REG_BIT_MAC_SI);
65
66 /* Setting digital SVR for 1000 card to 1.32V */ 64 /* Setting digital SVR for 1000 card to 1.32V */
67 /* locking is acquired in iwl_set_bits_mask_prph() function */ 65 /* locking is acquired in iwl_set_bits_mask_prph() function */
68 iwl_set_bits_mask_prph(priv->trans, APMG_DIGITAL_SVR_REG, 66 iwl_set_bits_mask_prph(priv->trans, APMG_DIGITAL_SVR_REG,
@@ -170,16 +168,6 @@ static const struct iwl_sensitivity_ranges iwl1000_sensitivity = {
170 168
171static void iwl1000_hw_set_hw_params(struct iwl_priv *priv) 169static void iwl1000_hw_set_hw_params(struct iwl_priv *priv)
172{ 170{
173 priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ);
174
175 priv->hw_params.tx_chains_num =
176 num_of_ant(priv->hw_params.valid_tx_ant);
177 if (priv->cfg->rx_with_siso_diversity)
178 priv->hw_params.rx_chains_num = 1;
179 else
180 priv->hw_params.rx_chains_num =
181 num_of_ant(priv->hw_params.valid_rx_ant);
182
183 iwl1000_set_ct_threshold(priv); 171 iwl1000_set_ct_threshold(priv);
184 172
185 /* Set initial sensitivity parameters */ 173 /* Set initial sensitivity parameters */
@@ -189,17 +177,6 @@ static void iwl1000_hw_set_hw_params(struct iwl_priv *priv)
189struct iwl_lib_ops iwl1000_lib = { 177struct iwl_lib_ops iwl1000_lib = {
190 .set_hw_params = iwl1000_hw_set_hw_params, 178 .set_hw_params = iwl1000_hw_set_hw_params,
191 .nic_config = iwl1000_nic_config, 179 .nic_config = iwl1000_nic_config,
192 .eeprom_ops = {
193 .regulatory_bands = {
194 EEPROM_REG_BAND_1_CHANNELS,
195 EEPROM_REG_BAND_2_CHANNELS,
196 EEPROM_REG_BAND_3_CHANNELS,
197 EEPROM_REG_BAND_4_CHANNELS,
198 EEPROM_REG_BAND_5_CHANNELS,
199 EEPROM_REG_BAND_24_HT40_CHANNELS,
200 EEPROM_REGULATORY_BAND_NO_HT40,
201 },
202 },
203 .temperature = iwlagn_temperature, 180 .temperature = iwlagn_temperature,
204}; 181};
205 182
@@ -219,8 +196,6 @@ static void iwl2000_set_ct_threshold(struct iwl_priv *priv)
219/* NIC configuration for 2000 series */ 196/* NIC configuration for 2000 series */
220static void iwl2000_nic_config(struct iwl_priv *priv) 197static void iwl2000_nic_config(struct iwl_priv *priv)
221{ 198{
222 iwl_rf_config(priv);
223
224 iwl_set_bit(priv->trans, CSR_GP_DRIVER_REG, 199 iwl_set_bit(priv->trans, CSR_GP_DRIVER_REG,
225 CSR_GP_DRIVER_REG_BIT_RADIO_IQ_INVER); 200 CSR_GP_DRIVER_REG_BIT_RADIO_IQ_INVER);
226} 201}
@@ -251,16 +226,6 @@ static const struct iwl_sensitivity_ranges iwl2000_sensitivity = {
251 226
252static void iwl2000_hw_set_hw_params(struct iwl_priv *priv) 227static void iwl2000_hw_set_hw_params(struct iwl_priv *priv)
253{ 228{
254 priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ);
255
256 priv->hw_params.tx_chains_num =
257 num_of_ant(priv->hw_params.valid_tx_ant);
258 if (priv->cfg->rx_with_siso_diversity)
259 priv->hw_params.rx_chains_num = 1;
260 else
261 priv->hw_params.rx_chains_num =
262 num_of_ant(priv->hw_params.valid_rx_ant);
263
264 iwl2000_set_ct_threshold(priv); 229 iwl2000_set_ct_threshold(priv);
265 230
266 /* Set initial sensitivity parameters */ 231 /* Set initial sensitivity parameters */
@@ -270,36 +235,12 @@ static void iwl2000_hw_set_hw_params(struct iwl_priv *priv)
270struct iwl_lib_ops iwl2000_lib = { 235struct iwl_lib_ops iwl2000_lib = {
271 .set_hw_params = iwl2000_hw_set_hw_params, 236 .set_hw_params = iwl2000_hw_set_hw_params,
272 .nic_config = iwl2000_nic_config, 237 .nic_config = iwl2000_nic_config,
273 .eeprom_ops = {
274 .regulatory_bands = {
275 EEPROM_REG_BAND_1_CHANNELS,
276 EEPROM_REG_BAND_2_CHANNELS,
277 EEPROM_REG_BAND_3_CHANNELS,
278 EEPROM_REG_BAND_4_CHANNELS,
279 EEPROM_REG_BAND_5_CHANNELS,
280 EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
281 EEPROM_REGULATORY_BAND_NO_HT40,
282 },
283 .enhanced_txpower = true,
284 },
285 .temperature = iwlagn_temperature, 238 .temperature = iwlagn_temperature,
286}; 239};
287 240
288struct iwl_lib_ops iwl2030_lib = { 241struct iwl_lib_ops iwl2030_lib = {
289 .set_hw_params = iwl2000_hw_set_hw_params, 242 .set_hw_params = iwl2000_hw_set_hw_params,
290 .nic_config = iwl2000_nic_config, 243 .nic_config = iwl2000_nic_config,
291 .eeprom_ops = {
292 .regulatory_bands = {
293 EEPROM_REG_BAND_1_CHANNELS,
294 EEPROM_REG_BAND_2_CHANNELS,
295 EEPROM_REG_BAND_3_CHANNELS,
296 EEPROM_REG_BAND_4_CHANNELS,
297 EEPROM_REG_BAND_5_CHANNELS,
298 EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
299 EEPROM_REGULATORY_BAND_NO_HT40,
300 },
301 .enhanced_txpower = true,
302 },
303 .temperature = iwlagn_temperature, 244 .temperature = iwlagn_temperature,
304}; 245};
305 246
@@ -309,19 +250,6 @@ struct iwl_lib_ops iwl2030_lib = {
309 */ 250 */
310 251
311/* NIC configuration for 5000 series */ 252/* NIC configuration for 5000 series */
312static void iwl5000_nic_config(struct iwl_priv *priv)
313{
314 iwl_rf_config(priv);
315
316 /* W/A : NIC is stuck in a reset state after Early PCIe power off
317 * (PCIe power is lost before PERST# is asserted),
318 * causing ME FW to lose ownership and not being able to obtain it back.
319 */
320 iwl_set_bits_mask_prph(priv->trans, APMG_PS_CTRL_REG,
321 APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
322 ~APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
323}
324
325static const struct iwl_sensitivity_ranges iwl5000_sensitivity = { 253static const struct iwl_sensitivity_ranges iwl5000_sensitivity = {
326 .min_nrg_cck = 100, 254 .min_nrg_cck = 100,
327 .auto_corr_min_ofdm = 90, 255 .auto_corr_min_ofdm = 90,
@@ -376,11 +304,9 @@ static struct iwl_sensitivity_ranges iwl5150_sensitivity = {
376static s32 iwl_temp_calib_to_offset(struct iwl_priv *priv) 304static s32 iwl_temp_calib_to_offset(struct iwl_priv *priv)
377{ 305{
378 u16 temperature, voltage; 306 u16 temperature, voltage;
379 __le16 *temp_calib = (__le16 *)iwl_eeprom_query_addr(priv,
380 EEPROM_KELVIN_TEMPERATURE);
381 307
382 temperature = le16_to_cpu(temp_calib[0]); 308 temperature = le16_to_cpu(priv->eeprom_data->kelvin_temperature);
383 voltage = le16_to_cpu(temp_calib[1]); 309 voltage = le16_to_cpu(priv->eeprom_data->kelvin_voltage);
384 310
385 /* offset = temp - volt / coeff */ 311 /* offset = temp - volt / coeff */
386 return (s32)(temperature - 312 return (s32)(temperature -
@@ -404,14 +330,6 @@ static void iwl5000_set_ct_threshold(struct iwl_priv *priv)
404 330
405static void iwl5000_hw_set_hw_params(struct iwl_priv *priv) 331static void iwl5000_hw_set_hw_params(struct iwl_priv *priv)
406{ 332{
407 priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ) |
408 BIT(IEEE80211_BAND_5GHZ);
409
410 priv->hw_params.tx_chains_num =
411 num_of_ant(priv->hw_params.valid_tx_ant);
412 priv->hw_params.rx_chains_num =
413 num_of_ant(priv->hw_params.valid_rx_ant);
414
415 iwl5000_set_ct_threshold(priv); 333 iwl5000_set_ct_threshold(priv);
416 334
417 /* Set initial sensitivity parameters */ 335 /* Set initial sensitivity parameters */
@@ -420,14 +338,6 @@ static void iwl5000_hw_set_hw_params(struct iwl_priv *priv)
420 338
421static void iwl5150_hw_set_hw_params(struct iwl_priv *priv) 339static void iwl5150_hw_set_hw_params(struct iwl_priv *priv)
422{ 340{
423 priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ) |
424 BIT(IEEE80211_BAND_5GHZ);
425
426 priv->hw_params.tx_chains_num =
427 num_of_ant(priv->hw_params.valid_tx_ant);
428 priv->hw_params.rx_chains_num =
429 num_of_ant(priv->hw_params.valid_rx_ant);
430
431 iwl5150_set_ct_threshold(priv); 341 iwl5150_set_ct_threshold(priv);
432 342
433 /* Set initial sensitivity parameters */ 343 /* Set initial sensitivity parameters */
@@ -455,7 +365,6 @@ static int iwl5000_hw_channel_switch(struct iwl_priv *priv,
455 */ 365 */
456 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; 366 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
457 struct iwl5000_channel_switch_cmd cmd; 367 struct iwl5000_channel_switch_cmd cmd;
458 const struct iwl_channel_info *ch_info;
459 u32 switch_time_in_usec, ucode_switch_time; 368 u32 switch_time_in_usec, ucode_switch_time;
460 u16 ch; 369 u16 ch;
461 u32 tsf_low; 370 u32 tsf_low;
@@ -505,14 +414,7 @@ static int iwl5000_hw_channel_switch(struct iwl_priv *priv,
505 } 414 }
506 IWL_DEBUG_11H(priv, "uCode time for the switch is 0x%x\n", 415 IWL_DEBUG_11H(priv, "uCode time for the switch is 0x%x\n",
507 cmd.switch_time); 416 cmd.switch_time);
508 ch_info = iwl_get_channel_info(priv, priv->band, ch); 417 cmd.expect_beacon = ch_switch->channel->flags & IEEE80211_CHAN_RADAR;
509 if (ch_info)
510 cmd.expect_beacon = is_channel_radar(ch_info);
511 else {
512 IWL_ERR(priv, "invalid channel switch from %u to %u\n",
513 ctx->active.channel, ch);
514 return -EFAULT;
515 }
516 418
517 return iwl_dvm_send_cmd(priv, &hcmd); 419 return iwl_dvm_send_cmd(priv, &hcmd);
518} 420}
@@ -520,36 +422,12 @@ static int iwl5000_hw_channel_switch(struct iwl_priv *priv,
520struct iwl_lib_ops iwl5000_lib = { 422struct iwl_lib_ops iwl5000_lib = {
521 .set_hw_params = iwl5000_hw_set_hw_params, 423 .set_hw_params = iwl5000_hw_set_hw_params,
522 .set_channel_switch = iwl5000_hw_channel_switch, 424 .set_channel_switch = iwl5000_hw_channel_switch,
523 .nic_config = iwl5000_nic_config,
524 .eeprom_ops = {
525 .regulatory_bands = {
526 EEPROM_REG_BAND_1_CHANNELS,
527 EEPROM_REG_BAND_2_CHANNELS,
528 EEPROM_REG_BAND_3_CHANNELS,
529 EEPROM_REG_BAND_4_CHANNELS,
530 EEPROM_REG_BAND_5_CHANNELS,
531 EEPROM_REG_BAND_24_HT40_CHANNELS,
532 EEPROM_REG_BAND_52_HT40_CHANNELS
533 },
534 },
535 .temperature = iwlagn_temperature, 425 .temperature = iwlagn_temperature,
536}; 426};
537 427
538struct iwl_lib_ops iwl5150_lib = { 428struct iwl_lib_ops iwl5150_lib = {
539 .set_hw_params = iwl5150_hw_set_hw_params, 429 .set_hw_params = iwl5150_hw_set_hw_params,
540 .set_channel_switch = iwl5000_hw_channel_switch, 430 .set_channel_switch = iwl5000_hw_channel_switch,
541 .nic_config = iwl5000_nic_config,
542 .eeprom_ops = {
543 .regulatory_bands = {
544 EEPROM_REG_BAND_1_CHANNELS,
545 EEPROM_REG_BAND_2_CHANNELS,
546 EEPROM_REG_BAND_3_CHANNELS,
547 EEPROM_REG_BAND_4_CHANNELS,
548 EEPROM_REG_BAND_5_CHANNELS,
549 EEPROM_REG_BAND_24_HT40_CHANNELS,
550 EEPROM_REG_BAND_52_HT40_CHANNELS
551 },
552 },
553 .temperature = iwl5150_temperature, 431 .temperature = iwl5150_temperature,
554}; 432};
555 433
@@ -570,8 +448,6 @@ static void iwl6000_set_ct_threshold(struct iwl_priv *priv)
570/* NIC configuration for 6000 series */ 448/* NIC configuration for 6000 series */
571static void iwl6000_nic_config(struct iwl_priv *priv) 449static void iwl6000_nic_config(struct iwl_priv *priv)
572{ 450{
573 iwl_rf_config(priv);
574
575 switch (priv->cfg->device_family) { 451 switch (priv->cfg->device_family) {
576 case IWL_DEVICE_FAMILY_6005: 452 case IWL_DEVICE_FAMILY_6005:
577 case IWL_DEVICE_FAMILY_6030: 453 case IWL_DEVICE_FAMILY_6030:
@@ -584,13 +460,13 @@ static void iwl6000_nic_config(struct iwl_priv *priv)
584 break; 460 break;
585 case IWL_DEVICE_FAMILY_6050: 461 case IWL_DEVICE_FAMILY_6050:
586 /* Indicate calibration version to uCode. */ 462 /* Indicate calibration version to uCode. */
587 if (iwl_eeprom_calib_version(priv) >= 6) 463 if (priv->eeprom_data->calib_version >= 6)
588 iwl_set_bit(priv->trans, CSR_GP_DRIVER_REG, 464 iwl_set_bit(priv->trans, CSR_GP_DRIVER_REG,
589 CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6); 465 CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6);
590 break; 466 break;
591 case IWL_DEVICE_FAMILY_6150: 467 case IWL_DEVICE_FAMILY_6150:
592 /* Indicate calibration version to uCode. */ 468 /* Indicate calibration version to uCode. */
593 if (iwl_eeprom_calib_version(priv) >= 6) 469 if (priv->eeprom_data->calib_version >= 6)
594 iwl_set_bit(priv->trans, CSR_GP_DRIVER_REG, 470 iwl_set_bit(priv->trans, CSR_GP_DRIVER_REG,
595 CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6); 471 CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6);
596 iwl_set_bit(priv->trans, CSR_GP_DRIVER_REG, 472 iwl_set_bit(priv->trans, CSR_GP_DRIVER_REG,
@@ -627,17 +503,6 @@ static const struct iwl_sensitivity_ranges iwl6000_sensitivity = {
627 503
628static void iwl6000_hw_set_hw_params(struct iwl_priv *priv) 504static void iwl6000_hw_set_hw_params(struct iwl_priv *priv)
629{ 505{
630 priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ) |
631 BIT(IEEE80211_BAND_5GHZ);
632
633 priv->hw_params.tx_chains_num =
634 num_of_ant(priv->hw_params.valid_tx_ant);
635 if (priv->cfg->rx_with_siso_diversity)
636 priv->hw_params.rx_chains_num = 1;
637 else
638 priv->hw_params.rx_chains_num =
639 num_of_ant(priv->hw_params.valid_rx_ant);
640
641 iwl6000_set_ct_threshold(priv); 506 iwl6000_set_ct_threshold(priv);
642 507
643 /* Set initial sensitivity parameters */ 508 /* Set initial sensitivity parameters */
@@ -654,7 +519,6 @@ static int iwl6000_hw_channel_switch(struct iwl_priv *priv,
654 */ 519 */
655 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; 520 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
656 struct iwl6000_channel_switch_cmd cmd; 521 struct iwl6000_channel_switch_cmd cmd;
657 const struct iwl_channel_info *ch_info;
658 u32 switch_time_in_usec, ucode_switch_time; 522 u32 switch_time_in_usec, ucode_switch_time;
659 u16 ch; 523 u16 ch;
660 u32 tsf_low; 524 u32 tsf_low;
@@ -704,14 +568,7 @@ static int iwl6000_hw_channel_switch(struct iwl_priv *priv,
704 } 568 }
705 IWL_DEBUG_11H(priv, "uCode time for the switch is 0x%x\n", 569 IWL_DEBUG_11H(priv, "uCode time for the switch is 0x%x\n",
706 cmd.switch_time); 570 cmd.switch_time);
707 ch_info = iwl_get_channel_info(priv, priv->band, ch); 571 cmd.expect_beacon = ch_switch->channel->flags & IEEE80211_CHAN_RADAR;
708 if (ch_info)
709 cmd.expect_beacon = is_channel_radar(ch_info);
710 else {
711 IWL_ERR(priv, "invalid channel switch from %u to %u\n",
712 ctx->active.channel, ch);
713 return -EFAULT;
714 }
715 572
716 return iwl_dvm_send_cmd(priv, &hcmd); 573 return iwl_dvm_send_cmd(priv, &hcmd);
717} 574}
@@ -720,18 +577,6 @@ struct iwl_lib_ops iwl6000_lib = {
720 .set_hw_params = iwl6000_hw_set_hw_params, 577 .set_hw_params = iwl6000_hw_set_hw_params,
721 .set_channel_switch = iwl6000_hw_channel_switch, 578 .set_channel_switch = iwl6000_hw_channel_switch,
722 .nic_config = iwl6000_nic_config, 579 .nic_config = iwl6000_nic_config,
723 .eeprom_ops = {
724 .regulatory_bands = {
725 EEPROM_REG_BAND_1_CHANNELS,
726 EEPROM_REG_BAND_2_CHANNELS,
727 EEPROM_REG_BAND_3_CHANNELS,
728 EEPROM_REG_BAND_4_CHANNELS,
729 EEPROM_REG_BAND_5_CHANNELS,
730 EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
731 EEPROM_REG_BAND_52_HT40_CHANNELS
732 },
733 .enhanced_txpower = true,
734 },
735 .temperature = iwlagn_temperature, 580 .temperature = iwlagn_temperature,
736}; 581};
737 582
@@ -739,17 +584,5 @@ struct iwl_lib_ops iwl6030_lib = {
739 .set_hw_params = iwl6000_hw_set_hw_params, 584 .set_hw_params = iwl6000_hw_set_hw_params,
740 .set_channel_switch = iwl6000_hw_channel_switch, 585 .set_channel_switch = iwl6000_hw_channel_switch,
741 .nic_config = iwl6000_nic_config, 586 .nic_config = iwl6000_nic_config,
742 .eeprom_ops = {
743 .regulatory_bands = {
744 EEPROM_REG_BAND_1_CHANNELS,
745 EEPROM_REG_BAND_2_CHANNELS,
746 EEPROM_REG_BAND_3_CHANNELS,
747 EEPROM_REG_BAND_4_CHANNELS,
748 EEPROM_REG_BAND_5_CHANNELS,
749 EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
750 EEPROM_REG_BAND_52_HT40_CHANNELS
751 },
752 .enhanced_txpower = true,
753 },
754 .temperature = iwlagn_temperature, 587 .temperature = iwlagn_temperature,
755}; 588};
diff --git a/drivers/net/wireless/iwlwifi/iwl-led.c b/drivers/net/wireless/iwlwifi/dvm/led.c
index 47000419f91..bf479f70909 100644
--- a/drivers/net/wireless/iwlwifi/iwl-led.c
+++ b/drivers/net/wireless/iwlwifi/dvm/led.c
@@ -34,12 +34,11 @@
34#include <net/mac80211.h> 34#include <net/mac80211.h>
35#include <linux/etherdevice.h> 35#include <linux/etherdevice.h>
36#include <asm/unaligned.h> 36#include <asm/unaligned.h>
37
38#include "iwl-dev.h"
39#include "iwl-agn.h"
40#include "iwl-io.h" 37#include "iwl-io.h"
41#include "iwl-trans.h" 38#include "iwl-trans.h"
42#include "iwl-modparams.h" 39#include "iwl-modparams.h"
40#include "dev.h"
41#include "agn.h"
43 42
44/* Throughput OFF time(ms) ON time (ms) 43/* Throughput OFF time(ms) ON time (ms)
45 * >300 25 25 44 * >300 25 25
diff --git a/drivers/net/wireless/iwlwifi/iwl-led.h b/drivers/net/wireless/iwlwifi/dvm/led.h
index b02a853103d..b02a853103d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-led.h
+++ b/drivers/net/wireless/iwlwifi/dvm/led.h
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c b/drivers/net/wireless/iwlwifi/dvm/lib.c
index e55ec6c8a92..207ae91a83a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
+++ b/drivers/net/wireless/iwlwifi/dvm/lib.c
@@ -33,13 +33,14 @@
33#include <linux/sched.h> 33#include <linux/sched.h>
34#include <net/mac80211.h> 34#include <net/mac80211.h>
35 35
36#include "iwl-dev.h"
37#include "iwl-io.h" 36#include "iwl-io.h"
38#include "iwl-agn-hw.h" 37#include "iwl-agn-hw.h"
39#include "iwl-agn.h"
40#include "iwl-trans.h" 38#include "iwl-trans.h"
41#include "iwl-modparams.h" 39#include "iwl-modparams.h"
42 40
41#include "dev.h"
42#include "agn.h"
43
43int iwlagn_hw_valid_rtc_data_addr(u32 addr) 44int iwlagn_hw_valid_rtc_data_addr(u32 addr)
44{ 45{
45 return (addr >= IWLAGN_RTC_DATA_LOWER_BOUND) && 46 return (addr >= IWLAGN_RTC_DATA_LOWER_BOUND) &&
@@ -58,8 +59,7 @@ int iwlagn_send_tx_power(struct iwl_priv *priv)
58 /* half dBm need to multiply */ 59 /* half dBm need to multiply */
59 tx_power_cmd.global_lmt = (s8)(2 * priv->tx_power_user_lmt); 60 tx_power_cmd.global_lmt = (s8)(2 * priv->tx_power_user_lmt);
60 61
61 if (priv->tx_power_lmt_in_half_dbm && 62 if (tx_power_cmd.global_lmt > priv->eeprom_data->max_tx_pwr_half_dbm) {
62 priv->tx_power_lmt_in_half_dbm < tx_power_cmd.global_lmt) {
63 /* 63 /*
64 * For the newer devices which using enhanced/extend tx power 64 * For the newer devices which using enhanced/extend tx power
65 * table in EEPROM, the format is in half dBm. driver need to 65 * table in EEPROM, the format is in half dBm. driver need to
@@ -71,7 +71,8 @@ int iwlagn_send_tx_power(struct iwl_priv *priv)
71 * "tx_power_user_lmt" is higher than EEPROM value (in 71 * "tx_power_user_lmt" is higher than EEPROM value (in
72 * half-dBm format), lower the tx power based on EEPROM 72 * half-dBm format), lower the tx power based on EEPROM
73 */ 73 */
74 tx_power_cmd.global_lmt = priv->tx_power_lmt_in_half_dbm; 74 tx_power_cmd.global_lmt =
75 priv->eeprom_data->max_tx_pwr_half_dbm;
75 } 76 }
76 tx_power_cmd.flags = IWLAGN_TX_POWER_NO_CLOSED; 77 tx_power_cmd.flags = IWLAGN_TX_POWER_NO_CLOSED;
77 tx_power_cmd.srv_chan_lmt = IWLAGN_TX_POWER_AUTO; 78 tx_power_cmd.srv_chan_lmt = IWLAGN_TX_POWER_AUTO;
@@ -159,7 +160,7 @@ int iwlagn_txfifo_flush(struct iwl_priv *priv, u16 flush_control)
159 IWL_PAN_SCD_BK_MSK | IWL_PAN_SCD_MGMT_MSK | 160 IWL_PAN_SCD_BK_MSK | IWL_PAN_SCD_MGMT_MSK |
160 IWL_PAN_SCD_MULTICAST_MSK; 161 IWL_PAN_SCD_MULTICAST_MSK;
161 162
162 if (priv->hw_params.sku & EEPROM_SKU_CAP_11N_ENABLE) 163 if (priv->eeprom_data->sku & EEPROM_SKU_CAP_11N_ENABLE)
163 flush_cmd.fifo_control |= IWL_AGG_TX_QUEUE_MSK; 164 flush_cmd.fifo_control |= IWL_AGG_TX_QUEUE_MSK;
164 165
165 IWL_DEBUG_INFO(priv, "fifo queue control: 0X%x\n", 166 IWL_DEBUG_INFO(priv, "fifo queue control: 0X%x\n",
@@ -617,6 +618,11 @@ static bool iwlagn_fill_txpower_mode(struct iwl_priv *priv,
617 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; 618 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
618 int ave_rssi; 619 int ave_rssi;
619 620
621 if (!ctx->vif || (ctx->vif->type != NL80211_IFTYPE_STATION)) {
622 IWL_DEBUG_INFO(priv, "BSS ctx not active or not in sta mode\n");
623 return false;
624 }
625
620 ave_rssi = ieee80211_ave_rssi(ctx->vif); 626 ave_rssi = ieee80211_ave_rssi(ctx->vif);
621 if (!ave_rssi) { 627 if (!ave_rssi) {
622 /* no rssi data, no changes to reduce tx power */ 628 /* no rssi data, no changes to reduce tx power */
@@ -818,7 +824,7 @@ void iwlagn_set_rxon_chain(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
818 if (priv->chain_noise_data.active_chains) 824 if (priv->chain_noise_data.active_chains)
819 active_chains = priv->chain_noise_data.active_chains; 825 active_chains = priv->chain_noise_data.active_chains;
820 else 826 else
821 active_chains = priv->hw_params.valid_rx_ant; 827 active_chains = priv->eeprom_data->valid_rx_ant;
822 828
823 if (priv->cfg->bt_params && 829 if (priv->cfg->bt_params &&
824 priv->cfg->bt_params->advanced_bt_coexist && 830 priv->cfg->bt_params->advanced_bt_coexist &&
@@ -1259,7 +1265,7 @@ int iwl_dvm_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
1259 * the mutex, this ensures we don't try to send two 1265 * the mutex, this ensures we don't try to send two
1260 * (or more) synchronous commands at a time. 1266 * (or more) synchronous commands at a time.
1261 */ 1267 */
1262 if (cmd->flags & CMD_SYNC) 1268 if (!(cmd->flags & CMD_ASYNC))
1263 lockdep_assert_held(&priv->mutex); 1269 lockdep_assert_held(&priv->mutex);
1264 1270
1265 if (priv->ucode_owner == IWL_OWNERSHIP_TM && 1271 if (priv->ucode_owner == IWL_OWNERSHIP_TM &&
diff --git a/drivers/net/wireless/iwlwifi/iwl-mac80211.c b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
index 013680332f0..a5f7bce9632 100644
--- a/drivers/net/wireless/iwlwifi/iwl-mac80211.c
+++ b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
@@ -38,19 +38,20 @@
38#include <linux/etherdevice.h> 38#include <linux/etherdevice.h>
39#include <linux/if_arp.h> 39#include <linux/if_arp.h>
40 40
41#include <net/ieee80211_radiotap.h>
41#include <net/mac80211.h> 42#include <net/mac80211.h>
42 43
43#include <asm/div64.h> 44#include <asm/div64.h>
44 45
45#include "iwl-eeprom.h"
46#include "iwl-dev.h"
47#include "iwl-io.h" 46#include "iwl-io.h"
48#include "iwl-agn-calib.h"
49#include "iwl-agn.h"
50#include "iwl-trans.h" 47#include "iwl-trans.h"
51#include "iwl-op-mode.h" 48#include "iwl-op-mode.h"
52#include "iwl-modparams.h" 49#include "iwl-modparams.h"
53 50
51#include "dev.h"
52#include "calib.h"
53#include "agn.h"
54
54/***************************************************************************** 55/*****************************************************************************
55 * 56 *
56 * mac80211 entry point functions 57 * mac80211 entry point functions
@@ -154,6 +155,7 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv,
154 IEEE80211_HW_SCAN_WHILE_IDLE; 155 IEEE80211_HW_SCAN_WHILE_IDLE;
155 156
156 hw->offchannel_tx_hw_queue = IWL_AUX_QUEUE; 157 hw->offchannel_tx_hw_queue = IWL_AUX_QUEUE;
158 hw->radiotap_mcs_details |= IEEE80211_RADIOTAP_MCS_HAVE_FMT;
157 159
158 /* 160 /*
159 * Including the following line will crash some AP's. This 161 * Including the following line will crash some AP's. This
@@ -162,7 +164,7 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv,
162 hw->max_tx_aggregation_subframes = LINK_QUAL_AGG_FRAME_LIMIT_DEF; 164 hw->max_tx_aggregation_subframes = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
163 */ 165 */
164 166
165 if (priv->hw_params.sku & EEPROM_SKU_CAP_11N_ENABLE) 167 if (priv->eeprom_data->sku & EEPROM_SKU_CAP_11N_ENABLE)
166 hw->flags |= IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS | 168 hw->flags |= IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
167 IEEE80211_HW_SUPPORTS_STATIC_SMPS; 169 IEEE80211_HW_SUPPORTS_STATIC_SMPS;
168 170
@@ -237,12 +239,12 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv,
237 239
238 hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL; 240 hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL;
239 241
240 if (priv->bands[IEEE80211_BAND_2GHZ].n_channels) 242 if (priv->eeprom_data->bands[IEEE80211_BAND_2GHZ].n_channels)
241 priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = 243 priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
242 &priv->bands[IEEE80211_BAND_2GHZ]; 244 &priv->eeprom_data->bands[IEEE80211_BAND_2GHZ];
243 if (priv->bands[IEEE80211_BAND_5GHZ].n_channels) 245 if (priv->eeprom_data->bands[IEEE80211_BAND_5GHZ].n_channels)
244 priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = 246 priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
245 &priv->bands[IEEE80211_BAND_5GHZ]; 247 &priv->eeprom_data->bands[IEEE80211_BAND_5GHZ];
246 248
247 hw->wiphy->hw_version = priv->trans->hw_id; 249 hw->wiphy->hw_version = priv->trans->hw_id;
248 250
@@ -341,7 +343,7 @@ static int iwlagn_mac_start(struct ieee80211_hw *hw)
341 return 0; 343 return 0;
342} 344}
343 345
344void iwlagn_mac_stop(struct ieee80211_hw *hw) 346static void iwlagn_mac_stop(struct ieee80211_hw *hw)
345{ 347{
346 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); 348 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
347 349
@@ -369,9 +371,9 @@ void iwlagn_mac_stop(struct ieee80211_hw *hw)
369 IWL_DEBUG_MAC80211(priv, "leave\n"); 371 IWL_DEBUG_MAC80211(priv, "leave\n");
370} 372}
371 373
372void iwlagn_mac_set_rekey_data(struct ieee80211_hw *hw, 374static void iwlagn_mac_set_rekey_data(struct ieee80211_hw *hw,
373 struct ieee80211_vif *vif, 375 struct ieee80211_vif *vif,
374 struct cfg80211_gtk_rekey_data *data) 376 struct cfg80211_gtk_rekey_data *data)
375{ 377{
376 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); 378 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
377 379
@@ -397,7 +399,8 @@ void iwlagn_mac_set_rekey_data(struct ieee80211_hw *hw,
397 399
398#ifdef CONFIG_PM_SLEEP 400#ifdef CONFIG_PM_SLEEP
399 401
400int iwlagn_mac_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan) 402static int iwlagn_mac_suspend(struct ieee80211_hw *hw,
403 struct cfg80211_wowlan *wowlan)
401{ 404{
402 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); 405 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
403 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; 406 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
@@ -420,8 +423,6 @@ int iwlagn_mac_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
420 if (ret) 423 if (ret)
421 goto error; 424 goto error;
422 425
423 device_set_wakeup_enable(priv->trans->dev, true);
424
425 iwl_trans_wowlan_suspend(priv->trans); 426 iwl_trans_wowlan_suspend(priv->trans);
426 427
427 goto out; 428 goto out;
@@ -475,7 +476,7 @@ static int iwlagn_mac_resume(struct ieee80211_hw *hw)
475 } 476 }
476 477
477 if (priv->wowlan_sram) 478 if (priv->wowlan_sram)
478 _iwl_read_targ_mem_words( 479 _iwl_read_targ_mem_dwords(
479 priv->trans, 0x800000, 480 priv->trans, 0x800000,
480 priv->wowlan_sram, 481 priv->wowlan_sram,
481 img->sec[IWL_UCODE_SECTION_DATA].len / 4); 482 img->sec[IWL_UCODE_SECTION_DATA].len / 4);
@@ -488,8 +489,6 @@ static int iwlagn_mac_resume(struct ieee80211_hw *hw)
488 489
489 priv->wowlan = false; 490 priv->wowlan = false;
490 491
491 device_set_wakeup_enable(priv->trans->dev, false);
492
493 iwlagn_prepare_restart(priv); 492 iwlagn_prepare_restart(priv);
494 493
495 memset((void *)&ctx->active, 0, sizeof(ctx->active)); 494 memset((void *)&ctx->active, 0, sizeof(ctx->active));
@@ -504,9 +503,15 @@ static int iwlagn_mac_resume(struct ieee80211_hw *hw)
504 return 1; 503 return 1;
505} 504}
506 505
506static void iwlagn_mac_set_wakeup(struct ieee80211_hw *hw, bool enabled)
507{
508 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
509
510 device_set_wakeup_enable(priv->trans->dev, enabled);
511}
507#endif 512#endif
508 513
509void iwlagn_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb) 514static void iwlagn_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
510{ 515{
511 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); 516 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
512 517
@@ -517,21 +522,21 @@ void iwlagn_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
517 dev_kfree_skb_any(skb); 522 dev_kfree_skb_any(skb);
518} 523}
519 524
520void iwlagn_mac_update_tkip_key(struct ieee80211_hw *hw, 525static void iwlagn_mac_update_tkip_key(struct ieee80211_hw *hw,
521 struct ieee80211_vif *vif, 526 struct ieee80211_vif *vif,
522 struct ieee80211_key_conf *keyconf, 527 struct ieee80211_key_conf *keyconf,
523 struct ieee80211_sta *sta, 528 struct ieee80211_sta *sta,
524 u32 iv32, u16 *phase1key) 529 u32 iv32, u16 *phase1key)
525{ 530{
526 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); 531 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
527 532
528 iwl_update_tkip_key(priv, vif, keyconf, sta, iv32, phase1key); 533 iwl_update_tkip_key(priv, vif, keyconf, sta, iv32, phase1key);
529} 534}
530 535
531int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, 536static int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
532 struct ieee80211_vif *vif, 537 struct ieee80211_vif *vif,
533 struct ieee80211_sta *sta, 538 struct ieee80211_sta *sta,
534 struct ieee80211_key_conf *key) 539 struct ieee80211_key_conf *key)
535{ 540{
536 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); 541 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
537 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv; 542 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
@@ -631,11 +636,11 @@ int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
631 return ret; 636 return ret;
632} 637}
633 638
634int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw, 639static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
635 struct ieee80211_vif *vif, 640 struct ieee80211_vif *vif,
636 enum ieee80211_ampdu_mlme_action action, 641 enum ieee80211_ampdu_mlme_action action,
637 struct ieee80211_sta *sta, u16 tid, u16 *ssn, 642 struct ieee80211_sta *sta, u16 tid, u16 *ssn,
638 u8 buf_size) 643 u8 buf_size)
639{ 644{
640 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); 645 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
641 int ret = -EINVAL; 646 int ret = -EINVAL;
@@ -644,7 +649,7 @@ int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
644 IWL_DEBUG_HT(priv, "A-MPDU action on addr %pM tid %d\n", 649 IWL_DEBUG_HT(priv, "A-MPDU action on addr %pM tid %d\n",
645 sta->addr, tid); 650 sta->addr, tid);
646 651
647 if (!(priv->hw_params.sku & EEPROM_SKU_CAP_11N_ENABLE)) 652 if (!(priv->eeprom_data->sku & EEPROM_SKU_CAP_11N_ENABLE))
648 return -EACCES; 653 return -EACCES;
649 654
650 IWL_DEBUG_MAC80211(priv, "enter\n"); 655 IWL_DEBUG_MAC80211(priv, "enter\n");
@@ -662,7 +667,7 @@ int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
662 ret = iwl_sta_rx_agg_stop(priv, sta, tid); 667 ret = iwl_sta_rx_agg_stop(priv, sta, tid);
663 break; 668 break;
664 case IEEE80211_AMPDU_TX_START: 669 case IEEE80211_AMPDU_TX_START:
665 if (!priv->trans->ops->tx_agg_setup) 670 if (!priv->trans->ops->txq_enable)
666 break; 671 break;
667 if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG) 672 if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG)
668 break; 673 break;
@@ -757,11 +762,11 @@ static int iwlagn_mac_sta_remove(struct ieee80211_hw *hw,
757 return ret; 762 return ret;
758} 763}
759 764
760int iwlagn_mac_sta_state(struct ieee80211_hw *hw, 765static int iwlagn_mac_sta_state(struct ieee80211_hw *hw,
761 struct ieee80211_vif *vif, 766 struct ieee80211_vif *vif,
762 struct ieee80211_sta *sta, 767 struct ieee80211_sta *sta,
763 enum ieee80211_sta_state old_state, 768 enum ieee80211_sta_state old_state,
764 enum ieee80211_sta_state new_state) 769 enum ieee80211_sta_state new_state)
765{ 770{
766 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); 771 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
767 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv; 772 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
@@ -852,11 +857,10 @@ int iwlagn_mac_sta_state(struct ieee80211_hw *hw,
852 return ret; 857 return ret;
853} 858}
854 859
855void iwlagn_mac_channel_switch(struct ieee80211_hw *hw, 860static void iwlagn_mac_channel_switch(struct ieee80211_hw *hw,
856 struct ieee80211_channel_switch *ch_switch) 861 struct ieee80211_channel_switch *ch_switch)
857{ 862{
858 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); 863 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
859 const struct iwl_channel_info *ch_info;
860 struct ieee80211_conf *conf = &hw->conf; 864 struct ieee80211_conf *conf = &hw->conf;
861 struct ieee80211_channel *channel = ch_switch->channel; 865 struct ieee80211_channel *channel = ch_switch->channel;
862 struct iwl_ht_config *ht_conf = &priv->current_ht_config; 866 struct iwl_ht_config *ht_conf = &priv->current_ht_config;
@@ -893,12 +897,6 @@ void iwlagn_mac_channel_switch(struct ieee80211_hw *hw,
893 if (le16_to_cpu(ctx->active.channel) == ch) 897 if (le16_to_cpu(ctx->active.channel) == ch)
894 goto out; 898 goto out;
895 899
896 ch_info = iwl_get_channel_info(priv, channel->band, ch);
897 if (!is_channel_valid(ch_info)) {
898 IWL_DEBUG_MAC80211(priv, "invalid channel\n");
899 goto out;
900 }
901
902 priv->current_ht_config.smps = conf->smps_mode; 900 priv->current_ht_config.smps = conf->smps_mode;
903 901
904 /* Configure HT40 channels */ 902 /* Configure HT40 channels */
@@ -947,10 +945,10 @@ void iwl_chswitch_done(struct iwl_priv *priv, bool is_success)
947 ieee80211_chswitch_done(ctx->vif, is_success); 945 ieee80211_chswitch_done(ctx->vif, is_success);
948} 946}
949 947
950void iwlagn_configure_filter(struct ieee80211_hw *hw, 948static void iwlagn_configure_filter(struct ieee80211_hw *hw,
951 unsigned int changed_flags, 949 unsigned int changed_flags,
952 unsigned int *total_flags, 950 unsigned int *total_flags,
953 u64 multicast) 951 u64 multicast)
954{ 952{
955 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); 953 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
956 __le32 filter_or = 0, filter_nand = 0; 954 __le32 filter_or = 0, filter_nand = 0;
@@ -997,7 +995,7 @@ void iwlagn_configure_filter(struct ieee80211_hw *hw,
997 FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL; 995 FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
998} 996}
999 997
1000void iwlagn_mac_flush(struct ieee80211_hw *hw, bool drop) 998static void iwlagn_mac_flush(struct ieee80211_hw *hw, bool drop)
1001{ 999{
1002 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); 1000 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
1003 1001
@@ -1050,8 +1048,18 @@ static int iwlagn_mac_remain_on_channel(struct ieee80211_hw *hw,
1050 mutex_lock(&priv->mutex); 1048 mutex_lock(&priv->mutex);
1051 1049
1052 if (test_bit(STATUS_SCAN_HW, &priv->status)) { 1050 if (test_bit(STATUS_SCAN_HW, &priv->status)) {
1053 err = -EBUSY; 1051 /* mac80211 should not scan while ROC or ROC while scanning */
1054 goto out; 1052 if (WARN_ON_ONCE(priv->scan_type != IWL_SCAN_RADIO_RESET)) {
1053 err = -EBUSY;
1054 goto out;
1055 }
1056
1057 iwl_scan_cancel_timeout(priv, 100);
1058
1059 if (test_bit(STATUS_SCAN_HW, &priv->status)) {
1060 err = -EBUSY;
1061 goto out;
1062 }
1055 } 1063 }
1056 1064
1057 priv->hw_roc_channel = channel; 1065 priv->hw_roc_channel = channel;
@@ -1124,7 +1132,7 @@ static int iwlagn_mac_remain_on_channel(struct ieee80211_hw *hw,
1124 return err; 1132 return err;
1125} 1133}
1126 1134
1127int iwlagn_mac_cancel_remain_on_channel(struct ieee80211_hw *hw) 1135static int iwlagn_mac_cancel_remain_on_channel(struct ieee80211_hw *hw)
1128{ 1136{
1129 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); 1137 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
1130 1138
@@ -1141,8 +1149,8 @@ int iwlagn_mac_cancel_remain_on_channel(struct ieee80211_hw *hw)
1141 return 0; 1149 return 0;
1142} 1150}
1143 1151
1144void iwlagn_mac_rssi_callback(struct ieee80211_hw *hw, 1152static void iwlagn_mac_rssi_callback(struct ieee80211_hw *hw,
1145 enum ieee80211_rssi_event rssi_event) 1153 enum ieee80211_rssi_event rssi_event)
1146{ 1154{
1147 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); 1155 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
1148 1156
@@ -1166,8 +1174,8 @@ void iwlagn_mac_rssi_callback(struct ieee80211_hw *hw,
1166 IWL_DEBUG_MAC80211(priv, "leave\n"); 1174 IWL_DEBUG_MAC80211(priv, "leave\n");
1167} 1175}
1168 1176
1169int iwlagn_mac_set_tim(struct ieee80211_hw *hw, 1177static int iwlagn_mac_set_tim(struct ieee80211_hw *hw,
1170 struct ieee80211_sta *sta, bool set) 1178 struct ieee80211_sta *sta, bool set)
1171{ 1179{
1172 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); 1180 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
1173 1181
@@ -1176,9 +1184,9 @@ int iwlagn_mac_set_tim(struct ieee80211_hw *hw,
1176 return 0; 1184 return 0;
1177} 1185}
1178 1186
1179int iwlagn_mac_conf_tx(struct ieee80211_hw *hw, 1187static int iwlagn_mac_conf_tx(struct ieee80211_hw *hw,
1180 struct ieee80211_vif *vif, u16 queue, 1188 struct ieee80211_vif *vif, u16 queue,
1181 const struct ieee80211_tx_queue_params *params) 1189 const struct ieee80211_tx_queue_params *params)
1182{ 1190{
1183 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); 1191 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
1184 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv; 1192 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
@@ -1220,7 +1228,7 @@ int iwlagn_mac_conf_tx(struct ieee80211_hw *hw,
1220 return 0; 1228 return 0;
1221} 1229}
1222 1230
1223int iwlagn_mac_tx_last_beacon(struct ieee80211_hw *hw) 1231static int iwlagn_mac_tx_last_beacon(struct ieee80211_hw *hw)
1224{ 1232{
1225 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); 1233 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
1226 1234
@@ -1236,7 +1244,8 @@ static int iwl_set_mode(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
1236 return iwlagn_commit_rxon(priv, ctx); 1244 return iwlagn_commit_rxon(priv, ctx);
1237} 1245}
1238 1246
1239int iwl_setup_interface(struct iwl_priv *priv, struct iwl_rxon_context *ctx) 1247static int iwl_setup_interface(struct iwl_priv *priv,
1248 struct iwl_rxon_context *ctx)
1240{ 1249{
1241 struct ieee80211_vif *vif = ctx->vif; 1250 struct ieee80211_vif *vif = ctx->vif;
1242 int err, ac; 1251 int err, ac;
@@ -1356,9 +1365,9 @@ static int iwlagn_mac_add_interface(struct ieee80211_hw *hw,
1356 return err; 1365 return err;
1357} 1366}
1358 1367
1359void iwl_teardown_interface(struct iwl_priv *priv, 1368static void iwl_teardown_interface(struct iwl_priv *priv,
1360 struct ieee80211_vif *vif, 1369 struct ieee80211_vif *vif,
1361 bool mode_change) 1370 bool mode_change)
1362{ 1371{
1363 struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif); 1372 struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
1364 1373
@@ -1414,13 +1423,11 @@ static void iwlagn_mac_remove_interface(struct ieee80211_hw *hw,
1414} 1423}
1415 1424
1416static int iwlagn_mac_change_interface(struct ieee80211_hw *hw, 1425static int iwlagn_mac_change_interface(struct ieee80211_hw *hw,
1417 struct ieee80211_vif *vif, 1426 struct ieee80211_vif *vif,
1418 enum nl80211_iftype newtype, bool newp2p) 1427 enum nl80211_iftype newtype, bool newp2p)
1419{ 1428{
1420 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); 1429 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
1421 struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif); 1430 struct iwl_rxon_context *ctx, *tmp;
1422 struct iwl_rxon_context *bss_ctx = &priv->contexts[IWL_RXON_CTX_BSS];
1423 struct iwl_rxon_context *tmp;
1424 enum nl80211_iftype newviftype = newtype; 1431 enum nl80211_iftype newviftype = newtype;
1425 u32 interface_modes; 1432 u32 interface_modes;
1426 int err; 1433 int err;
@@ -1431,6 +1438,18 @@ static int iwlagn_mac_change_interface(struct ieee80211_hw *hw,
1431 1438
1432 mutex_lock(&priv->mutex); 1439 mutex_lock(&priv->mutex);
1433 1440
1441 ctx = iwl_rxon_ctx_from_vif(vif);
1442
1443 /*
1444 * To simplify this code, only support changes on the
1445 * BSS context. The PAN context is usually reassigned
1446 * by creating/removing P2P interfaces anyway.
1447 */
1448 if (ctx->ctxid != IWL_RXON_CTX_BSS) {
1449 err = -EBUSY;
1450 goto out;
1451 }
1452
1434 if (!ctx->vif || !iwl_is_ready_rf(priv)) { 1453 if (!ctx->vif || !iwl_is_ready_rf(priv)) {
1435 /* 1454 /*
1436 * Huh? But wait ... this can maybe happen when 1455 * Huh? But wait ... this can maybe happen when
@@ -1440,32 +1459,19 @@ static int iwlagn_mac_change_interface(struct ieee80211_hw *hw,
1440 goto out; 1459 goto out;
1441 } 1460 }
1442 1461
1462 /* Check if the switch is supported in the same context */
1443 interface_modes = ctx->interface_modes | ctx->exclusive_interface_modes; 1463 interface_modes = ctx->interface_modes | ctx->exclusive_interface_modes;
1444
1445 if (!(interface_modes & BIT(newtype))) { 1464 if (!(interface_modes & BIT(newtype))) {
1446 err = -EBUSY; 1465 err = -EBUSY;
1447 goto out; 1466 goto out;
1448 } 1467 }
1449 1468
1450 /*
1451 * Refuse a change that should be done by moving from the PAN
1452 * context to the BSS context instead, if the BSS context is
1453 * available and can support the new interface type.
1454 */
1455 if (ctx->ctxid == IWL_RXON_CTX_PAN && !bss_ctx->vif &&
1456 (bss_ctx->interface_modes & BIT(newtype) ||
1457 bss_ctx->exclusive_interface_modes & BIT(newtype))) {
1458 BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2);
1459 err = -EBUSY;
1460 goto out;
1461 }
1462
1463 if (ctx->exclusive_interface_modes & BIT(newtype)) { 1469 if (ctx->exclusive_interface_modes & BIT(newtype)) {
1464 for_each_context(priv, tmp) { 1470 for_each_context(priv, tmp) {
1465 if (ctx == tmp) 1471 if (ctx == tmp)
1466 continue; 1472 continue;
1467 1473
1468 if (!tmp->vif) 1474 if (!tmp->is_active)
1469 continue; 1475 continue;
1470 1476
1471 /* 1477 /*
@@ -1499,9 +1505,9 @@ static int iwlagn_mac_change_interface(struct ieee80211_hw *hw,
1499 return err; 1505 return err;
1500} 1506}
1501 1507
1502int iwlagn_mac_hw_scan(struct ieee80211_hw *hw, 1508static int iwlagn_mac_hw_scan(struct ieee80211_hw *hw,
1503 struct ieee80211_vif *vif, 1509 struct ieee80211_vif *vif,
1504 struct cfg80211_scan_request *req) 1510 struct cfg80211_scan_request *req)
1505{ 1511{
1506 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); 1512 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
1507 int ret; 1513 int ret;
@@ -1556,10 +1562,10 @@ static void iwl_sta_modify_ps_wake(struct iwl_priv *priv, int sta_id)
1556 iwl_send_add_sta(priv, &cmd, CMD_ASYNC); 1562 iwl_send_add_sta(priv, &cmd, CMD_ASYNC);
1557} 1563}
1558 1564
1559void iwlagn_mac_sta_notify(struct ieee80211_hw *hw, 1565static void iwlagn_mac_sta_notify(struct ieee80211_hw *hw,
1560 struct ieee80211_vif *vif, 1566 struct ieee80211_vif *vif,
1561 enum sta_notify_cmd cmd, 1567 enum sta_notify_cmd cmd,
1562 struct ieee80211_sta *sta) 1568 struct ieee80211_sta *sta)
1563{ 1569{
1564 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); 1570 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
1565 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv; 1571 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
@@ -1596,6 +1602,7 @@ struct ieee80211_ops iwlagn_hw_ops = {
1596#ifdef CONFIG_PM_SLEEP 1602#ifdef CONFIG_PM_SLEEP
1597 .suspend = iwlagn_mac_suspend, 1603 .suspend = iwlagn_mac_suspend,
1598 .resume = iwlagn_mac_resume, 1604 .resume = iwlagn_mac_resume,
1605 .set_wakeup = iwlagn_mac_set_wakeup,
1599#endif 1606#endif
1600 .add_interface = iwlagn_mac_add_interface, 1607 .add_interface = iwlagn_mac_add_interface,
1601 .remove_interface = iwlagn_mac_remove_interface, 1608 .remove_interface = iwlagn_mac_remove_interface,
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/dvm/main.c
index ec36e2b020b..abfd7916bde 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/dvm/main.c
@@ -44,15 +44,19 @@
44 44
45#include <asm/div64.h> 45#include <asm/div64.h>
46 46
47#include "iwl-eeprom.h" 47#include "iwl-eeprom-read.h"
48#include "iwl-dev.h" 48#include "iwl-eeprom-parse.h"
49#include "iwl-io.h" 49#include "iwl-io.h"
50#include "iwl-agn-calib.h"
51#include "iwl-agn.h"
52#include "iwl-trans.h" 50#include "iwl-trans.h"
53#include "iwl-op-mode.h" 51#include "iwl-op-mode.h"
54#include "iwl-drv.h" 52#include "iwl-drv.h"
55#include "iwl-modparams.h" 53#include "iwl-modparams.h"
54#include "iwl-prph.h"
55
56#include "dev.h"
57#include "calib.h"
58#include "agn.h"
59
56 60
57/****************************************************************************** 61/******************************************************************************
58 * 62 *
@@ -78,7 +82,8 @@ MODULE_DESCRIPTION(DRV_DESCRIPTION);
78MODULE_VERSION(DRV_VERSION); 82MODULE_VERSION(DRV_VERSION);
79MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR); 83MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
80MODULE_LICENSE("GPL"); 84MODULE_LICENSE("GPL");
81MODULE_ALIAS("iwlagn"); 85
86static const struct iwl_op_mode_ops iwl_dvm_ops;
82 87
83void iwl_update_chain_flags(struct iwl_priv *priv) 88void iwl_update_chain_flags(struct iwl_priv *priv)
84{ 89{
@@ -180,7 +185,7 @@ int iwlagn_send_beacon_cmd(struct iwl_priv *priv)
180 rate = info->control.rates[0].idx; 185 rate = info->control.rates[0].idx;
181 186
182 priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant, 187 priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant,
183 priv->hw_params.valid_tx_ant); 188 priv->eeprom_data->valid_tx_ant);
184 rate_flags = iwl_ant_idx_to_flags(priv->mgmt_tx_ant); 189 rate_flags = iwl_ant_idx_to_flags(priv->mgmt_tx_ant);
185 190
186 /* In mac80211, rates for 5 GHz start at 0 */ 191 /* In mac80211, rates for 5 GHz start at 0 */
@@ -403,7 +408,7 @@ static void iwl_continuous_event_trace(struct iwl_priv *priv)
403 408
404 base = priv->device_pointers.log_event_table; 409 base = priv->device_pointers.log_event_table;
405 if (iwlagn_hw_valid_rtc_data_addr(base)) { 410 if (iwlagn_hw_valid_rtc_data_addr(base)) {
406 iwl_read_targ_mem_words(priv->trans, base, &read, sizeof(read)); 411 iwl_read_targ_mem_bytes(priv->trans, base, &read, sizeof(read));
407 capacity = read.capacity; 412 capacity = read.capacity;
408 mode = read.mode; 413 mode = read.mode;
409 num_wraps = read.wrap_counter; 414 num_wraps = read.wrap_counter;
@@ -578,7 +583,7 @@ static const u8 iwlagn_pan_ac_to_queue[] = {
578 7, 6, 5, 4, 583 7, 6, 5, 4,
579}; 584};
580 585
581void iwl_init_context(struct iwl_priv *priv, u32 ucode_flags) 586static void iwl_init_context(struct iwl_priv *priv, u32 ucode_flags)
582{ 587{
583 int i; 588 int i;
584 589
@@ -645,7 +650,7 @@ void iwl_init_context(struct iwl_priv *priv, u32 ucode_flags)
645 BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2); 650 BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2);
646} 651}
647 652
648void iwl_rf_kill_ct_config(struct iwl_priv *priv) 653static void iwl_rf_kill_ct_config(struct iwl_priv *priv)
649{ 654{
650 struct iwl_ct_kill_config cmd; 655 struct iwl_ct_kill_config cmd;
651 struct iwl_ct_kill_throttling_config adv_cmd; 656 struct iwl_ct_kill_throttling_config adv_cmd;
@@ -726,7 +731,7 @@ static int iwlagn_send_tx_ant_config(struct iwl_priv *priv, u8 valid_tx_ant)
726 } 731 }
727} 732}
728 733
729void iwl_send_bt_config(struct iwl_priv *priv) 734static void iwl_send_bt_config(struct iwl_priv *priv)
730{ 735{
731 struct iwl_bt_cmd bt_cmd = { 736 struct iwl_bt_cmd bt_cmd = {
732 .lead_time = BT_LEAD_TIME_DEF, 737 .lead_time = BT_LEAD_TIME_DEF,
@@ -814,7 +819,7 @@ int iwl_alive_start(struct iwl_priv *priv)
814 ieee80211_wake_queues(priv->hw); 819 ieee80211_wake_queues(priv->hw);
815 820
816 /* Configure Tx antenna selection based on H/W config */ 821 /* Configure Tx antenna selection based on H/W config */
817 iwlagn_send_tx_ant_config(priv, priv->hw_params.valid_tx_ant); 822 iwlagn_send_tx_ant_config(priv, priv->eeprom_data->valid_tx_ant);
818 823
819 if (iwl_is_associated_ctx(ctx) && !priv->wowlan) { 824 if (iwl_is_associated_ctx(ctx) && !priv->wowlan) {
820 struct iwl_rxon_cmd *active_rxon = 825 struct iwl_rxon_cmd *active_rxon =
@@ -932,11 +937,12 @@ void iwl_down(struct iwl_priv *priv)
932 priv->ucode_loaded = false; 937 priv->ucode_loaded = false;
933 iwl_trans_stop_device(priv->trans); 938 iwl_trans_stop_device(priv->trans);
934 939
940 /* Set num_aux_in_flight must be done after the transport is stopped */
941 atomic_set(&priv->num_aux_in_flight, 0);
942
935 /* Clear out all status bits but a few that are stable across reset */ 943 /* Clear out all status bits but a few that are stable across reset */
936 priv->status &= test_bit(STATUS_RF_KILL_HW, &priv->status) << 944 priv->status &= test_bit(STATUS_RF_KILL_HW, &priv->status) <<
937 STATUS_RF_KILL_HW | 945 STATUS_RF_KILL_HW |
938 test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
939 STATUS_GEO_CONFIGURED |
940 test_bit(STATUS_FW_ERROR, &priv->status) << 946 test_bit(STATUS_FW_ERROR, &priv->status) <<
941 STATUS_FW_ERROR | 947 STATUS_FW_ERROR |
942 test_bit(STATUS_EXIT_PENDING, &priv->status) << 948 test_bit(STATUS_EXIT_PENDING, &priv->status) <<
@@ -1078,7 +1084,7 @@ static void iwlagn_disable_roc_work(struct work_struct *work)
1078 * 1084 *
1079 *****************************************************************************/ 1085 *****************************************************************************/
1080 1086
1081void iwl_setup_deferred_work(struct iwl_priv *priv) 1087static void iwl_setup_deferred_work(struct iwl_priv *priv)
1082{ 1088{
1083 priv->workqueue = create_singlethread_workqueue(DRV_NAME); 1089 priv->workqueue = create_singlethread_workqueue(DRV_NAME);
1084 1090
@@ -1123,224 +1129,14 @@ void iwl_cancel_deferred_work(struct iwl_priv *priv)
1123 del_timer_sync(&priv->ucode_trace); 1129 del_timer_sync(&priv->ucode_trace);
1124} 1130}
1125 1131
1126static void iwl_init_hw_rates(struct ieee80211_rate *rates) 1132static int iwl_init_drv(struct iwl_priv *priv)
1127{ 1133{
1128 int i;
1129
1130 for (i = 0; i < IWL_RATE_COUNT_LEGACY; i++) {
1131 rates[i].bitrate = iwl_rates[i].ieee * 5;
1132 rates[i].hw_value = i; /* Rate scaling will work on indexes */
1133 rates[i].hw_value_short = i;
1134 rates[i].flags = 0;
1135 if ((i >= IWL_FIRST_CCK_RATE) && (i <= IWL_LAST_CCK_RATE)) {
1136 /*
1137 * If CCK != 1M then set short preamble rate flag.
1138 */
1139 rates[i].flags |=
1140 (iwl_rates[i].plcp == IWL_RATE_1M_PLCP) ?
1141 0 : IEEE80211_RATE_SHORT_PREAMBLE;
1142 }
1143 }
1144}
1145
1146#define MAX_BIT_RATE_40_MHZ 150 /* Mbps */
1147#define MAX_BIT_RATE_20_MHZ 72 /* Mbps */
1148static void iwl_init_ht_hw_capab(const struct iwl_priv *priv,
1149 struct ieee80211_sta_ht_cap *ht_info,
1150 enum ieee80211_band band)
1151{
1152 u16 max_bit_rate = 0;
1153 u8 rx_chains_num = priv->hw_params.rx_chains_num;
1154 u8 tx_chains_num = priv->hw_params.tx_chains_num;
1155
1156 ht_info->cap = 0;
1157 memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
1158
1159 ht_info->ht_supported = true;
1160
1161 if (priv->cfg->ht_params &&
1162 priv->cfg->ht_params->ht_greenfield_support)
1163 ht_info->cap |= IEEE80211_HT_CAP_GRN_FLD;
1164 ht_info->cap |= IEEE80211_HT_CAP_SGI_20;
1165 max_bit_rate = MAX_BIT_RATE_20_MHZ;
1166 if (priv->hw_params.ht40_channel & BIT(band)) {
1167 ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
1168 ht_info->cap |= IEEE80211_HT_CAP_SGI_40;
1169 ht_info->mcs.rx_mask[4] = 0x01;
1170 max_bit_rate = MAX_BIT_RATE_40_MHZ;
1171 }
1172
1173 if (iwlwifi_mod_params.amsdu_size_8K)
1174 ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU;
1175
1176 ht_info->ampdu_factor = CFG_HT_RX_AMPDU_FACTOR_DEF;
1177 ht_info->ampdu_density = CFG_HT_MPDU_DENSITY_DEF;
1178
1179 ht_info->mcs.rx_mask[0] = 0xFF;
1180 if (rx_chains_num >= 2)
1181 ht_info->mcs.rx_mask[1] = 0xFF;
1182 if (rx_chains_num >= 3)
1183 ht_info->mcs.rx_mask[2] = 0xFF;
1184
1185 /* Highest supported Rx data rate */
1186 max_bit_rate *= rx_chains_num;
1187 WARN_ON(max_bit_rate & ~IEEE80211_HT_MCS_RX_HIGHEST_MASK);
1188 ht_info->mcs.rx_highest = cpu_to_le16(max_bit_rate);
1189
1190 /* Tx MCS capabilities */
1191 ht_info->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
1192 if (tx_chains_num != rx_chains_num) {
1193 ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
1194 ht_info->mcs.tx_params |= ((tx_chains_num - 1) <<
1195 IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
1196 }
1197}
1198
1199/**
1200 * iwl_init_geos - Initialize mac80211's geo/channel info based from eeprom
1201 */
1202static int iwl_init_geos(struct iwl_priv *priv)
1203{
1204 struct iwl_channel_info *ch;
1205 struct ieee80211_supported_band *sband;
1206 struct ieee80211_channel *channels;
1207 struct ieee80211_channel *geo_ch;
1208 struct ieee80211_rate *rates;
1209 int i = 0;
1210 s8 max_tx_power = IWLAGN_TX_POWER_TARGET_POWER_MIN;
1211
1212 if (priv->bands[IEEE80211_BAND_2GHZ].n_bitrates ||
1213 priv->bands[IEEE80211_BAND_5GHZ].n_bitrates) {
1214 IWL_DEBUG_INFO(priv, "Geography modes already initialized.\n");
1215 set_bit(STATUS_GEO_CONFIGURED, &priv->status);
1216 return 0;
1217 }
1218
1219 channels = kcalloc(priv->channel_count,
1220 sizeof(struct ieee80211_channel), GFP_KERNEL);
1221 if (!channels)
1222 return -ENOMEM;
1223
1224 rates = kcalloc(IWL_RATE_COUNT_LEGACY, sizeof(struct ieee80211_rate),
1225 GFP_KERNEL);
1226 if (!rates) {
1227 kfree(channels);
1228 return -ENOMEM;
1229 }
1230
1231 /* 5.2GHz channels start after the 2.4GHz channels */
1232 sband = &priv->bands[IEEE80211_BAND_5GHZ];
1233 sband->channels = &channels[ARRAY_SIZE(iwl_eeprom_band_1)];
1234 /* just OFDM */
1235 sband->bitrates = &rates[IWL_FIRST_OFDM_RATE];
1236 sband->n_bitrates = IWL_RATE_COUNT_LEGACY - IWL_FIRST_OFDM_RATE;
1237
1238 if (priv->hw_params.sku & EEPROM_SKU_CAP_11N_ENABLE)
1239 iwl_init_ht_hw_capab(priv, &sband->ht_cap,
1240 IEEE80211_BAND_5GHZ);
1241
1242 sband = &priv->bands[IEEE80211_BAND_2GHZ];
1243 sband->channels = channels;
1244 /* OFDM & CCK */
1245 sband->bitrates = rates;
1246 sband->n_bitrates = IWL_RATE_COUNT_LEGACY;
1247
1248 if (priv->hw_params.sku & EEPROM_SKU_CAP_11N_ENABLE)
1249 iwl_init_ht_hw_capab(priv, &sband->ht_cap,
1250 IEEE80211_BAND_2GHZ);
1251
1252 priv->ieee_channels = channels;
1253 priv->ieee_rates = rates;
1254
1255 for (i = 0; i < priv->channel_count; i++) {
1256 ch = &priv->channel_info[i];
1257
1258 /* FIXME: might be removed if scan is OK */
1259 if (!is_channel_valid(ch))
1260 continue;
1261
1262 sband = &priv->bands[ch->band];
1263
1264 geo_ch = &sband->channels[sband->n_channels++];
1265
1266 geo_ch->center_freq =
1267 ieee80211_channel_to_frequency(ch->channel, ch->band);
1268 geo_ch->max_power = ch->max_power_avg;
1269 geo_ch->max_antenna_gain = 0xff;
1270 geo_ch->hw_value = ch->channel;
1271
1272 if (is_channel_valid(ch)) {
1273 if (!(ch->flags & EEPROM_CHANNEL_IBSS))
1274 geo_ch->flags |= IEEE80211_CHAN_NO_IBSS;
1275
1276 if (!(ch->flags & EEPROM_CHANNEL_ACTIVE))
1277 geo_ch->flags |= IEEE80211_CHAN_PASSIVE_SCAN;
1278
1279 if (ch->flags & EEPROM_CHANNEL_RADAR)
1280 geo_ch->flags |= IEEE80211_CHAN_RADAR;
1281
1282 geo_ch->flags |= ch->ht40_extension_channel;
1283
1284 if (ch->max_power_avg > max_tx_power)
1285 max_tx_power = ch->max_power_avg;
1286 } else {
1287 geo_ch->flags |= IEEE80211_CHAN_DISABLED;
1288 }
1289
1290 IWL_DEBUG_INFO(priv, "Channel %d Freq=%d[%sGHz] %s flag=0x%X\n",
1291 ch->channel, geo_ch->center_freq,
1292 is_channel_a_band(ch) ? "5.2" : "2.4",
1293 geo_ch->flags & IEEE80211_CHAN_DISABLED ?
1294 "restricted" : "valid",
1295 geo_ch->flags);
1296 }
1297
1298 priv->tx_power_device_lmt = max_tx_power;
1299 priv->tx_power_user_lmt = max_tx_power;
1300 priv->tx_power_next = max_tx_power;
1301
1302 if ((priv->bands[IEEE80211_BAND_5GHZ].n_channels == 0) &&
1303 priv->hw_params.sku & EEPROM_SKU_CAP_BAND_52GHZ) {
1304 IWL_INFO(priv, "Incorrectly detected BG card as ABG. "
1305 "Please send your %s to maintainer.\n",
1306 priv->trans->hw_id_str);
1307 priv->hw_params.sku &= ~EEPROM_SKU_CAP_BAND_52GHZ;
1308 }
1309
1310 if (iwlwifi_mod_params.disable_5ghz)
1311 priv->bands[IEEE80211_BAND_5GHZ].n_channels = 0;
1312
1313 IWL_INFO(priv, "Tunable channels: %d 802.11bg, %d 802.11a channels\n",
1314 priv->bands[IEEE80211_BAND_2GHZ].n_channels,
1315 priv->bands[IEEE80211_BAND_5GHZ].n_channels);
1316
1317 set_bit(STATUS_GEO_CONFIGURED, &priv->status);
1318
1319 return 0;
1320}
1321
1322/*
1323 * iwl_free_geos - undo allocations in iwl_init_geos
1324 */
1325static void iwl_free_geos(struct iwl_priv *priv)
1326{
1327 kfree(priv->ieee_channels);
1328 kfree(priv->ieee_rates);
1329 clear_bit(STATUS_GEO_CONFIGURED, &priv->status);
1330}
1331
1332int iwl_init_drv(struct iwl_priv *priv)
1333{
1334 int ret;
1335
1336 spin_lock_init(&priv->sta_lock); 1134 spin_lock_init(&priv->sta_lock);
1337 1135
1338 mutex_init(&priv->mutex); 1136 mutex_init(&priv->mutex);
1339 1137
1340 INIT_LIST_HEAD(&priv->calib_results); 1138 INIT_LIST_HEAD(&priv->calib_results);
1341 1139
1342 priv->ieee_channels = NULL;
1343 priv->ieee_rates = NULL;
1344 priv->band = IEEE80211_BAND_2GHZ; 1140 priv->band = IEEE80211_BAND_2GHZ;
1345 1141
1346 priv->plcp_delta_threshold = 1142 priv->plcp_delta_threshold =
@@ -1371,31 +1167,11 @@ int iwl_init_drv(struct iwl_priv *priv)
1371 priv->dynamic_frag_thresh = BT_FRAG_THRESHOLD_DEF; 1167 priv->dynamic_frag_thresh = BT_FRAG_THRESHOLD_DEF;
1372 } 1168 }
1373 1169
1374 ret = iwl_init_channel_map(priv);
1375 if (ret) {
1376 IWL_ERR(priv, "initializing regulatory failed: %d\n", ret);
1377 goto err;
1378 }
1379
1380 ret = iwl_init_geos(priv);
1381 if (ret) {
1382 IWL_ERR(priv, "initializing geos failed: %d\n", ret);
1383 goto err_free_channel_map;
1384 }
1385 iwl_init_hw_rates(priv->ieee_rates);
1386
1387 return 0; 1170 return 0;
1388
1389err_free_channel_map:
1390 iwl_free_channel_map(priv);
1391err:
1392 return ret;
1393} 1171}
1394 1172
1395void iwl_uninit_drv(struct iwl_priv *priv) 1173static void iwl_uninit_drv(struct iwl_priv *priv)
1396{ 1174{
1397 iwl_free_geos(priv);
1398 iwl_free_channel_map(priv);
1399 kfree(priv->scan_cmd); 1175 kfree(priv->scan_cmd);
1400 kfree(priv->beacon_cmd); 1176 kfree(priv->beacon_cmd);
1401 kfree(rcu_dereference_raw(priv->noa_data)); 1177 kfree(rcu_dereference_raw(priv->noa_data));
@@ -1405,15 +1181,12 @@ void iwl_uninit_drv(struct iwl_priv *priv)
1405#endif 1181#endif
1406} 1182}
1407 1183
1408void iwl_set_hw_params(struct iwl_priv *priv) 1184static void iwl_set_hw_params(struct iwl_priv *priv)
1409{ 1185{
1410 if (priv->cfg->ht_params) 1186 if (priv->cfg->ht_params)
1411 priv->hw_params.use_rts_for_aggregation = 1187 priv->hw_params.use_rts_for_aggregation =
1412 priv->cfg->ht_params->use_rts_for_aggregation; 1188 priv->cfg->ht_params->use_rts_for_aggregation;
1413 1189
1414 if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_ALL)
1415 priv->hw_params.sku &= ~EEPROM_SKU_CAP_11N_ENABLE;
1416
1417 /* Device-specific setup */ 1190 /* Device-specific setup */
1418 priv->lib->set_hw_params(priv); 1191 priv->lib->set_hw_params(priv);
1419} 1192}
@@ -1421,7 +1194,7 @@ void iwl_set_hw_params(struct iwl_priv *priv)
1421 1194
1422 1195
1423/* show what optional capabilities we have */ 1196/* show what optional capabilities we have */
1424void iwl_option_config(struct iwl_priv *priv) 1197static void iwl_option_config(struct iwl_priv *priv)
1425{ 1198{
1426#ifdef CONFIG_IWLWIFI_DEBUG 1199#ifdef CONFIG_IWLWIFI_DEBUG
1427 IWL_INFO(priv, "CONFIG_IWLWIFI_DEBUG enabled\n"); 1200 IWL_INFO(priv, "CONFIG_IWLWIFI_DEBUG enabled\n");
@@ -1454,6 +1227,42 @@ void iwl_option_config(struct iwl_priv *priv)
1454#endif 1227#endif
1455} 1228}
1456 1229
1230static int iwl_eeprom_init_hw_params(struct iwl_priv *priv)
1231{
1232 u16 radio_cfg;
1233
1234 priv->eeprom_data->sku = priv->eeprom_data->sku;
1235
1236 if (priv->eeprom_data->sku & EEPROM_SKU_CAP_11N_ENABLE &&
1237 !priv->cfg->ht_params) {
1238 IWL_ERR(priv, "Invalid 11n configuration\n");
1239 return -EINVAL;
1240 }
1241
1242 if (!priv->eeprom_data->sku) {
1243 IWL_ERR(priv, "Invalid device sku\n");
1244 return -EINVAL;
1245 }
1246
1247 IWL_INFO(priv, "Device SKU: 0x%X\n", priv->eeprom_data->sku);
1248
1249 radio_cfg = priv->eeprom_data->radio_cfg;
1250
1251 priv->hw_params.tx_chains_num =
1252 num_of_ant(priv->eeprom_data->valid_tx_ant);
1253 if (priv->cfg->rx_with_siso_diversity)
1254 priv->hw_params.rx_chains_num = 1;
1255 else
1256 priv->hw_params.rx_chains_num =
1257 num_of_ant(priv->eeprom_data->valid_rx_ant);
1258
1259 IWL_INFO(priv, "Valid Tx ant: 0x%X, Valid Rx ant: 0x%X\n",
1260 priv->eeprom_data->valid_tx_ant,
1261 priv->eeprom_data->valid_rx_ant);
1262
1263 return 0;
1264}
1265
1457static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans, 1266static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
1458 const struct iwl_cfg *cfg, 1267 const struct iwl_cfg *cfg,
1459 const struct iwl_fw *fw) 1268 const struct iwl_fw *fw)
@@ -1539,9 +1348,12 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
1539 trans_cfg.queue_watchdog_timeout = 1348 trans_cfg.queue_watchdog_timeout =
1540 priv->cfg->base_params->wd_timeout; 1349 priv->cfg->base_params->wd_timeout;
1541 else 1350 else
1542 trans_cfg.queue_watchdog_timeout = IWL_WATCHHDOG_DISABLED; 1351 trans_cfg.queue_watchdog_timeout = IWL_WATCHDOG_DISABLED;
1543 trans_cfg.command_names = iwl_dvm_cmd_strings; 1352 trans_cfg.command_names = iwl_dvm_cmd_strings;
1544 1353
1354 WARN_ON(sizeof(priv->transport_queue_stop) * BITS_PER_BYTE <
1355 priv->cfg->base_params->num_of_queues);
1356
1545 ucode_flags = fw->ucode_capa.flags; 1357 ucode_flags = fw->ucode_capa.flags;
1546 1358
1547#ifndef CONFIG_IWLWIFI_P2P 1359#ifndef CONFIG_IWLWIFI_P2P
@@ -1599,25 +1411,33 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
1599 goto out_free_hw; 1411 goto out_free_hw;
1600 1412
1601 /* Read the EEPROM */ 1413 /* Read the EEPROM */
1602 if (iwl_eeprom_init(priv, priv->trans->hw_rev)) { 1414 if (iwl_read_eeprom(priv->trans, &priv->eeprom_blob,
1415 &priv->eeprom_blob_size)) {
1603 IWL_ERR(priv, "Unable to init EEPROM\n"); 1416 IWL_ERR(priv, "Unable to init EEPROM\n");
1604 goto out_free_hw; 1417 goto out_free_hw;
1605 } 1418 }
1419
1606 /* Reset chip to save power until we load uCode during "up". */ 1420 /* Reset chip to save power until we load uCode during "up". */
1607 iwl_trans_stop_hw(priv->trans, false); 1421 iwl_trans_stop_hw(priv->trans, false);
1608 1422
1609 if (iwl_eeprom_check_version(priv)) 1423 priv->eeprom_data = iwl_parse_eeprom_data(priv->trans->dev, priv->cfg,
1424 priv->eeprom_blob,
1425 priv->eeprom_blob_size);
1426 if (!priv->eeprom_data)
1427 goto out_free_eeprom_blob;
1428
1429 if (iwl_eeprom_check_version(priv->eeprom_data, priv->trans))
1610 goto out_free_eeprom; 1430 goto out_free_eeprom;
1611 1431
1612 if (iwl_eeprom_init_hw_params(priv)) 1432 if (iwl_eeprom_init_hw_params(priv))
1613 goto out_free_eeprom; 1433 goto out_free_eeprom;
1614 1434
1615 /* extract MAC Address */ 1435 /* extract MAC Address */
1616 iwl_eeprom_get_mac(priv, priv->addresses[0].addr); 1436 memcpy(priv->addresses[0].addr, priv->eeprom_data->hw_addr, ETH_ALEN);
1617 IWL_DEBUG_INFO(priv, "MAC address: %pM\n", priv->addresses[0].addr); 1437 IWL_DEBUG_INFO(priv, "MAC address: %pM\n", priv->addresses[0].addr);
1618 priv->hw->wiphy->addresses = priv->addresses; 1438 priv->hw->wiphy->addresses = priv->addresses;
1619 priv->hw->wiphy->n_addresses = 1; 1439 priv->hw->wiphy->n_addresses = 1;
1620 num_mac = iwl_eeprom_query16(priv, EEPROM_NUM_MAC_ADDRESS); 1440 num_mac = priv->eeprom_data->n_hw_addrs;
1621 if (num_mac > 1) { 1441 if (num_mac > 1) {
1622 memcpy(priv->addresses[1].addr, priv->addresses[0].addr, 1442 memcpy(priv->addresses[1].addr, priv->addresses[0].addr,
1623 ETH_ALEN); 1443 ETH_ALEN);
@@ -1630,7 +1450,7 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
1630 ************************/ 1450 ************************/
1631 iwl_set_hw_params(priv); 1451 iwl_set_hw_params(priv);
1632 1452
1633 if (!(priv->hw_params.sku & EEPROM_SKU_CAP_IPAN_ENABLE)) { 1453 if (!(priv->eeprom_data->sku & EEPROM_SKU_CAP_IPAN_ENABLE)) {
1634 IWL_DEBUG_INFO(priv, "Your EEPROM disabled PAN"); 1454 IWL_DEBUG_INFO(priv, "Your EEPROM disabled PAN");
1635 ucode_flags &= ~IWL_UCODE_TLV_FLAGS_PAN; 1455 ucode_flags &= ~IWL_UCODE_TLV_FLAGS_PAN;
1636 /* 1456 /*
@@ -1711,8 +1531,10 @@ out_destroy_workqueue:
1711 destroy_workqueue(priv->workqueue); 1531 destroy_workqueue(priv->workqueue);
1712 priv->workqueue = NULL; 1532 priv->workqueue = NULL;
1713 iwl_uninit_drv(priv); 1533 iwl_uninit_drv(priv);
1534out_free_eeprom_blob:
1535 kfree(priv->eeprom_blob);
1714out_free_eeprom: 1536out_free_eeprom:
1715 iwl_eeprom_free(priv); 1537 iwl_free_eeprom_data(priv->eeprom_data);
1716out_free_hw: 1538out_free_hw:
1717 ieee80211_free_hw(priv->hw); 1539 ieee80211_free_hw(priv->hw);
1718out: 1540out:
@@ -1720,7 +1542,7 @@ out:
1720 return op_mode; 1542 return op_mode;
1721} 1543}
1722 1544
1723void iwl_op_mode_dvm_stop(struct iwl_op_mode *op_mode) 1545static void iwl_op_mode_dvm_stop(struct iwl_op_mode *op_mode)
1724{ 1546{
1725 struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode); 1547 struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
1726 1548
@@ -1728,7 +1550,7 @@ void iwl_op_mode_dvm_stop(struct iwl_op_mode *op_mode)
1728 1550
1729 iwl_dbgfs_unregister(priv); 1551 iwl_dbgfs_unregister(priv);
1730 1552
1731 iwl_testmode_cleanup(priv); 1553 iwl_testmode_free(priv);
1732 iwlagn_mac_unregister(priv); 1554 iwlagn_mac_unregister(priv);
1733 1555
1734 iwl_tt_exit(priv); 1556 iwl_tt_exit(priv);
@@ -1737,7 +1559,8 @@ void iwl_op_mode_dvm_stop(struct iwl_op_mode *op_mode)
1737 priv->ucode_loaded = false; 1559 priv->ucode_loaded = false;
1738 iwl_trans_stop_device(priv->trans); 1560 iwl_trans_stop_device(priv->trans);
1739 1561
1740 iwl_eeprom_free(priv); 1562 kfree(priv->eeprom_blob);
1563 iwl_free_eeprom_data(priv->eeprom_data);
1741 1564
1742 /*netif_stop_queue(dev); */ 1565 /*netif_stop_queue(dev); */
1743 flush_workqueue(priv->workqueue); 1566 flush_workqueue(priv->workqueue);
@@ -1850,7 +1673,7 @@ static void iwl_dump_nic_error_log(struct iwl_priv *priv)
1850 } 1673 }
1851 1674
1852 /*TODO: Update dbgfs with ISR error stats obtained below */ 1675 /*TODO: Update dbgfs with ISR error stats obtained below */
1853 iwl_read_targ_mem_words(trans, base, &table, sizeof(table)); 1676 iwl_read_targ_mem_bytes(trans, base, &table, sizeof(table));
1854 1677
1855 if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) { 1678 if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
1856 IWL_ERR(trans, "Start IWL Error Log Dump:\n"); 1679 IWL_ERR(trans, "Start IWL Error Log Dump:\n");
@@ -2185,7 +2008,7 @@ static void iwlagn_fw_error(struct iwl_priv *priv, bool ondemand)
2185 } 2008 }
2186} 2009}
2187 2010
2188void iwl_nic_error(struct iwl_op_mode *op_mode) 2011static void iwl_nic_error(struct iwl_op_mode *op_mode)
2189{ 2012{
2190 struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode); 2013 struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
2191 2014
@@ -2198,7 +2021,7 @@ void iwl_nic_error(struct iwl_op_mode *op_mode)
2198 iwlagn_fw_error(priv, false); 2021 iwlagn_fw_error(priv, false);
2199} 2022}
2200 2023
2201void iwl_cmd_queue_full(struct iwl_op_mode *op_mode) 2024static void iwl_cmd_queue_full(struct iwl_op_mode *op_mode)
2202{ 2025{
2203 struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode); 2026 struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
2204 2027
@@ -2208,11 +2031,60 @@ void iwl_cmd_queue_full(struct iwl_op_mode *op_mode)
2208 } 2031 }
2209} 2032}
2210 2033
2211void iwl_nic_config(struct iwl_op_mode *op_mode) 2034#define EEPROM_RF_CONFIG_TYPE_MAX 0x3
2035
2036static void iwl_nic_config(struct iwl_op_mode *op_mode)
2212{ 2037{
2213 struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode); 2038 struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
2039 u16 radio_cfg = priv->eeprom_data->radio_cfg;
2040
2041 /* SKU Control */
2042 iwl_set_bits_mask(priv->trans, CSR_HW_IF_CONFIG_REG,
2043 CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH |
2044 CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP,
2045 (CSR_HW_REV_STEP(priv->trans->hw_rev) <<
2046 CSR_HW_IF_CONFIG_REG_POS_MAC_STEP) |
2047 (CSR_HW_REV_DASH(priv->trans->hw_rev) <<
2048 CSR_HW_IF_CONFIG_REG_POS_MAC_DASH));
2049
2050 /* write radio config values to register */
2051 if (EEPROM_RF_CFG_TYPE_MSK(radio_cfg) <= EEPROM_RF_CONFIG_TYPE_MAX) {
2052 u32 reg_val =
2053 EEPROM_RF_CFG_TYPE_MSK(radio_cfg) <<
2054 CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE |
2055 EEPROM_RF_CFG_STEP_MSK(radio_cfg) <<
2056 CSR_HW_IF_CONFIG_REG_POS_PHY_STEP |
2057 EEPROM_RF_CFG_DASH_MSK(radio_cfg) <<
2058 CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;
2059
2060 iwl_set_bits_mask(priv->trans, CSR_HW_IF_CONFIG_REG,
2061 CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE |
2062 CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP |
2063 CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH, reg_val);
2064
2065 IWL_INFO(priv, "Radio type=0x%x-0x%x-0x%x\n",
2066 EEPROM_RF_CFG_TYPE_MSK(radio_cfg),
2067 EEPROM_RF_CFG_STEP_MSK(radio_cfg),
2068 EEPROM_RF_CFG_DASH_MSK(radio_cfg));
2069 } else {
2070 WARN_ON(1);
2071 }
2072
2073 /* set CSR_HW_CONFIG_REG for uCode use */
2074 iwl_set_bit(priv->trans, CSR_HW_IF_CONFIG_REG,
2075 CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
2076 CSR_HW_IF_CONFIG_REG_BIT_MAC_SI);
2077
2078 /* W/A : NIC is stuck in a reset state after Early PCIe power off
2079 * (PCIe power is lost before PERST# is asserted),
2080 * causing ME FW to lose ownership and not being able to obtain it back.
2081 */
2082 iwl_set_bits_mask_prph(priv->trans, APMG_PS_CTRL_REG,
2083 APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
2084 ~APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
2214 2085
2215 priv->lib->nic_config(priv); 2086 if (priv->lib->nic_config)
2087 priv->lib->nic_config(priv);
2216} 2088}
2217 2089
2218static void iwl_wimax_active(struct iwl_op_mode *op_mode) 2090static void iwl_wimax_active(struct iwl_op_mode *op_mode)
@@ -2223,7 +2095,7 @@ static void iwl_wimax_active(struct iwl_op_mode *op_mode)
2223 IWL_ERR(priv, "RF is used by WiMAX\n"); 2095 IWL_ERR(priv, "RF is used by WiMAX\n");
2224} 2096}
2225 2097
2226void iwl_stop_sw_queue(struct iwl_op_mode *op_mode, int queue) 2098static void iwl_stop_sw_queue(struct iwl_op_mode *op_mode, int queue)
2227{ 2099{
2228 struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode); 2100 struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
2229 int mq = priv->queue_to_mac80211[queue]; 2101 int mq = priv->queue_to_mac80211[queue];
@@ -2242,7 +2114,7 @@ void iwl_stop_sw_queue(struct iwl_op_mode *op_mode, int queue)
2242 ieee80211_stop_queue(priv->hw, mq); 2114 ieee80211_stop_queue(priv->hw, mq);
2243} 2115}
2244 2116
2245void iwl_wake_sw_queue(struct iwl_op_mode *op_mode, int queue) 2117static void iwl_wake_sw_queue(struct iwl_op_mode *op_mode, int queue)
2246{ 2118{
2247 struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode); 2119 struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
2248 int mq = priv->queue_to_mac80211[queue]; 2120 int mq = priv->queue_to_mac80211[queue];
@@ -2282,16 +2154,17 @@ void iwlagn_lift_passive_no_rx(struct iwl_priv *priv)
2282 priv->passive_no_rx = false; 2154 priv->passive_no_rx = false;
2283} 2155}
2284 2156
2285void iwl_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb) 2157static void iwl_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb)
2286{ 2158{
2159 struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
2287 struct ieee80211_tx_info *info; 2160 struct ieee80211_tx_info *info;
2288 2161
2289 info = IEEE80211_SKB_CB(skb); 2162 info = IEEE80211_SKB_CB(skb);
2290 kmem_cache_free(iwl_tx_cmd_pool, (info->driver_data[1])); 2163 iwl_trans_free_tx_cmd(priv->trans, info->driver_data[1]);
2291 dev_kfree_skb_any(skb); 2164 dev_kfree_skb_any(skb);
2292} 2165}
2293 2166
2294void iwl_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state) 2167static void iwl_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
2295{ 2168{
2296 struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode); 2169 struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
2297 2170
@@ -2303,7 +2176,7 @@ void iwl_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
2303 wiphy_rfkill_set_hw_state(priv->hw->wiphy, state); 2176 wiphy_rfkill_set_hw_state(priv->hw->wiphy, state);
2304} 2177}
2305 2178
2306const struct iwl_op_mode_ops iwl_dvm_ops = { 2179static const struct iwl_op_mode_ops iwl_dvm_ops = {
2307 .start = iwl_op_mode_dvm_start, 2180 .start = iwl_op_mode_dvm_start,
2308 .stop = iwl_op_mode_dvm_stop, 2181 .stop = iwl_op_mode_dvm_stop,
2309 .rx = iwl_rx_dispatch, 2182 .rx = iwl_rx_dispatch,
@@ -2322,9 +2195,6 @@ const struct iwl_op_mode_ops iwl_dvm_ops = {
2322 * driver and module entry point 2195 * driver and module entry point
2323 * 2196 *
2324 *****************************************************************************/ 2197 *****************************************************************************/
2325
2326struct kmem_cache *iwl_tx_cmd_pool;
2327
2328static int __init iwl_init(void) 2198static int __init iwl_init(void)
2329{ 2199{
2330 2200
@@ -2332,36 +2202,25 @@ static int __init iwl_init(void)
2332 pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n"); 2202 pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n");
2333 pr_info(DRV_COPYRIGHT "\n"); 2203 pr_info(DRV_COPYRIGHT "\n");
2334 2204
2335 iwl_tx_cmd_pool = kmem_cache_create("iwl_dev_cmd",
2336 sizeof(struct iwl_device_cmd),
2337 sizeof(void *), 0, NULL);
2338 if (!iwl_tx_cmd_pool)
2339 return -ENOMEM;
2340
2341 ret = iwlagn_rate_control_register(); 2205 ret = iwlagn_rate_control_register();
2342 if (ret) { 2206 if (ret) {
2343 pr_err("Unable to register rate control algorithm: %d\n", ret); 2207 pr_err("Unable to register rate control algorithm: %d\n", ret);
2344 goto error_rc_register; 2208 return ret;
2345 } 2209 }
2346 2210
2347 ret = iwl_pci_register_driver(); 2211 ret = iwl_opmode_register("iwldvm", &iwl_dvm_ops);
2348 if (ret) 2212 if (ret) {
2349 goto error_pci_register; 2213 pr_err("Unable to register op_mode: %d\n", ret);
2350 return ret; 2214 iwlagn_rate_control_unregister();
2215 }
2351 2216
2352error_pci_register:
2353 iwlagn_rate_control_unregister();
2354error_rc_register:
2355 kmem_cache_destroy(iwl_tx_cmd_pool);
2356 return ret; 2217 return ret;
2357} 2218}
2219module_init(iwl_init);
2358 2220
2359static void __exit iwl_exit(void) 2221static void __exit iwl_exit(void)
2360{ 2222{
2361 iwl_pci_unregister_driver(); 2223 iwl_opmode_deregister("iwldvm");
2362 iwlagn_rate_control_unregister(); 2224 iwlagn_rate_control_unregister();
2363 kmem_cache_destroy(iwl_tx_cmd_pool);
2364} 2225}
2365
2366module_exit(iwl_exit); 2226module_exit(iwl_exit);
2367module_init(iwl_init);
diff --git a/drivers/net/wireless/iwlwifi/iwl-power.c b/drivers/net/wireless/iwlwifi/dvm/power.c
index 544ddf17f5b..518cf371580 100644
--- a/drivers/net/wireless/iwlwifi/iwl-power.c
+++ b/drivers/net/wireless/iwlwifi/dvm/power.c
@@ -31,18 +31,15 @@
31#include <linux/module.h> 31#include <linux/module.h>
32#include <linux/slab.h> 32#include <linux/slab.h>
33#include <linux/init.h> 33#include <linux/init.h>
34
35#include <net/mac80211.h> 34#include <net/mac80211.h>
36
37#include "iwl-eeprom.h"
38#include "iwl-dev.h"
39#include "iwl-agn.h"
40#include "iwl-io.h" 35#include "iwl-io.h"
41#include "iwl-commands.h"
42#include "iwl-debug.h" 36#include "iwl-debug.h"
43#include "iwl-power.h"
44#include "iwl-trans.h" 37#include "iwl-trans.h"
45#include "iwl-modparams.h" 38#include "iwl-modparams.h"
39#include "dev.h"
40#include "agn.h"
41#include "commands.h"
42#include "power.h"
46 43
47/* 44/*
48 * Setting power level allows the card to go to sleep when not busy. 45 * Setting power level allows the card to go to sleep when not busy.
diff --git a/drivers/net/wireless/iwlwifi/iwl-power.h b/drivers/net/wireless/iwlwifi/dvm/power.h
index 21afc92efac..a2cee7f0484 100644
--- a/drivers/net/wireless/iwlwifi/iwl-power.h
+++ b/drivers/net/wireless/iwlwifi/dvm/power.h
@@ -28,7 +28,7 @@
28#ifndef __iwl_power_setting_h__ 28#ifndef __iwl_power_setting_h__
29#define __iwl_power_setting_h__ 29#define __iwl_power_setting_h__
30 30
31#include "iwl-commands.h" 31#include "commands.h"
32 32
33struct iwl_power_mgr { 33struct iwl_power_mgr {
34 struct iwl_powertable_cmd sleep_cmd; 34 struct iwl_powertable_cmd sleep_cmd;
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/dvm/rs.c
index 8cebd7c363f..6fddd2785e6 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
+++ b/drivers/net/wireless/iwlwifi/dvm/rs.c
@@ -35,10 +35,8 @@
35 35
36#include <linux/workqueue.h> 36#include <linux/workqueue.h>
37 37
38#include "iwl-dev.h" 38#include "dev.h"
39#include "iwl-agn.h" 39#include "agn.h"
40#include "iwl-op-mode.h"
41#include "iwl-modparams.h"
42 40
43#define RS_NAME "iwl-agn-rs" 41#define RS_NAME "iwl-agn-rs"
44 42
@@ -819,7 +817,7 @@ static u32 rs_get_lower_rate(struct iwl_lq_sta *lq_sta,
819 817
820 if (num_of_ant(tbl->ant_type) > 1) 818 if (num_of_ant(tbl->ant_type) > 1)
821 tbl->ant_type = 819 tbl->ant_type =
822 first_antenna(priv->hw_params.valid_tx_ant); 820 first_antenna(priv->eeprom_data->valid_tx_ant);
823 821
824 tbl->is_ht40 = 0; 822 tbl->is_ht40 = 0;
825 tbl->is_SGI = 0; 823 tbl->is_SGI = 0;
@@ -1447,7 +1445,7 @@ static int rs_move_legacy_other(struct iwl_priv *priv,
1447 u32 sz = (sizeof(struct iwl_scale_tbl_info) - 1445 u32 sz = (sizeof(struct iwl_scale_tbl_info) -
1448 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT)); 1446 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
1449 u8 start_action; 1447 u8 start_action;
1450 u8 valid_tx_ant = priv->hw_params.valid_tx_ant; 1448 u8 valid_tx_ant = priv->eeprom_data->valid_tx_ant;
1451 u8 tx_chains_num = priv->hw_params.tx_chains_num; 1449 u8 tx_chains_num = priv->hw_params.tx_chains_num;
1452 int ret = 0; 1450 int ret = 0;
1453 u8 update_search_tbl_counter = 0; 1451 u8 update_search_tbl_counter = 0;
@@ -1465,7 +1463,7 @@ static int rs_move_legacy_other(struct iwl_priv *priv,
1465 case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS: 1463 case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
1466 /* avoid antenna B and MIMO */ 1464 /* avoid antenna B and MIMO */
1467 valid_tx_ant = 1465 valid_tx_ant =
1468 first_antenna(priv->hw_params.valid_tx_ant); 1466 first_antenna(priv->eeprom_data->valid_tx_ant);
1469 if (tbl->action >= IWL_LEGACY_SWITCH_ANTENNA2 && 1467 if (tbl->action >= IWL_LEGACY_SWITCH_ANTENNA2 &&
1470 tbl->action != IWL_LEGACY_SWITCH_SISO) 1468 tbl->action != IWL_LEGACY_SWITCH_SISO)
1471 tbl->action = IWL_LEGACY_SWITCH_SISO; 1469 tbl->action = IWL_LEGACY_SWITCH_SISO;
@@ -1489,7 +1487,7 @@ static int rs_move_legacy_other(struct iwl_priv *priv,
1489 else if (tbl->action >= IWL_LEGACY_SWITCH_ANTENNA2) 1487 else if (tbl->action >= IWL_LEGACY_SWITCH_ANTENNA2)
1490 tbl->action = IWL_LEGACY_SWITCH_SISO; 1488 tbl->action = IWL_LEGACY_SWITCH_SISO;
1491 valid_tx_ant = 1489 valid_tx_ant =
1492 first_antenna(priv->hw_params.valid_tx_ant); 1490 first_antenna(priv->eeprom_data->valid_tx_ant);
1493 } 1491 }
1494 1492
1495 start_action = tbl->action; 1493 start_action = tbl->action;
@@ -1623,7 +1621,7 @@ static int rs_move_siso_to_other(struct iwl_priv *priv,
1623 u32 sz = (sizeof(struct iwl_scale_tbl_info) - 1621 u32 sz = (sizeof(struct iwl_scale_tbl_info) -
1624 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT)); 1622 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
1625 u8 start_action; 1623 u8 start_action;
1626 u8 valid_tx_ant = priv->hw_params.valid_tx_ant; 1624 u8 valid_tx_ant = priv->eeprom_data->valid_tx_ant;
1627 u8 tx_chains_num = priv->hw_params.tx_chains_num; 1625 u8 tx_chains_num = priv->hw_params.tx_chains_num;
1628 u8 update_search_tbl_counter = 0; 1626 u8 update_search_tbl_counter = 0;
1629 int ret; 1627 int ret;
@@ -1641,7 +1639,7 @@ static int rs_move_siso_to_other(struct iwl_priv *priv,
1641 case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS: 1639 case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
1642 /* avoid antenna B and MIMO */ 1640 /* avoid antenna B and MIMO */
1643 valid_tx_ant = 1641 valid_tx_ant =
1644 first_antenna(priv->hw_params.valid_tx_ant); 1642 first_antenna(priv->eeprom_data->valid_tx_ant);
1645 if (tbl->action != IWL_SISO_SWITCH_ANTENNA1) 1643 if (tbl->action != IWL_SISO_SWITCH_ANTENNA1)
1646 tbl->action = IWL_SISO_SWITCH_ANTENNA1; 1644 tbl->action = IWL_SISO_SWITCH_ANTENNA1;
1647 break; 1645 break;
@@ -1659,7 +1657,7 @@ static int rs_move_siso_to_other(struct iwl_priv *priv,
1659 /* configure as 1x1 if bt full concurrency */ 1657 /* configure as 1x1 if bt full concurrency */
1660 if (priv->bt_full_concurrent) { 1658 if (priv->bt_full_concurrent) {
1661 valid_tx_ant = 1659 valid_tx_ant =
1662 first_antenna(priv->hw_params.valid_tx_ant); 1660 first_antenna(priv->eeprom_data->valid_tx_ant);
1663 if (tbl->action >= IWL_LEGACY_SWITCH_ANTENNA2) 1661 if (tbl->action >= IWL_LEGACY_SWITCH_ANTENNA2)
1664 tbl->action = IWL_SISO_SWITCH_ANTENNA1; 1662 tbl->action = IWL_SISO_SWITCH_ANTENNA1;
1665 } 1663 }
@@ -1795,7 +1793,7 @@ static int rs_move_mimo2_to_other(struct iwl_priv *priv,
1795 u32 sz = (sizeof(struct iwl_scale_tbl_info) - 1793 u32 sz = (sizeof(struct iwl_scale_tbl_info) -
1796 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT)); 1794 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
1797 u8 start_action; 1795 u8 start_action;
1798 u8 valid_tx_ant = priv->hw_params.valid_tx_ant; 1796 u8 valid_tx_ant = priv->eeprom_data->valid_tx_ant;
1799 u8 tx_chains_num = priv->hw_params.tx_chains_num; 1797 u8 tx_chains_num = priv->hw_params.tx_chains_num;
1800 u8 update_search_tbl_counter = 0; 1798 u8 update_search_tbl_counter = 0;
1801 int ret; 1799 int ret;
@@ -1965,7 +1963,7 @@ static int rs_move_mimo3_to_other(struct iwl_priv *priv,
1965 u32 sz = (sizeof(struct iwl_scale_tbl_info) - 1963 u32 sz = (sizeof(struct iwl_scale_tbl_info) -
1966 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT)); 1964 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
1967 u8 start_action; 1965 u8 start_action;
1968 u8 valid_tx_ant = priv->hw_params.valid_tx_ant; 1966 u8 valid_tx_ant = priv->eeprom_data->valid_tx_ant;
1969 u8 tx_chains_num = priv->hw_params.tx_chains_num; 1967 u8 tx_chains_num = priv->hw_params.tx_chains_num;
1970 int ret; 1968 int ret;
1971 u8 update_search_tbl_counter = 0; 1969 u8 update_search_tbl_counter = 0;
@@ -2699,7 +2697,7 @@ static void rs_initialize_lq(struct iwl_priv *priv,
2699 2697
2700 i = lq_sta->last_txrate_idx; 2698 i = lq_sta->last_txrate_idx;
2701 2699
2702 valid_tx_ant = priv->hw_params.valid_tx_ant; 2700 valid_tx_ant = priv->eeprom_data->valid_tx_ant;
2703 2701
2704 if (!lq_sta->search_better_tbl) 2702 if (!lq_sta->search_better_tbl)
2705 active_tbl = lq_sta->active_tbl; 2703 active_tbl = lq_sta->active_tbl;
@@ -2893,15 +2891,15 @@ void iwl_rs_rate_init(struct iwl_priv *priv, struct ieee80211_sta *sta, u8 sta_i
2893 2891
2894 /* These values will be overridden later */ 2892 /* These values will be overridden later */
2895 lq_sta->lq.general_params.single_stream_ant_msk = 2893 lq_sta->lq.general_params.single_stream_ant_msk =
2896 first_antenna(priv->hw_params.valid_tx_ant); 2894 first_antenna(priv->eeprom_data->valid_tx_ant);
2897 lq_sta->lq.general_params.dual_stream_ant_msk = 2895 lq_sta->lq.general_params.dual_stream_ant_msk =
2898 priv->hw_params.valid_tx_ant & 2896 priv->eeprom_data->valid_tx_ant &
2899 ~first_antenna(priv->hw_params.valid_tx_ant); 2897 ~first_antenna(priv->eeprom_data->valid_tx_ant);
2900 if (!lq_sta->lq.general_params.dual_stream_ant_msk) { 2898 if (!lq_sta->lq.general_params.dual_stream_ant_msk) {
2901 lq_sta->lq.general_params.dual_stream_ant_msk = ANT_AB; 2899 lq_sta->lq.general_params.dual_stream_ant_msk = ANT_AB;
2902 } else if (num_of_ant(priv->hw_params.valid_tx_ant) == 2) { 2900 } else if (num_of_ant(priv->eeprom_data->valid_tx_ant) == 2) {
2903 lq_sta->lq.general_params.dual_stream_ant_msk = 2901 lq_sta->lq.general_params.dual_stream_ant_msk =
2904 priv->hw_params.valid_tx_ant; 2902 priv->eeprom_data->valid_tx_ant;
2905 } 2903 }
2906 2904
2907 /* as default allow aggregation for all tids */ 2905 /* as default allow aggregation for all tids */
@@ -2947,7 +2945,7 @@ static void rs_fill_link_cmd(struct iwl_priv *priv,
2947 if (priv && priv->bt_full_concurrent) { 2945 if (priv && priv->bt_full_concurrent) {
2948 /* 1x1 only */ 2946 /* 1x1 only */
2949 tbl_type.ant_type = 2947 tbl_type.ant_type =
2950 first_antenna(priv->hw_params.valid_tx_ant); 2948 first_antenna(priv->eeprom_data->valid_tx_ant);
2951 } 2949 }
2952 2950
2953 /* How many times should we repeat the initial rate? */ 2951 /* How many times should we repeat the initial rate? */
@@ -2979,7 +2977,7 @@ static void rs_fill_link_cmd(struct iwl_priv *priv,
2979 if (priv->bt_full_concurrent) 2977 if (priv->bt_full_concurrent)
2980 valid_tx_ant = ANT_A; 2978 valid_tx_ant = ANT_A;
2981 else 2979 else
2982 valid_tx_ant = priv->hw_params.valid_tx_ant; 2980 valid_tx_ant = priv->eeprom_data->valid_tx_ant;
2983 } 2981 }
2984 2982
2985 /* Fill rest of rate table */ 2983 /* Fill rest of rate table */
@@ -3013,7 +3011,7 @@ static void rs_fill_link_cmd(struct iwl_priv *priv,
3013 if (priv && priv->bt_full_concurrent) { 3011 if (priv && priv->bt_full_concurrent) {
3014 /* 1x1 only */ 3012 /* 1x1 only */
3015 tbl_type.ant_type = 3013 tbl_type.ant_type =
3016 first_antenna(priv->hw_params.valid_tx_ant); 3014 first_antenna(priv->eeprom_data->valid_tx_ant);
3017 } 3015 }
3018 3016
3019 /* Indicate to uCode which entries might be MIMO. 3017 /* Indicate to uCode which entries might be MIMO.
@@ -3100,7 +3098,7 @@ static void rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
3100 u8 ant_sel_tx; 3098 u8 ant_sel_tx;
3101 3099
3102 priv = lq_sta->drv; 3100 priv = lq_sta->drv;
3103 valid_tx_ant = priv->hw_params.valid_tx_ant; 3101 valid_tx_ant = priv->eeprom_data->valid_tx_ant;
3104 if (lq_sta->dbg_fixed_rate) { 3102 if (lq_sta->dbg_fixed_rate) {
3105 ant_sel_tx = 3103 ant_sel_tx =
3106 ((lq_sta->dbg_fixed_rate & RATE_MCS_ANT_ABC_MSK) 3104 ((lq_sta->dbg_fixed_rate & RATE_MCS_ANT_ABC_MSK)
@@ -3171,9 +3169,9 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
3171 desc += sprintf(buff+desc, "fixed rate 0x%X\n", 3169 desc += sprintf(buff+desc, "fixed rate 0x%X\n",
3172 lq_sta->dbg_fixed_rate); 3170 lq_sta->dbg_fixed_rate);
3173 desc += sprintf(buff+desc, "valid_tx_ant %s%s%s\n", 3171 desc += sprintf(buff+desc, "valid_tx_ant %s%s%s\n",
3174 (priv->hw_params.valid_tx_ant & ANT_A) ? "ANT_A," : "", 3172 (priv->eeprom_data->valid_tx_ant & ANT_A) ? "ANT_A," : "",
3175 (priv->hw_params.valid_tx_ant & ANT_B) ? "ANT_B," : "", 3173 (priv->eeprom_data->valid_tx_ant & ANT_B) ? "ANT_B," : "",
3176 (priv->hw_params.valid_tx_ant & ANT_C) ? "ANT_C" : ""); 3174 (priv->eeprom_data->valid_tx_ant & ANT_C) ? "ANT_C" : "");
3177 desc += sprintf(buff+desc, "lq type %s\n", 3175 desc += sprintf(buff+desc, "lq type %s\n",
3178 (is_legacy(tbl->lq_type)) ? "legacy" : "HT"); 3176 (is_legacy(tbl->lq_type)) ? "legacy" : "HT");
3179 if (is_Ht(tbl->lq_type)) { 3177 if (is_Ht(tbl->lq_type)) {
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.h b/drivers/net/wireless/iwlwifi/dvm/rs.h
index 82d02e1ae89..ad3aea8f626 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
+++ b/drivers/net/wireless/iwlwifi/dvm/rs.h
@@ -29,9 +29,10 @@
29 29
30#include <net/mac80211.h> 30#include <net/mac80211.h>
31 31
32#include "iwl-commands.h"
33#include "iwl-config.h" 32#include "iwl-config.h"
34 33
34#include "commands.h"
35
35struct iwl_rate_info { 36struct iwl_rate_info {
36 u8 plcp; /* uCode API: IWL_RATE_6M_PLCP, etc. */ 37 u8 plcp; /* uCode API: IWL_RATE_6M_PLCP, etc. */
37 u8 plcp_siso; /* uCode API: IWL_RATE_SISO_6M_PLCP, etc. */ 38 u8 plcp_siso; /* uCode API: IWL_RATE_SISO_6M_PLCP, etc. */
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rx.c b/drivers/net/wireless/iwlwifi/dvm/rx.c
index 403de96f974..c1f7a18e08d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rx.c
+++ b/drivers/net/wireless/iwlwifi/dvm/rx.c
@@ -32,12 +32,10 @@
32#include <linux/sched.h> 32#include <linux/sched.h>
33#include <net/mac80211.h> 33#include <net/mac80211.h>
34#include <asm/unaligned.h> 34#include <asm/unaligned.h>
35#include "iwl-eeprom.h"
36#include "iwl-dev.h"
37#include "iwl-io.h" 35#include "iwl-io.h"
38#include "iwl-agn-calib.h" 36#include "dev.h"
39#include "iwl-agn.h" 37#include "calib.h"
40#include "iwl-modparams.h" 38#include "agn.h"
41 39
42#define IWL_CMD_ENTRY(x) [x] = #x 40#define IWL_CMD_ENTRY(x) [x] = #x
43 41
@@ -1012,6 +1010,8 @@ static int iwlagn_rx_reply_rx(struct iwl_priv *priv,
1012 rx_status.flag |= RX_FLAG_40MHZ; 1010 rx_status.flag |= RX_FLAG_40MHZ;
1013 if (rate_n_flags & RATE_MCS_SGI_MSK) 1011 if (rate_n_flags & RATE_MCS_SGI_MSK)
1014 rx_status.flag |= RX_FLAG_SHORT_GI; 1012 rx_status.flag |= RX_FLAG_SHORT_GI;
1013 if (rate_n_flags & RATE_MCS_GF_MSK)
1014 rx_status.flag |= RX_FLAG_HT_GF;
1015 1015
1016 iwlagn_pass_packet_to_mac80211(priv, header, len, ampdu_status, 1016 iwlagn_pass_packet_to_mac80211(priv, header, len, ampdu_status,
1017 rxb, &rx_status); 1017 rxb, &rx_status);
@@ -1124,8 +1124,6 @@ int iwl_rx_dispatch(struct iwl_op_mode *op_mode, struct iwl_rx_cmd_buffer *rxb,
1124{ 1124{
1125 struct iwl_rx_packet *pkt = rxb_addr(rxb); 1125 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1126 struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode); 1126 struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
1127 void (*pre_rx_handler)(struct iwl_priv *,
1128 struct iwl_rx_cmd_buffer *);
1129 int err = 0; 1127 int err = 0;
1130 1128
1131 /* 1129 /*
@@ -1135,19 +1133,19 @@ int iwl_rx_dispatch(struct iwl_op_mode *op_mode, struct iwl_rx_cmd_buffer *rxb,
1135 */ 1133 */
1136 iwl_notification_wait_notify(&priv->notif_wait, pkt); 1134 iwl_notification_wait_notify(&priv->notif_wait, pkt);
1137 1135
1138 /* RX data may be forwarded to userspace (using pre_rx_handler) in one 1136#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
1139 * of two cases: the first, that the user owns the uCode through 1137 /*
1140 * testmode - in such case the pre_rx_handler is set and no further 1138 * RX data may be forwarded to userspace in one
1141 * processing takes place. The other case is when the user want to 1139 * of two cases: the user owns the fw through testmode or when
1142 * monitor the rx w/o affecting the regular flow - the pre_rx_handler 1140 * the user requested to monitor the rx w/o affecting the regular flow.
1143 * will be set but the ownership flag != IWL_OWNERSHIP_TM and the flow 1141 * In these cases the iwl_test object will handle forwarding the rx
1142 * data to user space.
1143 * Note that if the ownership flag != IWL_OWNERSHIP_TM the flow
1144 * continues. 1144 * continues.
1145 * We need to use ACCESS_ONCE to prevent a case where the handler
1146 * changes between the check and the call.
1147 */ 1145 */
1148 pre_rx_handler = ACCESS_ONCE(priv->pre_rx_handler); 1146 iwl_test_rx(&priv->tst, rxb);
1149 if (pre_rx_handler) 1147#endif
1150 pre_rx_handler(priv, rxb); 1148
1151 if (priv->ucode_owner != IWL_OWNERSHIP_TM) { 1149 if (priv->ucode_owner != IWL_OWNERSHIP_TM) {
1152 /* Based on type of command response or notification, 1150 /* Based on type of command response or notification,
1153 * handle those that need handling via function in 1151 * handle those that need handling via function in
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c b/drivers/net/wireless/iwlwifi/dvm/rxon.c
index 0a3aa7c8300..6ee940f497f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
+++ b/drivers/net/wireless/iwlwifi/dvm/rxon.c
@@ -25,11 +25,11 @@
25 *****************************************************************************/ 25 *****************************************************************************/
26 26
27#include <linux/etherdevice.h> 27#include <linux/etherdevice.h>
28#include "iwl-dev.h"
29#include "iwl-agn.h"
30#include "iwl-agn-calib.h"
31#include "iwl-trans.h" 28#include "iwl-trans.h"
32#include "iwl-modparams.h" 29#include "iwl-modparams.h"
30#include "dev.h"
31#include "agn.h"
32#include "calib.h"
33 33
34/* 34/*
35 * initialize rxon structure with default values from eeprom 35 * initialize rxon structure with default values from eeprom
@@ -37,8 +37,6 @@
37void iwl_connection_init_rx_config(struct iwl_priv *priv, 37void iwl_connection_init_rx_config(struct iwl_priv *priv,
38 struct iwl_rxon_context *ctx) 38 struct iwl_rxon_context *ctx)
39{ 39{
40 const struct iwl_channel_info *ch_info;
41
42 memset(&ctx->staging, 0, sizeof(ctx->staging)); 40 memset(&ctx->staging, 0, sizeof(ctx->staging));
43 41
44 if (!ctx->vif) { 42 if (!ctx->vif) {
@@ -80,14 +78,8 @@ void iwl_connection_init_rx_config(struct iwl_priv *priv,
80 ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK; 78 ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
81#endif 79#endif
82 80
83 ch_info = iwl_get_channel_info(priv, priv->band, 81 ctx->staging.channel = cpu_to_le16(priv->hw->conf.channel->hw_value);
84 le16_to_cpu(ctx->active.channel)); 82 priv->band = priv->hw->conf.channel->band;
85
86 if (!ch_info)
87 ch_info = &priv->channel_info[0];
88
89 ctx->staging.channel = cpu_to_le16(ch_info->channel);
90 priv->band = ch_info->band;
91 83
92 iwl_set_flags_for_band(priv, ctx, priv->band, ctx->vif); 84 iwl_set_flags_for_band(priv, ctx, priv->band, ctx->vif);
93 85
@@ -175,7 +167,8 @@ static int iwlagn_disconn_pan(struct iwl_priv *priv,
175 return ret; 167 return ret;
176} 168}
177 169
178void iwlagn_update_qos(struct iwl_priv *priv, struct iwl_rxon_context *ctx) 170static void iwlagn_update_qos(struct iwl_priv *priv,
171 struct iwl_rxon_context *ctx)
179{ 172{
180 int ret; 173 int ret;
181 174
@@ -202,8 +195,8 @@ void iwlagn_update_qos(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
202 IWL_DEBUG_QUIET_RFKILL(priv, "Failed to update QoS\n"); 195 IWL_DEBUG_QUIET_RFKILL(priv, "Failed to update QoS\n");
203} 196}
204 197
205int iwlagn_update_beacon(struct iwl_priv *priv, 198static int iwlagn_update_beacon(struct iwl_priv *priv,
206 struct ieee80211_vif *vif) 199 struct ieee80211_vif *vif)
207{ 200{
208 lockdep_assert_held(&priv->mutex); 201 lockdep_assert_held(&priv->mutex);
209 202
@@ -215,7 +208,7 @@ int iwlagn_update_beacon(struct iwl_priv *priv,
215} 208}
216 209
217static int iwlagn_send_rxon_assoc(struct iwl_priv *priv, 210static int iwlagn_send_rxon_assoc(struct iwl_priv *priv,
218 struct iwl_rxon_context *ctx) 211 struct iwl_rxon_context *ctx)
219{ 212{
220 int ret = 0; 213 int ret = 0;
221 struct iwl_rxon_assoc_cmd rxon_assoc; 214 struct iwl_rxon_assoc_cmd rxon_assoc;
@@ -427,10 +420,10 @@ static int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
427 return -EINVAL; 420 return -EINVAL;
428 } 421 }
429 422
430 if (tx_power > priv->tx_power_device_lmt) { 423 if (tx_power > DIV_ROUND_UP(priv->eeprom_data->max_tx_pwr_half_dbm, 2)) {
431 IWL_WARN(priv, 424 IWL_WARN(priv,
432 "Requested user TXPOWER %d above upper limit %d.\n", 425 "Requested user TXPOWER %d above upper limit %d.\n",
433 tx_power, priv->tx_power_device_lmt); 426 tx_power, priv->eeprom_data->max_tx_pwr_half_dbm);
434 return -EINVAL; 427 return -EINVAL;
435 } 428 }
436 429
@@ -863,8 +856,8 @@ static int iwl_check_rxon_cmd(struct iwl_priv *priv,
863 * or is clearing the RXON_FILTER_ASSOC_MSK, then return 1 to indicate that 856 * or is clearing the RXON_FILTER_ASSOC_MSK, then return 1 to indicate that
864 * a new tune (full RXON command, rather than RXON_ASSOC cmd) is required. 857 * a new tune (full RXON command, rather than RXON_ASSOC cmd) is required.
865 */ 858 */
866int iwl_full_rxon_required(struct iwl_priv *priv, 859static int iwl_full_rxon_required(struct iwl_priv *priv,
867 struct iwl_rxon_context *ctx) 860 struct iwl_rxon_context *ctx)
868{ 861{
869 const struct iwl_rxon_cmd *staging = &ctx->staging; 862 const struct iwl_rxon_cmd *staging = &ctx->staging;
870 const struct iwl_rxon_cmd *active = &ctx->active; 863 const struct iwl_rxon_cmd *active = &ctx->active;
@@ -1189,7 +1182,6 @@ int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed)
1189 struct iwl_rxon_context *ctx; 1182 struct iwl_rxon_context *ctx;
1190 struct ieee80211_conf *conf = &hw->conf; 1183 struct ieee80211_conf *conf = &hw->conf;
1191 struct ieee80211_channel *channel = conf->channel; 1184 struct ieee80211_channel *channel = conf->channel;
1192 const struct iwl_channel_info *ch_info;
1193 int ret = 0; 1185 int ret = 0;
1194 1186
1195 IWL_DEBUG_MAC80211(priv, "enter: changed %#x\n", changed); 1187 IWL_DEBUG_MAC80211(priv, "enter: changed %#x\n", changed);
@@ -1223,14 +1215,6 @@ int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed)
1223 } 1215 }
1224 1216
1225 if (changed & IEEE80211_CONF_CHANGE_CHANNEL) { 1217 if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
1226 ch_info = iwl_get_channel_info(priv, channel->band,
1227 channel->hw_value);
1228 if (!is_channel_valid(ch_info)) {
1229 IWL_DEBUG_MAC80211(priv, "leave - invalid channel\n");
1230 ret = -EINVAL;
1231 goto out;
1232 }
1233
1234 for_each_context(priv, ctx) { 1218 for_each_context(priv, ctx) {
1235 /* Configure HT40 channels */ 1219 /* Configure HT40 channels */
1236 if (ctx->ht.enabled != conf_is_ht(conf)) 1220 if (ctx->ht.enabled != conf_is_ht(conf))
@@ -1294,9 +1278,9 @@ int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed)
1294 return ret; 1278 return ret;
1295} 1279}
1296 1280
1297void iwlagn_check_needed_chains(struct iwl_priv *priv, 1281static void iwlagn_check_needed_chains(struct iwl_priv *priv,
1298 struct iwl_rxon_context *ctx, 1282 struct iwl_rxon_context *ctx,
1299 struct ieee80211_bss_conf *bss_conf) 1283 struct ieee80211_bss_conf *bss_conf)
1300{ 1284{
1301 struct ieee80211_vif *vif = ctx->vif; 1285 struct ieee80211_vif *vif = ctx->vif;
1302 struct iwl_rxon_context *tmp; 1286 struct iwl_rxon_context *tmp;
@@ -1388,7 +1372,7 @@ void iwlagn_check_needed_chains(struct iwl_priv *priv,
1388 ht_conf->single_chain_sufficient = !need_multiple; 1372 ht_conf->single_chain_sufficient = !need_multiple;
1389} 1373}
1390 1374
1391void iwlagn_chain_noise_reset(struct iwl_priv *priv) 1375static void iwlagn_chain_noise_reset(struct iwl_priv *priv)
1392{ 1376{
1393 struct iwl_chain_noise_data *data = &priv->chain_noise_data; 1377 struct iwl_chain_noise_data *data = &priv->chain_noise_data;
1394 int ret; 1378 int ret;
diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/dvm/scan.c
index 031d8e21f82..6633074258c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-scan.c
+++ b/drivers/net/wireless/iwlwifi/dvm/scan.c
@@ -30,11 +30,8 @@
30#include <linux/etherdevice.h> 30#include <linux/etherdevice.h>
31#include <net/mac80211.h> 31#include <net/mac80211.h>
32 32
33#include "iwl-eeprom.h" 33#include "dev.h"
34#include "iwl-dev.h" 34#include "agn.h"
35#include "iwl-io.h"
36#include "iwl-agn.h"
37#include "iwl-trans.h"
38 35
39/* For active scan, listen ACTIVE_DWELL_TIME (msec) on each channel after 36/* For active scan, listen ACTIVE_DWELL_TIME (msec) on each channel after
40 * sending probe req. This should be set long enough to hear probe responses 37 * sending probe req. This should be set long enough to hear probe responses
@@ -54,6 +51,9 @@
54#define IWL_CHANNEL_TUNE_TIME 5 51#define IWL_CHANNEL_TUNE_TIME 5
55#define MAX_SCAN_CHANNEL 50 52#define MAX_SCAN_CHANNEL 50
56 53
54/* For reset radio, need minimal dwell time only */
55#define IWL_RADIO_RESET_DWELL_TIME 5
56
57static int iwl_send_scan_abort(struct iwl_priv *priv) 57static int iwl_send_scan_abort(struct iwl_priv *priv)
58{ 58{
59 int ret; 59 int ret;
@@ -67,7 +67,6 @@ static int iwl_send_scan_abort(struct iwl_priv *priv)
67 * to receive scan abort command or it does not perform 67 * to receive scan abort command or it does not perform
68 * hardware scan currently */ 68 * hardware scan currently */
69 if (!test_bit(STATUS_READY, &priv->status) || 69 if (!test_bit(STATUS_READY, &priv->status) ||
70 !test_bit(STATUS_GEO_CONFIGURED, &priv->status) ||
71 !test_bit(STATUS_SCAN_HW, &priv->status) || 70 !test_bit(STATUS_SCAN_HW, &priv->status) ||
72 test_bit(STATUS_FW_ERROR, &priv->status)) 71 test_bit(STATUS_FW_ERROR, &priv->status))
73 return -EIO; 72 return -EIO;
@@ -101,11 +100,8 @@ static void iwl_complete_scan(struct iwl_priv *priv, bool aborted)
101 ieee80211_scan_completed(priv->hw, aborted); 100 ieee80211_scan_completed(priv->hw, aborted);
102 } 101 }
103 102
104 if (priv->scan_type == IWL_SCAN_ROC) { 103 if (priv->scan_type == IWL_SCAN_ROC)
105 ieee80211_remain_on_channel_expired(priv->hw); 104 iwl_scan_roc_expired(priv);
106 priv->hw_roc_channel = NULL;
107 schedule_delayed_work(&priv->hw_roc_disable_work, 10 * HZ);
108 }
109 105
110 priv->scan_type = IWL_SCAN_NORMAL; 106 priv->scan_type = IWL_SCAN_NORMAL;
111 priv->scan_vif = NULL; 107 priv->scan_vif = NULL;
@@ -134,11 +130,8 @@ static void iwl_process_scan_complete(struct iwl_priv *priv)
134 goto out_settings; 130 goto out_settings;
135 } 131 }
136 132
137 if (priv->scan_type == IWL_SCAN_ROC) { 133 if (priv->scan_type == IWL_SCAN_ROC)
138 ieee80211_remain_on_channel_expired(priv->hw); 134 iwl_scan_roc_expired(priv);
139 priv->hw_roc_channel = NULL;
140 schedule_delayed_work(&priv->hw_roc_disable_work, 10 * HZ);
141 }
142 135
143 if (priv->scan_type != IWL_SCAN_NORMAL && !aborted) { 136 if (priv->scan_type != IWL_SCAN_NORMAL && !aborted) {
144 int err; 137 int err;
@@ -453,27 +446,17 @@ static u16 iwl_get_passive_dwell_time(struct iwl_priv *priv,
453 446
454/* Return valid, unused, channel for a passive scan to reset the RF */ 447/* Return valid, unused, channel for a passive scan to reset the RF */
455static u8 iwl_get_single_channel_number(struct iwl_priv *priv, 448static u8 iwl_get_single_channel_number(struct iwl_priv *priv,
456 enum ieee80211_band band) 449 enum ieee80211_band band)
457{ 450{
458 const struct iwl_channel_info *ch_info; 451 struct ieee80211_supported_band *sband = priv->hw->wiphy->bands[band];
459 int i;
460 u8 channel = 0;
461 u8 min, max;
462 struct iwl_rxon_context *ctx; 452 struct iwl_rxon_context *ctx;
453 int i;
463 454
464 if (band == IEEE80211_BAND_5GHZ) { 455 for (i = 0; i < sband->n_channels; i++) {
465 min = 14;
466 max = priv->channel_count;
467 } else {
468 min = 0;
469 max = 14;
470 }
471
472 for (i = min; i < max; i++) {
473 bool busy = false; 456 bool busy = false;
474 457
475 for_each_context(priv, ctx) { 458 for_each_context(priv, ctx) {
476 busy = priv->channel_info[i].channel == 459 busy = sband->channels[i].hw_value ==
477 le16_to_cpu(ctx->staging.channel); 460 le16_to_cpu(ctx->staging.channel);
478 if (busy) 461 if (busy)
479 break; 462 break;
@@ -482,54 +465,46 @@ static u8 iwl_get_single_channel_number(struct iwl_priv *priv,
482 if (busy) 465 if (busy)
483 continue; 466 continue;
484 467
485 channel = priv->channel_info[i].channel; 468 if (!(sband->channels[i].flags & IEEE80211_CHAN_DISABLED))
486 ch_info = iwl_get_channel_info(priv, band, channel); 469 return sband->channels[i].hw_value;
487 if (is_channel_valid(ch_info))
488 break;
489 } 470 }
490 471
491 return channel; 472 return 0;
492} 473}
493 474
494static int iwl_get_single_channel_for_scan(struct iwl_priv *priv, 475static int iwl_get_channel_for_reset_scan(struct iwl_priv *priv,
495 struct ieee80211_vif *vif, 476 struct ieee80211_vif *vif,
496 enum ieee80211_band band, 477 enum ieee80211_band band,
497 struct iwl_scan_channel *scan_ch) 478 struct iwl_scan_channel *scan_ch)
498{ 479{
499 const struct ieee80211_supported_band *sband; 480 const struct ieee80211_supported_band *sband;
500 u16 passive_dwell = 0; 481 u16 channel;
501 u16 active_dwell = 0;
502 int added = 0;
503 u16 channel = 0;
504 482
505 sband = iwl_get_hw_mode(priv, band); 483 sband = iwl_get_hw_mode(priv, band);
506 if (!sband) { 484 if (!sband) {
507 IWL_ERR(priv, "invalid band\n"); 485 IWL_ERR(priv, "invalid band\n");
508 return added; 486 return 0;
509 } 487 }
510 488
511 active_dwell = iwl_get_active_dwell_time(priv, band, 0);
512 passive_dwell = iwl_get_passive_dwell_time(priv, band);
513
514 if (passive_dwell <= active_dwell)
515 passive_dwell = active_dwell + 1;
516
517 channel = iwl_get_single_channel_number(priv, band); 489 channel = iwl_get_single_channel_number(priv, band);
518 if (channel) { 490 if (channel) {
519 scan_ch->channel = cpu_to_le16(channel); 491 scan_ch->channel = cpu_to_le16(channel);
520 scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE; 492 scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
521 scan_ch->active_dwell = cpu_to_le16(active_dwell); 493 scan_ch->active_dwell =
522 scan_ch->passive_dwell = cpu_to_le16(passive_dwell); 494 cpu_to_le16(IWL_RADIO_RESET_DWELL_TIME);
495 scan_ch->passive_dwell =
496 cpu_to_le16(IWL_RADIO_RESET_DWELL_TIME);
523 /* Set txpower levels to defaults */ 497 /* Set txpower levels to defaults */
524 scan_ch->dsp_atten = 110; 498 scan_ch->dsp_atten = 110;
525 if (band == IEEE80211_BAND_5GHZ) 499 if (band == IEEE80211_BAND_5GHZ)
526 scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3; 500 scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
527 else 501 else
528 scan_ch->tx_gain = ((1 << 5) | (5 << 3)); 502 scan_ch->tx_gain = ((1 << 5) | (5 << 3));
529 added++; 503 return 1;
530 } else 504 }
531 IWL_ERR(priv, "no valid channel found\n"); 505
532 return added; 506 IWL_ERR(priv, "no valid channel found\n");
507 return 0;
533} 508}
534 509
535static int iwl_get_channels_for_scan(struct iwl_priv *priv, 510static int iwl_get_channels_for_scan(struct iwl_priv *priv,
@@ -540,7 +515,6 @@ static int iwl_get_channels_for_scan(struct iwl_priv *priv,
540{ 515{
541 struct ieee80211_channel *chan; 516 struct ieee80211_channel *chan;
542 const struct ieee80211_supported_band *sband; 517 const struct ieee80211_supported_band *sband;
543 const struct iwl_channel_info *ch_info;
544 u16 passive_dwell = 0; 518 u16 passive_dwell = 0;
545 u16 active_dwell = 0; 519 u16 active_dwell = 0;
546 int added, i; 520 int added, i;
@@ -565,16 +539,7 @@ static int iwl_get_channels_for_scan(struct iwl_priv *priv,
565 channel = chan->hw_value; 539 channel = chan->hw_value;
566 scan_ch->channel = cpu_to_le16(channel); 540 scan_ch->channel = cpu_to_le16(channel);
567 541
568 ch_info = iwl_get_channel_info(priv, band, channel); 542 if (!is_active || (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN))
569 if (!is_channel_valid(ch_info)) {
570 IWL_DEBUG_SCAN(priv,
571 "Channel %d is INVALID for this band.\n",
572 channel);
573 continue;
574 }
575
576 if (!is_active || is_channel_passive(ch_info) ||
577 (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN))
578 scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE; 543 scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
579 else 544 else
580 scan_ch->type = SCAN_CHANNEL_TYPE_ACTIVE; 545 scan_ch->type = SCAN_CHANNEL_TYPE_ACTIVE;
@@ -678,12 +643,12 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
678 u16 rx_chain = 0; 643 u16 rx_chain = 0;
679 enum ieee80211_band band; 644 enum ieee80211_band band;
680 u8 n_probes = 0; 645 u8 n_probes = 0;
681 u8 rx_ant = priv->hw_params.valid_rx_ant; 646 u8 rx_ant = priv->eeprom_data->valid_rx_ant;
682 u8 rate; 647 u8 rate;
683 bool is_active = false; 648 bool is_active = false;
684 int chan_mod; 649 int chan_mod;
685 u8 active_chains; 650 u8 active_chains;
686 u8 scan_tx_antennas = priv->hw_params.valid_tx_ant; 651 u8 scan_tx_antennas = priv->eeprom_data->valid_tx_ant;
687 int ret; 652 int ret;
688 int scan_cmd_size = sizeof(struct iwl_scan_cmd) + 653 int scan_cmd_size = sizeof(struct iwl_scan_cmd) +
689 MAX_SCAN_CHANNEL * sizeof(struct iwl_scan_channel) + 654 MAX_SCAN_CHANNEL * sizeof(struct iwl_scan_channel) +
@@ -755,6 +720,12 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
755 switch (priv->scan_type) { 720 switch (priv->scan_type) {
756 case IWL_SCAN_RADIO_RESET: 721 case IWL_SCAN_RADIO_RESET:
757 IWL_DEBUG_SCAN(priv, "Start internal passive scan.\n"); 722 IWL_DEBUG_SCAN(priv, "Start internal passive scan.\n");
723 /*
724 * Override quiet time as firmware checks that active
725 * dwell is >= quiet; since we use passive scan it'll
726 * not actually be used.
727 */
728 scan->quiet_time = cpu_to_le16(IWL_RADIO_RESET_DWELL_TIME);
758 break; 729 break;
759 case IWL_SCAN_NORMAL: 730 case IWL_SCAN_NORMAL:
760 if (priv->scan_request->n_ssids) { 731 if (priv->scan_request->n_ssids) {
@@ -893,7 +864,7 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
893 864
894 /* MIMO is not used here, but value is required */ 865 /* MIMO is not used here, but value is required */
895 rx_chain |= 866 rx_chain |=
896 priv->hw_params.valid_rx_ant << RXON_RX_CHAIN_VALID_POS; 867 priv->eeprom_data->valid_rx_ant << RXON_RX_CHAIN_VALID_POS;
897 rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS; 868 rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS;
898 rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_SEL_POS; 869 rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_SEL_POS;
899 rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS; 870 rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS;
@@ -928,7 +899,7 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
928 switch (priv->scan_type) { 899 switch (priv->scan_type) {
929 case IWL_SCAN_RADIO_RESET: 900 case IWL_SCAN_RADIO_RESET:
930 scan->channel_count = 901 scan->channel_count =
931 iwl_get_single_channel_for_scan(priv, vif, band, 902 iwl_get_channel_for_reset_scan(priv, vif, band,
932 (void *)&scan->data[cmd_len]); 903 (void *)&scan->data[cmd_len]);
933 break; 904 break;
934 case IWL_SCAN_NORMAL: 905 case IWL_SCAN_NORMAL:
@@ -994,8 +965,10 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
994 set_bit(STATUS_SCAN_HW, &priv->status); 965 set_bit(STATUS_SCAN_HW, &priv->status);
995 966
996 ret = iwlagn_set_pan_params(priv); 967 ret = iwlagn_set_pan_params(priv);
997 if (ret) 968 if (ret) {
969 clear_bit(STATUS_SCAN_HW, &priv->status);
998 return ret; 970 return ret;
971 }
999 972
1000 ret = iwl_dvm_send_cmd(priv, &cmd); 973 ret = iwl_dvm_send_cmd(priv, &cmd);
1001 if (ret) { 974 if (ret) {
@@ -1008,7 +981,7 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
1008 981
1009void iwl_init_scan_params(struct iwl_priv *priv) 982void iwl_init_scan_params(struct iwl_priv *priv)
1010{ 983{
1011 u8 ant_idx = fls(priv->hw_params.valid_tx_ant) - 1; 984 u8 ant_idx = fls(priv->eeprom_data->valid_tx_ant) - 1;
1012 if (!priv->scan_tx_ant[IEEE80211_BAND_5GHZ]) 985 if (!priv->scan_tx_ant[IEEE80211_BAND_5GHZ])
1013 priv->scan_tx_ant[IEEE80211_BAND_5GHZ] = ant_idx; 986 priv->scan_tx_ant[IEEE80211_BAND_5GHZ] = ant_idx;
1014 if (!priv->scan_tx_ant[IEEE80211_BAND_2GHZ]) 987 if (!priv->scan_tx_ant[IEEE80211_BAND_2GHZ])
@@ -1158,3 +1131,40 @@ void iwl_cancel_scan_deferred_work(struct iwl_priv *priv)
1158 mutex_unlock(&priv->mutex); 1131 mutex_unlock(&priv->mutex);
1159 } 1132 }
1160} 1133}
1134
1135void iwl_scan_roc_expired(struct iwl_priv *priv)
1136{
1137 /*
1138 * The status bit should be set here, to prevent a race
1139 * where the atomic_read returns 1, but before the execution continues
1140 * iwl_scan_offchannel_skb_status() checks if the status bit is set
1141 */
1142 set_bit(STATUS_SCAN_ROC_EXPIRED, &priv->status);
1143
1144 if (atomic_read(&priv->num_aux_in_flight) == 0) {
1145 ieee80211_remain_on_channel_expired(priv->hw);
1146 priv->hw_roc_channel = NULL;
1147 schedule_delayed_work(&priv->hw_roc_disable_work,
1148 10 * HZ);
1149
1150 clear_bit(STATUS_SCAN_ROC_EXPIRED, &priv->status);
1151 } else {
1152 IWL_DEBUG_SCAN(priv, "ROC done with %d frames in aux\n",
1153 atomic_read(&priv->num_aux_in_flight));
1154 }
1155}
1156
1157void iwl_scan_offchannel_skb(struct iwl_priv *priv)
1158{
1159 WARN_ON(!priv->hw_roc_start_notified);
1160 atomic_inc(&priv->num_aux_in_flight);
1161}
1162
1163void iwl_scan_offchannel_skb_status(struct iwl_priv *priv)
1164{
1165 if (atomic_dec_return(&priv->num_aux_in_flight) == 0 &&
1166 test_bit(STATUS_SCAN_ROC_EXPIRED, &priv->status)) {
1167 IWL_DEBUG_SCAN(priv, "0 aux frames. Calling ROC expired\n");
1168 iwl_scan_roc_expired(priv);
1169 }
1170}
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-sta.c b/drivers/net/wireless/iwlwifi/dvm/sta.c
index eb6a8eaf42f..b29b798f755 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
+++ b/drivers/net/wireless/iwlwifi/dvm/sta.c
@@ -28,10 +28,9 @@
28 *****************************************************************************/ 28 *****************************************************************************/
29#include <linux/etherdevice.h> 29#include <linux/etherdevice.h>
30#include <net/mac80211.h> 30#include <net/mac80211.h>
31
32#include "iwl-dev.h"
33#include "iwl-agn.h"
34#include "iwl-trans.h" 31#include "iwl-trans.h"
32#include "dev.h"
33#include "agn.h"
35 34
36const u8 iwl_bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; 35const u8 iwl_bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
37 36
@@ -171,26 +170,6 @@ int iwl_send_add_sta(struct iwl_priv *priv,
171 return cmd.handler_status; 170 return cmd.handler_status;
172} 171}
173 172
174static bool iwl_is_channel_extension(struct iwl_priv *priv,
175 enum ieee80211_band band,
176 u16 channel, u8 extension_chan_offset)
177{
178 const struct iwl_channel_info *ch_info;
179
180 ch_info = iwl_get_channel_info(priv, band, channel);
181 if (!is_channel_valid(ch_info))
182 return false;
183
184 if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_ABOVE)
185 return !(ch_info->ht40_extension_channel &
186 IEEE80211_CHAN_NO_HT40PLUS);
187 else if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_BELOW)
188 return !(ch_info->ht40_extension_channel &
189 IEEE80211_CHAN_NO_HT40MINUS);
190
191 return false;
192}
193
194bool iwl_is_ht40_tx_allowed(struct iwl_priv *priv, 173bool iwl_is_ht40_tx_allowed(struct iwl_priv *priv,
195 struct iwl_rxon_context *ctx, 174 struct iwl_rxon_context *ctx,
196 struct ieee80211_sta_ht_cap *ht_cap) 175 struct ieee80211_sta_ht_cap *ht_cap)
@@ -198,21 +177,25 @@ bool iwl_is_ht40_tx_allowed(struct iwl_priv *priv,
198 if (!ctx->ht.enabled || !ctx->ht.is_40mhz) 177 if (!ctx->ht.enabled || !ctx->ht.is_40mhz)
199 return false; 178 return false;
200 179
180#ifdef CONFIG_IWLWIFI_DEBUGFS
181 if (priv->disable_ht40)
182 return false;
183#endif
184
201 /* 185 /*
202 * We do not check for IEEE80211_HT_CAP_SUP_WIDTH_20_40 186 * Remainder of this function checks ht_cap, but if it's
203 * the bit will not set if it is pure 40MHz case 187 * NULL then we can do HT40 (special case for RXON)
204 */ 188 */
205 if (ht_cap && !ht_cap->ht_supported) 189 if (!ht_cap)
190 return true;
191
192 if (!ht_cap->ht_supported)
206 return false; 193 return false;
207 194
208#ifdef CONFIG_IWLWIFI_DEBUGFS 195 if (!(ht_cap->cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40))
209 if (priv->disable_ht40)
210 return false; 196 return false;
211#endif
212 197
213 return iwl_is_channel_extension(priv, priv->band, 198 return true;
214 le16_to_cpu(ctx->staging.channel),
215 ctx->ht.extension_chan_offset);
216} 199}
217 200
218static void iwl_sta_calc_ht_flags(struct iwl_priv *priv, 201static void iwl_sta_calc_ht_flags(struct iwl_priv *priv,
@@ -236,6 +219,7 @@ static void iwl_sta_calc_ht_flags(struct iwl_priv *priv,
236 mimo_ps_mode = (sta_ht_inf->cap & IEEE80211_HT_CAP_SM_PS) >> 2; 219 mimo_ps_mode = (sta_ht_inf->cap & IEEE80211_HT_CAP_SM_PS) >> 2;
237 220
238 IWL_DEBUG_INFO(priv, "STA %pM SM PS mode: %s\n", 221 IWL_DEBUG_INFO(priv, "STA %pM SM PS mode: %s\n",
222 sta->addr,
239 (mimo_ps_mode == WLAN_HT_CAP_SM_PS_STATIC) ? 223 (mimo_ps_mode == WLAN_HT_CAP_SM_PS_STATIC) ?
240 "static" : 224 "static" :
241 (mimo_ps_mode == WLAN_HT_CAP_SM_PS_DYNAMIC) ? 225 (mimo_ps_mode == WLAN_HT_CAP_SM_PS_DYNAMIC) ?
@@ -649,23 +633,23 @@ static void iwl_sta_fill_lq(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
649 if (r >= IWL_FIRST_CCK_RATE && r <= IWL_LAST_CCK_RATE) 633 if (r >= IWL_FIRST_CCK_RATE && r <= IWL_LAST_CCK_RATE)
650 rate_flags |= RATE_MCS_CCK_MSK; 634 rate_flags |= RATE_MCS_CCK_MSK;
651 635
652 rate_flags |= first_antenna(priv->hw_params.valid_tx_ant) << 636 rate_flags |= first_antenna(priv->eeprom_data->valid_tx_ant) <<
653 RATE_MCS_ANT_POS; 637 RATE_MCS_ANT_POS;
654 rate_n_flags = iwl_hw_set_rate_n_flags(iwl_rates[r].plcp, rate_flags); 638 rate_n_flags = iwl_hw_set_rate_n_flags(iwl_rates[r].plcp, rate_flags);
655 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) 639 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++)
656 link_cmd->rs_table[i].rate_n_flags = rate_n_flags; 640 link_cmd->rs_table[i].rate_n_flags = rate_n_flags;
657 641
658 link_cmd->general_params.single_stream_ant_msk = 642 link_cmd->general_params.single_stream_ant_msk =
659 first_antenna(priv->hw_params.valid_tx_ant); 643 first_antenna(priv->eeprom_data->valid_tx_ant);
660 644
661 link_cmd->general_params.dual_stream_ant_msk = 645 link_cmd->general_params.dual_stream_ant_msk =
662 priv->hw_params.valid_tx_ant & 646 priv->eeprom_data->valid_tx_ant &
663 ~first_antenna(priv->hw_params.valid_tx_ant); 647 ~first_antenna(priv->eeprom_data->valid_tx_ant);
664 if (!link_cmd->general_params.dual_stream_ant_msk) { 648 if (!link_cmd->general_params.dual_stream_ant_msk) {
665 link_cmd->general_params.dual_stream_ant_msk = ANT_AB; 649 link_cmd->general_params.dual_stream_ant_msk = ANT_AB;
666 } else if (num_of_ant(priv->hw_params.valid_tx_ant) == 2) { 650 } else if (num_of_ant(priv->eeprom_data->valid_tx_ant) == 2) {
667 link_cmd->general_params.dual_stream_ant_msk = 651 link_cmd->general_params.dual_stream_ant_msk =
668 priv->hw_params.valid_tx_ant; 652 priv->eeprom_data->valid_tx_ant;
669 } 653 }
670 654
671 link_cmd->agg_params.agg_dis_start_th = 655 link_cmd->agg_params.agg_dis_start_th =
diff --git a/drivers/net/wireless/iwlwifi/dvm/testmode.c b/drivers/net/wireless/iwlwifi/dvm/testmode.c
new file mode 100644
index 00000000000..57b918ce3b5
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/dvm/testmode.c
@@ -0,0 +1,471 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2010 - 2012 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2010 - 2012 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63
64#include <linux/init.h>
65#include <linux/kernel.h>
66#include <linux/module.h>
67#include <linux/dma-mapping.h>
68#include <net/net_namespace.h>
69#include <linux/netdevice.h>
70#include <net/cfg80211.h>
71#include <net/mac80211.h>
72#include <net/netlink.h>
73
74#include "iwl-debug.h"
75#include "iwl-trans.h"
76#include "dev.h"
77#include "agn.h"
78#include "iwl-test.h"
79#include "iwl-testmode.h"
80
81static int iwl_testmode_send_cmd(struct iwl_op_mode *op_mode,
82 struct iwl_host_cmd *cmd)
83{
84 struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
85 return iwl_dvm_send_cmd(priv, cmd);
86}
87
88static bool iwl_testmode_valid_hw_addr(u32 addr)
89{
90 if (iwlagn_hw_valid_rtc_data_addr(addr))
91 return true;
92
93 if (IWLAGN_RTC_INST_LOWER_BOUND <= addr &&
94 addr < IWLAGN_RTC_INST_UPPER_BOUND)
95 return true;
96
97 return false;
98}
99
100static u32 iwl_testmode_get_fw_ver(struct iwl_op_mode *op_mode)
101{
102 struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
103 return priv->fw->ucode_ver;
104}
105
106static struct sk_buff*
107iwl_testmode_alloc_reply(struct iwl_op_mode *op_mode, int len)
108{
109 struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
110 return cfg80211_testmode_alloc_reply_skb(priv->hw->wiphy, len);
111}
112
113static int iwl_testmode_reply(struct iwl_op_mode *op_mode, struct sk_buff *skb)
114{
115 return cfg80211_testmode_reply(skb);
116}
117
118static struct sk_buff *iwl_testmode_alloc_event(struct iwl_op_mode *op_mode,
119 int len)
120{
121 struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
122 return cfg80211_testmode_alloc_event_skb(priv->hw->wiphy, len,
123 GFP_ATOMIC);
124}
125
126static void iwl_testmode_event(struct iwl_op_mode *op_mode, struct sk_buff *skb)
127{
128 return cfg80211_testmode_event(skb, GFP_ATOMIC);
129}
130
131static struct iwl_test_ops tst_ops = {
132 .send_cmd = iwl_testmode_send_cmd,
133 .valid_hw_addr = iwl_testmode_valid_hw_addr,
134 .get_fw_ver = iwl_testmode_get_fw_ver,
135 .alloc_reply = iwl_testmode_alloc_reply,
136 .reply = iwl_testmode_reply,
137 .alloc_event = iwl_testmode_alloc_event,
138 .event = iwl_testmode_event,
139};
140
141void iwl_testmode_init(struct iwl_priv *priv)
142{
143 iwl_test_init(&priv->tst, priv->trans, &tst_ops);
144}
145
146void iwl_testmode_free(struct iwl_priv *priv)
147{
148 iwl_test_free(&priv->tst);
149}
150
151static int iwl_testmode_cfg_init_calib(struct iwl_priv *priv)
152{
153 struct iwl_notification_wait calib_wait;
154 static const u8 calib_complete[] = {
155 CALIBRATION_COMPLETE_NOTIFICATION
156 };
157 int ret;
158
159 iwl_init_notification_wait(&priv->notif_wait, &calib_wait,
160 calib_complete, ARRAY_SIZE(calib_complete),
161 NULL, NULL);
162 ret = iwl_init_alive_start(priv);
163 if (ret) {
164 IWL_ERR(priv, "Fail init calibration: %d\n", ret);
165 goto cfg_init_calib_error;
166 }
167
168 ret = iwl_wait_notification(&priv->notif_wait, &calib_wait, 2 * HZ);
169 if (ret)
170 IWL_ERR(priv, "Error detecting"
171 " CALIBRATION_COMPLETE_NOTIFICATION: %d\n", ret);
172 return ret;
173
174cfg_init_calib_error:
175 iwl_remove_notification(&priv->notif_wait, &calib_wait);
176 return ret;
177}
178
179/*
180 * This function handles the user application commands for driver.
181 *
182 * It retrieves command ID carried with IWL_TM_ATTR_COMMAND and calls to the
183 * handlers respectively.
184 *
185 * If it's an unknown commdn ID, -ENOSYS is replied; otherwise, the returned
186 * value of the actual command execution is replied to the user application.
187 *
188 * If there's any message responding to the user space, IWL_TM_ATTR_SYNC_RSP
189 * is used for carry the message while IWL_TM_ATTR_COMMAND must set to
190 * IWL_TM_CMD_DEV2APP_SYNC_RSP.
191 *
192 * @hw: ieee80211_hw object that represents the device
193 * @tb: gnl message fields from the user space
194 */
195static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb)
196{
197 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
198 struct iwl_trans *trans = priv->trans;
199 struct sk_buff *skb;
200 unsigned char *rsp_data_ptr = NULL;
201 int status = 0, rsp_data_len = 0;
202 u32 inst_size = 0, data_size = 0;
203 const struct fw_img *img;
204
205 switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) {
206 case IWL_TM_CMD_APP2DEV_GET_DEVICENAME:
207 rsp_data_ptr = (unsigned char *)priv->cfg->name;
208 rsp_data_len = strlen(priv->cfg->name);
209 skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy,
210 rsp_data_len + 20);
211 if (!skb) {
212 IWL_ERR(priv, "Memory allocation fail\n");
213 return -ENOMEM;
214 }
215 if (nla_put_u32(skb, IWL_TM_ATTR_COMMAND,
216 IWL_TM_CMD_DEV2APP_SYNC_RSP) ||
217 nla_put(skb, IWL_TM_ATTR_SYNC_RSP,
218 rsp_data_len, rsp_data_ptr))
219 goto nla_put_failure;
220 status = cfg80211_testmode_reply(skb);
221 if (status < 0)
222 IWL_ERR(priv, "Error sending msg : %d\n", status);
223 break;
224
225 case IWL_TM_CMD_APP2DEV_LOAD_INIT_FW:
226 status = iwl_load_ucode_wait_alive(priv, IWL_UCODE_INIT);
227 if (status)
228 IWL_ERR(priv, "Error loading init ucode: %d\n", status);
229 break;
230
231 case IWL_TM_CMD_APP2DEV_CFG_INIT_CALIB:
232 iwl_testmode_cfg_init_calib(priv);
233 priv->ucode_loaded = false;
234 iwl_trans_stop_device(trans);
235 break;
236
237 case IWL_TM_CMD_APP2DEV_LOAD_RUNTIME_FW:
238 status = iwl_load_ucode_wait_alive(priv, IWL_UCODE_REGULAR);
239 if (status) {
240 IWL_ERR(priv,
241 "Error loading runtime ucode: %d\n", status);
242 break;
243 }
244 status = iwl_alive_start(priv);
245 if (status)
246 IWL_ERR(priv,
247 "Error starting the device: %d\n", status);
248 break;
249
250 case IWL_TM_CMD_APP2DEV_LOAD_WOWLAN_FW:
251 iwl_scan_cancel_timeout(priv, 200);
252 priv->ucode_loaded = false;
253 iwl_trans_stop_device(trans);
254 status = iwl_load_ucode_wait_alive(priv, IWL_UCODE_WOWLAN);
255 if (status) {
256 IWL_ERR(priv,
257 "Error loading WOWLAN ucode: %d\n", status);
258 break;
259 }
260 status = iwl_alive_start(priv);
261 if (status)
262 IWL_ERR(priv,
263 "Error starting the device: %d\n", status);
264 break;
265
266 case IWL_TM_CMD_APP2DEV_GET_EEPROM:
267 if (priv->eeprom_blob) {
268 skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy,
269 priv->eeprom_blob_size + 20);
270 if (!skb) {
271 IWL_ERR(priv, "Memory allocation fail\n");
272 return -ENOMEM;
273 }
274 if (nla_put_u32(skb, IWL_TM_ATTR_COMMAND,
275 IWL_TM_CMD_DEV2APP_EEPROM_RSP) ||
276 nla_put(skb, IWL_TM_ATTR_EEPROM,
277 priv->eeprom_blob_size,
278 priv->eeprom_blob))
279 goto nla_put_failure;
280 status = cfg80211_testmode_reply(skb);
281 if (status < 0)
282 IWL_ERR(priv, "Error sending msg : %d\n",
283 status);
284 } else
285 return -ENODATA;
286 break;
287
288 case IWL_TM_CMD_APP2DEV_FIXRATE_REQ:
289 if (!tb[IWL_TM_ATTR_FIXRATE]) {
290 IWL_ERR(priv, "Missing fixrate setting\n");
291 return -ENOMSG;
292 }
293 priv->tm_fixed_rate = nla_get_u32(tb[IWL_TM_ATTR_FIXRATE]);
294 break;
295
296 case IWL_TM_CMD_APP2DEV_GET_FW_INFO:
297 skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, 20 + 8);
298 if (!skb) {
299 IWL_ERR(priv, "Memory allocation fail\n");
300 return -ENOMEM;
301 }
302 if (!priv->ucode_loaded) {
303 IWL_ERR(priv, "No uCode has not been loaded\n");
304 return -EINVAL;
305 } else {
306 img = &priv->fw->img[priv->cur_ucode];
307 inst_size = img->sec[IWL_UCODE_SECTION_INST].len;
308 data_size = img->sec[IWL_UCODE_SECTION_DATA].len;
309 }
310 if (nla_put_u32(skb, IWL_TM_ATTR_FW_TYPE, priv->cur_ucode) ||
311 nla_put_u32(skb, IWL_TM_ATTR_FW_INST_SIZE, inst_size) ||
312 nla_put_u32(skb, IWL_TM_ATTR_FW_DATA_SIZE, data_size))
313 goto nla_put_failure;
314 status = cfg80211_testmode_reply(skb);
315 if (status < 0)
316 IWL_ERR(priv, "Error sending msg : %d\n", status);
317 break;
318
319 default:
320 IWL_ERR(priv, "Unknown testmode driver command ID\n");
321 return -ENOSYS;
322 }
323 return status;
324
325nla_put_failure:
326 kfree_skb(skb);
327 return -EMSGSIZE;
328}
329
330/*
331 * This function handles the user application switch ucode ownership.
332 *
333 * It retrieves the mandatory fields IWL_TM_ATTR_UCODE_OWNER and
334 * decide who the current owner of the uCode
335 *
336 * If the current owner is OWNERSHIP_TM, then the only host command
337 * can deliver to uCode is from testmode, all the other host commands
338 * will dropped.
339 *
340 * default driver is the owner of uCode in normal operational mode
341 *
342 * @hw: ieee80211_hw object that represents the device
343 * @tb: gnl message fields from the user space
344 */
345static int iwl_testmode_ownership(struct ieee80211_hw *hw, struct nlattr **tb)
346{
347 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
348 u8 owner;
349
350 if (!tb[IWL_TM_ATTR_UCODE_OWNER]) {
351 IWL_ERR(priv, "Missing ucode owner\n");
352 return -ENOMSG;
353 }
354
355 owner = nla_get_u8(tb[IWL_TM_ATTR_UCODE_OWNER]);
356 if (owner == IWL_OWNERSHIP_DRIVER) {
357 priv->ucode_owner = owner;
358 iwl_test_enable_notifications(&priv->tst, false);
359 } else if (owner == IWL_OWNERSHIP_TM) {
360 priv->ucode_owner = owner;
361 iwl_test_enable_notifications(&priv->tst, true);
362 } else {
363 IWL_ERR(priv, "Invalid owner\n");
364 return -EINVAL;
365 }
366 return 0;
367}
368
369/* The testmode gnl message handler that takes the gnl message from the
370 * user space and parses it per the policy iwl_testmode_gnl_msg_policy, then
371 * invoke the corresponding handlers.
372 *
373 * This function is invoked when there is user space application sending
374 * gnl message through the testmode tunnel NL80211_CMD_TESTMODE regulated
375 * by nl80211.
376 *
377 * It retrieves the mandatory field, IWL_TM_ATTR_COMMAND, before
378 * dispatching it to the corresponding handler.
379 *
380 * If IWL_TM_ATTR_COMMAND is missing, -ENOMSG is replied to user application;
381 * -ENOSYS is replied to the user application if the command is unknown;
382 * Otherwise, the command is dispatched to the respective handler.
383 *
384 * @hw: ieee80211_hw object that represents the device
385 * @data: pointer to user space message
386 * @len: length in byte of @data
387 */
388int iwlagn_mac_testmode_cmd(struct ieee80211_hw *hw, void *data, int len)
389{
390 struct nlattr *tb[IWL_TM_ATTR_MAX];
391 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
392 int result;
393
394 result = iwl_test_parse(&priv->tst, tb, data, len);
395 if (result)
396 return result;
397
398 /* in case multiple accesses to the device happens */
399 mutex_lock(&priv->mutex);
400 switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) {
401 case IWL_TM_CMD_APP2DEV_UCODE:
402 case IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32:
403 case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32:
404 case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8:
405 case IWL_TM_CMD_APP2DEV_BEGIN_TRACE:
406 case IWL_TM_CMD_APP2DEV_END_TRACE:
407 case IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_READ:
408 case IWL_TM_CMD_APP2DEV_NOTIFICATIONS:
409 case IWL_TM_CMD_APP2DEV_GET_FW_VERSION:
410 case IWL_TM_CMD_APP2DEV_GET_DEVICE_ID:
411 case IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_WRITE:
412 result = iwl_test_handle_cmd(&priv->tst, tb);
413 break;
414
415 case IWL_TM_CMD_APP2DEV_GET_DEVICENAME:
416 case IWL_TM_CMD_APP2DEV_LOAD_INIT_FW:
417 case IWL_TM_CMD_APP2DEV_CFG_INIT_CALIB:
418 case IWL_TM_CMD_APP2DEV_LOAD_RUNTIME_FW:
419 case IWL_TM_CMD_APP2DEV_GET_EEPROM:
420 case IWL_TM_CMD_APP2DEV_FIXRATE_REQ:
421 case IWL_TM_CMD_APP2DEV_LOAD_WOWLAN_FW:
422 case IWL_TM_CMD_APP2DEV_GET_FW_INFO:
423 IWL_DEBUG_INFO(priv, "testmode cmd to driver\n");
424 result = iwl_testmode_driver(hw, tb);
425 break;
426
427 case IWL_TM_CMD_APP2DEV_OWNERSHIP:
428 IWL_DEBUG_INFO(priv, "testmode change uCode ownership\n");
429 result = iwl_testmode_ownership(hw, tb);
430 break;
431
432 default:
433 IWL_ERR(priv, "Unknown testmode command\n");
434 result = -ENOSYS;
435 break;
436 }
437 mutex_unlock(&priv->mutex);
438
439 if (result)
440 IWL_ERR(priv, "Test cmd failed result=%d\n", result);
441 return result;
442}
443
444int iwlagn_mac_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *skb,
445 struct netlink_callback *cb,
446 void *data, int len)
447{
448 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
449 int result;
450 u32 cmd;
451
452 if (cb->args[3]) {
453 /* offset by 1 since commands start at 0 */
454 cmd = cb->args[3] - 1;
455 } else {
456 struct nlattr *tb[IWL_TM_ATTR_MAX];
457
458 result = iwl_test_parse(&priv->tst, tb, data, len);
459 if (result)
460 return result;
461
462 cmd = nla_get_u32(tb[IWL_TM_ATTR_COMMAND]);
463 cb->args[3] = cmd + 1;
464 }
465
466 /* in case multiple accesses to the device happens */
467 mutex_lock(&priv->mutex);
468 result = iwl_test_dump(&priv->tst, cmd, skb, cb);
469 mutex_unlock(&priv->mutex);
470 return result;
471}
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-tt.c b/drivers/net/wireless/iwlwifi/dvm/tt.c
index a5cfe0aceed..eb864433e59 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-tt.c
+++ b/drivers/net/wireless/iwlwifi/dvm/tt.c
@@ -31,17 +31,14 @@
31#include <linux/module.h> 31#include <linux/module.h>
32#include <linux/slab.h> 32#include <linux/slab.h>
33#include <linux/init.h> 33#include <linux/init.h>
34
35#include <net/mac80211.h> 34#include <net/mac80211.h>
36
37#include "iwl-agn.h"
38#include "iwl-eeprom.h"
39#include "iwl-dev.h"
40#include "iwl-io.h" 35#include "iwl-io.h"
41#include "iwl-commands.h"
42#include "iwl-debug.h"
43#include "iwl-agn-tt.h"
44#include "iwl-modparams.h" 36#include "iwl-modparams.h"
37#include "iwl-debug.h"
38#include "agn.h"
39#include "dev.h"
40#include "commands.h"
41#include "tt.h"
45 42
46/* default Thermal Throttling transaction table 43/* default Thermal Throttling transaction table
47 * Current state | Throttling Down | Throttling Up 44 * Current state | Throttling Down | Throttling Up
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-tt.h b/drivers/net/wireless/iwlwifi/dvm/tt.h
index 86bbf47501c..44c7c8f30a2 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-tt.h
+++ b/drivers/net/wireless/iwlwifi/dvm/tt.h
@@ -28,7 +28,7 @@
28#ifndef __iwl_tt_setting_h__ 28#ifndef __iwl_tt_setting_h__
29#define __iwl_tt_setting_h__ 29#define __iwl_tt_setting_h__
30 30
31#include "iwl-commands.h" 31#include "commands.h"
32 32
33#define IWL_ABSOLUTE_ZERO 0 33#define IWL_ABSOLUTE_ZERO 0
34#define IWL_ABSOLUTE_MAX 0xFFFFFFFF 34#define IWL_ABSOLUTE_MAX 0xFFFFFFFF
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c b/drivers/net/wireless/iwlwifi/dvm/tx.c
index 3366e2e2f00..5971a23aa47 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
+++ b/drivers/net/wireless/iwlwifi/dvm/tx.c
@@ -32,12 +32,11 @@
32#include <linux/init.h> 32#include <linux/init.h>
33#include <linux/sched.h> 33#include <linux/sched.h>
34#include <linux/ieee80211.h> 34#include <linux/ieee80211.h>
35
36#include "iwl-dev.h"
37#include "iwl-io.h" 35#include "iwl-io.h"
38#include "iwl-agn-hw.h"
39#include "iwl-agn.h"
40#include "iwl-trans.h" 36#include "iwl-trans.h"
37#include "iwl-agn-hw.h"
38#include "dev.h"
39#include "agn.h"
41 40
42static const u8 tid_to_ac[] = { 41static const u8 tid_to_ac[] = {
43 IEEE80211_AC_BE, 42 IEEE80211_AC_BE,
@@ -187,7 +186,8 @@ static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv,
187 rate_idx = info->control.rates[0].idx; 186 rate_idx = info->control.rates[0].idx;
188 if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS || 187 if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS ||
189 (rate_idx < 0) || (rate_idx > IWL_RATE_COUNT_LEGACY)) 188 (rate_idx < 0) || (rate_idx > IWL_RATE_COUNT_LEGACY))
190 rate_idx = rate_lowest_index(&priv->bands[info->band], 189 rate_idx = rate_lowest_index(
190 &priv->eeprom_data->bands[info->band],
191 info->control.sta); 191 info->control.sta);
192 /* For 5 GHZ band, remap mac80211 rate indices into driver indices */ 192 /* For 5 GHZ band, remap mac80211 rate indices into driver indices */
193 if (info->band == IEEE80211_BAND_5GHZ) 193 if (info->band == IEEE80211_BAND_5GHZ)
@@ -207,10 +207,11 @@ static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv,
207 priv->bt_full_concurrent) { 207 priv->bt_full_concurrent) {
208 /* operated as 1x1 in full concurrency mode */ 208 /* operated as 1x1 in full concurrency mode */
209 priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant, 209 priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant,
210 first_antenna(priv->hw_params.valid_tx_ant)); 210 first_antenna(priv->eeprom_data->valid_tx_ant));
211 } else 211 } else
212 priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant, 212 priv->mgmt_tx_ant = iwl_toggle_tx_ant(
213 priv->hw_params.valid_tx_ant); 213 priv, priv->mgmt_tx_ant,
214 priv->eeprom_data->valid_tx_ant);
214 rate_flags |= iwl_ant_idx_to_flags(priv->mgmt_tx_ant); 215 rate_flags |= iwl_ant_idx_to_flags(priv->mgmt_tx_ant);
215 216
216 /* Set the rate in the TX cmd */ 217 /* Set the rate in the TX cmd */
@@ -296,7 +297,7 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
296 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 297 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
297 struct iwl_station_priv *sta_priv = NULL; 298 struct iwl_station_priv *sta_priv = NULL;
298 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; 299 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
299 struct iwl_device_cmd *dev_cmd = NULL; 300 struct iwl_device_cmd *dev_cmd;
300 struct iwl_tx_cmd *tx_cmd; 301 struct iwl_tx_cmd *tx_cmd;
301 __le16 fc; 302 __le16 fc;
302 u8 hdr_len; 303 u8 hdr_len;
@@ -378,7 +379,7 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
378 if (info->flags & IEEE80211_TX_CTL_AMPDU) 379 if (info->flags & IEEE80211_TX_CTL_AMPDU)
379 is_agg = true; 380 is_agg = true;
380 381
381 dev_cmd = kmem_cache_alloc(iwl_tx_cmd_pool, GFP_ATOMIC); 382 dev_cmd = iwl_trans_alloc_tx_cmd(priv->trans);
382 383
383 if (unlikely(!dev_cmd)) 384 if (unlikely(!dev_cmd))
384 goto drop_unlock_priv; 385 goto drop_unlock_priv;
@@ -402,6 +403,7 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
402 403
403 info->driver_data[0] = ctx; 404 info->driver_data[0] = ctx;
404 info->driver_data[1] = dev_cmd; 405 info->driver_data[1] = dev_cmd;
406 /* From now on, we cannot access info->control */
405 407
406 spin_lock(&priv->sta_lock); 408 spin_lock(&priv->sta_lock);
407 409
@@ -486,11 +488,14 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
486 if (sta_priv && sta_priv->client && !is_agg) 488 if (sta_priv && sta_priv->client && !is_agg)
487 atomic_inc(&sta_priv->pending_frames); 489 atomic_inc(&sta_priv->pending_frames);
488 490
491 if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN)
492 iwl_scan_offchannel_skb(priv);
493
489 return 0; 494 return 0;
490 495
491drop_unlock_sta: 496drop_unlock_sta:
492 if (dev_cmd) 497 if (dev_cmd)
493 kmem_cache_free(iwl_tx_cmd_pool, dev_cmd); 498 iwl_trans_free_tx_cmd(priv->trans, dev_cmd);
494 spin_unlock(&priv->sta_lock); 499 spin_unlock(&priv->sta_lock);
495drop_unlock_priv: 500drop_unlock_priv:
496 return -1; 501 return -1;
@@ -597,7 +602,7 @@ turn_off:
597 * time, or we hadn't time to drain the AC queues. 602 * time, or we hadn't time to drain the AC queues.
598 */ 603 */
599 if (agg_state == IWL_AGG_ON) 604 if (agg_state == IWL_AGG_ON)
600 iwl_trans_tx_agg_disable(priv->trans, txq_id); 605 iwl_trans_txq_disable(priv->trans, txq_id);
601 else 606 else
602 IWL_DEBUG_TX_QUEUES(priv, "Don't disable tx agg: %d\n", 607 IWL_DEBUG_TX_QUEUES(priv, "Don't disable tx agg: %d\n",
603 agg_state); 608 agg_state);
@@ -686,9 +691,8 @@ int iwlagn_tx_agg_oper(struct iwl_priv *priv, struct ieee80211_vif *vif,
686 691
687 fifo = ctx->ac_to_fifo[tid_to_ac[tid]]; 692 fifo = ctx->ac_to_fifo[tid_to_ac[tid]];
688 693
689 iwl_trans_tx_agg_setup(priv->trans, q, fifo, 694 iwl_trans_txq_enable(priv->trans, q, fifo, sta_priv->sta_id, tid,
690 sta_priv->sta_id, tid, 695 buf_size, ssn);
691 buf_size, ssn);
692 696
693 /* 697 /*
694 * If the limit is 0, then it wasn't initialised yet, 698 * If the limit is 0, then it wasn't initialised yet,
@@ -753,8 +757,8 @@ static void iwlagn_check_ratid_empty(struct iwl_priv *priv, int sta_id, u8 tid)
753 IWL_DEBUG_TX_QUEUES(priv, 757 IWL_DEBUG_TX_QUEUES(priv,
754 "Can continue DELBA flow ssn = next_recl =" 758 "Can continue DELBA flow ssn = next_recl ="
755 " %d", tid_data->next_reclaimed); 759 " %d", tid_data->next_reclaimed);
756 iwl_trans_tx_agg_disable(priv->trans, 760 iwl_trans_txq_disable(priv->trans,
757 tid_data->agg.txq_id); 761 tid_data->agg.txq_id);
758 iwlagn_dealloc_agg_txq(priv, tid_data->agg.txq_id); 762 iwlagn_dealloc_agg_txq(priv, tid_data->agg.txq_id);
759 tid_data->agg.state = IWL_AGG_OFF; 763 tid_data->agg.state = IWL_AGG_OFF;
760 ieee80211_stop_tx_ba_cb_irqsafe(vif, addr, tid); 764 ieee80211_stop_tx_ba_cb_irqsafe(vif, addr, tid);
@@ -1136,6 +1140,7 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
1136 struct sk_buff *skb; 1140 struct sk_buff *skb;
1137 struct iwl_rxon_context *ctx; 1141 struct iwl_rxon_context *ctx;
1138 bool is_agg = (txq_id >= IWLAGN_FIRST_AMPDU_QUEUE); 1142 bool is_agg = (txq_id >= IWLAGN_FIRST_AMPDU_QUEUE);
1143 bool is_offchannel_skb;
1139 1144
1140 tid = (tx_resp->ra_tid & IWLAGN_TX_RES_TID_MSK) >> 1145 tid = (tx_resp->ra_tid & IWLAGN_TX_RES_TID_MSK) >>
1141 IWLAGN_TX_RES_TID_POS; 1146 IWLAGN_TX_RES_TID_POS;
@@ -1149,6 +1154,8 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
1149 1154
1150 __skb_queue_head_init(&skbs); 1155 __skb_queue_head_init(&skbs);
1151 1156
1157 is_offchannel_skb = false;
1158
1152 if (tx_resp->frame_count == 1) { 1159 if (tx_resp->frame_count == 1) {
1153 u16 next_reclaimed = le16_to_cpu(tx_resp->seq_ctl); 1160 u16 next_reclaimed = le16_to_cpu(tx_resp->seq_ctl);
1154 next_reclaimed = SEQ_TO_SN(next_reclaimed + 0x10); 1161 next_reclaimed = SEQ_TO_SN(next_reclaimed + 0x10);
@@ -1176,7 +1183,8 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
1176 } 1183 }
1177 1184
1178 /*we can free until ssn % q.n_bd not inclusive */ 1185 /*we can free until ssn % q.n_bd not inclusive */
1179 WARN_ON(iwl_reclaim(priv, sta_id, tid, txq_id, ssn, &skbs)); 1186 WARN_ON_ONCE(iwl_reclaim(priv, sta_id, tid,
1187 txq_id, ssn, &skbs));
1180 iwlagn_check_ratid_empty(priv, sta_id, tid); 1188 iwlagn_check_ratid_empty(priv, sta_id, tid);
1181 freed = 0; 1189 freed = 0;
1182 1190
@@ -1189,8 +1197,8 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
1189 1197
1190 info = IEEE80211_SKB_CB(skb); 1198 info = IEEE80211_SKB_CB(skb);
1191 ctx = info->driver_data[0]; 1199 ctx = info->driver_data[0];
1192 kmem_cache_free(iwl_tx_cmd_pool, 1200 iwl_trans_free_tx_cmd(priv->trans,
1193 (info->driver_data[1])); 1201 info->driver_data[1]);
1194 1202
1195 memset(&info->status, 0, sizeof(info->status)); 1203 memset(&info->status, 0, sizeof(info->status));
1196 1204
@@ -1225,10 +1233,19 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
1225 if (!is_agg) 1233 if (!is_agg)
1226 iwlagn_non_agg_tx_status(priv, ctx, hdr->addr1); 1234 iwlagn_non_agg_tx_status(priv, ctx, hdr->addr1);
1227 1235
1236 is_offchannel_skb =
1237 (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN);
1228 freed++; 1238 freed++;
1229 } 1239 }
1230 1240
1231 WARN_ON(!is_agg && freed != 1); 1241 WARN_ON(!is_agg && freed != 1);
1242
1243 /*
1244 * An offchannel frame can be send only on the AUX queue, where
1245 * there is no aggregation (and reordering) so it only is single
1246 * skb is expected to be processed.
1247 */
1248 WARN_ON(is_offchannel_skb && freed != 1);
1232 } 1249 }
1233 1250
1234 iwl_check_abort_status(priv, tx_resp->frame_count, status); 1251 iwl_check_abort_status(priv, tx_resp->frame_count, status);
@@ -1239,6 +1256,9 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
1239 ieee80211_tx_status(priv->hw, skb); 1256 ieee80211_tx_status(priv->hw, skb);
1240 } 1257 }
1241 1258
1259 if (is_offchannel_skb)
1260 iwl_scan_offchannel_skb_status(priv);
1261
1242 return 0; 1262 return 0;
1243} 1263}
1244 1264
@@ -1341,7 +1361,7 @@ int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
1341 WARN_ON_ONCE(1); 1361 WARN_ON_ONCE(1);
1342 1362
1343 info = IEEE80211_SKB_CB(skb); 1363 info = IEEE80211_SKB_CB(skb);
1344 kmem_cache_free(iwl_tx_cmd_pool, (info->driver_data[1])); 1364 iwl_trans_free_tx_cmd(priv->trans, info->driver_data[1]);
1345 1365
1346 if (freed == 1) { 1366 if (freed == 1) {
1347 /* this is the first skb we deliver in this batch */ 1367 /* this is the first skb we deliver in this batch */
diff --git a/drivers/net/wireless/iwlwifi/iwl-ucode.c b/drivers/net/wireless/iwlwifi/dvm/ucode.c
index bc40dc68b0f..b3a314ba48c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-ucode.c
+++ b/drivers/net/wireless/iwlwifi/dvm/ucode.c
@@ -30,15 +30,16 @@
30#include <linux/kernel.h> 30#include <linux/kernel.h>
31#include <linux/init.h> 31#include <linux/init.h>
32 32
33#include "iwl-dev.h"
34#include "iwl-io.h" 33#include "iwl-io.h"
35#include "iwl-agn-hw.h" 34#include "iwl-agn-hw.h"
36#include "iwl-agn.h"
37#include "iwl-agn-calib.h"
38#include "iwl-trans.h" 35#include "iwl-trans.h"
39#include "iwl-fh.h" 36#include "iwl-fh.h"
40#include "iwl-op-mode.h" 37#include "iwl-op-mode.h"
41 38
39#include "dev.h"
40#include "agn.h"
41#include "calib.h"
42
42/****************************************************************************** 43/******************************************************************************
43 * 44 *
44 * uCode download functions 45 * uCode download functions
@@ -60,8 +61,7 @@ iwl_get_ucode_image(struct iwl_priv *priv, enum iwl_ucode_type ucode_type)
60static int iwl_set_Xtal_calib(struct iwl_priv *priv) 61static int iwl_set_Xtal_calib(struct iwl_priv *priv)
61{ 62{
62 struct iwl_calib_xtal_freq_cmd cmd; 63 struct iwl_calib_xtal_freq_cmd cmd;
63 __le16 *xtal_calib = 64 __le16 *xtal_calib = priv->eeprom_data->xtal_calib;
64 (__le16 *)iwl_eeprom_query_addr(priv, EEPROM_XTAL);
65 65
66 iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_CRYSTAL_FRQ_CMD); 66 iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_CRYSTAL_FRQ_CMD);
67 cmd.cap_pin1 = le16_to_cpu(xtal_calib[0]); 67 cmd.cap_pin1 = le16_to_cpu(xtal_calib[0]);
@@ -72,12 +72,10 @@ static int iwl_set_Xtal_calib(struct iwl_priv *priv)
72static int iwl_set_temperature_offset_calib(struct iwl_priv *priv) 72static int iwl_set_temperature_offset_calib(struct iwl_priv *priv)
73{ 73{
74 struct iwl_calib_temperature_offset_cmd cmd; 74 struct iwl_calib_temperature_offset_cmd cmd;
75 __le16 *offset_calib =
76 (__le16 *)iwl_eeprom_query_addr(priv, EEPROM_RAW_TEMPERATURE);
77 75
78 memset(&cmd, 0, sizeof(cmd)); 76 memset(&cmd, 0, sizeof(cmd));
79 iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_TEMP_OFFSET_CMD); 77 iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_TEMP_OFFSET_CMD);
80 memcpy(&cmd.radio_sensor_offset, offset_calib, sizeof(*offset_calib)); 78 cmd.radio_sensor_offset = priv->eeprom_data->raw_temperature;
81 if (!(cmd.radio_sensor_offset)) 79 if (!(cmd.radio_sensor_offset))
82 cmd.radio_sensor_offset = DEFAULT_RADIO_SENSOR_OFFSET; 80 cmd.radio_sensor_offset = DEFAULT_RADIO_SENSOR_OFFSET;
83 81
@@ -89,27 +87,17 @@ static int iwl_set_temperature_offset_calib(struct iwl_priv *priv)
89static int iwl_set_temperature_offset_calib_v2(struct iwl_priv *priv) 87static int iwl_set_temperature_offset_calib_v2(struct iwl_priv *priv)
90{ 88{
91 struct iwl_calib_temperature_offset_v2_cmd cmd; 89 struct iwl_calib_temperature_offset_v2_cmd cmd;
92 __le16 *offset_calib_high = (__le16 *)iwl_eeprom_query_addr(priv,
93 EEPROM_KELVIN_TEMPERATURE);
94 __le16 *offset_calib_low =
95 (__le16 *)iwl_eeprom_query_addr(priv, EEPROM_RAW_TEMPERATURE);
96 struct iwl_eeprom_calib_hdr *hdr;
97 90
98 memset(&cmd, 0, sizeof(cmd)); 91 memset(&cmd, 0, sizeof(cmd));
99 iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_TEMP_OFFSET_CMD); 92 iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_TEMP_OFFSET_CMD);
100 hdr = (struct iwl_eeprom_calib_hdr *)iwl_eeprom_query_addr(priv, 93 cmd.radio_sensor_offset_high = priv->eeprom_data->kelvin_temperature;
101 EEPROM_CALIB_ALL); 94 cmd.radio_sensor_offset_low = priv->eeprom_data->raw_temperature;
102 memcpy(&cmd.radio_sensor_offset_high, offset_calib_high, 95 if (!cmd.radio_sensor_offset_low) {
103 sizeof(*offset_calib_high));
104 memcpy(&cmd.radio_sensor_offset_low, offset_calib_low,
105 sizeof(*offset_calib_low));
106 if (!(cmd.radio_sensor_offset_low)) {
107 IWL_DEBUG_CALIB(priv, "no info in EEPROM, use default\n"); 96 IWL_DEBUG_CALIB(priv, "no info in EEPROM, use default\n");
108 cmd.radio_sensor_offset_low = DEFAULT_RADIO_SENSOR_OFFSET; 97 cmd.radio_sensor_offset_low = DEFAULT_RADIO_SENSOR_OFFSET;
109 cmd.radio_sensor_offset_high = DEFAULT_RADIO_SENSOR_OFFSET; 98 cmd.radio_sensor_offset_high = DEFAULT_RADIO_SENSOR_OFFSET;
110 } 99 }
111 memcpy(&cmd.burntVoltageRef, &hdr->voltage, 100 cmd.burntVoltageRef = priv->eeprom_data->calib_voltage;
112 sizeof(hdr->voltage));
113 101
114 IWL_DEBUG_CALIB(priv, "Radio sensor offset high: %d\n", 102 IWL_DEBUG_CALIB(priv, "Radio sensor offset high: %d\n",
115 le16_to_cpu(cmd.radio_sensor_offset_high)); 103 le16_to_cpu(cmd.radio_sensor_offset_high));
@@ -177,7 +165,7 @@ int iwl_init_alive_start(struct iwl_priv *priv)
177 return 0; 165 return 0;
178} 166}
179 167
180int iwl_send_wimax_coex(struct iwl_priv *priv) 168static int iwl_send_wimax_coex(struct iwl_priv *priv)
181{ 169{
182 struct iwl_wimax_coex_cmd coex_cmd; 170 struct iwl_wimax_coex_cmd coex_cmd;
183 171
diff --git a/drivers/net/wireless/iwlwifi/iwl-config.h b/drivers/net/wireless/iwlwifi/iwl-config.h
index 67b28aa7f9b..10e47938b63 100644
--- a/drivers/net/wireless/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/iwlwifi/iwl-config.h
@@ -113,7 +113,7 @@ enum iwl_led_mode {
113#define IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE 0 113#define IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE 0
114 114
115/* TX queue watchdog timeouts in mSecs */ 115/* TX queue watchdog timeouts in mSecs */
116#define IWL_WATCHHDOG_DISABLED 0 116#define IWL_WATCHDOG_DISABLED 0
117#define IWL_DEF_WD_TIMEOUT 2000 117#define IWL_DEF_WD_TIMEOUT 2000
118#define IWL_LONG_WD_TIMEOUT 10000 118#define IWL_LONG_WD_TIMEOUT 10000
119#define IWL_MAX_WD_TIMEOUT 120000 119#define IWL_MAX_WD_TIMEOUT 120000
@@ -143,7 +143,7 @@ enum iwl_led_mode {
143 * @chain_noise_scale: default chain noise scale used for gain computation 143 * @chain_noise_scale: default chain noise scale used for gain computation
144 * @wd_timeout: TX queues watchdog timeout 144 * @wd_timeout: TX queues watchdog timeout
145 * @max_event_log_size: size of event log buffer size for ucode event logging 145 * @max_event_log_size: size of event log buffer size for ucode event logging
146 * @shadow_reg_enable: HW shadhow register bit 146 * @shadow_reg_enable: HW shadow register support
147 * @hd_v2: v2 of enhanced sensitivity value, used for 2000 series and up 147 * @hd_v2: v2 of enhanced sensitivity value, used for 2000 series and up
148 * @no_idle_support: do not support idle mode 148 * @no_idle_support: do not support idle mode
149 */ 149 */
@@ -182,13 +182,34 @@ struct iwl_bt_params {
182 bool bt_sco_disable; 182 bool bt_sco_disable;
183 bool bt_session_2; 183 bool bt_session_2;
184}; 184};
185
185/* 186/*
186 * @use_rts_for_aggregation: use rts/cts protection for HT traffic 187 * @use_rts_for_aggregation: use rts/cts protection for HT traffic
188 * @ht40_bands: bitmap of bands (using %IEEE80211_BAND_*) that support HT40
187 */ 189 */
188struct iwl_ht_params { 190struct iwl_ht_params {
191 enum ieee80211_smps_mode smps_mode;
189 const bool ht_greenfield_support; /* if used set to true */ 192 const bool ht_greenfield_support; /* if used set to true */
190 bool use_rts_for_aggregation; 193 bool use_rts_for_aggregation;
191 enum ieee80211_smps_mode smps_mode; 194 u8 ht40_bands;
195};
196
197/*
198 * information on how to parse the EEPROM
199 */
200#define EEPROM_REG_BAND_1_CHANNELS 0x08
201#define EEPROM_REG_BAND_2_CHANNELS 0x26
202#define EEPROM_REG_BAND_3_CHANNELS 0x42
203#define EEPROM_REG_BAND_4_CHANNELS 0x5C
204#define EEPROM_REG_BAND_5_CHANNELS 0x74
205#define EEPROM_REG_BAND_24_HT40_CHANNELS 0x82
206#define EEPROM_REG_BAND_52_HT40_CHANNELS 0x92
207#define EEPROM_6000_REG_BAND_24_HT40_CHANNELS 0x80
208#define EEPROM_REGULATORY_BAND_NO_HT40 0
209
210struct iwl_eeprom_params {
211 const u8 regulatory_bands[7];
212 bool enhanced_txpower;
192}; 213};
193 214
194/** 215/**
@@ -243,6 +264,7 @@ struct iwl_cfg {
243 /* params likely to change within a device family */ 264 /* params likely to change within a device family */
244 const struct iwl_ht_params *ht_params; 265 const struct iwl_ht_params *ht_params;
245 const struct iwl_bt_params *bt_params; 266 const struct iwl_bt_params *bt_params;
267 const struct iwl_eeprom_params *eeprom_params;
246 const bool need_temp_offset_calib; /* if used set to true */ 268 const bool need_temp_offset_calib; /* if used set to true */
247 const bool no_xtal_calib; 269 const bool no_xtal_calib;
248 enum iwl_led_mode led_mode; 270 enum iwl_led_mode led_mode;
diff --git a/drivers/net/wireless/iwlwifi/iwl-csr.h b/drivers/net/wireless/iwlwifi/iwl-csr.h
index 59750543fce..34a5287dfc2 100644
--- a/drivers/net/wireless/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/iwlwifi/iwl-csr.h
@@ -97,13 +97,10 @@
97/* 97/*
98 * Hardware revision info 98 * Hardware revision info
99 * Bit fields: 99 * Bit fields:
100 * 31-8: Reserved 100 * 31-16: Reserved
101 * 7-4: Type of device: see CSR_HW_REV_TYPE_xxx definitions 101 * 15-4: Type of device: see CSR_HW_REV_TYPE_xxx definitions
102 * 3-2: Revision step: 0 = A, 1 = B, 2 = C, 3 = D 102 * 3-2: Revision step: 0 = A, 1 = B, 2 = C, 3 = D
103 * 1-0: "Dash" (-) value, as in A-1, etc. 103 * 1-0: "Dash" (-) value, as in A-1, etc.
104 *
105 * NOTE: Revision step affects calculation of CCK txpower for 4965.
106 * NOTE: See also CSR_HW_REV_WA_REG (work-around for bug in 4965).
107 */ 104 */
108#define CSR_HW_REV (CSR_BASE+0x028) 105#define CSR_HW_REV (CSR_BASE+0x028)
109 106
@@ -155,9 +152,21 @@
155#define CSR_DBG_LINK_PWR_MGMT_REG (CSR_BASE+0x250) 152#define CSR_DBG_LINK_PWR_MGMT_REG (CSR_BASE+0x250)
156 153
157/* Bits for CSR_HW_IF_CONFIG_REG */ 154/* Bits for CSR_HW_IF_CONFIG_REG */
158#define CSR_HW_IF_CONFIG_REG_MSK_BOARD_VER (0x00000C00) 155#define CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH (0x00000003)
159#define CSR_HW_IF_CONFIG_REG_BIT_MAC_SI (0x00000100) 156#define CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP (0x0000000C)
157#define CSR_HW_IF_CONFIG_REG_MSK_BOARD_VER (0x000000C0)
158#define CSR_HW_IF_CONFIG_REG_BIT_MAC_SI (0x00000100)
160#define CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI (0x00000200) 159#define CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI (0x00000200)
160#define CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE (0x00000C00)
161#define CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH (0x00003000)
162#define CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP (0x0000C000)
163
164#define CSR_HW_IF_CONFIG_REG_POS_MAC_DASH (0)
165#define CSR_HW_IF_CONFIG_REG_POS_MAC_STEP (2)
166#define CSR_HW_IF_CONFIG_REG_POS_BOARD_VER (6)
167#define CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE (10)
168#define CSR_HW_IF_CONFIG_REG_POS_PHY_DASH (12)
169#define CSR_HW_IF_CONFIG_REG_POS_PHY_STEP (14)
161 170
162#define CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A (0x00080000) 171#define CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A (0x00080000)
163#define CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM (0x00200000) 172#define CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM (0x00200000)
@@ -270,7 +279,10 @@
270 279
271 280
272/* HW REV */ 281/* HW REV */
273#define CSR_HW_REV_TYPE_MSK (0x00001F0) 282#define CSR_HW_REV_DASH(_val) (((_val) & 0x0000003) >> 0)
283#define CSR_HW_REV_STEP(_val) (((_val) & 0x000000C) >> 2)
284
285#define CSR_HW_REV_TYPE_MSK (0x000FFF0)
274#define CSR_HW_REV_TYPE_5300 (0x0000020) 286#define CSR_HW_REV_TYPE_5300 (0x0000020)
275#define CSR_HW_REV_TYPE_5350 (0x0000030) 287#define CSR_HW_REV_TYPE_5350 (0x0000030)
276#define CSR_HW_REV_TYPE_5100 (0x0000050) 288#define CSR_HW_REV_TYPE_5100 (0x0000050)
diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.c b/drivers/net/wireless/iwlwifi/iwl-debug.c
index 2d1b42847b9..0f8fcd1d4fe 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debug.c
+++ b/drivers/net/wireless/iwlwifi/iwl-debug.c
@@ -62,6 +62,7 @@
62 *****************************************************************************/ 62 *****************************************************************************/
63 63
64#include <linux/interrupt.h> 64#include <linux/interrupt.h>
65#include <linux/export.h>
65#include "iwl-debug.h" 66#include "iwl-debug.h"
66#include "iwl-devtrace.h" 67#include "iwl-devtrace.h"
67 68
@@ -81,8 +82,11 @@ void __iwl_ ##fn(struct device *dev, const char *fmt, ...) \
81} 82}
82 83
83__iwl_fn(warn) 84__iwl_fn(warn)
85EXPORT_SYMBOL_GPL(__iwl_warn);
84__iwl_fn(info) 86__iwl_fn(info)
87EXPORT_SYMBOL_GPL(__iwl_info);
85__iwl_fn(crit) 88__iwl_fn(crit)
89EXPORT_SYMBOL_GPL(__iwl_crit);
86 90
87void __iwl_err(struct device *dev, bool rfkill_prefix, bool trace_only, 91void __iwl_err(struct device *dev, bool rfkill_prefix, bool trace_only,
88 const char *fmt, ...) 92 const char *fmt, ...)
@@ -103,6 +107,7 @@ void __iwl_err(struct device *dev, bool rfkill_prefix, bool trace_only,
103 trace_iwlwifi_err(&vaf); 107 trace_iwlwifi_err(&vaf);
104 va_end(args); 108 va_end(args);
105} 109}
110EXPORT_SYMBOL_GPL(__iwl_err);
106 111
107#if defined(CONFIG_IWLWIFI_DEBUG) || defined(CONFIG_IWLWIFI_DEVICE_TRACING) 112#if defined(CONFIG_IWLWIFI_DEBUG) || defined(CONFIG_IWLWIFI_DEVICE_TRACING)
108void __iwl_dbg(struct device *dev, 113void __iwl_dbg(struct device *dev,
@@ -125,4 +130,5 @@ void __iwl_dbg(struct device *dev,
125 trace_iwlwifi_dbg(level, in_interrupt(), function, &vaf); 130 trace_iwlwifi_dbg(level, in_interrupt(), function, &vaf);
126 va_end(args); 131 va_end(args);
127} 132}
133EXPORT_SYMBOL_GPL(__iwl_dbg);
128#endif 134#endif
diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
index 8376b842bdb..42b20b0e83b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debug.h
+++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
@@ -38,13 +38,14 @@ static inline bool iwl_have_debug_level(u32 level)
38} 38}
39 39
40void __iwl_err(struct device *dev, bool rfkill_prefix, bool only_trace, 40void __iwl_err(struct device *dev, bool rfkill_prefix, bool only_trace,
41 const char *fmt, ...); 41 const char *fmt, ...) __printf(4, 5);
42void __iwl_warn(struct device *dev, const char *fmt, ...); 42void __iwl_warn(struct device *dev, const char *fmt, ...) __printf(2, 3);
43void __iwl_info(struct device *dev, const char *fmt, ...); 43void __iwl_info(struct device *dev, const char *fmt, ...) __printf(2, 3);
44void __iwl_crit(struct device *dev, const char *fmt, ...); 44void __iwl_crit(struct device *dev, const char *fmt, ...) __printf(2, 3);
45 45
46/* No matter what is m (priv, bus, trans), this will work */ 46/* No matter what is m (priv, bus, trans), this will work */
47#define IWL_ERR(m, f, a...) __iwl_err((m)->dev, false, false, f, ## a) 47#define IWL_ERR(m, f, a...) __iwl_err((m)->dev, false, false, f, ## a)
48#define IWL_ERR_DEV(d, f, a...) __iwl_err((d), false, false, f, ## a)
48#define IWL_WARN(m, f, a...) __iwl_warn((m)->dev, f, ## a) 49#define IWL_WARN(m, f, a...) __iwl_warn((m)->dev, f, ## a)
49#define IWL_INFO(m, f, a...) __iwl_info((m)->dev, f, ## a) 50#define IWL_INFO(m, f, a...) __iwl_info((m)->dev, f, ## a)
50#define IWL_CRIT(m, f, a...) __iwl_crit((m)->dev, f, ## a) 51#define IWL_CRIT(m, f, a...) __iwl_crit((m)->dev, f, ## a)
@@ -52,9 +53,9 @@ void __iwl_crit(struct device *dev, const char *fmt, ...);
52#if defined(CONFIG_IWLWIFI_DEBUG) || defined(CONFIG_IWLWIFI_DEVICE_TRACING) 53#if defined(CONFIG_IWLWIFI_DEBUG) || defined(CONFIG_IWLWIFI_DEVICE_TRACING)
53void __iwl_dbg(struct device *dev, 54void __iwl_dbg(struct device *dev,
54 u32 level, bool limit, const char *function, 55 u32 level, bool limit, const char *function,
55 const char *fmt, ...); 56 const char *fmt, ...) __printf(5, 6);
56#else 57#else
57static inline void 58__printf(5, 6) static inline void
58__iwl_dbg(struct device *dev, 59__iwl_dbg(struct device *dev,
59 u32 level, bool limit, const char *function, 60 u32 level, bool limit, const char *function,
60 const char *fmt, ...) 61 const char *fmt, ...)
@@ -69,6 +70,8 @@ do { \
69 70
70#define IWL_DEBUG(m, level, fmt, args...) \ 71#define IWL_DEBUG(m, level, fmt, args...) \
71 __iwl_dbg((m)->dev, level, false, __func__, fmt, ##args) 72 __iwl_dbg((m)->dev, level, false, __func__, fmt, ##args)
73#define IWL_DEBUG_DEV(dev, level, fmt, args...) \
74 __iwl_dbg((dev), level, false, __func__, fmt, ##args)
72#define IWL_DEBUG_LIMIT(m, level, fmt, args...) \ 75#define IWL_DEBUG_LIMIT(m, level, fmt, args...) \
73 __iwl_dbg((m)->dev, level, true, __func__, fmt, ##args) 76 __iwl_dbg((m)->dev, level, true, __func__, fmt, ##args)
74 77
@@ -153,7 +156,7 @@ do { \
153#define IWL_DEBUG_LED(p, f, a...) IWL_DEBUG(p, IWL_DL_LED, f, ## a) 156#define IWL_DEBUG_LED(p, f, a...) IWL_DEBUG(p, IWL_DL_LED, f, ## a)
154#define IWL_DEBUG_WEP(p, f, a...) IWL_DEBUG(p, IWL_DL_WEP, f, ## a) 157#define IWL_DEBUG_WEP(p, f, a...) IWL_DEBUG(p, IWL_DL_WEP, f, ## a)
155#define IWL_DEBUG_HC(p, f, a...) IWL_DEBUG(p, IWL_DL_HCMD, f, ## a) 158#define IWL_DEBUG_HC(p, f, a...) IWL_DEBUG(p, IWL_DL_HCMD, f, ## a)
156#define IWL_DEBUG_EEPROM(p, f, a...) IWL_DEBUG(p, IWL_DL_EEPROM, f, ## a) 159#define IWL_DEBUG_EEPROM(d, f, a...) IWL_DEBUG_DEV(d, IWL_DL_EEPROM, f, ## a)
157#define IWL_DEBUG_CALIB(p, f, a...) IWL_DEBUG(p, IWL_DL_CALIB, f, ## a) 160#define IWL_DEBUG_CALIB(p, f, a...) IWL_DEBUG(p, IWL_DL_CALIB, f, ## a)
158#define IWL_DEBUG_FW(p, f, a...) IWL_DEBUG(p, IWL_DL_FW, f, ## a) 161#define IWL_DEBUG_FW(p, f, a...) IWL_DEBUG(p, IWL_DL_FW, f, ## a)
159#define IWL_DEBUG_RF_KILL(p, f, a...) IWL_DEBUG(p, IWL_DL_RF_KILL, f, ## a) 162#define IWL_DEBUG_RF_KILL(p, f, a...) IWL_DEBUG(p, IWL_DL_RF_KILL, f, ## a)
diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace.c b/drivers/net/wireless/iwlwifi/iwl-devtrace.c
index 91f45e71e0a..70191ddbd8f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-devtrace.c
+++ b/drivers/net/wireless/iwlwifi/iwl-devtrace.c
@@ -42,4 +42,9 @@ EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_event);
42EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_error); 42EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_error);
43EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_cont_event); 43EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_cont_event);
44EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_wrap_event); 44EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_wrap_event);
45EXPORT_TRACEPOINT_SYMBOL(iwlwifi_info);
46EXPORT_TRACEPOINT_SYMBOL(iwlwifi_warn);
47EXPORT_TRACEPOINT_SYMBOL(iwlwifi_crit);
48EXPORT_TRACEPOINT_SYMBOL(iwlwifi_err);
49EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dbg);
45#endif 50#endif
diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace.h b/drivers/net/wireless/iwlwifi/iwl-devtrace.h
index 06203d6a1d8..65364793021 100644
--- a/drivers/net/wireless/iwlwifi/iwl-devtrace.h
+++ b/drivers/net/wireless/iwlwifi/iwl-devtrace.h
@@ -28,6 +28,7 @@
28#define __IWLWIFI_DEVICE_TRACE 28#define __IWLWIFI_DEVICE_TRACE
29 29
30#include <linux/tracepoint.h> 30#include <linux/tracepoint.h>
31#include <linux/device.h>
31 32
32 33
33#if !defined(CONFIG_IWLWIFI_DEVICE_TRACING) || defined(__CHECKER__) 34#if !defined(CONFIG_IWLWIFI_DEVICE_TRACING) || defined(__CHECKER__)
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/iwlwifi/iwl-drv.c
index fac67a526a3..a175997e782 100644
--- a/drivers/net/wireless/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/iwlwifi/iwl-drv.c
@@ -77,8 +77,33 @@
77/* private includes */ 77/* private includes */
78#include "iwl-fw-file.h" 78#include "iwl-fw-file.h"
79 79
80/******************************************************************************
81 *
82 * module boiler plate
83 *
84 ******************************************************************************/
85
86/*
87 * module name, copyright, version, etc.
88 */
89#define DRV_DESCRIPTION "Intel(R) Wireless WiFi driver for Linux"
90
91#ifdef CONFIG_IWLWIFI_DEBUG
92#define VD "d"
93#else
94#define VD
95#endif
96
97#define DRV_VERSION IWLWIFI_VERSION VD
98
99MODULE_DESCRIPTION(DRV_DESCRIPTION);
100MODULE_VERSION(DRV_VERSION);
101MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
102MODULE_LICENSE("GPL");
103
80/** 104/**
81 * struct iwl_drv - drv common data 105 * struct iwl_drv - drv common data
106 * @list: list of drv structures using this opmode
82 * @fw: the iwl_fw structure 107 * @fw: the iwl_fw structure
83 * @op_mode: the running op_mode 108 * @op_mode: the running op_mode
84 * @trans: transport layer 109 * @trans: transport layer
@@ -89,6 +114,7 @@
89 * @request_firmware_complete: the firmware has been obtained from user space 114 * @request_firmware_complete: the firmware has been obtained from user space
90 */ 115 */
91struct iwl_drv { 116struct iwl_drv {
117 struct list_head list;
92 struct iwl_fw fw; 118 struct iwl_fw fw;
93 119
94 struct iwl_op_mode *op_mode; 120 struct iwl_op_mode *op_mode;
@@ -102,7 +128,19 @@ struct iwl_drv {
102 struct completion request_firmware_complete; 128 struct completion request_firmware_complete;
103}; 129};
104 130
105 131#define DVM_OP_MODE 0
132#define MVM_OP_MODE 1
133
134/* Protects the table contents, i.e. the ops pointer & drv list */
135static struct mutex iwlwifi_opmode_table_mtx;
136static struct iwlwifi_opmode_table {
137 const char *name; /* name: iwldvm, iwlmvm, etc */
138 const struct iwl_op_mode_ops *ops; /* pointer to op_mode ops */
139 struct list_head drv; /* list of devices using this op_mode */
140} iwlwifi_opmode_table[] = { /* ops set when driver is initialized */
141 { .name = "iwldvm", .ops = NULL },
142 { .name = "iwlmvm", .ops = NULL },
143};
106 144
107/* 145/*
108 * struct fw_sec: Just for the image parsing process. 146 * struct fw_sec: Just for the image parsing process.
@@ -721,7 +759,6 @@ static int validate_sec_sizes(struct iwl_drv *drv,
721 return 0; 759 return 0;
722} 760}
723 761
724
725/** 762/**
726 * iwl_ucode_callback - callback when firmware was loaded 763 * iwl_ucode_callback - callback when firmware was loaded
727 * 764 *
@@ -733,6 +770,7 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
733 struct iwl_drv *drv = context; 770 struct iwl_drv *drv = context;
734 struct iwl_fw *fw = &drv->fw; 771 struct iwl_fw *fw = &drv->fw;
735 struct iwl_ucode_header *ucode; 772 struct iwl_ucode_header *ucode;
773 struct iwlwifi_opmode_table *op;
736 int err; 774 int err;
737 struct iwl_firmware_pieces pieces; 775 struct iwl_firmware_pieces pieces;
738 const unsigned int api_max = drv->cfg->ucode_api_max; 776 const unsigned int api_max = drv->cfg->ucode_api_max;
@@ -740,6 +778,7 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
740 const unsigned int api_min = drv->cfg->ucode_api_min; 778 const unsigned int api_min = drv->cfg->ucode_api_min;
741 u32 api_ver; 779 u32 api_ver;
742 int i; 780 int i;
781 bool load_module = false;
743 782
744 fw->ucode_capa.max_probe_length = 200; 783 fw->ucode_capa.max_probe_length = 200;
745 fw->ucode_capa.standard_phy_calibration_size = 784 fw->ucode_capa.standard_phy_calibration_size =
@@ -862,10 +901,24 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
862 /* We have our copies now, allow OS release its copies */ 901 /* We have our copies now, allow OS release its copies */
863 release_firmware(ucode_raw); 902 release_firmware(ucode_raw);
864 903
865 drv->op_mode = iwl_dvm_ops.start(drv->trans, drv->cfg, &drv->fw); 904 mutex_lock(&iwlwifi_opmode_table_mtx);
905 op = &iwlwifi_opmode_table[DVM_OP_MODE];
866 906
867 if (!drv->op_mode) 907 /* add this device to the list of devices using this op_mode */
868 goto out_unbind; 908 list_add_tail(&drv->list, &op->drv);
909
910 if (op->ops) {
911 const struct iwl_op_mode_ops *ops = op->ops;
912 drv->op_mode = ops->start(drv->trans, drv->cfg, &drv->fw);
913
914 if (!drv->op_mode) {
915 mutex_unlock(&iwlwifi_opmode_table_mtx);
916 goto out_unbind;
917 }
918 } else {
919 load_module = true;
920 }
921 mutex_unlock(&iwlwifi_opmode_table_mtx);
869 922
870 /* 923 /*
871 * Complete the firmware request last so that 924 * Complete the firmware request last so that
@@ -873,6 +926,14 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
873 * are doing the start() above. 926 * are doing the start() above.
874 */ 927 */
875 complete(&drv->request_firmware_complete); 928 complete(&drv->request_firmware_complete);
929
930 /*
931 * Load the module last so we don't block anything
932 * else from proceeding if the module fails to load
933 * or hangs loading.
934 */
935 if (load_module)
936 request_module("%s", op->name);
876 return; 937 return;
877 938
878 try_again: 939 try_again:
@@ -906,6 +967,7 @@ struct iwl_drv *iwl_drv_start(struct iwl_trans *trans,
906 drv->cfg = cfg; 967 drv->cfg = cfg;
907 968
908 init_completion(&drv->request_firmware_complete); 969 init_completion(&drv->request_firmware_complete);
970 INIT_LIST_HEAD(&drv->list);
909 971
910 ret = iwl_request_firmware(drv, true); 972 ret = iwl_request_firmware(drv, true);
911 973
@@ -928,6 +990,16 @@ void iwl_drv_stop(struct iwl_drv *drv)
928 990
929 iwl_dealloc_ucode(drv); 991 iwl_dealloc_ucode(drv);
930 992
993 mutex_lock(&iwlwifi_opmode_table_mtx);
994 /*
995 * List is empty (this item wasn't added)
996 * when firmware loading failed -- in that
997 * case we can't remove it from any list.
998 */
999 if (!list_empty(&drv->list))
1000 list_del(&drv->list);
1001 mutex_unlock(&iwlwifi_opmode_table_mtx);
1002
931 kfree(drv); 1003 kfree(drv);
932} 1004}
933 1005
@@ -943,6 +1015,75 @@ struct iwl_mod_params iwlwifi_mod_params = {
943 .auto_agg = true, 1015 .auto_agg = true,
944 /* the rest are 0 by default */ 1016 /* the rest are 0 by default */
945}; 1017};
1018EXPORT_SYMBOL_GPL(iwlwifi_mod_params);
1019
1020int iwl_opmode_register(const char *name, const struct iwl_op_mode_ops *ops)
1021{
1022 int i;
1023 struct iwl_drv *drv;
1024
1025 mutex_lock(&iwlwifi_opmode_table_mtx);
1026 for (i = 0; i < ARRAY_SIZE(iwlwifi_opmode_table); i++) {
1027 if (strcmp(iwlwifi_opmode_table[i].name, name))
1028 continue;
1029 iwlwifi_opmode_table[i].ops = ops;
1030 list_for_each_entry(drv, &iwlwifi_opmode_table[i].drv, list)
1031 drv->op_mode = ops->start(drv->trans, drv->cfg,
1032 &drv->fw);
1033 mutex_unlock(&iwlwifi_opmode_table_mtx);
1034 return 0;
1035 }
1036 mutex_unlock(&iwlwifi_opmode_table_mtx);
1037 return -EIO;
1038}
1039EXPORT_SYMBOL_GPL(iwl_opmode_register);
1040
1041void iwl_opmode_deregister(const char *name)
1042{
1043 int i;
1044 struct iwl_drv *drv;
1045
1046 mutex_lock(&iwlwifi_opmode_table_mtx);
1047 for (i = 0; i < ARRAY_SIZE(iwlwifi_opmode_table); i++) {
1048 if (strcmp(iwlwifi_opmode_table[i].name, name))
1049 continue;
1050 iwlwifi_opmode_table[i].ops = NULL;
1051
1052 /* call the stop routine for all devices */
1053 list_for_each_entry(drv, &iwlwifi_opmode_table[i].drv, list) {
1054 if (drv->op_mode) {
1055 iwl_op_mode_stop(drv->op_mode);
1056 drv->op_mode = NULL;
1057 }
1058 }
1059 mutex_unlock(&iwlwifi_opmode_table_mtx);
1060 return;
1061 }
1062 mutex_unlock(&iwlwifi_opmode_table_mtx);
1063}
1064EXPORT_SYMBOL_GPL(iwl_opmode_deregister);
1065
1066static int __init iwl_drv_init(void)
1067{
1068 int i;
1069
1070 mutex_init(&iwlwifi_opmode_table_mtx);
1071
1072 for (i = 0; i < ARRAY_SIZE(iwlwifi_opmode_table); i++)
1073 INIT_LIST_HEAD(&iwlwifi_opmode_table[i].drv);
1074
1075 pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n");
1076 pr_info(DRV_COPYRIGHT "\n");
1077
1078 return iwl_pci_register_driver();
1079}
1080module_init(iwl_drv_init);
1081
1082static void __exit iwl_drv_exit(void)
1083{
1084 iwl_pci_unregister_driver();
1085}
1086module_exit(iwl_drv_exit);
946 1087
947#ifdef CONFIG_IWLWIFI_DEBUG 1088#ifdef CONFIG_IWLWIFI_DEBUG
948module_param_named(debug, iwlwifi_mod_params.debug_level, uint, 1089module_param_named(debug, iwlwifi_mod_params.debug_level, uint,
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c
new file mode 100644
index 00000000000..f10170fe879
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c
@@ -0,0 +1,903 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *****************************************************************************/
62#include <linux/types.h>
63#include <linux/slab.h>
64#include <linux/export.h>
65#include "iwl-modparams.h"
66#include "iwl-eeprom-parse.h"
67
68/* EEPROM offset definitions */
69
70/* indirect access definitions */
71#define ADDRESS_MSK 0x0000FFFF
72#define INDIRECT_TYPE_MSK 0x000F0000
73#define INDIRECT_HOST 0x00010000
74#define INDIRECT_GENERAL 0x00020000
75#define INDIRECT_REGULATORY 0x00030000
76#define INDIRECT_CALIBRATION 0x00040000
77#define INDIRECT_PROCESS_ADJST 0x00050000
78#define INDIRECT_OTHERS 0x00060000
79#define INDIRECT_TXP_LIMIT 0x00070000
80#define INDIRECT_TXP_LIMIT_SIZE 0x00080000
81#define INDIRECT_ADDRESS 0x00100000
82
83/* corresponding link offsets in EEPROM */
84#define EEPROM_LINK_HOST (2*0x64)
85#define EEPROM_LINK_GENERAL (2*0x65)
86#define EEPROM_LINK_REGULATORY (2*0x66)
87#define EEPROM_LINK_CALIBRATION (2*0x67)
88#define EEPROM_LINK_PROCESS_ADJST (2*0x68)
89#define EEPROM_LINK_OTHERS (2*0x69)
90#define EEPROM_LINK_TXP_LIMIT (2*0x6a)
91#define EEPROM_LINK_TXP_LIMIT_SIZE (2*0x6b)
92
93/* General */
94#define EEPROM_DEVICE_ID (2*0x08) /* 2 bytes */
95#define EEPROM_SUBSYSTEM_ID (2*0x0A) /* 2 bytes */
96#define EEPROM_MAC_ADDRESS (2*0x15) /* 6 bytes */
97#define EEPROM_BOARD_REVISION (2*0x35) /* 2 bytes */
98#define EEPROM_BOARD_PBA_NUMBER (2*0x3B+1) /* 9 bytes */
99#define EEPROM_VERSION (2*0x44) /* 2 bytes */
100#define EEPROM_SKU_CAP (2*0x45) /* 2 bytes */
101#define EEPROM_OEM_MODE (2*0x46) /* 2 bytes */
102#define EEPROM_RADIO_CONFIG (2*0x48) /* 2 bytes */
103#define EEPROM_NUM_MAC_ADDRESS (2*0x4C) /* 2 bytes */
104
105/* calibration */
/**
 * struct iwl_eeprom_calib_hdr - calibration section header in the EEPROM
 * @version: calibration data version
 * @pa_type: power amplifier type
 * @voltage: calibration voltage (little endian, raw EEPROM value)
 */
struct iwl_eeprom_calib_hdr {
	u8 version;
	u8 pa_type;
	__le16 voltage;
} __packed;
111
112#define EEPROM_CALIB_ALL (INDIRECT_ADDRESS | INDIRECT_CALIBRATION)
113#define EEPROM_XTAL ((2*0x128) | EEPROM_CALIB_ALL)
114
115/* temperature */
116#define EEPROM_KELVIN_TEMPERATURE ((2*0x12A) | EEPROM_CALIB_ALL)
117#define EEPROM_RAW_TEMPERATURE ((2*0x12B) | EEPROM_CALIB_ALL)
118
119/*
120 * EEPROM bands
121 * These are the channel numbers from each band in the order
122 * that they are stored in the EEPROM band information. Note
123 * that EEPROM bands aren't the same as mac80211 bands, and
124 * there are even special "ht40 bands" in the EEPROM.
125 */
126static const u8 iwl_eeprom_band_1[14] = { /* 2.4 GHz */
127 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
128};
129
130static const u8 iwl_eeprom_band_2[] = { /* 4915-5080MHz */
131 183, 184, 185, 187, 188, 189, 192, 196, 7, 8, 11, 12, 16
132};
133
134static const u8 iwl_eeprom_band_3[] = { /* 5170-5320MHz */
135 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
136};
137
138static const u8 iwl_eeprom_band_4[] = { /* 5500-5700MHz */
139 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
140};
141
142static const u8 iwl_eeprom_band_5[] = { /* 5725-5825MHz */
143 145, 149, 153, 157, 161, 165
144};
145
146static const u8 iwl_eeprom_band_6[] = { /* 2.4 ht40 channel */
147 1, 2, 3, 4, 5, 6, 7
148};
149
150static const u8 iwl_eeprom_band_7[] = { /* 5.2 ht40 channel */
151 36, 44, 52, 60, 100, 108, 116, 124, 132, 149, 157
152};
153
154#define IWL_NUM_CHANNELS (ARRAY_SIZE(iwl_eeprom_band_1) + \
155 ARRAY_SIZE(iwl_eeprom_band_2) + \
156 ARRAY_SIZE(iwl_eeprom_band_3) + \
157 ARRAY_SIZE(iwl_eeprom_band_4) + \
158 ARRAY_SIZE(iwl_eeprom_band_5))
159
160/* rate data (static) */
161static struct ieee80211_rate iwl_cfg80211_rates[] = {
162 { .bitrate = 1 * 10, .hw_value = 0, .hw_value_short = 0, },
163 { .bitrate = 2 * 10, .hw_value = 1, .hw_value_short = 1,
164 .flags = IEEE80211_RATE_SHORT_PREAMBLE, },
165 { .bitrate = 5.5 * 10, .hw_value = 2, .hw_value_short = 2,
166 .flags = IEEE80211_RATE_SHORT_PREAMBLE, },
167 { .bitrate = 11 * 10, .hw_value = 3, .hw_value_short = 3,
168 .flags = IEEE80211_RATE_SHORT_PREAMBLE, },
169 { .bitrate = 6 * 10, .hw_value = 4, .hw_value_short = 4, },
170 { .bitrate = 9 * 10, .hw_value = 5, .hw_value_short = 5, },
171 { .bitrate = 12 * 10, .hw_value = 6, .hw_value_short = 6, },
172 { .bitrate = 18 * 10, .hw_value = 7, .hw_value_short = 7, },
173 { .bitrate = 24 * 10, .hw_value = 8, .hw_value_short = 8, },
174 { .bitrate = 36 * 10, .hw_value = 9, .hw_value_short = 9, },
175 { .bitrate = 48 * 10, .hw_value = 10, .hw_value_short = 10, },
176 { .bitrate = 54 * 10, .hw_value = 11, .hw_value_short = 11, },
177};
178#define RATES_24_OFFS 0
179#define N_RATES_24 ARRAY_SIZE(iwl_cfg80211_rates)
180#define RATES_52_OFFS 4
181#define N_RATES_52 (N_RATES_24 - RATES_52_OFFS)
182
183/* EEPROM reading functions */
184
185static u16 iwl_eeprom_query16(const u8 *eeprom, size_t eeprom_size, int offset)
186{
187 if (WARN_ON(offset + sizeof(u16) > eeprom_size))
188 return 0;
189 return le16_to_cpup((__le16 *)(eeprom + offset));
190}
191
192static u32 eeprom_indirect_address(const u8 *eeprom, size_t eeprom_size,
193 u32 address)
194{
195 u16 offset = 0;
196
197 if ((address & INDIRECT_ADDRESS) == 0)
198 return address;
199
200 switch (address & INDIRECT_TYPE_MSK) {
201 case INDIRECT_HOST:
202 offset = iwl_eeprom_query16(eeprom, eeprom_size,
203 EEPROM_LINK_HOST);
204 break;
205 case INDIRECT_GENERAL:
206 offset = iwl_eeprom_query16(eeprom, eeprom_size,
207 EEPROM_LINK_GENERAL);
208 break;
209 case INDIRECT_REGULATORY:
210 offset = iwl_eeprom_query16(eeprom, eeprom_size,
211 EEPROM_LINK_REGULATORY);
212 break;
213 case INDIRECT_TXP_LIMIT:
214 offset = iwl_eeprom_query16(eeprom, eeprom_size,
215 EEPROM_LINK_TXP_LIMIT);
216 break;
217 case INDIRECT_TXP_LIMIT_SIZE:
218 offset = iwl_eeprom_query16(eeprom, eeprom_size,
219 EEPROM_LINK_TXP_LIMIT_SIZE);
220 break;
221 case INDIRECT_CALIBRATION:
222 offset = iwl_eeprom_query16(eeprom, eeprom_size,
223 EEPROM_LINK_CALIBRATION);
224 break;
225 case INDIRECT_PROCESS_ADJST:
226 offset = iwl_eeprom_query16(eeprom, eeprom_size,
227 EEPROM_LINK_PROCESS_ADJST);
228 break;
229 case INDIRECT_OTHERS:
230 offset = iwl_eeprom_query16(eeprom, eeprom_size,
231 EEPROM_LINK_OTHERS);
232 break;
233 default:
234 WARN_ON(1);
235 break;
236 }
237
238 /* translate the offset from words to byte */
239 return (address & ADDRESS_MSK) + (offset << 1);
240}
241
242static const u8 *iwl_eeprom_query_addr(const u8 *eeprom, size_t eeprom_size,
243 u32 offset)
244{
245 u32 address = eeprom_indirect_address(eeprom, eeprom_size, offset);
246
247 if (WARN_ON(address >= eeprom_size))
248 return NULL;
249
250 return &eeprom[address];
251}
252
253static int iwl_eeprom_read_calib(const u8 *eeprom, size_t eeprom_size,
254 struct iwl_eeprom_data *data)
255{
256 struct iwl_eeprom_calib_hdr *hdr;
257
258 hdr = (void *)iwl_eeprom_query_addr(eeprom, eeprom_size,
259 EEPROM_CALIB_ALL);
260 if (!hdr)
261 return -ENODATA;
262 data->calib_version = hdr->version;
263 data->calib_voltage = hdr->voltage;
264
265 return 0;
266}
267
268/**
269 * enum iwl_eeprom_channel_flags - channel flags in EEPROM
270 * @EEPROM_CHANNEL_VALID: channel is usable for this SKU/geo
271 * @EEPROM_CHANNEL_IBSS: usable as an IBSS channel
272 * @EEPROM_CHANNEL_ACTIVE: active scanning allowed
273 * @EEPROM_CHANNEL_RADAR: radar detection required
274 * @EEPROM_CHANNEL_WIDE: 20 MHz channel okay (?)
275 * @EEPROM_CHANNEL_DFS: dynamic freq selection candidate
276 */
277enum iwl_eeprom_channel_flags {
278 EEPROM_CHANNEL_VALID = BIT(0),
279 EEPROM_CHANNEL_IBSS = BIT(1),
280 EEPROM_CHANNEL_ACTIVE = BIT(3),
281 EEPROM_CHANNEL_RADAR = BIT(4),
282 EEPROM_CHANNEL_WIDE = BIT(5),
283 EEPROM_CHANNEL_DFS = BIT(7),
284};
285
286/**
287 * struct iwl_eeprom_channel - EEPROM channel data
288 * @flags: %EEPROM_CHANNEL_* flags
289 * @max_power_avg: max power (in dBm) on this channel, at most 31 dBm
290 */
291struct iwl_eeprom_channel {
292 u8 flags;
293 s8 max_power_avg;
294} __packed;
295
296
/**
 * enum iwl_eeprom_enhanced_txpwr_flags - enhanced TX power entry flags
 * @IWL_EEPROM_ENH_TXP_FL_VALID: entry is valid (invalid entries are skipped)
 * @IWL_EEPROM_ENH_TXP_FL_BAND_52G: entry applies to 5.2 GHz (else 2.4 GHz)
 * @IWL_EEPROM_ENH_TXP_FL_OFDM: OFDM entry
 * @IWL_EEPROM_ENH_TXP_FL_40MHZ: 40 MHz entry (does not raise 20 MHz limits)
 * @IWL_EEPROM_ENH_TXP_FL_HT_AP: HT AP entry
 * @IWL_EEPROM_ENH_TXP_FL_RES1: reserved
 * @IWL_EEPROM_ENH_TXP_FL_RES2: reserved
 * @IWL_EEPROM_ENH_TXP_FL_COMMON_TYPE: "common" entry type
 */
enum iwl_eeprom_enhanced_txpwr_flags {
	IWL_EEPROM_ENH_TXP_FL_VALID		= BIT(0),
	IWL_EEPROM_ENH_TXP_FL_BAND_52G		= BIT(1),
	IWL_EEPROM_ENH_TXP_FL_OFDM		= BIT(2),
	IWL_EEPROM_ENH_TXP_FL_40MHZ		= BIT(3),
	IWL_EEPROM_ENH_TXP_FL_HT_AP		= BIT(4),
	IWL_EEPROM_ENH_TXP_FL_RES1		= BIT(5),
	IWL_EEPROM_ENH_TXP_FL_RES2		= BIT(6),
	IWL_EEPROM_ENH_TXP_FL_COMMON_TYPE	= BIT(7),
};
307
/**
 * struct iwl_eeprom_enhanced_txpwr - enhanced regulatory TX power limit
 * @flags: entry flags (%IWL_EEPROM_ENH_TXP_FL_*)
 * @channel: channel number; 0 means the entry is common to the whole band
 * @chain_a_max: chain a max power in 1/2 dBm
 * @chain_b_max: chain b max power in 1/2 dBm
 * @chain_c_max: chain c max power in 1/2 dBm
 * @delta_20_in_40: 20-in-40 deltas (high/low nibbles)
 * @mimo2_max: mimo2 max power in 1/2 dBm
 * @mimo3_max: mimo3 max power in 1/2 dBm
 *
 * This structure presents the enhanced regulatory tx power limit layout
 * in an EEPROM image.
 */
struct iwl_eeprom_enhanced_txpwr {
	u8 flags;
	u8 channel;
	s8 chain_a_max;
	s8 chain_b_max;
	s8 chain_c_max;
	u8 delta_20_in_40;
	s8 mimo2_max;
	s8 mimo3_max;
} __packed;
332
333static s8 iwl_get_max_txpwr_half_dbm(const struct iwl_eeprom_data *data,
334 struct iwl_eeprom_enhanced_txpwr *txp)
335{
336 s8 result = 0; /* (.5 dBm) */
337
338 /* Take the highest tx power from any valid chains */
339 if (data->valid_tx_ant & ANT_A && txp->chain_a_max > result)
340 result = txp->chain_a_max;
341
342 if (data->valid_tx_ant & ANT_B && txp->chain_b_max > result)
343 result = txp->chain_b_max;
344
345 if (data->valid_tx_ant & ANT_C && txp->chain_c_max > result)
346 result = txp->chain_c_max;
347
348 if ((data->valid_tx_ant == ANT_AB ||
349 data->valid_tx_ant == ANT_BC ||
350 data->valid_tx_ant == ANT_AC) && txp->mimo2_max > result)
351 result = txp->mimo2_max;
352
353 if (data->valid_tx_ant == ANT_ABC && txp->mimo3_max > result)
354 result = txp->mimo3_max;
355
356 return result;
357}
358
359#define EEPROM_TXP_OFFS (0x00 | INDIRECT_ADDRESS | INDIRECT_TXP_LIMIT)
360#define EEPROM_TXP_ENTRY_LEN sizeof(struct iwl_eeprom_enhanced_txpwr)
361#define EEPROM_TXP_SZ_OFFS (0x00 | INDIRECT_ADDRESS | INDIRECT_TXP_LIMIT_SIZE)
362
363#define TXP_CHECK_AND_PRINT(x) \
364 ((txp->flags & IWL_EEPROM_ENH_TXP_FL_##x) ? # x " " : "")
365
366static void
367iwl_eeprom_enh_txp_read_element(struct iwl_eeprom_data *data,
368 struct iwl_eeprom_enhanced_txpwr *txp,
369 int n_channels, s8 max_txpower_avg)
370{
371 int ch_idx;
372 enum ieee80211_band band;
373
374 band = txp->flags & IWL_EEPROM_ENH_TXP_FL_BAND_52G ?
375 IEEE80211_BAND_5GHZ : IEEE80211_BAND_2GHZ;
376
377 for (ch_idx = 0; ch_idx < n_channels; ch_idx++) {
378 struct ieee80211_channel *chan = &data->channels[ch_idx];
379
380 /* update matching channel or from common data only */
381 if (txp->channel != 0 && chan->hw_value != txp->channel)
382 continue;
383
384 /* update matching band only */
385 if (band != chan->band)
386 continue;
387
388 if (chan->max_power < max_txpower_avg &&
389 !(txp->flags & IWL_EEPROM_ENH_TXP_FL_40MHZ))
390 chan->max_power = max_txpower_avg;
391 }
392}
393
394static void iwl_eeprom_enhanced_txpower(struct device *dev,
395 struct iwl_eeprom_data *data,
396 const u8 *eeprom, size_t eeprom_size,
397 int n_channels)
398{
399 struct iwl_eeprom_enhanced_txpwr *txp_array, *txp;
400 int idx, entries;
401 __le16 *txp_len;
402 s8 max_txp_avg_halfdbm;
403
404 BUILD_BUG_ON(sizeof(struct iwl_eeprom_enhanced_txpwr) != 8);
405
406 /* the length is in 16-bit words, but we want entries */
407 txp_len = (__le16 *)iwl_eeprom_query_addr(eeprom, eeprom_size,
408 EEPROM_TXP_SZ_OFFS);
409 entries = le16_to_cpup(txp_len) * 2 / EEPROM_TXP_ENTRY_LEN;
410
411 txp_array = (void *)iwl_eeprom_query_addr(eeprom, eeprom_size,
412 EEPROM_TXP_OFFS);
413
414 for (idx = 0; idx < entries; idx++) {
415 txp = &txp_array[idx];
416 /* skip invalid entries */
417 if (!(txp->flags & IWL_EEPROM_ENH_TXP_FL_VALID))
418 continue;
419
420 IWL_DEBUG_EEPROM(dev, "%s %d:\t %s%s%s%s%s%s%s%s (0x%02x)\n",
421 (txp->channel && (txp->flags &
422 IWL_EEPROM_ENH_TXP_FL_COMMON_TYPE)) ?
423 "Common " : (txp->channel) ?
424 "Channel" : "Common",
425 (txp->channel),
426 TXP_CHECK_AND_PRINT(VALID),
427 TXP_CHECK_AND_PRINT(BAND_52G),
428 TXP_CHECK_AND_PRINT(OFDM),
429 TXP_CHECK_AND_PRINT(40MHZ),
430 TXP_CHECK_AND_PRINT(HT_AP),
431 TXP_CHECK_AND_PRINT(RES1),
432 TXP_CHECK_AND_PRINT(RES2),
433 TXP_CHECK_AND_PRINT(COMMON_TYPE),
434 txp->flags);
435 IWL_DEBUG_EEPROM(dev,
436 "\t\t chain_A: 0x%02x chain_B: 0X%02x chain_C: 0X%02x\n",
437 txp->chain_a_max, txp->chain_b_max,
438 txp->chain_c_max);
439 IWL_DEBUG_EEPROM(dev,
440 "\t\t MIMO2: 0x%02x MIMO3: 0x%02x High 20_on_40: 0x%02x Low 20_on_40: 0x%02x\n",
441 txp->mimo2_max, txp->mimo3_max,
442 ((txp->delta_20_in_40 & 0xf0) >> 4),
443 (txp->delta_20_in_40 & 0x0f));
444
445 max_txp_avg_halfdbm = iwl_get_max_txpwr_half_dbm(data, txp);
446
447 iwl_eeprom_enh_txp_read_element(data, txp, n_channels,
448 DIV_ROUND_UP(max_txp_avg_halfdbm, 2));
449
450 if (max_txp_avg_halfdbm > data->max_tx_pwr_half_dbm)
451 data->max_tx_pwr_half_dbm = max_txp_avg_halfdbm;
452 }
453}
454
455static void iwl_init_band_reference(const struct iwl_cfg *cfg,
456 const u8 *eeprom, size_t eeprom_size,
457 int eeprom_band, int *eeprom_ch_count,
458 const struct iwl_eeprom_channel **ch_info,
459 const u8 **eeprom_ch_array)
460{
461 u32 offset = cfg->eeprom_params->regulatory_bands[eeprom_band - 1];
462
463 offset |= INDIRECT_ADDRESS | INDIRECT_REGULATORY;
464
465 *ch_info = (void *)iwl_eeprom_query_addr(eeprom, eeprom_size, offset);
466
467 switch (eeprom_band) {
468 case 1: /* 2.4GHz band */
469 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_1);
470 *eeprom_ch_array = iwl_eeprom_band_1;
471 break;
472 case 2: /* 4.9GHz band */
473 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_2);
474 *eeprom_ch_array = iwl_eeprom_band_2;
475 break;
476 case 3: /* 5.2GHz band */
477 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_3);
478 *eeprom_ch_array = iwl_eeprom_band_3;
479 break;
480 case 4: /* 5.5GHz band */
481 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_4);
482 *eeprom_ch_array = iwl_eeprom_band_4;
483 break;
484 case 5: /* 5.7GHz band */
485 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_5);
486 *eeprom_ch_array = iwl_eeprom_band_5;
487 break;
488 case 6: /* 2.4GHz ht40 channels */
489 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_6);
490 *eeprom_ch_array = iwl_eeprom_band_6;
491 break;
492 case 7: /* 5 GHz ht40 channels */
493 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_7);
494 *eeprom_ch_array = iwl_eeprom_band_7;
495 break;
496 default:
497 *eeprom_ch_count = 0;
498 *eeprom_ch_array = NULL;
499 WARN_ON(1);
500 }
501}
502
503#define CHECK_AND_PRINT(x) \
504 ((eeprom_ch->flags & EEPROM_CHANNEL_##x) ? # x " " : "")
505
/*
 * Update the HT40 capability of one already-parsed channel: if the
 * HT40 EEPROM entry marks it valid, clear @clear_ht40_extension_channel
 * (NO_HT40PLUS or NO_HT40MINUS) from the channel's flags.  Channels
 * not present in @data are silently ignored.
 */
static void iwl_mod_ht40_chan_info(struct device *dev,
				   struct iwl_eeprom_data *data, int n_channels,
				   enum ieee80211_band band, u16 channel,
				   const struct iwl_eeprom_channel *eeprom_ch,
				   u8 clear_ht40_extension_channel)
{
	struct ieee80211_channel *chan = NULL;
	int i;

	/* linear search for the channel by band + channel number */
	for (i = 0; i < n_channels; i++) {
		if (data->channels[i].band != band)
			continue;
		if (data->channels[i].hw_value != channel)
			continue;
		chan = &data->channels[i];
		break;
	}

	if (!chan)
		return;

	IWL_DEBUG_EEPROM(dev,
			 "HT40 Ch. %d [%sGHz] %s%s%s%s%s(0x%02x %ddBm): Ad-Hoc %ssupported\n",
			 channel,
			 band == IEEE80211_BAND_5GHZ ? "5.2" : "2.4",
			 CHECK_AND_PRINT(IBSS),
			 CHECK_AND_PRINT(ACTIVE),
			 CHECK_AND_PRINT(RADAR),
			 CHECK_AND_PRINT(WIDE),
			 CHECK_AND_PRINT(DFS),
			 eeprom_ch->flags,
			 eeprom_ch->max_power_avg,
			 ((eeprom_ch->flags & EEPROM_CHANNEL_IBSS) &&
			  !(eeprom_ch->flags & EEPROM_CHANNEL_RADAR)) ? ""
			 : "not ");

	/* valid in the EEPROM -> this HT40 extension direction is usable */
	if (eeprom_ch->flags & EEPROM_CHANNEL_VALID)
		chan->flags &= ~clear_ht40_extension_channel;
}
545
546#define CHECK_AND_PRINT_I(x) \
547 ((eeprom_ch_info[ch_idx].flags & EEPROM_CHANNEL_##x) ? # x " " : "")
548
/*
 * Build data->channels from the EEPROM regulatory bands.
 *
 * Walks EEPROM bands 1-5 (2.4 GHz + four 5 GHz sub-bands), appending
 * one ieee80211_channel per EEPROM-valid channel, then computes the
 * device's max TX power (enhanced-txpower section for newer devices,
 * per-channel values otherwise) and finally applies the two HT40
 * EEPROM bands (6 and 7) to clear the NO_HT40PLUS/MINUS flags.
 *
 * Returns the number of channels filled into data->channels.
 *
 * NOTE(review): eeprom_ch_info from iwl_init_band_reference() can be
 * NULL if the regulatory section offset is corrupt (query_addr WARNs
 * and returns NULL); it is dereferenced below without a check --
 * assumed valid here, verify against callers/eeprom validation.
 */
static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
				struct iwl_eeprom_data *data,
				const u8 *eeprom, size_t eeprom_size)
{
	int band, ch_idx;
	const struct iwl_eeprom_channel *eeprom_ch_info;
	const u8 *eeprom_ch_array;
	int eeprom_ch_count;
	int n_channels = 0;

	/*
	 * Loop through the 5 EEPROM bands and add them to the parse list
	 */
	for (band = 1; band <= 5; band++) {
		struct ieee80211_channel *channel;

		iwl_init_band_reference(cfg, eeprom, eeprom_size, band,
					&eeprom_ch_count, &eeprom_ch_info,
					&eeprom_ch_array);

		/* Loop through each band adding each of the channels */
		for (ch_idx = 0; ch_idx < eeprom_ch_count; ch_idx++) {
			const struct iwl_eeprom_channel *eeprom_ch;

			eeprom_ch = &eeprom_ch_info[ch_idx];

			/* invalid channels contribute nothing to the map */
			if (!(eeprom_ch->flags & EEPROM_CHANNEL_VALID)) {
				IWL_DEBUG_EEPROM(dev,
						 "Ch. %d Flags %x [%sGHz] - No traffic\n",
						 eeprom_ch_array[ch_idx],
						 eeprom_ch_info[ch_idx].flags,
						 (band != 1) ? "5.2" : "2.4");
				continue;
			}

			channel = &data->channels[n_channels];
			n_channels++;

			channel->hw_value = eeprom_ch_array[ch_idx];
			/* only EEPROM band 1 is 2.4 GHz */
			channel->band = (band == 1) ? IEEE80211_BAND_2GHZ
						    : IEEE80211_BAND_5GHZ;
			channel->center_freq =
				ieee80211_channel_to_frequency(
					channel->hw_value, channel->band);

			/* set no-HT40, will enable as appropriate later */
			channel->flags = IEEE80211_CHAN_NO_HT40;

			if (!(eeprom_ch->flags & EEPROM_CHANNEL_IBSS))
				channel->flags |= IEEE80211_CHAN_NO_IBSS;

			if (!(eeprom_ch->flags & EEPROM_CHANNEL_ACTIVE))
				channel->flags |= IEEE80211_CHAN_PASSIVE_SCAN;

			if (eeprom_ch->flags & EEPROM_CHANNEL_RADAR)
				channel->flags |= IEEE80211_CHAN_RADAR;

			/* Initialize regulatory-based run-time data */
			channel->max_power =
				eeprom_ch_info[ch_idx].max_power_avg;
			IWL_DEBUG_EEPROM(dev,
					 "Ch. %d [%sGHz] %s%s%s%s%s%s(0x%02x %ddBm): Ad-Hoc %ssupported\n",
					 channel->hw_value,
					 (band != 1) ? "5.2" : "2.4",
					 CHECK_AND_PRINT_I(VALID),
					 CHECK_AND_PRINT_I(IBSS),
					 CHECK_AND_PRINT_I(ACTIVE),
					 CHECK_AND_PRINT_I(RADAR),
					 CHECK_AND_PRINT_I(WIDE),
					 CHECK_AND_PRINT_I(DFS),
					 eeprom_ch_info[ch_idx].flags,
					 eeprom_ch_info[ch_idx].max_power_avg,
					 ((eeprom_ch_info[ch_idx].flags &
					   EEPROM_CHANNEL_IBSS) &&
					  !(eeprom_ch_info[ch_idx].flags &
					    EEPROM_CHANNEL_RADAR))
						? "" : "not ");
		}
	}

	if (cfg->eeprom_params->enhanced_txpower) {
		/*
		 * for newer device (6000 series and up)
		 * EEPROM contain enhanced tx power information
		 * driver need to process addition information
		 * to determine the max channel tx power limits
		 */
		iwl_eeprom_enhanced_txpower(dev, data, eeprom, eeprom_size,
					    n_channels);
	} else {
		/* All others use data from channel map */
		int i;

		data->max_tx_pwr_half_dbm = -128;

		for (i = 0; i < n_channels; i++)
			data->max_tx_pwr_half_dbm =
				max_t(s8, data->max_tx_pwr_half_dbm,
				      data->channels[i].max_power * 2);
	}

	/* Check if we do have HT40 channels */
	if (cfg->eeprom_params->regulatory_bands[5] ==
			EEPROM_REGULATORY_BAND_NO_HT40 &&
	    cfg->eeprom_params->regulatory_bands[6] ==
			EEPROM_REGULATORY_BAND_NO_HT40)
		return n_channels;

	/* Two additional EEPROM bands for 2.4 and 5 GHz HT40 channels */
	for (band = 6; band <= 7; band++) {
		enum ieee80211_band ieeeband;

		iwl_init_band_reference(cfg, eeprom, eeprom_size, band,
					&eeprom_ch_count, &eeprom_ch_info,
					&eeprom_ch_array);

		/* EEPROM band 6 is 2.4, band 7 is 5 GHz */
		ieeeband = (band == 6) ? IEEE80211_BAND_2GHZ
				       : IEEE80211_BAND_5GHZ;

		/* Loop through each band adding each of the channels */
		for (ch_idx = 0; ch_idx < eeprom_ch_count; ch_idx++) {
			/* Set up driver's info for lower half */
			iwl_mod_ht40_chan_info(dev, data, n_channels, ieeeband,
					       eeprom_ch_array[ch_idx],
					       &eeprom_ch_info[ch_idx],
					       IEEE80211_CHAN_NO_HT40PLUS);

			/* Set up driver's info for upper half */
			iwl_mod_ht40_chan_info(dev, data, n_channels, ieeeband,
					       eeprom_ch_array[ch_idx] + 4,
					       &eeprom_ch_info[ch_idx],
					       IEEE80211_CHAN_NO_HT40MINUS);
		}
	}

	return n_channels;
}
687
688static int iwl_init_sband_channels(struct iwl_eeprom_data *data,
689 struct ieee80211_supported_band *sband,
690 int n_channels, enum ieee80211_band band)
691{
692 struct ieee80211_channel *chan = &data->channels[0];
693 int n = 0, idx = 0;
694
695 while (chan->band != band && idx < n_channels)
696 chan = &data->channels[++idx];
697
698 sband->channels = &data->channels[idx];
699
700 while (chan->band == band && idx < n_channels) {
701 chan = &data->channels[++idx];
702 n++;
703 }
704
705 sband->n_channels = n;
706
707 return n;
708}
709
710#define MAX_BIT_RATE_40_MHZ 150 /* Mbps */
711#define MAX_BIT_RATE_20_MHZ 72 /* Mbps */
712
/*
 * Fill @ht_info (mac80211 HT capabilities) for @band from the parsed
 * EEPROM data, the config and module parameters.  HT is disabled
 * entirely if the SKU has no 11n support or the config carries no
 * HT parameters.
 */
static void iwl_init_ht_hw_capab(const struct iwl_cfg *cfg,
				 struct iwl_eeprom_data *data,
				 struct ieee80211_sta_ht_cap *ht_info,
				 enum ieee80211_band band)
{
	int max_bit_rate = 0;
	u8 rx_chains;
	u8 tx_chains;

	tx_chains = hweight8(data->valid_tx_ant);
	/* SISO-diversity devices receive on one chain only */
	if (cfg->rx_with_siso_diversity)
		rx_chains = 1;
	else
		rx_chains = hweight8(data->valid_rx_ant);

	if (!(data->sku & EEPROM_SKU_CAP_11N_ENABLE) || !cfg->ht_params) {
		ht_info->ht_supported = false;
		return;
	}

	ht_info->ht_supported = true;
	ht_info->cap = 0;

	/* 8K A-MSDU only if the module parameter asks for it */
	if (iwlwifi_mod_params.amsdu_size_8K)
		ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU;

	ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
	ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_4;

	/* one MCS byte (MCS 0-7) per RX chain */
	ht_info->mcs.rx_mask[0] = 0xFF;
	if (rx_chains >= 2)
		ht_info->mcs.rx_mask[1] = 0xFF;
	if (rx_chains >= 3)
		ht_info->mcs.rx_mask[2] = 0xFF;

	if (cfg->ht_params->ht_greenfield_support)
		ht_info->cap |= IEEE80211_HT_CAP_GRN_FLD;
	ht_info->cap |= IEEE80211_HT_CAP_SGI_20;

	max_bit_rate = MAX_BIT_RATE_20_MHZ;

	if (cfg->ht_params->ht40_bands & BIT(band)) {
		ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
		ht_info->cap |= IEEE80211_HT_CAP_SGI_40;
		/* bit 0 of rx_mask[4] advertises MCS 32 */
		ht_info->mcs.rx_mask[4] = 0x01;
		max_bit_rate = MAX_BIT_RATE_40_MHZ;
	}

	/* Highest supported Rx data rate */
	max_bit_rate *= rx_chains;
	WARN_ON(max_bit_rate & ~IEEE80211_HT_MCS_RX_HIGHEST_MASK);
	ht_info->mcs.rx_highest = cpu_to_le16(max_bit_rate);

	/* Tx MCS capabilities */
	ht_info->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
	if (tx_chains != rx_chains) {
		ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
		ht_info->mcs.tx_params |= ((tx_chains - 1) <<
				IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
	}
}
774
775static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
776 struct iwl_eeprom_data *data,
777 const u8 *eeprom, size_t eeprom_size)
778{
779 int n_channels = iwl_init_channel_map(dev, cfg, data,
780 eeprom, eeprom_size);
781 int n_used = 0;
782 struct ieee80211_supported_band *sband;
783
784 sband = &data->bands[IEEE80211_BAND_2GHZ];
785 sband->band = IEEE80211_BAND_2GHZ;
786 sband->bitrates = &iwl_cfg80211_rates[RATES_24_OFFS];
787 sband->n_bitrates = N_RATES_24;
788 n_used += iwl_init_sband_channels(data, sband, n_channels,
789 IEEE80211_BAND_2GHZ);
790 iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, IEEE80211_BAND_2GHZ);
791
792 sband = &data->bands[IEEE80211_BAND_5GHZ];
793 sband->band = IEEE80211_BAND_5GHZ;
794 sband->bitrates = &iwl_cfg80211_rates[RATES_52_OFFS];
795 sband->n_bitrates = N_RATES_52;
796 n_used += iwl_init_sband_channels(data, sband, n_channels,
797 IEEE80211_BAND_5GHZ);
798 iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, IEEE80211_BAND_5GHZ);
799
800 if (n_channels != n_used)
801 IWL_ERR_DEV(dev, "EEPROM: used only %d of %d channels\n",
802 n_used, n_channels);
803}
804
805/* EEPROM data functions */
806
/*
 * Parse a raw EEPROM image into a freshly allocated iwl_eeprom_data
 * (with IWL_NUM_CHANNELS channel slots appended): MAC address,
 * calibration, temperature, radio config, SKU, version, antenna
 * masks and the supported-bands/channel map.
 *
 * Returns the allocated structure (caller frees), or NULL on a bad
 * config or unparseable image.
 */
struct iwl_eeprom_data *
iwl_parse_eeprom_data(struct device *dev, const struct iwl_cfg *cfg,
		      const u8 *eeprom, size_t eeprom_size)
{
	struct iwl_eeprom_data *data;
	const void *tmp;

	if (WARN_ON(!cfg || !cfg->eeprom_params))
		return NULL;

	/* channels array is a flexible tail of the struct */
	data = kzalloc(sizeof(*data) +
		       sizeof(struct ieee80211_channel) * IWL_NUM_CHANNELS,
		       GFP_KERNEL);
	if (!data)
		return NULL;

	/* get MAC address(es) */
	tmp = iwl_eeprom_query_addr(eeprom, eeprom_size, EEPROM_MAC_ADDRESS);
	if (!tmp)
		goto err_free;
	memcpy(data->hw_addr, tmp, ETH_ALEN);
	data->n_hw_addrs = iwl_eeprom_query16(eeprom, eeprom_size,
					      EEPROM_NUM_MAC_ADDRESS);

	/* calibration header: version and voltage */
	if (iwl_eeprom_read_calib(eeprom, eeprom_size, data))
		goto err_free;

	tmp = iwl_eeprom_query_addr(eeprom, eeprom_size, EEPROM_XTAL);
	if (!tmp)
		goto err_free;
	memcpy(data->xtal_calib, tmp, sizeof(data->xtal_calib));

	tmp = iwl_eeprom_query_addr(eeprom, eeprom_size,
				    EEPROM_RAW_TEMPERATURE);
	if (!tmp)
		goto err_free;
	data->raw_temperature = *(__le16 *)tmp;

	/* kelvin temperature entry: temperature then voltage word */
	tmp = iwl_eeprom_query_addr(eeprom, eeprom_size,
				    EEPROM_KELVIN_TEMPERATURE);
	if (!tmp)
		goto err_free;
	data->kelvin_temperature = *(__le16 *)tmp;
	data->kelvin_voltage = *((__le16 *)tmp + 1);

	data->radio_cfg = iwl_eeprom_query16(eeprom, eeprom_size,
					     EEPROM_RADIO_CONFIG);
	data->sku = iwl_eeprom_query16(eeprom, eeprom_size,
				       EEPROM_SKU_CAP);
	/* module parameter may force 11n off regardless of the SKU */
	if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_ALL)
		data->sku &= ~EEPROM_SKU_CAP_11N_ENABLE;

	data->eeprom_version = iwl_eeprom_query16(eeprom, eeprom_size,
						  EEPROM_VERSION);

	data->valid_tx_ant = EEPROM_RF_CFG_TX_ANT_MSK(data->radio_cfg);
	data->valid_rx_ant = EEPROM_RF_CFG_RX_ANT_MSK(data->radio_cfg);

	/* check overrides (some devices have wrong EEPROM) */
	if (cfg->valid_tx_ant)
		data->valid_tx_ant = cfg->valid_tx_ant;
	if (cfg->valid_rx_ant)
		data->valid_rx_ant = cfg->valid_rx_ant;

	if (!data->valid_tx_ant || !data->valid_rx_ant) {
		IWL_ERR_DEV(dev, "invalid antennas (0x%x, 0x%x)\n",
			    data->valid_tx_ant, data->valid_rx_ant);
		goto err_free;
	}

	iwl_init_sbands(dev, cfg, data, eeprom, eeprom_size);

	return data;
 err_free:
	kfree(data);
	return NULL;
}
884EXPORT_SYMBOL_GPL(iwl_parse_eeprom_data);
885
886/* helper functions */
887int iwl_eeprom_check_version(struct iwl_eeprom_data *data,
888 struct iwl_trans *trans)
889{
890 if (data->eeprom_version >= trans->cfg->eeprom_ver ||
891 data->calib_version >= trans->cfg->eeprom_calib_ver) {
892 IWL_INFO(trans, "device EEPROM VER=0x%x, CALIB=0x%x\n",
893 data->eeprom_version, data->calib_version);
894 return 0;
895 }
896
897 IWL_ERR(trans,
898 "Unsupported (too old) EEPROM VER=0x%x < 0x%x CALIB=0x%x < 0x%x\n",
899 data->eeprom_version, trans->cfg->eeprom_ver,
900 data->calib_version, trans->cfg->eeprom_calib_ver);
901 return -EINVAL;
902}
903EXPORT_SYMBOL_GPL(iwl_eeprom_check_version);
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h
new file mode 100644
index 00000000000..9c07c670a1c
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h
@@ -0,0 +1,138 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *****************************************************************************/
62#ifndef __iwl_eeprom_parse_h__
63#define __iwl_eeprom_parse_h__
64
65#include <linux/types.h>
66#include <linux/if_ether.h>
67#include "iwl-trans.h"
68
69/* SKU Capabilities (actual values from EEPROM definition) */
70#define EEPROM_SKU_CAP_BAND_24GHZ (1 << 4)
71#define EEPROM_SKU_CAP_BAND_52GHZ (1 << 5)
72#define EEPROM_SKU_CAP_11N_ENABLE (1 << 6)
73#define EEPROM_SKU_CAP_AMT_ENABLE (1 << 7)
74#define EEPROM_SKU_CAP_IPAN_ENABLE (1 << 8)
75
/*
 * radio config bits (actual values from EEPROM definition)
 *
 * Arguments are fully parenthesized so that expression arguments
 * (e.g. TYPE_MSK(a | b)) evaluate correctly; the unparenthesized
 * form binds '&'/'>>' to only part of the argument.
 */
#define EEPROM_RF_CFG_TYPE_MSK(x)   ((x) & 0x3)          /* bits 0-1   */
#define EEPROM_RF_CFG_STEP_MSK(x)   (((x) >> 2) & 0x3)   /* bits 2-3   */
#define EEPROM_RF_CFG_DASH_MSK(x)   (((x) >> 4) & 0x3)   /* bits 4-5   */
#define EEPROM_RF_CFG_PNUM_MSK(x)   (((x) >> 6) & 0x3)   /* bits 6-7   */
#define EEPROM_RF_CFG_TX_ANT_MSK(x) (((x) >> 8) & 0xF)   /* bits 8-11  */
#define EEPROM_RF_CFG_RX_ANT_MSK(x) (((x) >> 12) & 0xF)  /* bits 12-15 */
83
/*
 * Parsed EEPROM image, allocated by iwl_parse_eeprom_data() and
 * released with iwl_free_eeprom_data().
 */
struct iwl_eeprom_data {
	int n_hw_addrs;
	u8 hw_addr[ETH_ALEN];

	/*
	 * NOTE(review): appears unused - the parser fills radio_cfg
	 * below instead; confirm before relying on this field.
	 */
	u16 radio_config;

	u8 calib_version;
	__le16 calib_voltage;

	__le16 raw_temperature;
	__le16 kelvin_temperature;
	__le16 kelvin_voltage;
	__le16 xtal_calib[2];

	u16 sku;		/* EEPROM_SKU_CAP_* bits */
	u16 radio_cfg;		/* raw radio-config word; decode with
				 * the EEPROM_RF_CFG_*_MSK() macros */
	u16 eeprom_version;
	s8 max_tx_pwr_half_dbm;

	/* antenna bitmasks after any cfg overrides have been applied */
	u8 valid_tx_ant, valid_rx_ant;

	struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS];
	/* flexible array member: channel storage allocated with the struct */
	struct ieee80211_channel channels[];
};
108
109/**
110 * iwl_parse_eeprom_data - parse EEPROM data and return values
111 *
112 * @dev: device pointer we're parsing for, for debug only
113 * @cfg: device configuration for parsing and overrides
114 * @eeprom: the EEPROM data
115 * @eeprom_size: length of the EEPROM data
116 *
117 * This function parses all EEPROM values we need and then
118 * returns a (newly allocated) struct containing all the
119 * relevant values for driver use. The struct must be freed
120 * later with iwl_free_eeprom_data().
121 */
122struct iwl_eeprom_data *
123iwl_parse_eeprom_data(struct device *dev, const struct iwl_cfg *cfg,
124 const u8 *eeprom, size_t eeprom_size);
125
/**
 * iwl_free_eeprom_data - free EEPROM data
 * @data: the data to free
 *
 * Counterpart to iwl_parse_eeprom_data(); safe to call with a NULL
 * pointer since kfree(NULL) is a no-op.
 */
static inline void iwl_free_eeprom_data(struct iwl_eeprom_data *data)
{
	kfree(data);
}
134
135int iwl_eeprom_check_version(struct iwl_eeprom_data *data,
136 struct iwl_trans *trans);
137
138#endif /* __iwl_eeprom_parse_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom-read.c b/drivers/net/wireless/iwlwifi/iwl-eeprom-read.c
new file mode 100644
index 00000000000..27c7da3c6ed
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom-read.c
@@ -0,0 +1,463 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *****************************************************************************/
62#include <linux/types.h>
63#include <linux/slab.h>
64#include <linux/export.h>
65
66#include "iwl-debug.h"
67#include "iwl-eeprom-read.h"
68#include "iwl-io.h"
69#include "iwl-prph.h"
70#include "iwl-csr.h"
71
72/*
73 * EEPROM access time values:
74 *
75 * Driver initiates EEPROM read by writing byte address << 1 to CSR_EEPROM_REG.
76 * Driver then polls CSR_EEPROM_REG for CSR_EEPROM_REG_READ_VALID_MSK (0x1).
77 * When polling, wait 10 uSec between polling loops, up to a maximum 5000 uSec.
78 * Driver reads 16-bit value from bits 31-16 of CSR_EEPROM_REG.
79 */
#define IWL_EEPROM_ACCESS_TIMEOUT	5000 /* uSec */

/*
 * NOTE(review): the IWL_EEPROM_SEM_* pair below is never referenced in
 * this file - the semaphore code uses the EEPROM_SEM_* copies further
 * down, whose timeout comment says "milliseconds" while this one says
 * "microseconds" for the same value.  One pair should be dropped and
 * the unit confirmed.
 */
#define IWL_EEPROM_SEM_TIMEOUT		10   /* microseconds */
#define IWL_EEPROM_SEM_RETRY_LIMIT	1000 /* number of attempts (not time) */


/*
 * The device's EEPROM semaphore prevents conflicts between driver and uCode
 * when accessing the EEPROM; each access is a series of pulses to/from the
 * EEPROM chip, not a single event, so even reads could conflict if they
 * weren't arbitrated by the semaphore.
 */

#define EEPROM_SEM_TIMEOUT 10 /* milliseconds */
#define EEPROM_SEM_RETRY_LIMIT 1000 /* number of attempts (not time) */
95
96static int iwl_eeprom_acquire_semaphore(struct iwl_trans *trans)
97{
98 u16 count;
99 int ret;
100
101 for (count = 0; count < EEPROM_SEM_RETRY_LIMIT; count++) {
102 /* Request semaphore */
103 iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
104 CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
105
106 /* See if we got it */
107 ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
108 CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
109 CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
110 EEPROM_SEM_TIMEOUT);
111 if (ret >= 0) {
112 IWL_DEBUG_EEPROM(trans->dev,
113 "Acquired semaphore after %d tries.\n",
114 count+1);
115 return ret;
116 }
117 }
118
119 return ret;
120}
121
/* Give the EEPROM back to the uCode by clearing our ownership bit. */
static void iwl_eeprom_release_semaphore(struct iwl_trans *trans)
{
	iwl_clear_bit(trans, CSR_HW_IF_CONFIG_REG,
		      CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
}
127
128static int iwl_eeprom_verify_signature(struct iwl_trans *trans, bool nvm_is_otp)
129{
130 u32 gp = iwl_read32(trans, CSR_EEPROM_GP) & CSR_EEPROM_GP_VALID_MSK;
131
132 IWL_DEBUG_EEPROM(trans->dev, "EEPROM signature=0x%08x\n", gp);
133
134 switch (gp) {
135 case CSR_EEPROM_GP_BAD_SIG_EEP_GOOD_SIG_OTP:
136 if (!nvm_is_otp) {
137 IWL_ERR(trans, "EEPROM with bad signature: 0x%08x\n",
138 gp);
139 return -ENOENT;
140 }
141 return 0;
142 case CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K:
143 case CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K:
144 if (nvm_is_otp) {
145 IWL_ERR(trans, "OTP with bad signature: 0x%08x\n", gp);
146 return -ENOENT;
147 }
148 return 0;
149 case CSR_EEPROM_GP_BAD_SIGNATURE_BOTH_EEP_AND_OTP:
150 default:
151 IWL_ERR(trans,
152 "bad EEPROM/OTP signature, type=%s, EEPROM_GP=0x%08x\n",
153 nvm_is_otp ? "OTP" : "EEPROM", gp);
154 return -ENOENT;
155 }
156}
157
158/******************************************************************************
159 *
160 * OTP related functions
161 *
162******************************************************************************/
163
/* Switch OTP addressing to absolute mode for link-list traversal. */
static void iwl_set_otp_access_absolute(struct iwl_trans *trans)
{
	/*
	 * dummy read of the register before changing the mode;
	 * NOTE(review): result deliberately discarded - presumably a
	 * required hardware access quirk, confirm against the datasheet
	 */
	iwl_read32(trans, CSR_OTP_GP_REG);

	/* clearing the mode bit selects absolute (not relative) access */
	iwl_clear_bit(trans, CSR_OTP_GP_REG,
		      CSR_OTP_GP_REG_OTP_ACCESS_MODE);
}
171
172static int iwl_nvm_is_otp(struct iwl_trans *trans)
173{
174 u32 otpgp;
175
176 /* OTP only valid for CP/PP and after */
177 switch (trans->hw_rev & CSR_HW_REV_TYPE_MSK) {
178 case CSR_HW_REV_TYPE_NONE:
179 IWL_ERR(trans, "Unknown hardware type\n");
180 return -EIO;
181 case CSR_HW_REV_TYPE_5300:
182 case CSR_HW_REV_TYPE_5350:
183 case CSR_HW_REV_TYPE_5100:
184 case CSR_HW_REV_TYPE_5150:
185 return 0;
186 default:
187 otpgp = iwl_read32(trans, CSR_OTP_GP_REG);
188 if (otpgp & CSR_OTP_GP_REG_DEVICE_SELECT)
189 return 1;
190 return 0;
191 }
192}
193
194static int iwl_init_otp_access(struct iwl_trans *trans)
195{
196 int ret;
197
198 /* Enable 40MHz radio clock */
199 iwl_write32(trans, CSR_GP_CNTRL,
200 iwl_read32(trans, CSR_GP_CNTRL) |
201 CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
202
203 /* wait for clock to be ready */
204 ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
205 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
206 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
207 25000);
208 if (ret < 0) {
209 IWL_ERR(trans, "Time out access OTP\n");
210 } else {
211 iwl_set_bits_prph(trans, APMG_PS_CTRL_REG,
212 APMG_PS_CTRL_VAL_RESET_REQ);
213 udelay(5);
214 iwl_clear_bits_prph(trans, APMG_PS_CTRL_REG,
215 APMG_PS_CTRL_VAL_RESET_REQ);
216
217 /*
218 * CSR auto clock gate disable bit -
219 * this is only applicable for HW with OTP shadow RAM
220 */
221 if (trans->cfg->base_params->shadow_ram_support)
222 iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
223 CSR_RESET_LINK_PWR_MGMT_DISABLED);
224 }
225 return ret;
226}
227
228static int iwl_read_otp_word(struct iwl_trans *trans, u16 addr,
229 __le16 *eeprom_data)
230{
231 int ret = 0;
232 u32 r;
233 u32 otpgp;
234
235 iwl_write32(trans, CSR_EEPROM_REG,
236 CSR_EEPROM_REG_MSK_ADDR & (addr << 1));
237 ret = iwl_poll_bit(trans, CSR_EEPROM_REG,
238 CSR_EEPROM_REG_READ_VALID_MSK,
239 CSR_EEPROM_REG_READ_VALID_MSK,
240 IWL_EEPROM_ACCESS_TIMEOUT);
241 if (ret < 0) {
242 IWL_ERR(trans, "Time out reading OTP[%d]\n", addr);
243 return ret;
244 }
245 r = iwl_read32(trans, CSR_EEPROM_REG);
246 /* check for ECC errors: */
247 otpgp = iwl_read32(trans, CSR_OTP_GP_REG);
248 if (otpgp & CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK) {
249 /* stop in this case */
250 /* set the uncorrectable OTP ECC bit for acknowledgement */
251 iwl_set_bit(trans, CSR_OTP_GP_REG,
252 CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK);
253 IWL_ERR(trans, "Uncorrectable OTP ECC error, abort OTP read\n");
254 return -EINVAL;
255 }
256 if (otpgp & CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK) {
257 /* continue in this case */
258 /* set the correctable OTP ECC bit for acknowledgement */
259 iwl_set_bit(trans, CSR_OTP_GP_REG,
260 CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK);
261 IWL_ERR(trans, "Correctable OTP ECC error, continue read\n");
262 }
263 *eeprom_data = cpu_to_le16(r >> 16);
264 return 0;
265}
266
267/*
268 * iwl_is_otp_empty: check for empty OTP
269 */
270static bool iwl_is_otp_empty(struct iwl_trans *trans)
271{
272 u16 next_link_addr = 0;
273 __le16 link_value;
274 bool is_empty = false;
275
276 /* locate the beginning of OTP link list */
277 if (!iwl_read_otp_word(trans, next_link_addr, &link_value)) {
278 if (!link_value) {
279 IWL_ERR(trans, "OTP is empty\n");
280 is_empty = true;
281 }
282 } else {
283 IWL_ERR(trans, "Unable to read first block of OTP list.\n");
284 is_empty = true;
285 }
286
287 return is_empty;
288}
289
290
291/*
292 * iwl_find_otp_image: find EEPROM image in OTP
293 * finding the OTP block that contains the EEPROM image.
294 * the last valid block on the link list (the block _before_ the last block)
295 * is the block we should read and used to configure the device.
296 * If all the available OTP blocks are full, the last block will be the block
297 * we should read and used to configure the device.
298 * only perform this operation if shadow RAM is disabled
299 */
/*
 * Walk the OTP block link list and return (via @validblockaddr) the
 * start of the EEPROM image to use.  Each block begins with a 16-bit
 * link word pointing at the next block; the last block with a zero
 * link word is the one we configure the device from.  Returns 0 on
 * success, -EINVAL when the OTP is empty, unreadable, or the list is
 * longer than max_ll_items.
 */
static int iwl_find_otp_image(struct iwl_trans *trans,
					u16 *validblockaddr)
{
	u16 next_link_addr = 0, valid_addr;
	/* seeded to 0 so the first loop pass (re-)reads the link word at
	 * address 0, which iwl_is_otp_empty() has just validated */
	__le16 link_value = 0;
	int usedblocks = 0;

	/* set addressing mode to absolute to traverse the link list */
	iwl_set_otp_access_absolute(trans);

	/* checking for empty OTP or error */
	if (iwl_is_otp_empty(trans))
		return -EINVAL;

	/*
	 * start traverse link list
	 * until reach the max number of OTP blocks
	 * different devices have different number of OTP blocks
	 */
	do {
		/* save current valid block address
		 * check for more block on the link list
		 */
		valid_addr = next_link_addr;
		/* the link word holds a word index; convert to bytes */
		next_link_addr = le16_to_cpu(link_value) * sizeof(u16);
		IWL_DEBUG_EEPROM(trans->dev, "OTP blocks %d addr 0x%x\n",
				 usedblocks, next_link_addr);
		if (iwl_read_otp_word(trans, next_link_addr, &link_value))
			return -EINVAL;
		if (!link_value) {
			/*
			 * reach the end of link list, return success and
			 * set address point to the starting address
			 * of the image
			 */
			*validblockaddr = valid_addr;
			/* skip first 2 bytes (link list pointer) */
			*validblockaddr += 2;
			return 0;
		}
		/* more in the link list, continue */
		usedblocks++;
	} while (usedblocks <= trans->cfg->base_params->max_ll_items);

	/* OTP has no valid blocks */
	IWL_DEBUG_EEPROM(trans->dev, "OTP has no valid blocks\n");
	return -EINVAL;
}
348
349/**
350 * iwl_read_eeprom - read EEPROM contents
351 *
352 * Load the EEPROM contents from adapter and return it
353 * and its size.
354 *
355 * NOTE: This routine uses the non-debug IO access functions.
356 */
357int iwl_read_eeprom(struct iwl_trans *trans, u8 **eeprom, size_t *eeprom_size)
358{
359 __le16 *e;
360 u32 gp = iwl_read32(trans, CSR_EEPROM_GP);
361 int sz;
362 int ret;
363 u16 addr;
364 u16 validblockaddr = 0;
365 u16 cache_addr = 0;
366 int nvm_is_otp;
367
368 if (!eeprom || !eeprom_size)
369 return -EINVAL;
370
371 nvm_is_otp = iwl_nvm_is_otp(trans);
372 if (nvm_is_otp < 0)
373 return nvm_is_otp;
374
375 sz = trans->cfg->base_params->eeprom_size;
376 IWL_DEBUG_EEPROM(trans->dev, "NVM size = %d\n", sz);
377
378 e = kmalloc(sz, GFP_KERNEL);
379 if (!e)
380 return -ENOMEM;
381
382 ret = iwl_eeprom_verify_signature(trans, nvm_is_otp);
383 if (ret < 0) {
384 IWL_ERR(trans, "EEPROM not found, EEPROM_GP=0x%08x\n", gp);
385 goto err_free;
386 }
387
388 /* Make sure driver (instead of uCode) is allowed to read EEPROM */
389 ret = iwl_eeprom_acquire_semaphore(trans);
390 if (ret < 0) {
391 IWL_ERR(trans, "Failed to acquire EEPROM semaphore.\n");
392 goto err_free;
393 }
394
395 if (nvm_is_otp) {
396 ret = iwl_init_otp_access(trans);
397 if (ret) {
398 IWL_ERR(trans, "Failed to initialize OTP access.\n");
399 goto err_unlock;
400 }
401
402 iwl_write32(trans, CSR_EEPROM_GP,
403 iwl_read32(trans, CSR_EEPROM_GP) &
404 ~CSR_EEPROM_GP_IF_OWNER_MSK);
405
406 iwl_set_bit(trans, CSR_OTP_GP_REG,
407 CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK |
408 CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK);
409 /* traversing the linked list if no shadow ram supported */
410 if (!trans->cfg->base_params->shadow_ram_support) {
411 ret = iwl_find_otp_image(trans, &validblockaddr);
412 if (ret)
413 goto err_unlock;
414 }
415 for (addr = validblockaddr; addr < validblockaddr + sz;
416 addr += sizeof(u16)) {
417 __le16 eeprom_data;
418
419 ret = iwl_read_otp_word(trans, addr, &eeprom_data);
420 if (ret)
421 goto err_unlock;
422 e[cache_addr / 2] = eeprom_data;
423 cache_addr += sizeof(u16);
424 }
425 } else {
426 /* eeprom is an array of 16bit values */
427 for (addr = 0; addr < sz; addr += sizeof(u16)) {
428 u32 r;
429
430 iwl_write32(trans, CSR_EEPROM_REG,
431 CSR_EEPROM_REG_MSK_ADDR & (addr << 1));
432
433 ret = iwl_poll_bit(trans, CSR_EEPROM_REG,
434 CSR_EEPROM_REG_READ_VALID_MSK,
435 CSR_EEPROM_REG_READ_VALID_MSK,
436 IWL_EEPROM_ACCESS_TIMEOUT);
437 if (ret < 0) {
438 IWL_ERR(trans,
439 "Time out reading EEPROM[%d]\n", addr);
440 goto err_unlock;
441 }
442 r = iwl_read32(trans, CSR_EEPROM_REG);
443 e[addr / 2] = cpu_to_le16(r >> 16);
444 }
445 }
446
447 IWL_DEBUG_EEPROM(trans->dev, "NVM Type: %s\n",
448 nvm_is_otp ? "OTP" : "EEPROM");
449
450 iwl_eeprom_release_semaphore(trans);
451
452 *eeprom_size = sz;
453 *eeprom = (u8 *)e;
454 return 0;
455
456 err_unlock:
457 iwl_eeprom_release_semaphore(trans);
458 err_free:
459 kfree(e);
460
461 return ret;
462}
463EXPORT_SYMBOL_GPL(iwl_read_eeprom);
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom-read.h b/drivers/net/wireless/iwlwifi/iwl-eeprom-read.h
new file mode 100644
index 00000000000..1337c9d36fe
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom-read.h
@@ -0,0 +1,70 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *****************************************************************************/
62
63#ifndef __iwl_eeprom_h__
64#define __iwl_eeprom_h__
65
66#include "iwl-trans.h"
67
68int iwl_read_eeprom(struct iwl_trans *trans, u8 **eeprom, size_t *eeprom_size);
69
70#endif /* __iwl_eeprom_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.c b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
deleted file mode 100644
index b8e2b223ac3..00000000000
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.c
+++ /dev/null
@@ -1,1148 +0,0 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *****************************************************************************/
62
63
64#include <linux/kernel.h>
65#include <linux/module.h>
66#include <linux/slab.h>
67#include <linux/init.h>
68
69#include <net/mac80211.h>
70
71#include "iwl-dev.h"
72#include "iwl-debug.h"
73#include "iwl-agn.h"
74#include "iwl-eeprom.h"
75#include "iwl-io.h"
76#include "iwl-prph.h"
77
78/************************** EEPROM BANDS ****************************
79 *
80 * The iwl_eeprom_band definitions below provide the mapping from the
81 * EEPROM contents to the specific channel number supported for each
82 * band.
83 *
84 * For example, iwl_priv->eeprom.band_3_channels[4] from the band_3
85 * definition below maps to physical channel 42 in the 5.2GHz spectrum.
86 * The specific geography and calibration information for that channel
87 * is contained in the eeprom map itself.
88 *
89 * During init, we copy the eeprom information and channel map
90 * information into priv->channel_info_24/52 and priv->channel_map_24/52
91 *
92 * channel_map_24/52 provides the index in the channel_info array for a
93 * given channel. We have to have two separate maps as there is channel
94 * overlap with the 2.4GHz and 5.2GHz spectrum as seen in band_1 and
95 * band_2
96 *
97 * A value of 0xff stored in the channel_map indicates that the channel
98 * is not supported by the hardware at all.
99 *
100 * A value of 0xfe in the channel_map indicates that the channel is not
101 * valid for Tx with the current hardware. This means that
102 * while the system can tune and receive on a given channel, it may not
103 * be able to associate or transmit any frames on that
104 * channel. There is no corresponding channel information for that
105 * entry.
106 *
107 *********************************************************************/
108
/* 2.4 GHz */
/* exported: referenced outside this file (non-static, see header) */
const u8 iwl_eeprom_band_1[14] = {
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
};

/* 5.2 GHz bands */
static const u8 iwl_eeprom_band_2[] = {	/* 4915-5080MHz */
	/* trailing 7, 8, 11, 12, 16 are the 4.9GHz public-safety channels */
	183, 184, 185, 187, 188, 189, 192, 196, 7, 8, 11, 12, 16
};

static const u8 iwl_eeprom_band_3[] = {	/* 5170-5320MHz */
	34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
};

static const u8 iwl_eeprom_band_4[] = {	/* 5500-5700MHz */
	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
};

static const u8 iwl_eeprom_band_5[] = {	/* 5725-5825MHz */
	145, 149, 153, 157, 161, 165
};

static const u8 iwl_eeprom_band_6[] = {	/* 2.4 ht40 channel */
	1, 2, 3, 4, 5, 6, 7
};

static const u8 iwl_eeprom_band_7[] = {	/* 5.2 ht40 channel */
	36, 44, 52, 60, 100, 108, 116, 124, 132, 149, 157
};
138
139/******************************************************************************
140 *
141 * generic NVM functions
142 *
143******************************************************************************/
144
145/*
146 * The device's EEPROM semaphore prevents conflicts between driver and uCode
147 * when accessing the EEPROM; each access is a series of pulses to/from the
148 * EEPROM chip, not a single event, so even reads could conflict if they
149 * weren't arbitrated by the semaphore.
150 */
151
152#define EEPROM_SEM_TIMEOUT 10 /* milliseconds */
153#define EEPROM_SEM_RETRY_LIMIT 1000 /* number of attempts (not time) */
154
/*
 * Request driver ownership of the EEPROM semaphore, retrying up to
 * EEPROM_SEM_RETRY_LIMIT times; returns the poll result (non-negative
 * on success, last negative error if all attempts timed out).
 */
static int iwl_eeprom_acquire_semaphore(struct iwl_trans *trans)
{
	u16 count;
	int ret;

	for (count = 0; count < EEPROM_SEM_RETRY_LIMIT; count++) {
		/* Request semaphore */
		iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
			    CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);

		/* See if we got it */
		ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
				   CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
				   CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
				   EEPROM_SEM_TIMEOUT);
		if (ret >= 0) {
			IWL_DEBUG_EEPROM(trans,
					 "Acquired semaphore after %d tries.\n",
					 count+1);
			return ret;
		}
	}

	return ret;
}
180
/* Give the EEPROM back to the uCode by clearing our ownership bit. */
static void iwl_eeprom_release_semaphore(struct iwl_trans *trans)
{
	iwl_clear_bit(trans, CSR_HW_IF_CONFIG_REG,
		      CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);

}
187
/*
 * Check that the NVM signature in CSR_EEPROM_GP matches the detected
 * NVM device type; returns 0 when they agree, -ENOENT otherwise.
 */
static int iwl_eeprom_verify_signature(struct iwl_priv *priv)
{
	u32 gp = iwl_read32(priv->trans, CSR_EEPROM_GP) &
		 CSR_EEPROM_GP_VALID_MSK;
	int ret = 0;

	IWL_DEBUG_EEPROM(priv, "EEPROM signature=0x%08x\n", gp);
	switch (gp) {
	case CSR_EEPROM_GP_BAD_SIG_EEP_GOOD_SIG_OTP:
		/* this signature is only valid for an OTP device */
		if (priv->nvm_device_type != NVM_DEVICE_TYPE_OTP) {
			IWL_ERR(priv, "EEPROM with bad signature: 0x%08x\n",
				gp);
			ret = -ENOENT;
		}
		break;
	case CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K:
	case CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K:
		/* these signatures are only valid for a real EEPROM */
		if (priv->nvm_device_type != NVM_DEVICE_TYPE_EEPROM) {
			IWL_ERR(priv, "OTP with bad signature: 0x%08x\n", gp);
			ret = -ENOENT;
		}
		break;
	case CSR_EEPROM_GP_BAD_SIGNATURE_BOTH_EEP_AND_OTP:
	default:
		IWL_ERR(priv, "bad EEPROM/OTP signature, type=%s, "
			"EEPROM_GP=0x%08x\n",
			(priv->nvm_device_type == NVM_DEVICE_TYPE_OTP)
			? "OTP" : "EEPROM", gp);
		ret = -ENOENT;
		break;
	}
	return ret;
}
221
222u16 iwl_eeprom_query16(struct iwl_priv *priv, size_t offset)
223{
224 if (!priv->eeprom)
225 return 0;
226 return (u16)priv->eeprom[offset] | ((u16)priv->eeprom[offset + 1] << 8);
227}
228
229int iwl_eeprom_check_version(struct iwl_priv *priv)
230{
231 u16 eeprom_ver;
232 u16 calib_ver;
233
234 eeprom_ver = iwl_eeprom_query16(priv, EEPROM_VERSION);
235 calib_ver = iwl_eeprom_calib_version(priv);
236
237 if (eeprom_ver < priv->cfg->eeprom_ver ||
238 calib_ver < priv->cfg->eeprom_calib_ver)
239 goto err;
240
241 IWL_INFO(priv, "device EEPROM VER=0x%x, CALIB=0x%x\n",
242 eeprom_ver, calib_ver);
243
244 return 0;
245err:
246 IWL_ERR(priv, "Unsupported (too old) EEPROM VER=0x%x < 0x%x "
247 "CALIB=0x%x < 0x%x\n",
248 eeprom_ver, priv->cfg->eeprom_ver,
249 calib_ver, priv->cfg->eeprom_calib_ver);
250 return -EINVAL;
251
252}
253
/*
 * Read the SKU and radio configuration words from the NVM image and
 * populate priv->hw_params.sku / valid_tx_ant / valid_rx_ant.
 *
 * Returns 0 on success, or -EINVAL when the NVM advertises an
 * impossible configuration: 11n enabled on a device without HT params,
 * an empty SKU, or no valid TX/RX antenna chains.
 */
int iwl_eeprom_init_hw_params(struct iwl_priv *priv)
{
	u16 radio_cfg;

	priv->hw_params.sku = iwl_eeprom_query16(priv, EEPROM_SKU_CAP);
	if (priv->hw_params.sku & EEPROM_SKU_CAP_11N_ENABLE &&
	    !priv->cfg->ht_params) {
		IWL_ERR(priv, "Invalid 11n configuration\n");
		return -EINVAL;
	}

	if (!priv->hw_params.sku) {
		IWL_ERR(priv, "Invalid device sku\n");
		return -EINVAL;
	}

	IWL_INFO(priv, "Device SKU: 0x%X\n", priv->hw_params.sku);

	radio_cfg = iwl_eeprom_query16(priv, EEPROM_RADIO_CONFIG);

	priv->hw_params.valid_tx_ant = EEPROM_RF_CFG_TX_ANT_MSK(radio_cfg);
	priv->hw_params.valid_rx_ant = EEPROM_RF_CFG_RX_ANT_MSK(radio_cfg);

	/* check overrides (some devices have wrong EEPROM) */
	if (priv->cfg->valid_tx_ant)
		priv->hw_params.valid_tx_ant = priv->cfg->valid_tx_ant;
	if (priv->cfg->valid_rx_ant)
		priv->hw_params.valid_rx_ant = priv->cfg->valid_rx_ant;

	if (!priv->hw_params.valid_tx_ant || !priv->hw_params.valid_rx_ant) {
		IWL_ERR(priv, "Invalid chain (0x%X, 0x%X)\n",
			priv->hw_params.valid_tx_ant,
			priv->hw_params.valid_rx_ant);
		return -EINVAL;
	}

	IWL_INFO(priv, "Valid Tx ant: 0x%X, Valid Rx ant: 0x%X\n",
		 priv->hw_params.valid_tx_ant, priv->hw_params.valid_rx_ant);

	return 0;
}
295
/*
 * Return the calibration version from the header of the EEPROM_CALIB_ALL
 * block in the cached NVM image (compared against the config minimum in
 * iwl_eeprom_check_version()).
 */
u16 iwl_eeprom_calib_version(struct iwl_priv *priv)
{
	struct iwl_eeprom_calib_hdr *hdr;

	hdr = (struct iwl_eeprom_calib_hdr *)iwl_eeprom_query_addr(priv,
							EEPROM_CALIB_ALL);
	return hdr->version;
}
304
305static u32 eeprom_indirect_address(struct iwl_priv *priv, u32 address)
306{
307 u16 offset = 0;
308
309 if ((address & INDIRECT_ADDRESS) == 0)
310 return address;
311
312 switch (address & INDIRECT_TYPE_MSK) {
313 case INDIRECT_HOST:
314 offset = iwl_eeprom_query16(priv, EEPROM_LINK_HOST);
315 break;
316 case INDIRECT_GENERAL:
317 offset = iwl_eeprom_query16(priv, EEPROM_LINK_GENERAL);
318 break;
319 case INDIRECT_REGULATORY:
320 offset = iwl_eeprom_query16(priv, EEPROM_LINK_REGULATORY);
321 break;
322 case INDIRECT_TXP_LIMIT:
323 offset = iwl_eeprom_query16(priv, EEPROM_LINK_TXP_LIMIT);
324 break;
325 case INDIRECT_TXP_LIMIT_SIZE:
326 offset = iwl_eeprom_query16(priv, EEPROM_LINK_TXP_LIMIT_SIZE);
327 break;
328 case INDIRECT_CALIBRATION:
329 offset = iwl_eeprom_query16(priv, EEPROM_LINK_CALIBRATION);
330 break;
331 case INDIRECT_PROCESS_ADJST:
332 offset = iwl_eeprom_query16(priv, EEPROM_LINK_PROCESS_ADJST);
333 break;
334 case INDIRECT_OTHERS:
335 offset = iwl_eeprom_query16(priv, EEPROM_LINK_OTHERS);
336 break;
337 default:
338 IWL_ERR(priv, "illegal indirect type: 0x%X\n",
339 address & INDIRECT_TYPE_MSK);
340 break;
341 }
342
343 /* translate the offset from words to byte */
344 return (address & ADDRESS_MSK) + (offset << 1);
345}
346
/*
 * Return a pointer into the cached NVM image for @offset, resolving
 * indirect addresses first. BUGs out if the resolved address lies
 * outside the image rather than handing back out-of-bounds memory.
 */
const u8 *iwl_eeprom_query_addr(struct iwl_priv *priv, size_t offset)
{
	u32 address = eeprom_indirect_address(priv, offset);
	BUG_ON(address >= priv->cfg->base_params->eeprom_size);
	return &priv->eeprom[address];
}
353
/*
 * Copy the device MAC address (ETH_ALEN bytes at EEPROM_MAC_ADDRESS in
 * the NVM image) into the caller-supplied buffer @mac.
 */
void iwl_eeprom_get_mac(struct iwl_priv *priv, u8 *mac)
{
	const u8 *addr = iwl_eeprom_query_addr(priv,
					EEPROM_MAC_ADDRESS);
	memcpy(mac, addr, ETH_ALEN);
}
360
361/******************************************************************************
362 *
363 * OTP related functions
364 *
365******************************************************************************/
366
367static void iwl_set_otp_access(struct iwl_trans *trans,
368 enum iwl_access_mode mode)
369{
370 iwl_read32(trans, CSR_OTP_GP_REG);
371
372 if (mode == IWL_OTP_ACCESS_ABSOLUTE)
373 iwl_clear_bit(trans, CSR_OTP_GP_REG,
374 CSR_OTP_GP_REG_OTP_ACCESS_MODE);
375 else
376 iwl_set_bit(trans, CSR_OTP_GP_REG,
377 CSR_OTP_GP_REG_OTP_ACCESS_MODE);
378}
379
380static int iwl_get_nvm_type(struct iwl_trans *trans, u32 hw_rev)
381{
382 u32 otpgp;
383 int nvm_type;
384
385 /* OTP only valid for CP/PP and after */
386 switch (hw_rev & CSR_HW_REV_TYPE_MSK) {
387 case CSR_HW_REV_TYPE_NONE:
388 IWL_ERR(trans, "Unknown hardware type\n");
389 return -ENOENT;
390 case CSR_HW_REV_TYPE_5300:
391 case CSR_HW_REV_TYPE_5350:
392 case CSR_HW_REV_TYPE_5100:
393 case CSR_HW_REV_TYPE_5150:
394 nvm_type = NVM_DEVICE_TYPE_EEPROM;
395 break;
396 default:
397 otpgp = iwl_read32(trans, CSR_OTP_GP_REG);
398 if (otpgp & CSR_OTP_GP_REG_DEVICE_SELECT)
399 nvm_type = NVM_DEVICE_TYPE_OTP;
400 else
401 nvm_type = NVM_DEVICE_TYPE_EEPROM;
402 break;
403 }
404 return nvm_type;
405}
406
/*
 * Prepare the device for OTP access: enable the 40MHz radio clock,
 * wait for it to become ready, pulse the APMG reset request, and — on
 * parts with OTP shadow RAM — disable CSR auto clock gating.
 *
 * Returns the iwl_poll_bit() result: negative if the clock never came
 * ready, non-negative on success.
 */
static int iwl_init_otp_access(struct iwl_trans *trans)
{
	int ret;

	/* Enable 40MHz radio clock */
	iwl_write32(trans, CSR_GP_CNTRL,
		    iwl_read32(trans, CSR_GP_CNTRL) |
		    CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/* wait for clock to be ready */
	ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			   25000);
	if (ret < 0)
		IWL_ERR(trans, "Time out access OTP\n");
	else {
		/* pulse the APMG reset request with a 5us hold time */
		iwl_set_bits_prph(trans, APMG_PS_CTRL_REG,
				  APMG_PS_CTRL_VAL_RESET_REQ);
		udelay(5);
		iwl_clear_bits_prph(trans, APMG_PS_CTRL_REG,
				    APMG_PS_CTRL_VAL_RESET_REQ);

		/*
		 * CSR auto clock gate disable bit -
		 * this is only applicable for HW with OTP shadow RAM
		 */
		if (trans->cfg->base_params->shadow_ram_support)
			iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
				    CSR_RESET_LINK_PWR_MGMT_DISABLED);
	}
	return ret;
}
440
/*
 * Read one 16-bit word of OTP data at @addr into @eeprom_data
 * (little-endian).
 *
 * Returns 0 on success — including after a correctable ECC error,
 * which is acknowledged and logged but does not fail the read —
 * a negative poll error on read timeout, or -EINVAL on an
 * uncorrectable ECC error (acknowledged, then aborted).
 */
static int iwl_read_otp_word(struct iwl_trans *trans, u16 addr,
			     __le16 *eeprom_data)
{
	int ret = 0;
	u32 r;
	u32 otpgp;

	iwl_write32(trans, CSR_EEPROM_REG,
		    CSR_EEPROM_REG_MSK_ADDR & (addr << 1));
	ret = iwl_poll_bit(trans, CSR_EEPROM_REG,
			   CSR_EEPROM_REG_READ_VALID_MSK,
			   CSR_EEPROM_REG_READ_VALID_MSK,
			   IWL_EEPROM_ACCESS_TIMEOUT);
	if (ret < 0) {
		IWL_ERR(trans, "Time out reading OTP[%d]\n", addr);
		return ret;
	}
	r = iwl_read32(trans, CSR_EEPROM_REG);
	/* check for ECC errors: */
	otpgp = iwl_read32(trans, CSR_OTP_GP_REG);
	if (otpgp & CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK) {
		/* stop in this case */
		/* set the uncorrectable OTP ECC bit for acknowledgement */
		iwl_set_bit(trans, CSR_OTP_GP_REG,
			    CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK);
		IWL_ERR(trans, "Uncorrectable OTP ECC error, abort OTP read\n");
		return -EINVAL;
	}
	if (otpgp & CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK) {
		/* continue in this case */
		/* set the correctable OTP ECC bit for acknowledgement */
		iwl_set_bit(trans, CSR_OTP_GP_REG,
			    CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK);
		IWL_ERR(trans, "Correctable OTP ECC error, continue read\n");
	}
	/* the requested word is delivered in the upper 16 bits */
	*eeprom_data = cpu_to_le16(r >> 16);
	return 0;
}
479
480/*
481 * iwl_is_otp_empty: check for empty OTP
482 */
483static bool iwl_is_otp_empty(struct iwl_trans *trans)
484{
485 u16 next_link_addr = 0;
486 __le16 link_value;
487 bool is_empty = false;
488
489 /* locate the beginning of OTP link list */
490 if (!iwl_read_otp_word(trans, next_link_addr, &link_value)) {
491 if (!link_value) {
492 IWL_ERR(trans, "OTP is empty\n");
493 is_empty = true;
494 }
495 } else {
496 IWL_ERR(trans, "Unable to read first block of OTP list.\n");
497 is_empty = true;
498 }
499
500 return is_empty;
501}
502
503
/*
 * iwl_find_otp_image: find EEPROM image in OTP
 * finding the OTP block that contains the EEPROM image.
 * the last valid block on the link list (the block _before_ the last block)
 * is the block we should read and used to configure the device.
 * If all the available OTP blocks are full, the last block will be the block
 * we should read and used to configure the device.
 * only perform this operation if shadow RAM is disabled
 *
 * On success returns 0 and stores the byte address of the image start
 * (just past the block's 2-byte link pointer) in *validblockaddr;
 * returns -EINVAL when the OTP is empty, unreadable, or has no valid
 * blocks within max_ll_items steps.
 */
static int iwl_find_otp_image(struct iwl_trans *trans,
					u16 *validblockaddr)
{
	u16 next_link_addr = 0, valid_addr;
	__le16 link_value = 0;
	int usedblocks = 0;

	/* set addressing mode to absolute to traverse the link list */
	iwl_set_otp_access(trans, IWL_OTP_ACCESS_ABSOLUTE);

	/* checking for empty OTP or error */
	if (iwl_is_otp_empty(trans))
		return -EINVAL;

	/*
	 * start traverse link list
	 * until reach the max number of OTP blocks
	 * different devices have different number of OTP blocks
	 */
	do {
		/* save current valid block address
		 * check for more block on the link list
		 */
		valid_addr = next_link_addr;
		/* link words count in 16-bit units; convert to bytes */
		next_link_addr = le16_to_cpu(link_value) * sizeof(u16);
		IWL_DEBUG_EEPROM(trans, "OTP blocks %d addr 0x%x\n",
				 usedblocks, next_link_addr);
		if (iwl_read_otp_word(trans, next_link_addr, &link_value))
			return -EINVAL;
		if (!link_value) {
			/*
			 * reach the end of link list, return success and
			 * set address point to the starting address
			 * of the image
			 */
			*validblockaddr = valid_addr;
			/* skip first 2 bytes (link list pointer) */
			*validblockaddr += 2;
			return 0;
		}
		/* more in the link list, continue */
		usedblocks++;
	} while (usedblocks <= trans->cfg->base_params->max_ll_items);

	/* OTP has no valid blocks */
	IWL_DEBUG_EEPROM(trans, "OTP has no valid blocks\n");
	return -EINVAL;
}
561
562/******************************************************************************
563 *
564 * Tx Power related functions
565 *
566******************************************************************************/
567/**
568 * iwl_get_max_txpower_avg - get the highest tx power from all chains.
569 * find the highest tx power from all chains for the channel
570 */
571static s8 iwl_get_max_txpower_avg(struct iwl_priv *priv,
572 struct iwl_eeprom_enhanced_txpwr *enhanced_txpower,
573 int element, s8 *max_txpower_in_half_dbm)
574{
575 s8 max_txpower_avg = 0; /* (dBm) */
576
577 /* Take the highest tx power from any valid chains */
578 if ((priv->hw_params.valid_tx_ant & ANT_A) &&
579 (enhanced_txpower[element].chain_a_max > max_txpower_avg))
580 max_txpower_avg = enhanced_txpower[element].chain_a_max;
581 if ((priv->hw_params.valid_tx_ant & ANT_B) &&
582 (enhanced_txpower[element].chain_b_max > max_txpower_avg))
583 max_txpower_avg = enhanced_txpower[element].chain_b_max;
584 if ((priv->hw_params.valid_tx_ant & ANT_C) &&
585 (enhanced_txpower[element].chain_c_max > max_txpower_avg))
586 max_txpower_avg = enhanced_txpower[element].chain_c_max;
587 if (((priv->hw_params.valid_tx_ant == ANT_AB) |
588 (priv->hw_params.valid_tx_ant == ANT_BC) |
589 (priv->hw_params.valid_tx_ant == ANT_AC)) &&
590 (enhanced_txpower[element].mimo2_max > max_txpower_avg))
591 max_txpower_avg = enhanced_txpower[element].mimo2_max;
592 if ((priv->hw_params.valid_tx_ant == ANT_ABC) &&
593 (enhanced_txpower[element].mimo3_max > max_txpower_avg))
594 max_txpower_avg = enhanced_txpower[element].mimo3_max;
595
596 /*
597 * max. tx power in EEPROM is in 1/2 dBm format
598 * convert from 1/2 dBm to dBm (round-up convert)
599 * but we also do not want to loss 1/2 dBm resolution which
600 * will impact performance
601 */
602 *max_txpower_in_half_dbm = max_txpower_avg;
603 return (max_txpower_avg & 0x01) + (max_txpower_avg >> 1);
604}
605
606static void
607iwl_eeprom_enh_txp_read_element(struct iwl_priv *priv,
608 struct iwl_eeprom_enhanced_txpwr *txp,
609 s8 max_txpower_avg)
610{
611 int ch_idx;
612 bool is_ht40 = txp->flags & IWL_EEPROM_ENH_TXP_FL_40MHZ;
613 enum ieee80211_band band;
614
615 band = txp->flags & IWL_EEPROM_ENH_TXP_FL_BAND_52G ?
616 IEEE80211_BAND_5GHZ : IEEE80211_BAND_2GHZ;
617
618 for (ch_idx = 0; ch_idx < priv->channel_count; ch_idx++) {
619 struct iwl_channel_info *ch_info = &priv->channel_info[ch_idx];
620
621 /* update matching channel or from common data only */
622 if (txp->channel != 0 && ch_info->channel != txp->channel)
623 continue;
624
625 /* update matching band only */
626 if (band != ch_info->band)
627 continue;
628
629 if (ch_info->max_power_avg < max_txpower_avg && !is_ht40) {
630 ch_info->max_power_avg = max_txpower_avg;
631 ch_info->curr_txpow = max_txpower_avg;
632 ch_info->scan_power = max_txpower_avg;
633 }
634
635 if (is_ht40 && ch_info->ht40_max_power_avg < max_txpower_avg)
636 ch_info->ht40_max_power_avg = max_txpower_avg;
637 }
638}
639
/* indirect EEPROM locations of the enhanced tx power table and of its
 * size word (the size is stored in 16-bit words) */
#define EEPROM_TXP_OFFS (0x00 | INDIRECT_ADDRESS | INDIRECT_TXP_LIMIT)
#define EEPROM_TXP_ENTRY_LEN sizeof(struct iwl_eeprom_enhanced_txpwr)
#define EEPROM_TXP_SZ_OFFS (0x00 | INDIRECT_ADDRESS | INDIRECT_TXP_LIMIT_SIZE)

/* expands to "<flag name> " when the flag is set in txp->flags, "" if not */
#define TXP_CHECK_AND_PRINT(x) ((txp->flags & IWL_EEPROM_ENH_TXP_FL_##x) \
			    ? # x " " : "")

/*
 * Walk the enhanced tx power table in the NVM image, raising the
 * driver's user tx power limits and the per-channel limits to the
 * highest value any valid entry allows. Entries without the VALID flag
 * are skipped.
 */
static void iwl_eeprom_enhanced_txpower(struct iwl_priv *priv)
{
	struct iwl_eeprom_enhanced_txpwr *txp_array, *txp;
	int idx, entries;
	__le16 *txp_len;
	s8 max_txp_avg, max_txp_avg_halfdbm;

	/* the parsing below assumes the 8-byte on-NVM entry layout */
	BUILD_BUG_ON(sizeof(struct iwl_eeprom_enhanced_txpwr) != 8);

	/* the length is in 16-bit words, but we want entries */
	txp_len = (__le16 *) iwl_eeprom_query_addr(priv, EEPROM_TXP_SZ_OFFS);
	entries = le16_to_cpup(txp_len) * 2 / EEPROM_TXP_ENTRY_LEN;

	txp_array = (void *) iwl_eeprom_query_addr(priv, EEPROM_TXP_OFFS);

	for (idx = 0; idx < entries; idx++) {
		txp = &txp_array[idx];
		/* skip invalid entries */
		if (!(txp->flags & IWL_EEPROM_ENH_TXP_FL_VALID))
			continue;

		IWL_DEBUG_EEPROM(priv, "%s %d:\t %s%s%s%s%s%s%s%s (0x%02x)\n",
				 (txp->channel && (txp->flags &
					IWL_EEPROM_ENH_TXP_FL_COMMON_TYPE)) ?
					"Common " : (txp->channel) ?
					"Channel" : "Common",
				 (txp->channel),
				 TXP_CHECK_AND_PRINT(VALID),
				 TXP_CHECK_AND_PRINT(BAND_52G),
				 TXP_CHECK_AND_PRINT(OFDM),
				 TXP_CHECK_AND_PRINT(40MHZ),
				 TXP_CHECK_AND_PRINT(HT_AP),
				 TXP_CHECK_AND_PRINT(RES1),
				 TXP_CHECK_AND_PRINT(RES2),
				 TXP_CHECK_AND_PRINT(COMMON_TYPE),
				 txp->flags);
		IWL_DEBUG_EEPROM(priv, "\t\t chain_A: 0x%02x "
				 "chain_B: 0X%02x chain_C: 0X%02x\n",
				 txp->chain_a_max, txp->chain_b_max,
				 txp->chain_c_max);
		IWL_DEBUG_EEPROM(priv, "\t\t MIMO2: 0x%02x "
				 "MIMO3: 0x%02x High 20_on_40: 0x%02x "
				 "Low 20_on_40: 0x%02x\n",
				 txp->mimo2_max, txp->mimo3_max,
				 ((txp->delta_20_in_40 & 0xf0) >> 4),
				 (txp->delta_20_in_40 & 0x0f));

		max_txp_avg = iwl_get_max_txpower_avg(priv, txp_array, idx,
						      &max_txp_avg_halfdbm);

		/*
		 * Update the user limit values to the highest
		 * power supported by any channel
		 */
		if (max_txp_avg > priv->tx_power_user_lmt)
			priv->tx_power_user_lmt = max_txp_avg;
		if (max_txp_avg_halfdbm > priv->tx_power_lmt_in_half_dbm)
			priv->tx_power_lmt_in_half_dbm = max_txp_avg_halfdbm;

		iwl_eeprom_enh_txp_read_element(priv, txp, max_txp_avg);
	}
}
709
/**
 * iwl_eeprom_init - read EEPROM contents
 *
 * Load the EEPROM contents from adapter into priv->eeprom
 *
 * Detects the NVM flavor (EEPROM vs. OTP), allocates the cache,
 * verifies the signature, takes the HW arbitration semaphore, then
 * copies the full image — traversing the OTP link list first when
 * shadow RAM is not supported. On any failure the cache is freed.
 *
 * NOTE: This routine uses the non-debug IO access functions.
 */
int iwl_eeprom_init(struct iwl_priv *priv, u32 hw_rev)
{
	__le16 *e;
	u32 gp = iwl_read32(priv->trans, CSR_EEPROM_GP);
	int sz;
	int ret;
	u16 addr;
	u16 validblockaddr = 0;
	u16 cache_addr = 0;

	priv->nvm_device_type = iwl_get_nvm_type(priv->trans, hw_rev);
	if (priv->nvm_device_type == -ENOENT)
		return -ENOENT;
	/* allocate eeprom */
	sz = priv->cfg->base_params->eeprom_size;
	IWL_DEBUG_EEPROM(priv, "NVM size = %d\n", sz);
	priv->eeprom = kzalloc(sz, GFP_KERNEL);
	if (!priv->eeprom) {
		ret = -ENOMEM;
		goto alloc_err;
	}
	/* the cached image is accessed as an array of LE 16-bit words */
	e = (__le16 *)priv->eeprom;

	ret = iwl_eeprom_verify_signature(priv);
	if (ret < 0) {
		IWL_ERR(priv, "EEPROM not found, EEPROM_GP=0x%08x\n", gp);
		ret = -ENOENT;
		goto err;
	}

	/* Make sure driver (instead of uCode) is allowed to read EEPROM */
	ret = iwl_eeprom_acquire_semaphore(priv->trans);
	if (ret < 0) {
		IWL_ERR(priv, "Failed to acquire EEPROM semaphore.\n");
		ret = -ENOENT;
		goto err;
	}

	if (priv->nvm_device_type == NVM_DEVICE_TYPE_OTP) {

		ret = iwl_init_otp_access(priv->trans);
		if (ret) {
			IWL_ERR(priv, "Failed to initialize OTP access.\n");
			ret = -ENOENT;
			goto done;
		}
		iwl_write32(priv->trans, CSR_EEPROM_GP,
			    iwl_read32(priv->trans, CSR_EEPROM_GP) &
			    ~CSR_EEPROM_GP_IF_OWNER_MSK);

		/* acknowledge any stale ECC status before reading */
		iwl_set_bit(priv->trans, CSR_OTP_GP_REG,
			    CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK |
			    CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK);
		/* traversing the linked list if no shadow ram supported */
		if (!priv->cfg->base_params->shadow_ram_support) {
			if (iwl_find_otp_image(priv->trans, &validblockaddr)) {
				ret = -ENOENT;
				goto done;
			}
		}
		for (addr = validblockaddr; addr < validblockaddr + sz;
		     addr += sizeof(u16)) {
			__le16 eeprom_data;

			ret = iwl_read_otp_word(priv->trans, addr,
						&eeprom_data);
			if (ret)
				goto done;
			e[cache_addr / 2] = eeprom_data;
			cache_addr += sizeof(u16);
		}
	} else {
		/* eeprom is an array of 16bit values */
		for (addr = 0; addr < sz; addr += sizeof(u16)) {
			u32 r;

			iwl_write32(priv->trans, CSR_EEPROM_REG,
				    CSR_EEPROM_REG_MSK_ADDR & (addr << 1));

			ret = iwl_poll_bit(priv->trans, CSR_EEPROM_REG,
					   CSR_EEPROM_REG_READ_VALID_MSK,
					   CSR_EEPROM_REG_READ_VALID_MSK,
					   IWL_EEPROM_ACCESS_TIMEOUT);
			if (ret < 0) {
				IWL_ERR(priv,
					"Time out reading EEPROM[%d]\n", addr);
				goto done;
			}
			/* the requested word arrives in the upper 16 bits */
			r = iwl_read32(priv->trans, CSR_EEPROM_REG);
			e[addr / 2] = cpu_to_le16(r >> 16);
		}
	}

	IWL_DEBUG_EEPROM(priv, "NVM Type: %s, version: 0x%x\n",
			 (priv->nvm_device_type == NVM_DEVICE_TYPE_OTP)
			 ? "OTP" : "EEPROM",
			 iwl_eeprom_query16(priv, EEPROM_VERSION));

	ret = 0;
done:
	iwl_eeprom_release_semaphore(priv->trans);

err:
	if (ret)
		iwl_eeprom_free(priv);
alloc_err:
	return ret;
}
825
/*
 * Free the cached NVM image allocated by iwl_eeprom_init() and NULL the
 * pointer so iwl_eeprom_query16() safely returns 0 afterwards.
 */
void iwl_eeprom_free(struct iwl_priv *priv)
{
	kfree(priv->eeprom);
	priv->eeprom = NULL;
}
831
832static void iwl_init_band_reference(struct iwl_priv *priv,
833 int eep_band, int *eeprom_ch_count,
834 const struct iwl_eeprom_channel **eeprom_ch_info,
835 const u8 **eeprom_ch_index)
836{
837 u32 offset = priv->lib->
838 eeprom_ops.regulatory_bands[eep_band - 1];
839 switch (eep_band) {
840 case 1: /* 2.4GHz band */
841 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_1);
842 *eeprom_ch_info = (struct iwl_eeprom_channel *)
843 iwl_eeprom_query_addr(priv, offset);
844 *eeprom_ch_index = iwl_eeprom_band_1;
845 break;
846 case 2: /* 4.9GHz band */
847 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_2);
848 *eeprom_ch_info = (struct iwl_eeprom_channel *)
849 iwl_eeprom_query_addr(priv, offset);
850 *eeprom_ch_index = iwl_eeprom_band_2;
851 break;
852 case 3: /* 5.2GHz band */
853 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_3);
854 *eeprom_ch_info = (struct iwl_eeprom_channel *)
855 iwl_eeprom_query_addr(priv, offset);
856 *eeprom_ch_index = iwl_eeprom_band_3;
857 break;
858 case 4: /* 5.5GHz band */
859 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_4);
860 *eeprom_ch_info = (struct iwl_eeprom_channel *)
861 iwl_eeprom_query_addr(priv, offset);
862 *eeprom_ch_index = iwl_eeprom_band_4;
863 break;
864 case 5: /* 5.7GHz band */
865 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_5);
866 *eeprom_ch_info = (struct iwl_eeprom_channel *)
867 iwl_eeprom_query_addr(priv, offset);
868 *eeprom_ch_index = iwl_eeprom_band_5;
869 break;
870 case 6: /* 2.4GHz ht40 channels */
871 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_6);
872 *eeprom_ch_info = (struct iwl_eeprom_channel *)
873 iwl_eeprom_query_addr(priv, offset);
874 *eeprom_ch_index = iwl_eeprom_band_6;
875 break;
876 case 7: /* 5 GHz ht40 channels */
877 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_7);
878 *eeprom_ch_info = (struct iwl_eeprom_channel *)
879 iwl_eeprom_query_addr(priv, offset);
880 *eeprom_ch_index = iwl_eeprom_band_7;
881 break;
882 default:
883 BUG();
884 return;
885 }
886}
887
/* expands to "<flag name> " when the flag is set in eeprom_ch->flags */
#define CHECK_AND_PRINT(x) ((eeprom_ch->flags & EEPROM_CHANNEL_##x) \
			    ? # x " " : "")
/**
 * iwl_mod_ht40_chan_info - Copy ht40 channel info into driver's priv.
 *
 * Does not set up a command, or touch hardware.
 *
 * Stores the HT40 regulatory data for @channel and, when the EEPROM
 * marks the channel valid, clears @clear_ht40_extension_channel from
 * its "no HT40" restriction mask. Returns 0 on success, -1 when the
 * channel is unknown or invalid.
 */
static int iwl_mod_ht40_chan_info(struct iwl_priv *priv,
			      enum ieee80211_band band, u16 channel,
			      const struct iwl_eeprom_channel *eeprom_ch,
			      u8 clear_ht40_extension_channel)
{
	struct iwl_channel_info *ch_info;

	/* cast away const: we are updating the driver's own table */
	ch_info = (struct iwl_channel_info *)
			iwl_get_channel_info(priv, band, channel);

	if (!is_channel_valid(ch_info))
		return -1;

	IWL_DEBUG_EEPROM(priv, "HT40 Ch. %d [%sGHz] %s%s%s%s%s(0x%02x %ddBm):"
			" Ad-Hoc %ssupported\n",
			ch_info->channel,
			is_channel_a_band(ch_info) ?
			"5.2" : "2.4",
			CHECK_AND_PRINT(IBSS),
			CHECK_AND_PRINT(ACTIVE),
			CHECK_AND_PRINT(RADAR),
			CHECK_AND_PRINT(WIDE),
			CHECK_AND_PRINT(DFS),
			eeprom_ch->flags,
			eeprom_ch->max_power_avg,
			((eeprom_ch->flags & EEPROM_CHANNEL_IBSS)
			 && !(eeprom_ch->flags & EEPROM_CHANNEL_RADAR)) ?
			"" : "not ");

	ch_info->ht40_eeprom = *eeprom_ch;
	ch_info->ht40_max_power_avg = eeprom_ch->max_power_avg;
	ch_info->ht40_flags = eeprom_ch->flags;
	if (eeprom_ch->flags & EEPROM_CHANNEL_VALID)
		ch_info->ht40_extension_channel &= ~clear_ht40_extension_channel;

	return 0;
}
932
/* expands to "<flag name> " when the flag is set for channel index ch */
#define CHECK_AND_PRINT_I(x) ((eeprom_ch_info[ch].flags & EEPROM_CHANNEL_##x) \
			    ? # x " " : "")

/**
 * iwl_init_channel_map - Set up driver's info for all possible channels
 *
 * Allocates priv->channel_info and fills it from the five regulatory
 * bands in the EEPROM, then folds in the two HT40 bands (6 and 7) and,
 * on devices that have one, the enhanced tx power table. Idempotent:
 * returns 0 immediately if the map was already built.
 */
int iwl_init_channel_map(struct iwl_priv *priv)
{
	int eeprom_ch_count = 0;
	const u8 *eeprom_ch_index = NULL;
	const struct iwl_eeprom_channel *eeprom_ch_info = NULL;
	int band, ch;
	struct iwl_channel_info *ch_info;

	if (priv->channel_count) {
		IWL_DEBUG_EEPROM(priv, "Channel map already initialized.\n");
		return 0;
	}

	IWL_DEBUG_EEPROM(priv, "Initializing regulatory info from EEPROM\n");

	priv->channel_count =
		ARRAY_SIZE(iwl_eeprom_band_1) +
		ARRAY_SIZE(iwl_eeprom_band_2) +
		ARRAY_SIZE(iwl_eeprom_band_3) +
		ARRAY_SIZE(iwl_eeprom_band_4) +
		ARRAY_SIZE(iwl_eeprom_band_5);

	IWL_DEBUG_EEPROM(priv, "Parsing data for %d channels.\n",
			 priv->channel_count);

	priv->channel_info = kcalloc(priv->channel_count,
				     sizeof(struct iwl_channel_info),
				     GFP_KERNEL);
	if (!priv->channel_info) {
		IWL_ERR(priv, "Could not allocate channel_info\n");
		priv->channel_count = 0;
		return -ENOMEM;
	}

	ch_info = priv->channel_info;

	/* Loop through the 5 EEPROM bands adding them in order to the
	 * channel map we maintain (which contains additional information
	 * beyond what is in the EEPROM itself) */
	for (band = 1; band <= 5; band++) {

		iwl_init_band_reference(priv, band, &eeprom_ch_count,
					&eeprom_ch_info, &eeprom_ch_index);

		/* Loop through each band adding each of the channels */
		for (ch = 0; ch < eeprom_ch_count; ch++) {
			ch_info->channel = eeprom_ch_index[ch];
			ch_info->band = (band == 1) ? IEEE80211_BAND_2GHZ :
			    IEEE80211_BAND_5GHZ;

			/* permanently store EEPROM's channel regulatory flags
			 * and max power in channel info database. */
			ch_info->eeprom = eeprom_ch_info[ch];

			/* Copy the run-time flags so they are there even on
			 * invalid channels */
			ch_info->flags = eeprom_ch_info[ch].flags;
			/* First write that ht40 is not enabled, and then enable
			 * one by one */
			ch_info->ht40_extension_channel =
					IEEE80211_CHAN_NO_HT40;

			if (!(is_channel_valid(ch_info))) {
				IWL_DEBUG_EEPROM(priv,
					 "Ch. %d Flags %x [%sGHz] - "
					 "No traffic\n",
					 ch_info->channel,
					 ch_info->flags,
					 is_channel_a_band(ch_info) ?
					 "5.2" : "2.4");
				ch_info++;
				continue;
			}

			/* Initialize regulatory-based run-time data */
			ch_info->max_power_avg = ch_info->curr_txpow =
			    eeprom_ch_info[ch].max_power_avg;
			ch_info->scan_power = eeprom_ch_info[ch].max_power_avg;
			ch_info->min_power = 0;

			IWL_DEBUG_EEPROM(priv, "Ch. %d [%sGHz] "
				       "%s%s%s%s%s%s(0x%02x %ddBm):"
				       " Ad-Hoc %ssupported\n",
				       ch_info->channel,
				       is_channel_a_band(ch_info) ?
				       "5.2" : "2.4",
				       CHECK_AND_PRINT_I(VALID),
				       CHECK_AND_PRINT_I(IBSS),
				       CHECK_AND_PRINT_I(ACTIVE),
				       CHECK_AND_PRINT_I(RADAR),
				       CHECK_AND_PRINT_I(WIDE),
				       CHECK_AND_PRINT_I(DFS),
				       eeprom_ch_info[ch].flags,
				       eeprom_ch_info[ch].max_power_avg,
				       ((eeprom_ch_info[ch].
					 flags & EEPROM_CHANNEL_IBSS)
					&& !(eeprom_ch_info[ch].
					     flags & EEPROM_CHANNEL_RADAR))
				       ? "" : "not ");

			ch_info++;
		}
	}

	/* Check if we do have HT40 channels */
	if (priv->lib->eeprom_ops.regulatory_bands[5] ==
	    EEPROM_REGULATORY_BAND_NO_HT40 &&
	    priv->lib->eeprom_ops.regulatory_bands[6] ==
	    EEPROM_REGULATORY_BAND_NO_HT40)
		return 0;

	/* Two additional EEPROM bands for 2.4 and 5 GHz HT40 channels */
	for (band = 6; band <= 7; band++) {
		enum ieee80211_band ieeeband;

		iwl_init_band_reference(priv, band, &eeprom_ch_count,
					&eeprom_ch_info, &eeprom_ch_index);

		/* EEPROM band 6 is 2.4, band 7 is 5 GHz */
		ieeeband =
			(band == 6) ? IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;

		/* Loop through each band adding each of the channels */
		for (ch = 0; ch < eeprom_ch_count; ch++) {
			/* Set up driver's info for lower half */
			iwl_mod_ht40_chan_info(priv, ieeeband,
						eeprom_ch_index[ch],
						&eeprom_ch_info[ch],
						IEEE80211_CHAN_NO_HT40PLUS);

			/* Set up driver's info for upper half */
			iwl_mod_ht40_chan_info(priv, ieeeband,
						eeprom_ch_index[ch] + 4,
						&eeprom_ch_info[ch],
						IEEE80211_CHAN_NO_HT40MINUS);
		}
	}

	/* for newer devices (6000 series and up) the EEPROM contains
	 * enhanced tx power information that the driver must process
	 * to determine the max channel tx power limits */
	if (priv->lib->eeprom_ops.enhanced_txpower)
		iwl_eeprom_enhanced_txpower(priv);

	return 0;
}
1087
1088/*
1089 * iwl_free_channel_map - undo allocations in iwl_init_channel_map
1090 */
1091void iwl_free_channel_map(struct iwl_priv *priv)
1092{
1093 kfree(priv->channel_info);
1094 priv->channel_count = 0;
1095}
1096
1097/**
1098 * iwl_get_channel_info - Find driver's private channel info
1099 *
1100 * Based on band and channel number.
1101 */
1102const struct iwl_channel_info *iwl_get_channel_info(const struct iwl_priv *priv,
1103 enum ieee80211_band band, u16 channel)
1104{
1105 int i;
1106
1107 switch (band) {
1108 case IEEE80211_BAND_5GHZ:
1109 for (i = 14; i < priv->channel_count; i++) {
1110 if (priv->channel_info[i].channel == channel)
1111 return &priv->channel_info[i];
1112 }
1113 break;
1114 case IEEE80211_BAND_2GHZ:
1115 if (channel >= 1 && channel <= 14)
1116 return &priv->channel_info[channel - 1];
1117 break;
1118 default:
1119 BUG();
1120 }
1121
1122 return NULL;
1123}
1124
1125void iwl_rf_config(struct iwl_priv *priv)
1126{
1127 u16 radio_cfg;
1128
1129 radio_cfg = iwl_eeprom_query16(priv, EEPROM_RADIO_CONFIG);
1130
1131 /* write radio config values to register */
1132 if (EEPROM_RF_CFG_TYPE_MSK(radio_cfg) <= EEPROM_RF_CONFIG_TYPE_MAX) {
1133 iwl_set_bit(priv->trans, CSR_HW_IF_CONFIG_REG,
1134 EEPROM_RF_CFG_TYPE_MSK(radio_cfg) |
1135 EEPROM_RF_CFG_STEP_MSK(radio_cfg) |
1136 EEPROM_RF_CFG_DASH_MSK(radio_cfg));
1137 IWL_INFO(priv, "Radio type=0x%x-0x%x-0x%x\n",
1138 EEPROM_RF_CFG_TYPE_MSK(radio_cfg),
1139 EEPROM_RF_CFG_STEP_MSK(radio_cfg),
1140 EEPROM_RF_CFG_DASH_MSK(radio_cfg));
1141 } else
1142 WARN_ON(1);
1143
1144 /* set CSR_HW_CONFIG_REG for uCode use */
1145 iwl_set_bit(priv->trans, CSR_HW_IF_CONFIG_REG,
1146 CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
1147 CSR_HW_IF_CONFIG_REG_BIT_MAC_SI);
1148}
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.h b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
deleted file mode 100644
index 64bfd947cae..00000000000
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.h
+++ /dev/null
@@ -1,269 +0,0 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *****************************************************************************/
62
63#ifndef __iwl_eeprom_h__
64#define __iwl_eeprom_h__
65
66#include <net/mac80211.h>
67
68struct iwl_priv;
69
70/*
71 * EEPROM access time values:
72 *
73 * Driver initiates EEPROM read by writing byte address << 1 to CSR_EEPROM_REG.
74 * Driver then polls CSR_EEPROM_REG for CSR_EEPROM_REG_READ_VALID_MSK (0x1).
75 * When polling, wait 10 uSec between polling loops, up to a maximum 5000 uSec.
76 * Driver reads 16-bit value from bits 31-16 of CSR_EEPROM_REG.
77 */
78#define IWL_EEPROM_ACCESS_TIMEOUT 5000 /* uSec */
79
80#define IWL_EEPROM_SEM_TIMEOUT 10 /* microseconds */
81#define IWL_EEPROM_SEM_RETRY_LIMIT 1000 /* number of attempts (not time) */
82
83
84/*
85 * Regulatory channel usage flags in EEPROM struct iwl4965_eeprom_channel.flags.
86 *
87 * IBSS and/or AP operation is allowed *only* on those channels with
88 * (VALID && IBSS && ACTIVE && !RADAR). This restriction is in place because
89 * RADAR detection is not supported by the 4965 driver, but is a
90 * requirement for establishing a new network for legal operation on channels
91 * requiring RADAR detection or restricting ACTIVE scanning.
92 *
93 * NOTE: "WIDE" flag does not indicate anything about "HT40" 40 MHz channels.
94 * It only indicates that 20 MHz channel use is supported; HT40 channel
95 * usage is indicated by a separate set of regulatory flags for each
96 * HT40 channel pair.
97 *
98 * NOTE: Using a channel inappropriately will result in a uCode error!
99 */
100#define IWL_NUM_TX_CALIB_GROUPS 5
101enum {
102 EEPROM_CHANNEL_VALID = (1 << 0), /* usable for this SKU/geo */
103 EEPROM_CHANNEL_IBSS = (1 << 1), /* usable as an IBSS channel */
104 /* Bit 2 Reserved */
105 EEPROM_CHANNEL_ACTIVE = (1 << 3), /* active scanning allowed */
106 EEPROM_CHANNEL_RADAR = (1 << 4), /* radar detection required */
107 EEPROM_CHANNEL_WIDE = (1 << 5), /* 20 MHz channel okay */
108 /* Bit 6 Reserved (was Narrow Channel) */
109 EEPROM_CHANNEL_DFS = (1 << 7), /* dynamic freq selection candidate */
110};
111
112/* SKU Capabilities */
113#define EEPROM_SKU_CAP_BAND_24GHZ (1 << 4)
114#define EEPROM_SKU_CAP_BAND_52GHZ (1 << 5)
115#define EEPROM_SKU_CAP_11N_ENABLE (1 << 6)
116#define EEPROM_SKU_CAP_AMT_ENABLE (1 << 7)
117#define EEPROM_SKU_CAP_IPAN_ENABLE (1 << 8)
118
119/* *regulatory* channel data format in eeprom, one for each channel.
120 * There are separate entries for HT40 (40 MHz) vs. normal (20 MHz) channels. */
121struct iwl_eeprom_channel {
122 u8 flags; /* EEPROM_CHANNEL_* flags copied from EEPROM */
123 s8 max_power_avg; /* max power (dBm) on this chnl, limit 31 */
124} __packed;
125
126enum iwl_eeprom_enhanced_txpwr_flags {
127 IWL_EEPROM_ENH_TXP_FL_VALID = BIT(0),
128 IWL_EEPROM_ENH_TXP_FL_BAND_52G = BIT(1),
129 IWL_EEPROM_ENH_TXP_FL_OFDM = BIT(2),
130 IWL_EEPROM_ENH_TXP_FL_40MHZ = BIT(3),
131 IWL_EEPROM_ENH_TXP_FL_HT_AP = BIT(4),
132 IWL_EEPROM_ENH_TXP_FL_RES1 = BIT(5),
133 IWL_EEPROM_ENH_TXP_FL_RES2 = BIT(6),
134 IWL_EEPROM_ENH_TXP_FL_COMMON_TYPE = BIT(7),
135};
136
137/**
138 * iwl_eeprom_enhanced_txpwr structure
139 * This structure presents the enhanced regulatory tx power limit layout
140 * in eeprom image
141 * Enhanced regulatory tx power portion of eeprom image can be broken down
142 * into individual structures; each one is 8 bytes in size and contain the
143 * following information
144 * @flags: entry flags
145 * @channel: channel number
146 * @chain_a_max_pwr: chain a max power in 1/2 dBm
147 * @chain_b_max_pwr: chain b max power in 1/2 dBm
148 * @chain_c_max_pwr: chain c max power in 1/2 dBm
149 * @delta_20_in_40: 20-in-40 deltas (hi/lo)
150 * @mimo2_max_pwr: mimo2 max power in 1/2 dBm
151 * @mimo3_max_pwr: mimo3 max power in 1/2 dBm
152 *
153 */
154struct iwl_eeprom_enhanced_txpwr {
155 u8 flags;
156 u8 channel;
157 s8 chain_a_max;
158 s8 chain_b_max;
159 s8 chain_c_max;
160 u8 delta_20_in_40;
161 s8 mimo2_max;
162 s8 mimo3_max;
163} __packed;
164
165/* calibration */
166struct iwl_eeprom_calib_hdr {
167 u8 version;
168 u8 pa_type;
169 __le16 voltage;
170} __packed;
171
172#define EEPROM_CALIB_ALL (INDIRECT_ADDRESS | INDIRECT_CALIBRATION)
173#define EEPROM_XTAL ((2*0x128) | EEPROM_CALIB_ALL)
174
175/* temperature */
176#define EEPROM_KELVIN_TEMPERATURE ((2*0x12A) | EEPROM_CALIB_ALL)
177#define EEPROM_RAW_TEMPERATURE ((2*0x12B) | EEPROM_CALIB_ALL)
178
179
180/* agn links */
181#define EEPROM_LINK_HOST (2*0x64)
182#define EEPROM_LINK_GENERAL (2*0x65)
183#define EEPROM_LINK_REGULATORY (2*0x66)
184#define EEPROM_LINK_CALIBRATION (2*0x67)
185#define EEPROM_LINK_PROCESS_ADJST (2*0x68)
186#define EEPROM_LINK_OTHERS (2*0x69)
187#define EEPROM_LINK_TXP_LIMIT (2*0x6a)
188#define EEPROM_LINK_TXP_LIMIT_SIZE (2*0x6b)
189
190/* agn regulatory - indirect access */
191#define EEPROM_REG_BAND_1_CHANNELS ((0x08)\
192 | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 28 bytes */
193#define EEPROM_REG_BAND_2_CHANNELS ((0x26)\
194 | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 26 bytes */
195#define EEPROM_REG_BAND_3_CHANNELS ((0x42)\
196 | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 24 bytes */
197#define EEPROM_REG_BAND_4_CHANNELS ((0x5C)\
198 | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 22 bytes */
199#define EEPROM_REG_BAND_5_CHANNELS ((0x74)\
200 | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 12 bytes */
201#define EEPROM_REG_BAND_24_HT40_CHANNELS ((0x82)\
202 | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 14 bytes */
203#define EEPROM_REG_BAND_52_HT40_CHANNELS ((0x92)\
204 | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 22 bytes */
205
206/* 6000 regulatory - indirect access */
207#define EEPROM_6000_REG_BAND_24_HT40_CHANNELS ((0x80)\
208 | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 14 bytes */
209/* 2.4 GHz */
210extern const u8 iwl_eeprom_band_1[14];
211
212#define ADDRESS_MSK 0x0000FFFF
213#define INDIRECT_TYPE_MSK 0x000F0000
214#define INDIRECT_HOST 0x00010000
215#define INDIRECT_GENERAL 0x00020000
216#define INDIRECT_REGULATORY 0x00030000
217#define INDIRECT_CALIBRATION 0x00040000
218#define INDIRECT_PROCESS_ADJST 0x00050000
219#define INDIRECT_OTHERS 0x00060000
220#define INDIRECT_TXP_LIMIT 0x00070000
221#define INDIRECT_TXP_LIMIT_SIZE 0x00080000
222#define INDIRECT_ADDRESS 0x00100000
223
224/* General */
225#define EEPROM_DEVICE_ID (2*0x08) /* 2 bytes */
226#define EEPROM_SUBSYSTEM_ID (2*0x0A) /* 2 bytes */
227#define EEPROM_MAC_ADDRESS (2*0x15) /* 6 bytes */
228#define EEPROM_BOARD_REVISION (2*0x35) /* 2 bytes */
229#define EEPROM_BOARD_PBA_NUMBER (2*0x3B+1) /* 9 bytes */
230#define EEPROM_VERSION (2*0x44) /* 2 bytes */
231#define EEPROM_SKU_CAP (2*0x45) /* 2 bytes */
232#define EEPROM_OEM_MODE (2*0x46) /* 2 bytes */
233#define EEPROM_RADIO_CONFIG (2*0x48) /* 2 bytes */
234#define EEPROM_NUM_MAC_ADDRESS (2*0x4C) /* 2 bytes */
235
236/* The following masks are to be applied on EEPROM_RADIO_CONFIG */
237#define EEPROM_RF_CFG_TYPE_MSK(x) (x & 0x3) /* bits 0-1 */
238#define EEPROM_RF_CFG_STEP_MSK(x) ((x >> 2) & 0x3) /* bits 2-3 */
239#define EEPROM_RF_CFG_DASH_MSK(x) ((x >> 4) & 0x3) /* bits 4-5 */
240#define EEPROM_RF_CFG_PNUM_MSK(x) ((x >> 6) & 0x3) /* bits 6-7 */
241#define EEPROM_RF_CFG_TX_ANT_MSK(x) ((x >> 8) & 0xF) /* bits 8-11 */
242#define EEPROM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */
243
244#define EEPROM_RF_CONFIG_TYPE_MAX 0x3
245
246#define EEPROM_REGULATORY_BAND_NO_HT40 (0)
247
248struct iwl_eeprom_ops {
249 const u32 regulatory_bands[7];
250 bool enhanced_txpower;
251};
252
253
254int iwl_eeprom_init(struct iwl_priv *priv, u32 hw_rev);
255void iwl_eeprom_free(struct iwl_priv *priv);
256int iwl_eeprom_check_version(struct iwl_priv *priv);
257int iwl_eeprom_init_hw_params(struct iwl_priv *priv);
258u16 iwl_eeprom_calib_version(struct iwl_priv *priv);
259const u8 *iwl_eeprom_query_addr(struct iwl_priv *priv, size_t offset);
260u16 iwl_eeprom_query16(struct iwl_priv *priv, size_t offset);
261void iwl_eeprom_get_mac(struct iwl_priv *priv, u8 *mac);
262int iwl_init_channel_map(struct iwl_priv *priv);
263void iwl_free_channel_map(struct iwl_priv *priv);
264const struct iwl_channel_info *iwl_get_channel_info(
265 const struct iwl_priv *priv,
266 enum ieee80211_band band, u16 channel);
267void iwl_rf_config(struct iwl_priv *priv);
268
269#endif /* __iwl_eeprom_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-fh.h b/drivers/net/wireless/iwlwifi/iwl-fh.h
index 74bce97a860..80604664174 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fh.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fh.h
@@ -421,6 +421,8 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(unsigned int chnl)
421 (FH_SRVC_LOWER_BOUND + ((_chnl) - 9) * 0x4) 421 (FH_SRVC_LOWER_BOUND + ((_chnl) - 9) * 0x4)
422 422
423#define FH_TX_CHICKEN_BITS_REG (FH_MEM_LOWER_BOUND + 0xE98) 423#define FH_TX_CHICKEN_BITS_REG (FH_MEM_LOWER_BOUND + 0xE98)
424#define FH_TX_TRB_REG(_chan) (FH_MEM_LOWER_BOUND + 0x958 + (_chan) * 4)
425
424/* Instruct FH to increment the retry count of a packet when 426/* Instruct FH to increment the retry count of a packet when
425 * it is brought from the memory to TX-FIFO 427 * it is brought from the memory to TX-FIFO
426 */ 428 */
diff --git a/drivers/net/wireless/iwlwifi/iwl-io.c b/drivers/net/wireless/iwlwifi/iwl-io.c
index 081dd34d238..66c873399ab 100644
--- a/drivers/net/wireless/iwlwifi/iwl-io.c
+++ b/drivers/net/wireless/iwlwifi/iwl-io.c
@@ -27,6 +27,7 @@
27 *****************************************************************************/ 27 *****************************************************************************/
28#include <linux/delay.h> 28#include <linux/delay.h>
29#include <linux/device.h> 29#include <linux/device.h>
30#include <linux/export.h>
30 31
31#include "iwl-io.h" 32#include "iwl-io.h"
32#include"iwl-csr.h" 33#include"iwl-csr.h"
@@ -52,6 +53,7 @@ void iwl_set_bit(struct iwl_trans *trans, u32 reg, u32 mask)
52 __iwl_set_bit(trans, reg, mask); 53 __iwl_set_bit(trans, reg, mask);
53 spin_unlock_irqrestore(&trans->reg_lock, flags); 54 spin_unlock_irqrestore(&trans->reg_lock, flags);
54} 55}
56EXPORT_SYMBOL_GPL(iwl_set_bit);
55 57
56void iwl_clear_bit(struct iwl_trans *trans, u32 reg, u32 mask) 58void iwl_clear_bit(struct iwl_trans *trans, u32 reg, u32 mask)
57{ 59{
@@ -61,6 +63,25 @@ void iwl_clear_bit(struct iwl_trans *trans, u32 reg, u32 mask)
61 __iwl_clear_bit(trans, reg, mask); 63 __iwl_clear_bit(trans, reg, mask);
62 spin_unlock_irqrestore(&trans->reg_lock, flags); 64 spin_unlock_irqrestore(&trans->reg_lock, flags);
63} 65}
66EXPORT_SYMBOL_GPL(iwl_clear_bit);
67
68void iwl_set_bits_mask(struct iwl_trans *trans, u32 reg, u32 mask, u32 value)
69{
70 unsigned long flags;
71 u32 v;
72
73#ifdef CONFIG_IWLWIFI_DEBUG
74 WARN_ON_ONCE(value & ~mask);
75#endif
76
77 spin_lock_irqsave(&trans->reg_lock, flags);
78 v = iwl_read32(trans, reg);
79 v &= ~mask;
80 v |= value;
81 iwl_write32(trans, reg, v);
82 spin_unlock_irqrestore(&trans->reg_lock, flags);
83}
84EXPORT_SYMBOL_GPL(iwl_set_bits_mask);
64 85
65int iwl_poll_bit(struct iwl_trans *trans, u32 addr, 86int iwl_poll_bit(struct iwl_trans *trans, u32 addr,
66 u32 bits, u32 mask, int timeout) 87 u32 bits, u32 mask, int timeout)
@@ -76,6 +97,7 @@ int iwl_poll_bit(struct iwl_trans *trans, u32 addr,
76 97
77 return -ETIMEDOUT; 98 return -ETIMEDOUT;
78} 99}
100EXPORT_SYMBOL_GPL(iwl_poll_bit);
79 101
80int iwl_grab_nic_access_silent(struct iwl_trans *trans) 102int iwl_grab_nic_access_silent(struct iwl_trans *trans)
81{ 103{
@@ -117,6 +139,7 @@ int iwl_grab_nic_access_silent(struct iwl_trans *trans)
117 139
118 return 0; 140 return 0;
119} 141}
142EXPORT_SYMBOL_GPL(iwl_grab_nic_access_silent);
120 143
121bool iwl_grab_nic_access(struct iwl_trans *trans) 144bool iwl_grab_nic_access(struct iwl_trans *trans)
122{ 145{
@@ -130,6 +153,7 @@ bool iwl_grab_nic_access(struct iwl_trans *trans)
130 153
131 return true; 154 return true;
132} 155}
156EXPORT_SYMBOL_GPL(iwl_grab_nic_access);
133 157
134void iwl_release_nic_access(struct iwl_trans *trans) 158void iwl_release_nic_access(struct iwl_trans *trans)
135{ 159{
@@ -144,6 +168,7 @@ void iwl_release_nic_access(struct iwl_trans *trans)
144 */ 168 */
145 mmiowb(); 169 mmiowb();
146} 170}
171EXPORT_SYMBOL_GPL(iwl_release_nic_access);
147 172
148u32 iwl_read_direct32(struct iwl_trans *trans, u32 reg) 173u32 iwl_read_direct32(struct iwl_trans *trans, u32 reg)
149{ 174{
@@ -158,6 +183,7 @@ u32 iwl_read_direct32(struct iwl_trans *trans, u32 reg)
158 183
159 return value; 184 return value;
160} 185}
186EXPORT_SYMBOL_GPL(iwl_read_direct32);
161 187
162void iwl_write_direct32(struct iwl_trans *trans, u32 reg, u32 value) 188void iwl_write_direct32(struct iwl_trans *trans, u32 reg, u32 value)
163{ 189{
@@ -170,6 +196,7 @@ void iwl_write_direct32(struct iwl_trans *trans, u32 reg, u32 value)
170 } 196 }
171 spin_unlock_irqrestore(&trans->reg_lock, flags); 197 spin_unlock_irqrestore(&trans->reg_lock, flags);
172} 198}
199EXPORT_SYMBOL_GPL(iwl_write_direct32);
173 200
174int iwl_poll_direct_bit(struct iwl_trans *trans, u32 addr, u32 mask, 201int iwl_poll_direct_bit(struct iwl_trans *trans, u32 addr, u32 mask,
175 int timeout) 202 int timeout)
@@ -185,6 +212,7 @@ int iwl_poll_direct_bit(struct iwl_trans *trans, u32 addr, u32 mask,
185 212
186 return -ETIMEDOUT; 213 return -ETIMEDOUT;
187} 214}
215EXPORT_SYMBOL_GPL(iwl_poll_direct_bit);
188 216
189static inline u32 __iwl_read_prph(struct iwl_trans *trans, u32 reg) 217static inline u32 __iwl_read_prph(struct iwl_trans *trans, u32 reg)
190{ 218{
@@ -211,6 +239,7 @@ u32 iwl_read_prph(struct iwl_trans *trans, u32 reg)
211 spin_unlock_irqrestore(&trans->reg_lock, flags); 239 spin_unlock_irqrestore(&trans->reg_lock, flags);
212 return val; 240 return val;
213} 241}
242EXPORT_SYMBOL_GPL(iwl_read_prph);
214 243
215void iwl_write_prph(struct iwl_trans *trans, u32 addr, u32 val) 244void iwl_write_prph(struct iwl_trans *trans, u32 addr, u32 val)
216{ 245{
@@ -223,6 +252,7 @@ void iwl_write_prph(struct iwl_trans *trans, u32 addr, u32 val)
223 } 252 }
224 spin_unlock_irqrestore(&trans->reg_lock, flags); 253 spin_unlock_irqrestore(&trans->reg_lock, flags);
225} 254}
255EXPORT_SYMBOL_GPL(iwl_write_prph);
226 256
227void iwl_set_bits_prph(struct iwl_trans *trans, u32 reg, u32 mask) 257void iwl_set_bits_prph(struct iwl_trans *trans, u32 reg, u32 mask)
228{ 258{
@@ -236,6 +266,7 @@ void iwl_set_bits_prph(struct iwl_trans *trans, u32 reg, u32 mask)
236 } 266 }
237 spin_unlock_irqrestore(&trans->reg_lock, flags); 267 spin_unlock_irqrestore(&trans->reg_lock, flags);
238} 268}
269EXPORT_SYMBOL_GPL(iwl_set_bits_prph);
239 270
240void iwl_set_bits_mask_prph(struct iwl_trans *trans, u32 reg, 271void iwl_set_bits_mask_prph(struct iwl_trans *trans, u32 reg,
241 u32 bits, u32 mask) 272 u32 bits, u32 mask)
@@ -250,6 +281,7 @@ void iwl_set_bits_mask_prph(struct iwl_trans *trans, u32 reg,
250 } 281 }
251 spin_unlock_irqrestore(&trans->reg_lock, flags); 282 spin_unlock_irqrestore(&trans->reg_lock, flags);
252} 283}
284EXPORT_SYMBOL_GPL(iwl_set_bits_mask_prph);
253 285
254void iwl_clear_bits_prph(struct iwl_trans *trans, u32 reg, u32 mask) 286void iwl_clear_bits_prph(struct iwl_trans *trans, u32 reg, u32 mask)
255{ 287{
@@ -264,9 +296,10 @@ void iwl_clear_bits_prph(struct iwl_trans *trans, u32 reg, u32 mask)
264 } 296 }
265 spin_unlock_irqrestore(&trans->reg_lock, flags); 297 spin_unlock_irqrestore(&trans->reg_lock, flags);
266} 298}
299EXPORT_SYMBOL_GPL(iwl_clear_bits_prph);
267 300
268void _iwl_read_targ_mem_words(struct iwl_trans *trans, u32 addr, 301void _iwl_read_targ_mem_dwords(struct iwl_trans *trans, u32 addr,
269 void *buf, int words) 302 void *buf, int dwords)
270{ 303{
271 unsigned long flags; 304 unsigned long flags;
272 int offs; 305 int offs;
@@ -275,24 +308,26 @@ void _iwl_read_targ_mem_words(struct iwl_trans *trans, u32 addr,
275 spin_lock_irqsave(&trans->reg_lock, flags); 308 spin_lock_irqsave(&trans->reg_lock, flags);
276 if (likely(iwl_grab_nic_access(trans))) { 309 if (likely(iwl_grab_nic_access(trans))) {
277 iwl_write32(trans, HBUS_TARG_MEM_RADDR, addr); 310 iwl_write32(trans, HBUS_TARG_MEM_RADDR, addr);
278 for (offs = 0; offs < words; offs++) 311 for (offs = 0; offs < dwords; offs++)
279 vals[offs] = iwl_read32(trans, HBUS_TARG_MEM_RDAT); 312 vals[offs] = iwl_read32(trans, HBUS_TARG_MEM_RDAT);
280 iwl_release_nic_access(trans); 313 iwl_release_nic_access(trans);
281 } 314 }
282 spin_unlock_irqrestore(&trans->reg_lock, flags); 315 spin_unlock_irqrestore(&trans->reg_lock, flags);
283} 316}
317EXPORT_SYMBOL_GPL(_iwl_read_targ_mem_dwords);
284 318
285u32 iwl_read_targ_mem(struct iwl_trans *trans, u32 addr) 319u32 iwl_read_targ_mem(struct iwl_trans *trans, u32 addr)
286{ 320{
287 u32 value; 321 u32 value;
288 322
289 _iwl_read_targ_mem_words(trans, addr, &value, 1); 323 _iwl_read_targ_mem_dwords(trans, addr, &value, 1);
290 324
291 return value; 325 return value;
292} 326}
327EXPORT_SYMBOL_GPL(iwl_read_targ_mem);
293 328
294int _iwl_write_targ_mem_words(struct iwl_trans *trans, u32 addr, 329int _iwl_write_targ_mem_dwords(struct iwl_trans *trans, u32 addr,
295 void *buf, int words) 330 void *buf, int dwords)
296{ 331{
297 unsigned long flags; 332 unsigned long flags;
298 int offs, result = 0; 333 int offs, result = 0;
@@ -301,7 +336,7 @@ int _iwl_write_targ_mem_words(struct iwl_trans *trans, u32 addr,
301 spin_lock_irqsave(&trans->reg_lock, flags); 336 spin_lock_irqsave(&trans->reg_lock, flags);
302 if (likely(iwl_grab_nic_access(trans))) { 337 if (likely(iwl_grab_nic_access(trans))) {
303 iwl_write32(trans, HBUS_TARG_MEM_WADDR, addr); 338 iwl_write32(trans, HBUS_TARG_MEM_WADDR, addr);
304 for (offs = 0; offs < words; offs++) 339 for (offs = 0; offs < dwords; offs++)
305 iwl_write32(trans, HBUS_TARG_MEM_WDAT, vals[offs]); 340 iwl_write32(trans, HBUS_TARG_MEM_WDAT, vals[offs]);
306 iwl_release_nic_access(trans); 341 iwl_release_nic_access(trans);
307 } else 342 } else
@@ -310,8 +345,10 @@ int _iwl_write_targ_mem_words(struct iwl_trans *trans, u32 addr,
310 345
311 return result; 346 return result;
312} 347}
348EXPORT_SYMBOL_GPL(_iwl_write_targ_mem_dwords);
313 349
314int iwl_write_targ_mem(struct iwl_trans *trans, u32 addr, u32 val) 350int iwl_write_targ_mem(struct iwl_trans *trans, u32 addr, u32 val)
315{ 351{
316 return _iwl_write_targ_mem_words(trans, addr, &val, 1); 352 return _iwl_write_targ_mem_dwords(trans, addr, &val, 1);
317} 353}
354EXPORT_SYMBOL_GPL(iwl_write_targ_mem);
diff --git a/drivers/net/wireless/iwlwifi/iwl-io.h b/drivers/net/wireless/iwlwifi/iwl-io.h
index abb3250164b..50d3819739d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-io.h
+++ b/drivers/net/wireless/iwlwifi/iwl-io.h
@@ -54,6 +54,8 @@ static inline u32 iwl_read32(struct iwl_trans *trans, u32 ofs)
54void iwl_set_bit(struct iwl_trans *trans, u32 reg, u32 mask); 54void iwl_set_bit(struct iwl_trans *trans, u32 reg, u32 mask);
55void iwl_clear_bit(struct iwl_trans *trans, u32 reg, u32 mask); 55void iwl_clear_bit(struct iwl_trans *trans, u32 reg, u32 mask);
56 56
57void iwl_set_bits_mask(struct iwl_trans *trans, u32 reg, u32 mask, u32 value);
58
57int iwl_poll_bit(struct iwl_trans *trans, u32 addr, 59int iwl_poll_bit(struct iwl_trans *trans, u32 addr,
58 u32 bits, u32 mask, int timeout); 60 u32 bits, u32 mask, int timeout);
59int iwl_poll_direct_bit(struct iwl_trans *trans, u32 addr, u32 mask, 61int iwl_poll_direct_bit(struct iwl_trans *trans, u32 addr, u32 mask,
@@ -74,18 +76,18 @@ void iwl_set_bits_mask_prph(struct iwl_trans *trans, u32 reg,
74 u32 bits, u32 mask); 76 u32 bits, u32 mask);
75void iwl_clear_bits_prph(struct iwl_trans *trans, u32 reg, u32 mask); 77void iwl_clear_bits_prph(struct iwl_trans *trans, u32 reg, u32 mask);
76 78
77void _iwl_read_targ_mem_words(struct iwl_trans *trans, u32 addr, 79void _iwl_read_targ_mem_dwords(struct iwl_trans *trans, u32 addr,
78 void *buf, int words); 80 void *buf, int dwords);
79 81
80#define iwl_read_targ_mem_words(trans, addr, buf, bufsize) \ 82#define iwl_read_targ_mem_bytes(trans, addr, buf, bufsize) \
81 do { \ 83 do { \
82 BUILD_BUG_ON((bufsize) % sizeof(u32)); \ 84 BUILD_BUG_ON((bufsize) % sizeof(u32)); \
83 _iwl_read_targ_mem_words(trans, addr, buf, \ 85 _iwl_read_targ_mem_dwords(trans, addr, buf, \
84 (bufsize) / sizeof(u32));\ 86 (bufsize) / sizeof(u32));\
85 } while (0) 87 } while (0)
86 88
87int _iwl_write_targ_mem_words(struct iwl_trans *trans, u32 addr, 89int _iwl_write_targ_mem_dwords(struct iwl_trans *trans, u32 addr,
88 void *buf, int words); 90 void *buf, int dwords);
89 91
90u32 iwl_read_targ_mem(struct iwl_trans *trans, u32 addr); 92u32 iwl_read_targ_mem(struct iwl_trans *trans, u32 addr);
91int iwl_write_targ_mem(struct iwl_trans *trans, u32 addr, u32 val); 93int iwl_write_targ_mem(struct iwl_trans *trans, u32 addr, u32 val);
diff --git a/drivers/net/wireless/iwlwifi/iwl-notif-wait.c b/drivers/net/wireless/iwlwifi/iwl-notif-wait.c
index 0066b899fe5..c61f2070f15 100644
--- a/drivers/net/wireless/iwlwifi/iwl-notif-wait.c
+++ b/drivers/net/wireless/iwlwifi/iwl-notif-wait.c
@@ -61,6 +61,7 @@
61 * 61 *
62 *****************************************************************************/ 62 *****************************************************************************/
63#include <linux/sched.h> 63#include <linux/sched.h>
64#include <linux/export.h>
64 65
65#include "iwl-notif-wait.h" 66#include "iwl-notif-wait.h"
66 67
@@ -71,6 +72,7 @@ void iwl_notification_wait_init(struct iwl_notif_wait_data *notif_wait)
71 INIT_LIST_HEAD(&notif_wait->notif_waits); 72 INIT_LIST_HEAD(&notif_wait->notif_waits);
72 init_waitqueue_head(&notif_wait->notif_waitq); 73 init_waitqueue_head(&notif_wait->notif_waitq);
73} 74}
75EXPORT_SYMBOL_GPL(iwl_notification_wait_init);
74 76
75void iwl_notification_wait_notify(struct iwl_notif_wait_data *notif_wait, 77void iwl_notification_wait_notify(struct iwl_notif_wait_data *notif_wait,
76 struct iwl_rx_packet *pkt) 78 struct iwl_rx_packet *pkt)
@@ -115,20 +117,20 @@ void iwl_notification_wait_notify(struct iwl_notif_wait_data *notif_wait,
115 if (triggered) 117 if (triggered)
116 wake_up_all(&notif_wait->notif_waitq); 118 wake_up_all(&notif_wait->notif_waitq);
117} 119}
120EXPORT_SYMBOL_GPL(iwl_notification_wait_notify);
118 121
119void iwl_abort_notification_waits(struct iwl_notif_wait_data *notif_wait) 122void iwl_abort_notification_waits(struct iwl_notif_wait_data *notif_wait)
120{ 123{
121 unsigned long flags;
122 struct iwl_notification_wait *wait_entry; 124 struct iwl_notification_wait *wait_entry;
123 125
124 spin_lock_irqsave(&notif_wait->notif_wait_lock, flags); 126 spin_lock(&notif_wait->notif_wait_lock);
125 list_for_each_entry(wait_entry, &notif_wait->notif_waits, list) 127 list_for_each_entry(wait_entry, &notif_wait->notif_waits, list)
126 wait_entry->aborted = true; 128 wait_entry->aborted = true;
127 spin_unlock_irqrestore(&notif_wait->notif_wait_lock, flags); 129 spin_unlock(&notif_wait->notif_wait_lock);
128 130
129 wake_up_all(&notif_wait->notif_waitq); 131 wake_up_all(&notif_wait->notif_waitq);
130} 132}
131 133EXPORT_SYMBOL_GPL(iwl_abort_notification_waits);
132 134
133void 135void
134iwl_init_notification_wait(struct iwl_notif_wait_data *notif_wait, 136iwl_init_notification_wait(struct iwl_notif_wait_data *notif_wait,
@@ -152,6 +154,7 @@ iwl_init_notification_wait(struct iwl_notif_wait_data *notif_wait,
152 list_add(&wait_entry->list, &notif_wait->notif_waits); 154 list_add(&wait_entry->list, &notif_wait->notif_waits);
153 spin_unlock_bh(&notif_wait->notif_wait_lock); 155 spin_unlock_bh(&notif_wait->notif_wait_lock);
154} 156}
157EXPORT_SYMBOL_GPL(iwl_init_notification_wait);
155 158
156int iwl_wait_notification(struct iwl_notif_wait_data *notif_wait, 159int iwl_wait_notification(struct iwl_notif_wait_data *notif_wait,
157 struct iwl_notification_wait *wait_entry, 160 struct iwl_notification_wait *wait_entry,
@@ -175,6 +178,7 @@ int iwl_wait_notification(struct iwl_notif_wait_data *notif_wait,
175 return -ETIMEDOUT; 178 return -ETIMEDOUT;
176 return 0; 179 return 0;
177} 180}
181EXPORT_SYMBOL_GPL(iwl_wait_notification);
178 182
179void iwl_remove_notification(struct iwl_notif_wait_data *notif_wait, 183void iwl_remove_notification(struct iwl_notif_wait_data *notif_wait,
180 struct iwl_notification_wait *wait_entry) 184 struct iwl_notification_wait *wait_entry)
@@ -183,3 +187,4 @@ void iwl_remove_notification(struct iwl_notif_wait_data *notif_wait,
183 list_del(&wait_entry->list); 187 list_del(&wait_entry->list);
184 spin_unlock_bh(&notif_wait->notif_wait_lock); 188 spin_unlock_bh(&notif_wait->notif_wait_lock);
185} 189}
190EXPORT_SYMBOL_GPL(iwl_remove_notification);
diff --git a/drivers/net/wireless/iwlwifi/iwl-op-mode.h b/drivers/net/wireless/iwlwifi/iwl-op-mode.h
index 4ef742b28e0..64886f95664 100644
--- a/drivers/net/wireless/iwlwifi/iwl-op-mode.h
+++ b/drivers/net/wireless/iwlwifi/iwl-op-mode.h
@@ -111,22 +111,25 @@ struct iwl_cfg;
111 * May sleep 111 * May sleep
112 * @rx: Rx notification to the op_mode. rxb is the Rx buffer itself. Cmd is the 112 * @rx: Rx notification to the op_mode. rxb is the Rx buffer itself. Cmd is the
113 * HCMD the this Rx responds to. 113 * HCMD the this Rx responds to.
114 * Must be atomic. 114 * Must be atomic and called with BH disabled.
115 * @queue_full: notifies that a HW queue is full. 115 * @queue_full: notifies that a HW queue is full.
116 * Must be atomic 116 * Must be atomic and called with BH disabled.
117 * @queue_not_full: notifies that a HW queue is not full any more. 117 * @queue_not_full: notifies that a HW queue is not full any more.
118 * Must be atomic 118 * Must be atomic and called with BH disabled.
119 * @hw_rf_kill:notifies of a change in the HW rf kill switch. True means that 119 * @hw_rf_kill:notifies of a change in the HW rf kill switch. True means that
120 * the radio is killed. Must be atomic. 120 * the radio is killed. Must be atomic.
121 * @free_skb: allows the transport layer to free skbs that haven't been 121 * @free_skb: allows the transport layer to free skbs that haven't been
122 * reclaimed by the op_mode. This can happen when the driver is freed and 122 * reclaimed by the op_mode. This can happen when the driver is freed and
123 * there are Tx packets pending in the transport layer. 123 * there are Tx packets pending in the transport layer.
124 * Must be atomic 124 * Must be atomic
125 * @nic_error: error notification. Must be atomic 125 * @nic_error: error notification. Must be atomic and must be called with BH
126 * @cmd_queue_full: Called when the command queue gets full. Must be atomic. 126 * disabled.
127 * @cmd_queue_full: Called when the command queue gets full. Must be atomic and
128 * called with BH disabled.
127 * @nic_config: configure NIC, called before firmware is started. 129 * @nic_config: configure NIC, called before firmware is started.
128 * May sleep 130 * May sleep
129 * @wimax_active: invoked when WiMax becomes active. Must be atomic. 131 * @wimax_active: invoked when WiMax becomes active. Must be atomic and called
132 * with BH disabled.
130 */ 133 */
131struct iwl_op_mode_ops { 134struct iwl_op_mode_ops {
132 struct iwl_op_mode *(*start)(struct iwl_trans *trans, 135 struct iwl_op_mode *(*start)(struct iwl_trans *trans,
@@ -145,6 +148,9 @@ struct iwl_op_mode_ops {
145 void (*wimax_active)(struct iwl_op_mode *op_mode); 148 void (*wimax_active)(struct iwl_op_mode *op_mode);
146}; 149};
147 150
151int iwl_opmode_register(const char *name, const struct iwl_op_mode_ops *ops);
152void iwl_opmode_deregister(const char *name);
153
148/** 154/**
149 * struct iwl_op_mode - operational mode 155 * struct iwl_op_mode - operational mode
150 * 156 *
@@ -162,7 +168,6 @@ struct iwl_op_mode {
162static inline void iwl_op_mode_stop(struct iwl_op_mode *op_mode) 168static inline void iwl_op_mode_stop(struct iwl_op_mode *op_mode)
163{ 169{
164 might_sleep(); 170 might_sleep();
165
166 op_mode->ops->stop(op_mode); 171 op_mode->ops->stop(op_mode);
167} 172}
168 173
@@ -218,9 +223,4 @@ static inline void iwl_op_mode_wimax_active(struct iwl_op_mode *op_mode)
218 op_mode->ops->wimax_active(op_mode); 223 op_mode->ops->wimax_active(op_mode);
219} 224}
220 225
221/*****************************************************
222* Op mode layers implementations
223******************************************************/
224extern const struct iwl_op_mode_ops iwl_dvm_ops;
225
226#endif /* __iwl_op_mode_h__ */ 226#endif /* __iwl_op_mode_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-prph.h b/drivers/net/wireless/iwlwifi/iwl-prph.h
index dfd54662e3e..9253ef1dba7 100644
--- a/drivers/net/wireless/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/iwlwifi/iwl-prph.h
@@ -187,7 +187,7 @@
187#define SCD_QUEUE_STTS_REG_POS_ACTIVE (3) 187#define SCD_QUEUE_STTS_REG_POS_ACTIVE (3)
188#define SCD_QUEUE_STTS_REG_POS_WSL (4) 188#define SCD_QUEUE_STTS_REG_POS_WSL (4)
189#define SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN (19) 189#define SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN (19)
190#define SCD_QUEUE_STTS_REG_MSK (0x00FF0000) 190#define SCD_QUEUE_STTS_REG_MSK (0x017F0000)
191 191
192#define SCD_QUEUE_CTX_REG1_CREDIT_POS (8) 192#define SCD_QUEUE_CTX_REG1_CREDIT_POS (8)
193#define SCD_QUEUE_CTX_REG1_CREDIT_MSK (0x00FFFF00) 193#define SCD_QUEUE_CTX_REG1_CREDIT_MSK (0x00FFFF00)
diff --git a/drivers/net/wireless/iwlwifi/iwl-test.c b/drivers/net/wireless/iwlwifi/iwl-test.c
new file mode 100644
index 00000000000..81e8c7126d7
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-test.c
@@ -0,0 +1,856 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2010 - 2012 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2010 - 2012 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63
64#include <linux/export.h>
65#include <net/netlink.h>
66
67#include "iwl-io.h"
68#include "iwl-fh.h"
69#include "iwl-prph.h"
70#include "iwl-trans.h"
71#include "iwl-test.h"
72#include "iwl-csr.h"
73#include "iwl-testmode.h"
74
75/*
76 * Periphery registers absolute lower bound. This is used in order to
 77 * differentiate registry access through HBUS_TARG_PRPH_* and
78 * HBUS_TARG_MEM_* accesses.
79 */
80#define IWL_ABS_PRPH_START (0xA00000)
81
82/*
83 * The TLVs used in the gnl message policy between the kernel module and
84 * user space application. iwl_testmode_gnl_msg_policy is to be carried
85 * through the NL80211_CMD_TESTMODE channel regulated by nl80211.
86 * See iwl-testmode.h
87 */
88static
89struct nla_policy iwl_testmode_gnl_msg_policy[IWL_TM_ATTR_MAX] = {
90 [IWL_TM_ATTR_COMMAND] = { .type = NLA_U32, },
91
92 [IWL_TM_ATTR_UCODE_CMD_ID] = { .type = NLA_U8, },
93 [IWL_TM_ATTR_UCODE_CMD_DATA] = { .type = NLA_UNSPEC, },
94
95 [IWL_TM_ATTR_REG_OFFSET] = { .type = NLA_U32, },
96 [IWL_TM_ATTR_REG_VALUE8] = { .type = NLA_U8, },
97 [IWL_TM_ATTR_REG_VALUE32] = { .type = NLA_U32, },
98
99 [IWL_TM_ATTR_SYNC_RSP] = { .type = NLA_UNSPEC, },
100 [IWL_TM_ATTR_UCODE_RX_PKT] = { .type = NLA_UNSPEC, },
101
102 [IWL_TM_ATTR_EEPROM] = { .type = NLA_UNSPEC, },
103
104 [IWL_TM_ATTR_TRACE_ADDR] = { .type = NLA_UNSPEC, },
105 [IWL_TM_ATTR_TRACE_DUMP] = { .type = NLA_UNSPEC, },
106 [IWL_TM_ATTR_TRACE_SIZE] = { .type = NLA_U32, },
107
108 [IWL_TM_ATTR_FIXRATE] = { .type = NLA_U32, },
109
110 [IWL_TM_ATTR_UCODE_OWNER] = { .type = NLA_U8, },
111
112 [IWL_TM_ATTR_MEM_ADDR] = { .type = NLA_U32, },
113 [IWL_TM_ATTR_BUFFER_SIZE] = { .type = NLA_U32, },
114 [IWL_TM_ATTR_BUFFER_DUMP] = { .type = NLA_UNSPEC, },
115
116 [IWL_TM_ATTR_FW_VERSION] = { .type = NLA_U32, },
117 [IWL_TM_ATTR_DEVICE_ID] = { .type = NLA_U32, },
118 [IWL_TM_ATTR_FW_TYPE] = { .type = NLA_U32, },
119 [IWL_TM_ATTR_FW_INST_SIZE] = { .type = NLA_U32, },
120 [IWL_TM_ATTR_FW_DATA_SIZE] = { .type = NLA_U32, },
121
122 [IWL_TM_ATTR_ENABLE_NOTIFICATION] = {.type = NLA_FLAG, },
123};
124
125static inline void iwl_test_trace_clear(struct iwl_test *tst)
126{
127 memset(&tst->trace, 0, sizeof(struct iwl_test_trace));
128}
129
130static void iwl_test_trace_stop(struct iwl_test *tst)
131{
132 if (!tst->trace.enabled)
133 return;
134
135 if (tst->trace.cpu_addr && tst->trace.dma_addr)
136 dma_free_coherent(tst->trans->dev,
137 tst->trace.tsize,
138 tst->trace.cpu_addr,
139 tst->trace.dma_addr);
140
141 iwl_test_trace_clear(tst);
142}
143
144static inline void iwl_test_mem_clear(struct iwl_test *tst)
145{
146 memset(&tst->mem, 0, sizeof(struct iwl_test_mem));
147}
148
149static inline void iwl_test_mem_stop(struct iwl_test *tst)
150{
151 if (!tst->mem.in_read)
152 return;
153
154 iwl_test_mem_clear(tst);
155}
156
157/*
158 * Initializes the test object
159 * During the lifetime of the test object it is assumed that the transport is
160 * started. The test object should be stopped before the transport is stopped.
161 */
162void iwl_test_init(struct iwl_test *tst, struct iwl_trans *trans,
163 struct iwl_test_ops *ops)
164{
165 tst->trans = trans;
166 tst->ops = ops;
167
168 iwl_test_trace_clear(tst);
169 iwl_test_mem_clear(tst);
170}
171EXPORT_SYMBOL_GPL(iwl_test_init);
172
173/*
174 * Stop the test object
175 */
176void iwl_test_free(struct iwl_test *tst)
177{
178 iwl_test_mem_stop(tst);
179 iwl_test_trace_stop(tst);
180}
181EXPORT_SYMBOL_GPL(iwl_test_free);
182
183static inline int iwl_test_send_cmd(struct iwl_test *tst,
184 struct iwl_host_cmd *cmd)
185{
186 return tst->ops->send_cmd(tst->trans->op_mode, cmd);
187}
188
189static inline bool iwl_test_valid_hw_addr(struct iwl_test *tst, u32 addr)
190{
191 return tst->ops->valid_hw_addr(addr);
192}
193
194static inline u32 iwl_test_fw_ver(struct iwl_test *tst)
195{
196 return tst->ops->get_fw_ver(tst->trans->op_mode);
197}
198
199static inline struct sk_buff*
200iwl_test_alloc_reply(struct iwl_test *tst, int len)
201{
202 return tst->ops->alloc_reply(tst->trans->op_mode, len);
203}
204
205static inline int iwl_test_reply(struct iwl_test *tst, struct sk_buff *skb)
206{
207 return tst->ops->reply(tst->trans->op_mode, skb);
208}
209
210static inline struct sk_buff*
211iwl_test_alloc_event(struct iwl_test *tst, int len)
212{
213 return tst->ops->alloc_event(tst->trans->op_mode, len);
214}
215
216static inline void
217iwl_test_event(struct iwl_test *tst, struct sk_buff *skb)
218{
219 return tst->ops->event(tst->trans->op_mode, skb);
220}
221
222/*
223 * This function handles the user application commands to the fw. The fw
 224 * commands are sent in a synchronous manner. In case the user requested
 225 * to get the command's response, it is sent to the user.
226 */
227static int iwl_test_fw_cmd(struct iwl_test *tst, struct nlattr **tb)
228{
229 struct iwl_host_cmd cmd;
230 struct iwl_rx_packet *pkt;
231 struct sk_buff *skb;
232 void *reply_buf;
233 u32 reply_len;
234 int ret;
235 bool cmd_want_skb;
236
237 memset(&cmd, 0, sizeof(struct iwl_host_cmd));
238
239 if (!tb[IWL_TM_ATTR_UCODE_CMD_ID] ||
240 !tb[IWL_TM_ATTR_UCODE_CMD_DATA]) {
241 IWL_ERR(tst->trans, "Missing fw command mandatory fields\n");
242 return -ENOMSG;
243 }
244
245 cmd.flags = CMD_ON_DEMAND | CMD_SYNC;
246 cmd_want_skb = nla_get_flag(tb[IWL_TM_ATTR_UCODE_CMD_SKB]);
247 if (cmd_want_skb)
248 cmd.flags |= CMD_WANT_SKB;
249
250 cmd.id = nla_get_u8(tb[IWL_TM_ATTR_UCODE_CMD_ID]);
251 cmd.data[0] = nla_data(tb[IWL_TM_ATTR_UCODE_CMD_DATA]);
252 cmd.len[0] = nla_len(tb[IWL_TM_ATTR_UCODE_CMD_DATA]);
253 cmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY;
254 IWL_DEBUG_INFO(tst->trans, "test fw cmd=0x%x, flags 0x%x, len %d\n",
255 cmd.id, cmd.flags, cmd.len[0]);
256
257 ret = iwl_test_send_cmd(tst, &cmd);
258 if (ret) {
259 IWL_ERR(tst->trans, "Failed to send hcmd\n");
260 return ret;
261 }
262 if (!cmd_want_skb)
263 return ret;
264
265 /* Handling return of SKB to the user */
266 pkt = cmd.resp_pkt;
267 if (!pkt) {
268 IWL_ERR(tst->trans, "HCMD received a null response packet\n");
269 return ret;
270 }
271
272 reply_len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
273 skb = iwl_test_alloc_reply(tst, reply_len + 20);
274 reply_buf = kmalloc(reply_len, GFP_KERNEL);
275 if (!skb || !reply_buf) {
276 kfree_skb(skb);
277 kfree(reply_buf);
278 return -ENOMEM;
279 }
280
281 /* The reply is in a page, that we cannot send to user space. */
282 memcpy(reply_buf, &(pkt->hdr), reply_len);
283 iwl_free_resp(&cmd);
284
285 if (nla_put_u32(skb, IWL_TM_ATTR_COMMAND,
286 IWL_TM_CMD_DEV2APP_UCODE_RX_PKT) ||
287 nla_put(skb, IWL_TM_ATTR_UCODE_RX_PKT, reply_len, reply_buf))
288 goto nla_put_failure;
289 return iwl_test_reply(tst, skb);
290
291nla_put_failure:
292 IWL_DEBUG_INFO(tst->trans, "Failed creating NL attributes\n");
293 kfree(reply_buf);
294 kfree_skb(skb);
295 return -ENOMSG;
296}
297
298/*
299 * Handles the user application commands for register access.
300 */
301static int iwl_test_reg(struct iwl_test *tst, struct nlattr **tb)
302{
303 u32 ofs, val32, cmd;
304 u8 val8;
305 struct sk_buff *skb;
306 int status = 0;
307 struct iwl_trans *trans = tst->trans;
308
309 if (!tb[IWL_TM_ATTR_REG_OFFSET]) {
310 IWL_ERR(trans, "Missing reg offset\n");
311 return -ENOMSG;
312 }
313
314 ofs = nla_get_u32(tb[IWL_TM_ATTR_REG_OFFSET]);
315 IWL_DEBUG_INFO(trans, "test reg access cmd offset=0x%x\n", ofs);
316
317 cmd = nla_get_u32(tb[IWL_TM_ATTR_COMMAND]);
318
319 /*
320 * Allow access only to FH/CSR/HBUS in direct mode.
321 * Since we don't have the upper bounds for the CSR and HBUS segments,
322 * we will use only the upper bound of FH for sanity check.
323 */
324 if (ofs >= FH_MEM_UPPER_BOUND) {
325 IWL_ERR(trans, "offset out of segment (0x0 - 0x%x)\n",
326 FH_MEM_UPPER_BOUND);
327 return -EINVAL;
328 }
329
330 switch (cmd) {
331 case IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32:
332 val32 = iwl_read_direct32(tst->trans, ofs);
333 IWL_DEBUG_INFO(trans, "32 value to read 0x%x\n", val32);
334
335 skb = iwl_test_alloc_reply(tst, 20);
336 if (!skb) {
337 IWL_ERR(trans, "Memory allocation fail\n");
338 return -ENOMEM;
339 }
340 if (nla_put_u32(skb, IWL_TM_ATTR_REG_VALUE32, val32))
341 goto nla_put_failure;
342 status = iwl_test_reply(tst, skb);
343 if (status < 0)
344 IWL_ERR(trans, "Error sending msg : %d\n", status);
345 break;
346
347 case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32:
348 if (!tb[IWL_TM_ATTR_REG_VALUE32]) {
349 IWL_ERR(trans, "Missing value to write\n");
350 return -ENOMSG;
351 } else {
352 val32 = nla_get_u32(tb[IWL_TM_ATTR_REG_VALUE32]);
353 IWL_DEBUG_INFO(trans, "32b write val=0x%x\n", val32);
354 iwl_write_direct32(tst->trans, ofs, val32);
355 }
356 break;
357
358 case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8:
359 if (!tb[IWL_TM_ATTR_REG_VALUE8]) {
360 IWL_ERR(trans, "Missing value to write\n");
361 return -ENOMSG;
362 } else {
363 val8 = nla_get_u8(tb[IWL_TM_ATTR_REG_VALUE8]);
364 IWL_DEBUG_INFO(trans, "8b write val=0x%x\n", val8);
365 iwl_write8(tst->trans, ofs, val8);
366 }
367 break;
368
369 default:
370 IWL_ERR(trans, "Unknown test register cmd ID\n");
371 return -ENOMSG;
372 }
373
374 return status;
375
376nla_put_failure:
377 kfree_skb(skb);
378 return -EMSGSIZE;
379}
380
381/*
382 * Handles the request to start FW tracing. Allocates of the trace buffer
383 * and sends a reply to user space with the address of the allocated buffer.
384 */
385static int iwl_test_trace_begin(struct iwl_test *tst, struct nlattr **tb)
386{
387 struct sk_buff *skb;
388 int status = 0;
389
390 if (tst->trace.enabled)
391 return -EBUSY;
392
393 if (!tb[IWL_TM_ATTR_TRACE_SIZE])
394 tst->trace.size = TRACE_BUFF_SIZE_DEF;
395 else
396 tst->trace.size =
397 nla_get_u32(tb[IWL_TM_ATTR_TRACE_SIZE]);
398
399 if (!tst->trace.size)
400 return -EINVAL;
401
402 if (tst->trace.size < TRACE_BUFF_SIZE_MIN ||
403 tst->trace.size > TRACE_BUFF_SIZE_MAX)
404 return -EINVAL;
405
406 tst->trace.tsize = tst->trace.size + TRACE_BUFF_PADD;
407 tst->trace.cpu_addr = dma_alloc_coherent(tst->trans->dev,
408 tst->trace.tsize,
409 &tst->trace.dma_addr,
410 GFP_KERNEL);
411 if (!tst->trace.cpu_addr)
412 return -ENOMEM;
413
414 tst->trace.enabled = true;
415 tst->trace.trace_addr = (u8 *)PTR_ALIGN(tst->trace.cpu_addr, 0x100);
416
417 memset(tst->trace.trace_addr, 0x03B, tst->trace.size);
418
419 skb = iwl_test_alloc_reply(tst, sizeof(tst->trace.dma_addr) + 20);
420 if (!skb) {
421 IWL_ERR(tst->trans, "Memory allocation fail\n");
422 iwl_test_trace_stop(tst);
423 return -ENOMEM;
424 }
425
426 if (nla_put(skb, IWL_TM_ATTR_TRACE_ADDR,
427 sizeof(tst->trace.dma_addr),
428 (u64 *)&tst->trace.dma_addr))
429 goto nla_put_failure;
430
431 status = iwl_test_reply(tst, skb);
432 if (status < 0)
433 IWL_ERR(tst->trans, "Error sending msg : %d\n", status);
434
435 tst->trace.nchunks = DIV_ROUND_UP(tst->trace.size,
436 DUMP_CHUNK_SIZE);
437
438 return status;
439
440nla_put_failure:
441 kfree_skb(skb);
442 if (nla_get_u32(tb[IWL_TM_ATTR_COMMAND]) ==
443 IWL_TM_CMD_APP2DEV_BEGIN_TRACE)
444 iwl_test_trace_stop(tst);
445 return -EMSGSIZE;
446}
447
448/*
449 * Handles indirect read from the periphery or the SRAM. The read is performed
 450 * to a temporary buffer. The user space application should later issue a dump command.
451 */
452static int iwl_test_indirect_read(struct iwl_test *tst, u32 addr, u32 size)
453{
454 struct iwl_trans *trans = tst->trans;
455 unsigned long flags;
456 int i;
457
458 if (size & 0x3)
459 return -EINVAL;
460
461 tst->mem.size = size;
462 tst->mem.addr = kmalloc(tst->mem.size, GFP_KERNEL);
463 if (tst->mem.addr == NULL)
464 return -ENOMEM;
465
466 /* Hard-coded periphery absolute address */
467 if (IWL_ABS_PRPH_START <= addr &&
468 addr < IWL_ABS_PRPH_START + PRPH_END) {
469 spin_lock_irqsave(&trans->reg_lock, flags);
470 iwl_grab_nic_access(trans);
471 iwl_write32(trans, HBUS_TARG_PRPH_RADDR,
472 addr | (3 << 24));
473 for (i = 0; i < size; i += 4)
474 *(u32 *)(tst->mem.addr + i) =
475 iwl_read32(trans, HBUS_TARG_PRPH_RDAT);
476 iwl_release_nic_access(trans);
477 spin_unlock_irqrestore(&trans->reg_lock, flags);
478 } else { /* target memory (SRAM) */
479 _iwl_read_targ_mem_dwords(trans, addr,
480 tst->mem.addr,
481 tst->mem.size / 4);
482 }
483
484 tst->mem.nchunks =
485 DIV_ROUND_UP(tst->mem.size, DUMP_CHUNK_SIZE);
486 tst->mem.in_read = true;
487 return 0;
488
489}
490
491/*
 492 * Handles indirect write to the periphery or SRAM. The write is performed to a
493 * temporary buffer.
494 */
495static int iwl_test_indirect_write(struct iwl_test *tst, u32 addr,
496 u32 size, unsigned char *buf)
497{
498 struct iwl_trans *trans = tst->trans;
499 u32 val, i;
500 unsigned long flags;
501
502 if (IWL_ABS_PRPH_START <= addr &&
503 addr < IWL_ABS_PRPH_START + PRPH_END) {
504 /* Periphery writes can be 1-3 bytes long, or DWORDs */
505 if (size < 4) {
506 memcpy(&val, buf, size);
507 spin_lock_irqsave(&trans->reg_lock, flags);
508 iwl_grab_nic_access(trans);
509 iwl_write32(trans, HBUS_TARG_PRPH_WADDR,
510 (addr & 0x0000FFFF) |
511 ((size - 1) << 24));
512 iwl_write32(trans, HBUS_TARG_PRPH_WDAT, val);
513 iwl_release_nic_access(trans);
514 /* needed after consecutive writes w/o read */
515 mmiowb();
516 spin_unlock_irqrestore(&trans->reg_lock, flags);
517 } else {
518 if (size % 4)
519 return -EINVAL;
520 for (i = 0; i < size; i += 4)
521 iwl_write_prph(trans, addr+i,
522 *(u32 *)(buf+i));
523 }
524 } else if (iwl_test_valid_hw_addr(tst, addr)) {
525 _iwl_write_targ_mem_dwords(trans, addr, buf, size / 4);
526 } else {
527 return -EINVAL;
528 }
529 return 0;
530}
531
532/*
533 * Handles the user application commands for indirect read/write
534 * to/from the periphery or the SRAM.
535 */
536static int iwl_test_indirect_mem(struct iwl_test *tst, struct nlattr **tb)
537{
538 u32 addr, size, cmd;
539 unsigned char *buf;
540
541 /* Both read and write should be blocked, for atomicity */
542 if (tst->mem.in_read)
543 return -EBUSY;
544
545 cmd = nla_get_u32(tb[IWL_TM_ATTR_COMMAND]);
546 if (!tb[IWL_TM_ATTR_MEM_ADDR]) {
547 IWL_ERR(tst->trans, "Error finding memory offset address\n");
548 return -ENOMSG;
549 }
550 addr = nla_get_u32(tb[IWL_TM_ATTR_MEM_ADDR]);
551 if (!tb[IWL_TM_ATTR_BUFFER_SIZE]) {
552 IWL_ERR(tst->trans, "Error finding size for memory reading\n");
553 return -ENOMSG;
554 }
555 size = nla_get_u32(tb[IWL_TM_ATTR_BUFFER_SIZE]);
556
557 if (cmd == IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_READ) {
558 return iwl_test_indirect_read(tst, addr, size);
559 } else {
560 if (!tb[IWL_TM_ATTR_BUFFER_DUMP])
561 return -EINVAL;
562 buf = (unsigned char *)nla_data(tb[IWL_TM_ATTR_BUFFER_DUMP]);
563 return iwl_test_indirect_write(tst, addr, size, buf);
564 }
565}
566
567/*
568 * Enable notifications to user space
569 */
570static int iwl_test_notifications(struct iwl_test *tst,
571 struct nlattr **tb)
572{
573 tst->notify = nla_get_flag(tb[IWL_TM_ATTR_ENABLE_NOTIFICATION]);
574 return 0;
575}
576
577/*
578 * Handles the request to get the device id
579 */
580static int iwl_test_get_dev_id(struct iwl_test *tst, struct nlattr **tb)
581{
582 u32 devid = tst->trans->hw_id;
583 struct sk_buff *skb;
584 int status;
585
586 IWL_DEBUG_INFO(tst->trans, "hw version: 0x%x\n", devid);
587
588 skb = iwl_test_alloc_reply(tst, 20);
589 if (!skb) {
590 IWL_ERR(tst->trans, "Memory allocation fail\n");
591 return -ENOMEM;
592 }
593
594 if (nla_put_u32(skb, IWL_TM_ATTR_DEVICE_ID, devid))
595 goto nla_put_failure;
596 status = iwl_test_reply(tst, skb);
597 if (status < 0)
598 IWL_ERR(tst->trans, "Error sending msg : %d\n", status);
599
600 return 0;
601
602nla_put_failure:
603 kfree_skb(skb);
604 return -EMSGSIZE;
605}
606
607/*
608 * Handles the request to get the FW version
609 */
610static int iwl_test_get_fw_ver(struct iwl_test *tst, struct nlattr **tb)
611{
612 struct sk_buff *skb;
613 int status;
614 u32 ver = iwl_test_fw_ver(tst);
615
616 IWL_DEBUG_INFO(tst->trans, "uCode version raw: 0x%x\n", ver);
617
618 skb = iwl_test_alloc_reply(tst, 20);
619 if (!skb) {
620 IWL_ERR(tst->trans, "Memory allocation fail\n");
621 return -ENOMEM;
622 }
623
624 if (nla_put_u32(skb, IWL_TM_ATTR_FW_VERSION, ver))
625 goto nla_put_failure;
626
627 status = iwl_test_reply(tst, skb);
628 if (status < 0)
629 IWL_ERR(tst->trans, "Error sending msg : %d\n", status);
630
631 return 0;
632
633nla_put_failure:
634 kfree_skb(skb);
635 return -EMSGSIZE;
636}
637
638/*
639 * Parse the netlink message and validate that the IWL_TM_ATTR_CMD exists
640 */
641int iwl_test_parse(struct iwl_test *tst, struct nlattr **tb,
642 void *data, int len)
643{
644 int result;
645
646 result = nla_parse(tb, IWL_TM_ATTR_MAX - 1, data, len,
647 iwl_testmode_gnl_msg_policy);
648 if (result) {
649 IWL_ERR(tst->trans, "Fail parse gnl msg: %d\n", result);
650 return result;
651 }
652
653 /* IWL_TM_ATTR_COMMAND is absolutely mandatory */
654 if (!tb[IWL_TM_ATTR_COMMAND]) {
655 IWL_ERR(tst->trans, "Missing testmode command type\n");
656 return -ENOMSG;
657 }
658 return 0;
659}
660EXPORT_SYMBOL_GPL(iwl_test_parse);
661
662/*
663 * Handle test commands.
664 * Returns 1 for unknown commands (not handled by the test object); negative
665 * value in case of error.
666 */
667int iwl_test_handle_cmd(struct iwl_test *tst, struct nlattr **tb)
668{
669 int result;
670
671 switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) {
672 case IWL_TM_CMD_APP2DEV_UCODE:
673 IWL_DEBUG_INFO(tst->trans, "test cmd to uCode\n");
674 result = iwl_test_fw_cmd(tst, tb);
675 break;
676
677 case IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32:
678 case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32:
679 case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8:
680 IWL_DEBUG_INFO(tst->trans, "test cmd to register\n");
681 result = iwl_test_reg(tst, tb);
682 break;
683
684 case IWL_TM_CMD_APP2DEV_BEGIN_TRACE:
685 IWL_DEBUG_INFO(tst->trans, "test uCode trace cmd to driver\n");
686 result = iwl_test_trace_begin(tst, tb);
687 break;
688
689 case IWL_TM_CMD_APP2DEV_END_TRACE:
690 iwl_test_trace_stop(tst);
691 result = 0;
692 break;
693
694 case IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_READ:
695 case IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_WRITE:
696 IWL_DEBUG_INFO(tst->trans, "test indirect memory cmd\n");
697 result = iwl_test_indirect_mem(tst, tb);
698 break;
699
700 case IWL_TM_CMD_APP2DEV_NOTIFICATIONS:
701 IWL_DEBUG_INFO(tst->trans, "test notifications cmd\n");
702 result = iwl_test_notifications(tst, tb);
703 break;
704
705 case IWL_TM_CMD_APP2DEV_GET_FW_VERSION:
706 IWL_DEBUG_INFO(tst->trans, "test get FW ver cmd\n");
707 result = iwl_test_get_fw_ver(tst, tb);
708 break;
709
710 case IWL_TM_CMD_APP2DEV_GET_DEVICE_ID:
711 IWL_DEBUG_INFO(tst->trans, "test Get device ID cmd\n");
712 result = iwl_test_get_dev_id(tst, tb);
713 break;
714
715 default:
716 IWL_DEBUG_INFO(tst->trans, "Unknown test command\n");
717 result = 1;
718 break;
719 }
720 return result;
721}
722EXPORT_SYMBOL_GPL(iwl_test_handle_cmd);
723
724static int iwl_test_trace_dump(struct iwl_test *tst, struct sk_buff *skb,
725 struct netlink_callback *cb)
726{
727 int idx, length;
728
729 if (!tst->trace.enabled || !tst->trace.trace_addr)
730 return -EFAULT;
731
732 idx = cb->args[4];
733 if (idx >= tst->trace.nchunks)
734 return -ENOENT;
735
736 length = DUMP_CHUNK_SIZE;
737 if (((idx + 1) == tst->trace.nchunks) &&
738 (tst->trace.size % DUMP_CHUNK_SIZE))
739 length = tst->trace.size %
740 DUMP_CHUNK_SIZE;
741
742 if (nla_put(skb, IWL_TM_ATTR_TRACE_DUMP, length,
743 tst->trace.trace_addr + (DUMP_CHUNK_SIZE * idx)))
744 goto nla_put_failure;
745
746 cb->args[4] = ++idx;
747 return 0;
748
749 nla_put_failure:
750 return -ENOBUFS;
751}
752
753static int iwl_test_buffer_dump(struct iwl_test *tst, struct sk_buff *skb,
754 struct netlink_callback *cb)
755{
756 int idx, length;
757
758 if (!tst->mem.in_read)
759 return -EFAULT;
760
761 idx = cb->args[4];
762 if (idx >= tst->mem.nchunks) {
763 iwl_test_mem_stop(tst);
764 return -ENOENT;
765 }
766
767 length = DUMP_CHUNK_SIZE;
768 if (((idx + 1) == tst->mem.nchunks) &&
769 (tst->mem.size % DUMP_CHUNK_SIZE))
770 length = tst->mem.size % DUMP_CHUNK_SIZE;
771
772 if (nla_put(skb, IWL_TM_ATTR_BUFFER_DUMP, length,
773 tst->mem.addr + (DUMP_CHUNK_SIZE * idx)))
774 goto nla_put_failure;
775
776 cb->args[4] = ++idx;
777 return 0;
778
779 nla_put_failure:
780 return -ENOBUFS;
781}
782
783/*
784 * Handle dump commands.
785 * Returns 1 for unknown commands (not handled by the test object); negative
786 * value in case of error.
787 */
788int iwl_test_dump(struct iwl_test *tst, u32 cmd, struct sk_buff *skb,
789 struct netlink_callback *cb)
790{
791 int result;
792
793 switch (cmd) {
794 case IWL_TM_CMD_APP2DEV_READ_TRACE:
795 IWL_DEBUG_INFO(tst->trans, "uCode trace cmd\n");
796 result = iwl_test_trace_dump(tst, skb, cb);
797 break;
798
799 case IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_DUMP:
800 IWL_DEBUG_INFO(tst->trans, "testmode sram dump cmd\n");
801 result = iwl_test_buffer_dump(tst, skb, cb);
802 break;
803
804 default:
805 result = 1;
806 break;
807 }
808 return result;
809}
810EXPORT_SYMBOL_GPL(iwl_test_dump);
811
812/*
813 * Multicast a spontaneous messages from the device to the user space.
814 */
815static void iwl_test_send_rx(struct iwl_test *tst,
816 struct iwl_rx_cmd_buffer *rxb)
817{
818 struct sk_buff *skb;
819 struct iwl_rx_packet *data;
820 int length;
821
822 data = rxb_addr(rxb);
823 length = le32_to_cpu(data->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
824
825 /* the length doesn't include len_n_flags field, so add it manually */
826 length += sizeof(__le32);
827
828 skb = iwl_test_alloc_event(tst, length + 20);
829 if (skb == NULL) {
830 IWL_ERR(tst->trans, "Out of memory for message to user\n");
831 return;
832 }
833
834 if (nla_put_u32(skb, IWL_TM_ATTR_COMMAND,
835 IWL_TM_CMD_DEV2APP_UCODE_RX_PKT) ||
836 nla_put(skb, IWL_TM_ATTR_UCODE_RX_PKT, length, data))
837 goto nla_put_failure;
838
839 iwl_test_event(tst, skb);
840 return;
841
842nla_put_failure:
843 kfree_skb(skb);
844 IWL_ERR(tst->trans, "Ouch, overran buffer, check allocation!\n");
845}
846
847/*
 848 * Called whenever an Rx frame is received from the device. If notifications to
849 * the user space are requested, sends the frames to the user.
850 */
851void iwl_test_rx(struct iwl_test *tst, struct iwl_rx_cmd_buffer *rxb)
852{
853 if (tst->notify)
854 iwl_test_send_rx(tst, rxb);
855}
856EXPORT_SYMBOL_GPL(iwl_test_rx);
diff --git a/drivers/net/wireless/iwlwifi/iwl-test.h b/drivers/net/wireless/iwlwifi/iwl-test.h
new file mode 100644
index 00000000000..e13ffa8acc0
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-test.h
@@ -0,0 +1,161 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2010 - 2012 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2010 - 2012 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63
64#ifndef __IWL_TEST_H__
65#define __IWL_TEST_H__
66
67#include <linux/types.h>
68#include "iwl-trans.h"
69
70struct iwl_test_trace {
71 u32 size;
72 u32 tsize;
73 u32 nchunks;
74 u8 *cpu_addr;
75 u8 *trace_addr;
76 dma_addr_t dma_addr;
77 bool enabled;
78};
79
80struct iwl_test_mem {
81 u32 size;
82 u32 nchunks;
83 u8 *addr;
84 bool in_read;
85};
86
87/*
88 * struct iwl_test_ops: callback to the op mode
89 *
90 * The structure defines the callbacks that the op_mode should handle,
 91 * in order to handle logic that is out of the scope of iwl_test. The
92 * op_mode must set all the callbacks.
 93 *
94 * @send_cmd: handler that is used by the test object to request the
95 * op_mode to send a command to the fw.
96 *
97 * @valid_hw_addr: handler that is used by the test object to request the
98 * op_mode to check if the given address is a valid address.
99 *
100 * @get_fw_ver: handler used to get the FW version.
101 *
102 * @alloc_reply: handler used by the test object to request the op_mode
103 * to allocate an skb for sending a reply to the user, and initialize
104 * the skb. It is assumed that the test object only fills the required
105 * attributes.
106 *
107 * @reply: handler used by the test object to request the op_mode to reply
 108 * to a request. The skb is an skb previously allocated by the
109 * alloc_reply callback.
 110 *
111 * @alloc_event: handler used by the test object to request the op_mode
112 * to allocate an skb for sending an event, and initialize
113 * the skb. It is assumed that the test object only fills the required
114 * attributes.
115 *
 116 * @event: handler used by the test object to request the op_mode to send
 117 * an event. The skb is an skb previously allocated by the
118 * alloc_event callback.
119 */
120struct iwl_test_ops {
121 int (*send_cmd)(struct iwl_op_mode *op_modes,
122 struct iwl_host_cmd *cmd);
123 bool (*valid_hw_addr)(u32 addr);
124 u32 (*get_fw_ver)(struct iwl_op_mode *op_mode);
125
126 struct sk_buff *(*alloc_reply)(struct iwl_op_mode *op_mode, int len);
127 int (*reply)(struct iwl_op_mode *op_mode, struct sk_buff *skb);
128 struct sk_buff* (*alloc_event)(struct iwl_op_mode *op_mode, int len);
129 void (*event)(struct iwl_op_mode *op_mode, struct sk_buff *skb);
130};
131
132struct iwl_test {
133 struct iwl_trans *trans;
134 struct iwl_test_ops *ops;
135 struct iwl_test_trace trace;
136 struct iwl_test_mem mem;
137 bool notify;
138};
139
140void iwl_test_init(struct iwl_test *tst, struct iwl_trans *trans,
141 struct iwl_test_ops *ops);
142
143void iwl_test_free(struct iwl_test *tst);
144
145int iwl_test_parse(struct iwl_test *tst, struct nlattr **tb,
146 void *data, int len);
147
148int iwl_test_handle_cmd(struct iwl_test *tst, struct nlattr **tb);
149
150int iwl_test_dump(struct iwl_test *tst, u32 cmd, struct sk_buff *skb,
151 struct netlink_callback *cb);
152
153void iwl_test_rx(struct iwl_test *tst, struct iwl_rx_cmd_buffer *rxb);
154
155static inline void iwl_test_enable_notifications(struct iwl_test *tst,
156 bool enable)
157{
158 tst->notify = enable;
159}
160
161#endif
diff --git a/drivers/net/wireless/iwlwifi/iwl-testmode.c b/drivers/net/wireless/iwlwifi/iwl-testmode.c
deleted file mode 100644
index 060aac3e22f..00000000000
--- a/drivers/net/wireless/iwlwifi/iwl-testmode.c
+++ /dev/null
@@ -1,1114 +0,0 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2010 - 2012 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2010 - 2012 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63#include <linux/init.h>
64#include <linux/kernel.h>
65#include <linux/module.h>
66#include <linux/dma-mapping.h>
67#include <net/net_namespace.h>
68#include <linux/netdevice.h>
69#include <net/cfg80211.h>
70#include <net/mac80211.h>
71#include <net/netlink.h>
72
73#include "iwl-dev.h"
74#include "iwl-debug.h"
75#include "iwl-io.h"
76#include "iwl-agn.h"
77#include "iwl-testmode.h"
78#include "iwl-trans.h"
79#include "iwl-fh.h"
80#include "iwl-prph.h"
81
82
83/* Periphery registers absolute lower bound. This is used in order to
84 * differentiate registery access through HBUS_TARG_PRPH_* and
85 * HBUS_TARG_MEM_* accesses.
86 */
87#define IWL_TM_ABS_PRPH_START (0xA00000)
88
89/* The TLVs used in the gnl message policy between the kernel module and
90 * user space application. iwl_testmode_gnl_msg_policy is to be carried
91 * through the NL80211_CMD_TESTMODE channel regulated by nl80211.
92 * See iwl-testmode.h
93 */
94static
95struct nla_policy iwl_testmode_gnl_msg_policy[IWL_TM_ATTR_MAX] = {
96 [IWL_TM_ATTR_COMMAND] = { .type = NLA_U32, },
97
98 [IWL_TM_ATTR_UCODE_CMD_ID] = { .type = NLA_U8, },
99 [IWL_TM_ATTR_UCODE_CMD_DATA] = { .type = NLA_UNSPEC, },
100
101 [IWL_TM_ATTR_REG_OFFSET] = { .type = NLA_U32, },
102 [IWL_TM_ATTR_REG_VALUE8] = { .type = NLA_U8, },
103 [IWL_TM_ATTR_REG_VALUE32] = { .type = NLA_U32, },
104
105 [IWL_TM_ATTR_SYNC_RSP] = { .type = NLA_UNSPEC, },
106 [IWL_TM_ATTR_UCODE_RX_PKT] = { .type = NLA_UNSPEC, },
107
108 [IWL_TM_ATTR_EEPROM] = { .type = NLA_UNSPEC, },
109
110 [IWL_TM_ATTR_TRACE_ADDR] = { .type = NLA_UNSPEC, },
111 [IWL_TM_ATTR_TRACE_DUMP] = { .type = NLA_UNSPEC, },
112 [IWL_TM_ATTR_TRACE_SIZE] = { .type = NLA_U32, },
113
114 [IWL_TM_ATTR_FIXRATE] = { .type = NLA_U32, },
115
116 [IWL_TM_ATTR_UCODE_OWNER] = { .type = NLA_U8, },
117
118 [IWL_TM_ATTR_MEM_ADDR] = { .type = NLA_U32, },
119 [IWL_TM_ATTR_BUFFER_SIZE] = { .type = NLA_U32, },
120 [IWL_TM_ATTR_BUFFER_DUMP] = { .type = NLA_UNSPEC, },
121
122 [IWL_TM_ATTR_FW_VERSION] = { .type = NLA_U32, },
123 [IWL_TM_ATTR_DEVICE_ID] = { .type = NLA_U32, },
124 [IWL_TM_ATTR_FW_TYPE] = { .type = NLA_U32, },
125 [IWL_TM_ATTR_FW_INST_SIZE] = { .type = NLA_U32, },
126 [IWL_TM_ATTR_FW_DATA_SIZE] = { .type = NLA_U32, },
127
128 [IWL_TM_ATTR_ENABLE_NOTIFICATION] = {.type = NLA_FLAG, },
129};
130
131/*
132 * See the struct iwl_rx_packet in iwl-commands.h for the format of the
133 * received events from the device
134 */
135static inline int get_event_length(struct iwl_rx_cmd_buffer *rxb)
136{
137 struct iwl_rx_packet *pkt = rxb_addr(rxb);
138 if (pkt)
139 return le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
140 else
141 return 0;
142}
143
144
145/*
146 * This function multicasts the spontaneous messages from the device to the
147 * user space. It is invoked whenever there is a received messages
148 * from the device. This function is called within the ISR of the rx handlers
149 * in iwlagn driver.
150 *
151 * The parsing of the message content is left to the user space application,
152 * The message content is treated as unattacked raw data and is encapsulated
153 * with IWL_TM_ATTR_UCODE_RX_PKT multicasting to the user space.
154 *
155 * @priv: the instance of iwlwifi device
156 * @rxb: pointer to rx data content received by the ISR
157 *
158 * See the message policies and TLVs in iwl_testmode_gnl_msg_policy[].
159 * For the messages multicasting to the user application, the mandatory
160 * TLV fields are :
161 * IWL_TM_ATTR_COMMAND must be IWL_TM_CMD_DEV2APP_UCODE_RX_PKT
162 * IWL_TM_ATTR_UCODE_RX_PKT for carrying the message content
163 */
164
165static void iwl_testmode_ucode_rx_pkt(struct iwl_priv *priv,
166 struct iwl_rx_cmd_buffer *rxb)
167{
168 struct ieee80211_hw *hw = priv->hw;
169 struct sk_buff *skb;
170 void *data;
171 int length;
172
173 data = (void *)rxb_addr(rxb);
174 length = get_event_length(rxb);
175
176 if (!data || length == 0)
177 return;
178
179 skb = cfg80211_testmode_alloc_event_skb(hw->wiphy, 20 + length,
180 GFP_ATOMIC);
181 if (skb == NULL) {
182 IWL_ERR(priv,
183 "Run out of memory for messages to user space ?\n");
184 return;
185 }
186 if (nla_put_u32(skb, IWL_TM_ATTR_COMMAND, IWL_TM_CMD_DEV2APP_UCODE_RX_PKT) ||
187 /* the length doesn't include len_n_flags field, so add it manually */
188 nla_put(skb, IWL_TM_ATTR_UCODE_RX_PKT, length + sizeof(__le32), data))
189 goto nla_put_failure;
190 cfg80211_testmode_event(skb, GFP_ATOMIC);
191 return;
192
193nla_put_failure:
194 kfree_skb(skb);
195 IWL_ERR(priv, "Ouch, overran buffer, check allocation!\n");
196}
197
198void iwl_testmode_init(struct iwl_priv *priv)
199{
200 priv->pre_rx_handler = NULL;
201 priv->testmode_trace.trace_enabled = false;
202 priv->testmode_mem.read_in_progress = false;
203}
204
205static void iwl_mem_cleanup(struct iwl_priv *priv)
206{
207 if (priv->testmode_mem.read_in_progress) {
208 kfree(priv->testmode_mem.buff_addr);
209 priv->testmode_mem.buff_addr = NULL;
210 priv->testmode_mem.buff_size = 0;
211 priv->testmode_mem.num_chunks = 0;
212 priv->testmode_mem.read_in_progress = false;
213 }
214}
215
216static void iwl_trace_cleanup(struct iwl_priv *priv)
217{
218 if (priv->testmode_trace.trace_enabled) {
219 if (priv->testmode_trace.cpu_addr &&
220 priv->testmode_trace.dma_addr)
221 dma_free_coherent(priv->trans->dev,
222 priv->testmode_trace.total_size,
223 priv->testmode_trace.cpu_addr,
224 priv->testmode_trace.dma_addr);
225 priv->testmode_trace.trace_enabled = false;
226 priv->testmode_trace.cpu_addr = NULL;
227 priv->testmode_trace.trace_addr = NULL;
228 priv->testmode_trace.dma_addr = 0;
229 priv->testmode_trace.buff_size = 0;
230 priv->testmode_trace.total_size = 0;
231 }
232}
233
234
235void iwl_testmode_cleanup(struct iwl_priv *priv)
236{
237 iwl_trace_cleanup(priv);
238 iwl_mem_cleanup(priv);
239}
240
241
242/*
243 * This function handles the user application commands to the ucode.
244 *
245 * It retrieves the mandatory fields IWL_TM_ATTR_UCODE_CMD_ID and
246 * IWL_TM_ATTR_UCODE_CMD_DATA and calls to the handler to send the
247 * host command to the ucode.
248 *
249 * If any mandatory field is missing, -ENOMSG is replied to the user space
250 * application; otherwise, waits for the host command to be sent and checks
251 * the return code. In case or error, it is returned, otherwise a reply is
252 * allocated and the reply RX packet
253 * is returned.
254 *
255 * @hw: ieee80211_hw object that represents the device
256 * @tb: gnl message fields from the user space
257 */
258static int iwl_testmode_ucode(struct ieee80211_hw *hw, struct nlattr **tb)
259{
260 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
261 struct iwl_host_cmd cmd;
262 struct iwl_rx_packet *pkt;
263 struct sk_buff *skb;
264 void *reply_buf;
265 u32 reply_len;
266 int ret;
267 bool cmd_want_skb;
268
269 memset(&cmd, 0, sizeof(struct iwl_host_cmd));
270
271 if (!tb[IWL_TM_ATTR_UCODE_CMD_ID] ||
272 !tb[IWL_TM_ATTR_UCODE_CMD_DATA]) {
273 IWL_ERR(priv, "Missing ucode command mandatory fields\n");
274 return -ENOMSG;
275 }
276
277 cmd.flags = CMD_ON_DEMAND | CMD_SYNC;
278 cmd_want_skb = nla_get_flag(tb[IWL_TM_ATTR_UCODE_CMD_SKB]);
279 if (cmd_want_skb)
280 cmd.flags |= CMD_WANT_SKB;
281
282 cmd.id = nla_get_u8(tb[IWL_TM_ATTR_UCODE_CMD_ID]);
283 cmd.data[0] = nla_data(tb[IWL_TM_ATTR_UCODE_CMD_DATA]);
284 cmd.len[0] = nla_len(tb[IWL_TM_ATTR_UCODE_CMD_DATA]);
285 cmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY;
286 IWL_DEBUG_INFO(priv, "testmode ucode command ID 0x%x, flags 0x%x,"
287 " len %d\n", cmd.id, cmd.flags, cmd.len[0]);
288
289 ret = iwl_dvm_send_cmd(priv, &cmd);
290 if (ret) {
291 IWL_ERR(priv, "Failed to send hcmd\n");
292 return ret;
293 }
294 if (!cmd_want_skb)
295 return ret;
296
297 /* Handling return of SKB to the user */
298 pkt = cmd.resp_pkt;
299 if (!pkt) {
300 IWL_ERR(priv, "HCMD received a null response packet\n");
301 return ret;
302 }
303
304 reply_len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
305 skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, reply_len + 20);
306 reply_buf = kmalloc(reply_len, GFP_KERNEL);
307 if (!skb || !reply_buf) {
308 kfree_skb(skb);
309 kfree(reply_buf);
310 return -ENOMEM;
311 }
312
313 /* The reply is in a page, that we cannot send to user space. */
314 memcpy(reply_buf, &(pkt->hdr), reply_len);
315 iwl_free_resp(&cmd);
316
317 if (nla_put_u32(skb, IWL_TM_ATTR_COMMAND, IWL_TM_CMD_DEV2APP_UCODE_RX_PKT) ||
318 nla_put(skb, IWL_TM_ATTR_UCODE_RX_PKT, reply_len, reply_buf))
319 goto nla_put_failure;
320 return cfg80211_testmode_reply(skb);
321
322nla_put_failure:
323 IWL_DEBUG_INFO(priv, "Failed creating NL attributes\n");
324 return -ENOMSG;
325}
326
327
328/*
329 * This function handles the user application commands for register access.
330 *
331 * It retrieves command ID carried with IWL_TM_ATTR_COMMAND and calls to the
332 * handlers respectively.
333 *
334 * If it's an unknown commdn ID, -ENOSYS is returned; or -ENOMSG if the
335 * mandatory fields(IWL_TM_ATTR_REG_OFFSET,IWL_TM_ATTR_REG_VALUE32,
336 * IWL_TM_ATTR_REG_VALUE8) are missing; Otherwise 0 is replied indicating
337 * the success of the command execution.
338 *
339 * If IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_REG_READ32, the register read
340 * value is returned with IWL_TM_ATTR_REG_VALUE32.
341 *
342 * @hw: ieee80211_hw object that represents the device
343 * @tb: gnl message fields from the user space
344 */
345static int iwl_testmode_reg(struct ieee80211_hw *hw, struct nlattr **tb)
346{
347 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
348 u32 ofs, val32, cmd;
349 u8 val8;
350 struct sk_buff *skb;
351 int status = 0;
352
353 if (!tb[IWL_TM_ATTR_REG_OFFSET]) {
354 IWL_ERR(priv, "Missing register offset\n");
355 return -ENOMSG;
356 }
357 ofs = nla_get_u32(tb[IWL_TM_ATTR_REG_OFFSET]);
358 IWL_INFO(priv, "testmode register access command offset 0x%x\n", ofs);
359
360 /* Allow access only to FH/CSR/HBUS in direct mode.
361 Since we don't have the upper bounds for the CSR and HBUS segments,
362 we will use only the upper bound of FH for sanity check. */
363 cmd = nla_get_u32(tb[IWL_TM_ATTR_COMMAND]);
364 if ((cmd == IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32 ||
365 cmd == IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32 ||
366 cmd == IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8) &&
367 (ofs >= FH_MEM_UPPER_BOUND)) {
368 IWL_ERR(priv, "offset out of segment (0x0 - 0x%x)\n",
369 FH_MEM_UPPER_BOUND);
370 return -EINVAL;
371 }
372
373 switch (cmd) {
374 case IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32:
375 val32 = iwl_read_direct32(priv->trans, ofs);
376 IWL_INFO(priv, "32bit value to read 0x%x\n", val32);
377
378 skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, 20);
379 if (!skb) {
380 IWL_ERR(priv, "Memory allocation fail\n");
381 return -ENOMEM;
382 }
383 if (nla_put_u32(skb, IWL_TM_ATTR_REG_VALUE32, val32))
384 goto nla_put_failure;
385 status = cfg80211_testmode_reply(skb);
386 if (status < 0)
387 IWL_ERR(priv, "Error sending msg : %d\n", status);
388 break;
389 case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32:
390 if (!tb[IWL_TM_ATTR_REG_VALUE32]) {
391 IWL_ERR(priv, "Missing value to write\n");
392 return -ENOMSG;
393 } else {
394 val32 = nla_get_u32(tb[IWL_TM_ATTR_REG_VALUE32]);
395 IWL_INFO(priv, "32bit value to write 0x%x\n", val32);
396 iwl_write_direct32(priv->trans, ofs, val32);
397 }
398 break;
399 case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8:
400 if (!tb[IWL_TM_ATTR_REG_VALUE8]) {
401 IWL_ERR(priv, "Missing value to write\n");
402 return -ENOMSG;
403 } else {
404 val8 = nla_get_u8(tb[IWL_TM_ATTR_REG_VALUE8]);
405 IWL_INFO(priv, "8bit value to write 0x%x\n", val8);
406 iwl_write8(priv->trans, ofs, val8);
407 }
408 break;
409 default:
410 IWL_ERR(priv, "Unknown testmode register command ID\n");
411 return -ENOSYS;
412 }
413
414 return status;
415
416nla_put_failure:
417 kfree_skb(skb);
418 return -EMSGSIZE;
419}
420
421
422static int iwl_testmode_cfg_init_calib(struct iwl_priv *priv)
423{
424 struct iwl_notification_wait calib_wait;
425 static const u8 calib_complete[] = {
426 CALIBRATION_COMPLETE_NOTIFICATION
427 };
428 int ret;
429
430 iwl_init_notification_wait(&priv->notif_wait, &calib_wait,
431 calib_complete, ARRAY_SIZE(calib_complete),
432 NULL, NULL);
433 ret = iwl_init_alive_start(priv);
434 if (ret) {
435 IWL_ERR(priv, "Fail init calibration: %d\n", ret);
436 goto cfg_init_calib_error;
437 }
438
439 ret = iwl_wait_notification(&priv->notif_wait, &calib_wait, 2 * HZ);
440 if (ret)
441 IWL_ERR(priv, "Error detecting"
442 " CALIBRATION_COMPLETE_NOTIFICATION: %d\n", ret);
443 return ret;
444
445cfg_init_calib_error:
446 iwl_remove_notification(&priv->notif_wait, &calib_wait);
447 return ret;
448}
449
450/*
451 * This function handles the user application commands for driver.
452 *
453 * It retrieves command ID carried with IWL_TM_ATTR_COMMAND and calls to the
454 * handlers respectively.
455 *
456 * If it's an unknown commdn ID, -ENOSYS is replied; otherwise, the returned
457 * value of the actual command execution is replied to the user application.
458 *
459 * If there's any message responding to the user space, IWL_TM_ATTR_SYNC_RSP
460 * is used for carry the message while IWL_TM_ATTR_COMMAND must set to
461 * IWL_TM_CMD_DEV2APP_SYNC_RSP.
462 *
463 * @hw: ieee80211_hw object that represents the device
464 * @tb: gnl message fields from the user space
465 */
466static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb)
467{
468 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
469 struct iwl_trans *trans = priv->trans;
470 struct sk_buff *skb;
471 unsigned char *rsp_data_ptr = NULL;
472 int status = 0, rsp_data_len = 0;
473 u32 devid, inst_size = 0, data_size = 0;
474 const struct fw_img *img;
475
476 switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) {
477 case IWL_TM_CMD_APP2DEV_GET_DEVICENAME:
478 rsp_data_ptr = (unsigned char *)priv->cfg->name;
479 rsp_data_len = strlen(priv->cfg->name);
480 skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy,
481 rsp_data_len + 20);
482 if (!skb) {
483 IWL_ERR(priv, "Memory allocation fail\n");
484 return -ENOMEM;
485 }
486 if (nla_put_u32(skb, IWL_TM_ATTR_COMMAND,
487 IWL_TM_CMD_DEV2APP_SYNC_RSP) ||
488 nla_put(skb, IWL_TM_ATTR_SYNC_RSP,
489 rsp_data_len, rsp_data_ptr))
490 goto nla_put_failure;
491 status = cfg80211_testmode_reply(skb);
492 if (status < 0)
493 IWL_ERR(priv, "Error sending msg : %d\n", status);
494 break;
495
496 case IWL_TM_CMD_APP2DEV_LOAD_INIT_FW:
497 status = iwl_load_ucode_wait_alive(priv, IWL_UCODE_INIT);
498 if (status)
499 IWL_ERR(priv, "Error loading init ucode: %d\n", status);
500 break;
501
502 case IWL_TM_CMD_APP2DEV_CFG_INIT_CALIB:
503 iwl_testmode_cfg_init_calib(priv);
504 priv->ucode_loaded = false;
505 iwl_trans_stop_device(trans);
506 break;
507
508 case IWL_TM_CMD_APP2DEV_LOAD_RUNTIME_FW:
509 status = iwl_load_ucode_wait_alive(priv, IWL_UCODE_REGULAR);
510 if (status) {
511 IWL_ERR(priv,
512 "Error loading runtime ucode: %d\n", status);
513 break;
514 }
515 status = iwl_alive_start(priv);
516 if (status)
517 IWL_ERR(priv,
518 "Error starting the device: %d\n", status);
519 break;
520
521 case IWL_TM_CMD_APP2DEV_LOAD_WOWLAN_FW:
522 iwl_scan_cancel_timeout(priv, 200);
523 priv->ucode_loaded = false;
524 iwl_trans_stop_device(trans);
525 status = iwl_load_ucode_wait_alive(priv, IWL_UCODE_WOWLAN);
526 if (status) {
527 IWL_ERR(priv,
528 "Error loading WOWLAN ucode: %d\n", status);
529 break;
530 }
531 status = iwl_alive_start(priv);
532 if (status)
533 IWL_ERR(priv,
534 "Error starting the device: %d\n", status);
535 break;
536
537 case IWL_TM_CMD_APP2DEV_GET_EEPROM:
538 if (priv->eeprom) {
539 skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy,
540 priv->cfg->base_params->eeprom_size + 20);
541 if (!skb) {
542 IWL_ERR(priv, "Memory allocation fail\n");
543 return -ENOMEM;
544 }
545 if (nla_put_u32(skb, IWL_TM_ATTR_COMMAND,
546 IWL_TM_CMD_DEV2APP_EEPROM_RSP) ||
547 nla_put(skb, IWL_TM_ATTR_EEPROM,
548 priv->cfg->base_params->eeprom_size,
549 priv->eeprom))
550 goto nla_put_failure;
551 status = cfg80211_testmode_reply(skb);
552 if (status < 0)
553 IWL_ERR(priv, "Error sending msg : %d\n",
554 status);
555 } else
556 return -EFAULT;
557 break;
558
559 case IWL_TM_CMD_APP2DEV_FIXRATE_REQ:
560 if (!tb[IWL_TM_ATTR_FIXRATE]) {
561 IWL_ERR(priv, "Missing fixrate setting\n");
562 return -ENOMSG;
563 }
564 priv->tm_fixed_rate = nla_get_u32(tb[IWL_TM_ATTR_FIXRATE]);
565 break;
566
567 case IWL_TM_CMD_APP2DEV_GET_FW_VERSION:
568 IWL_INFO(priv, "uCode version raw: 0x%x\n",
569 priv->fw->ucode_ver);
570
571 skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, 20);
572 if (!skb) {
573 IWL_ERR(priv, "Memory allocation fail\n");
574 return -ENOMEM;
575 }
576 if (nla_put_u32(skb, IWL_TM_ATTR_FW_VERSION,
577 priv->fw->ucode_ver))
578 goto nla_put_failure;
579 status = cfg80211_testmode_reply(skb);
580 if (status < 0)
581 IWL_ERR(priv, "Error sending msg : %d\n", status);
582 break;
583
584 case IWL_TM_CMD_APP2DEV_GET_DEVICE_ID:
585 devid = priv->trans->hw_id;
586 IWL_INFO(priv, "hw version: 0x%x\n", devid);
587
588 skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, 20);
589 if (!skb) {
590 IWL_ERR(priv, "Memory allocation fail\n");
591 return -ENOMEM;
592 }
593 if (nla_put_u32(skb, IWL_TM_ATTR_DEVICE_ID, devid))
594 goto nla_put_failure;
595 status = cfg80211_testmode_reply(skb);
596 if (status < 0)
597 IWL_ERR(priv, "Error sending msg : %d\n", status);
598 break;
599
600 case IWL_TM_CMD_APP2DEV_GET_FW_INFO:
601 skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, 20 + 8);
602 if (!skb) {
603 IWL_ERR(priv, "Memory allocation fail\n");
604 return -ENOMEM;
605 }
606 if (!priv->ucode_loaded) {
607 IWL_ERR(priv, "No uCode has not been loaded\n");
608 return -EINVAL;
609 } else {
610 img = &priv->fw->img[priv->cur_ucode];
611 inst_size = img->sec[IWL_UCODE_SECTION_INST].len;
612 data_size = img->sec[IWL_UCODE_SECTION_DATA].len;
613 }
614 if (nla_put_u32(skb, IWL_TM_ATTR_FW_TYPE, priv->cur_ucode) ||
615 nla_put_u32(skb, IWL_TM_ATTR_FW_INST_SIZE, inst_size) ||
616 nla_put_u32(skb, IWL_TM_ATTR_FW_DATA_SIZE, data_size))
617 goto nla_put_failure;
618 status = cfg80211_testmode_reply(skb);
619 if (status < 0)
620 IWL_ERR(priv, "Error sending msg : %d\n", status);
621 break;
622
623 default:
624 IWL_ERR(priv, "Unknown testmode driver command ID\n");
625 return -ENOSYS;
626 }
627 return status;
628
629nla_put_failure:
630 kfree_skb(skb);
631 return -EMSGSIZE;
632}
633
634
635/*
636 * This function handles the user application commands for uCode trace
637 *
638 * It retrieves command ID carried with IWL_TM_ATTR_COMMAND and calls to the
639 * handlers respectively.
640 *
641 * If it's an unknown commdn ID, -ENOSYS is replied; otherwise, the returned
642 * value of the actual command execution is replied to the user application.
643 *
644 * @hw: ieee80211_hw object that represents the device
645 * @tb: gnl message fields from the user space
646 */
647static int iwl_testmode_trace(struct ieee80211_hw *hw, struct nlattr **tb)
648{
649 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
650 struct sk_buff *skb;
651 int status = 0;
652 struct device *dev = priv->trans->dev;
653
654 switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) {
655 case IWL_TM_CMD_APP2DEV_BEGIN_TRACE:
656 if (priv->testmode_trace.trace_enabled)
657 return -EBUSY;
658
659 if (!tb[IWL_TM_ATTR_TRACE_SIZE])
660 priv->testmode_trace.buff_size = TRACE_BUFF_SIZE_DEF;
661 else
662 priv->testmode_trace.buff_size =
663 nla_get_u32(tb[IWL_TM_ATTR_TRACE_SIZE]);
664 if (!priv->testmode_trace.buff_size)
665 return -EINVAL;
666 if (priv->testmode_trace.buff_size < TRACE_BUFF_SIZE_MIN ||
667 priv->testmode_trace.buff_size > TRACE_BUFF_SIZE_MAX)
668 return -EINVAL;
669
670 priv->testmode_trace.total_size =
671 priv->testmode_trace.buff_size + TRACE_BUFF_PADD;
672 priv->testmode_trace.cpu_addr =
673 dma_alloc_coherent(dev,
674 priv->testmode_trace.total_size,
675 &priv->testmode_trace.dma_addr,
676 GFP_KERNEL);
677 if (!priv->testmode_trace.cpu_addr)
678 return -ENOMEM;
679 priv->testmode_trace.trace_enabled = true;
680 priv->testmode_trace.trace_addr = (u8 *)PTR_ALIGN(
681 priv->testmode_trace.cpu_addr, 0x100);
682 memset(priv->testmode_trace.trace_addr, 0x03B,
683 priv->testmode_trace.buff_size);
684 skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy,
685 sizeof(priv->testmode_trace.dma_addr) + 20);
686 if (!skb) {
687 IWL_ERR(priv, "Memory allocation fail\n");
688 iwl_trace_cleanup(priv);
689 return -ENOMEM;
690 }
691 if (nla_put(skb, IWL_TM_ATTR_TRACE_ADDR,
692 sizeof(priv->testmode_trace.dma_addr),
693 (u64 *)&priv->testmode_trace.dma_addr))
694 goto nla_put_failure;
695 status = cfg80211_testmode_reply(skb);
696 if (status < 0) {
697 IWL_ERR(priv, "Error sending msg : %d\n", status);
698 }
699 priv->testmode_trace.num_chunks =
700 DIV_ROUND_UP(priv->testmode_trace.buff_size,
701 DUMP_CHUNK_SIZE);
702 break;
703
704 case IWL_TM_CMD_APP2DEV_END_TRACE:
705 iwl_trace_cleanup(priv);
706 break;
707 default:
708 IWL_ERR(priv, "Unknown testmode mem command ID\n");
709 return -ENOSYS;
710 }
711 return status;
712
713nla_put_failure:
714 kfree_skb(skb);
715 if (nla_get_u32(tb[IWL_TM_ATTR_COMMAND]) ==
716 IWL_TM_CMD_APP2DEV_BEGIN_TRACE)
717 iwl_trace_cleanup(priv);
718 return -EMSGSIZE;
719}
720
721static int iwl_testmode_trace_dump(struct ieee80211_hw *hw,
722 struct sk_buff *skb,
723 struct netlink_callback *cb)
724{
725 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
726 int idx, length;
727
728 if (priv->testmode_trace.trace_enabled &&
729 priv->testmode_trace.trace_addr) {
730 idx = cb->args[4];
731 if (idx >= priv->testmode_trace.num_chunks)
732 return -ENOENT;
733 length = DUMP_CHUNK_SIZE;
734 if (((idx + 1) == priv->testmode_trace.num_chunks) &&
735 (priv->testmode_trace.buff_size % DUMP_CHUNK_SIZE))
736 length = priv->testmode_trace.buff_size %
737 DUMP_CHUNK_SIZE;
738
739 if (nla_put(skb, IWL_TM_ATTR_TRACE_DUMP, length,
740 priv->testmode_trace.trace_addr +
741 (DUMP_CHUNK_SIZE * idx)))
742 goto nla_put_failure;
743 idx++;
744 cb->args[4] = idx;
745 return 0;
746 } else
747 return -EFAULT;
748
749 nla_put_failure:
750 return -ENOBUFS;
751}
752
753/*
754 * This function handles the user application switch ucode ownership.
755 *
756 * It retrieves the mandatory fields IWL_TM_ATTR_UCODE_OWNER and
757 * decide who the current owner of the uCode
758 *
759 * If the current owner is OWNERSHIP_TM, then the only host command
760 * can deliver to uCode is from testmode, all the other host commands
761 * will dropped.
762 *
763 * default driver is the owner of uCode in normal operational mode
764 *
765 * @hw: ieee80211_hw object that represents the device
766 * @tb: gnl message fields from the user space
767 */
768static int iwl_testmode_ownership(struct ieee80211_hw *hw, struct nlattr **tb)
769{
770 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
771 u8 owner;
772
773 if (!tb[IWL_TM_ATTR_UCODE_OWNER]) {
774 IWL_ERR(priv, "Missing ucode owner\n");
775 return -ENOMSG;
776 }
777
778 owner = nla_get_u8(tb[IWL_TM_ATTR_UCODE_OWNER]);
779 if (owner == IWL_OWNERSHIP_DRIVER) {
780 priv->ucode_owner = owner;
781 priv->pre_rx_handler = NULL;
782 } else if (owner == IWL_OWNERSHIP_TM) {
783 priv->pre_rx_handler = iwl_testmode_ucode_rx_pkt;
784 priv->ucode_owner = owner;
785 } else {
786 IWL_ERR(priv, "Invalid owner\n");
787 return -EINVAL;
788 }
789 return 0;
790}
791
792static int iwl_testmode_indirect_read(struct iwl_priv *priv, u32 addr, u32 size)
793{
794 struct iwl_trans *trans = priv->trans;
795 unsigned long flags;
796 int i;
797
798 if (size & 0x3)
799 return -EINVAL;
800 priv->testmode_mem.buff_size = size;
801 priv->testmode_mem.buff_addr =
802 kmalloc(priv->testmode_mem.buff_size, GFP_KERNEL);
803 if (priv->testmode_mem.buff_addr == NULL)
804 return -ENOMEM;
805
806 /* Hard-coded periphery absolute address */
807 if (IWL_TM_ABS_PRPH_START <= addr &&
808 addr < IWL_TM_ABS_PRPH_START + PRPH_END) {
809 spin_lock_irqsave(&trans->reg_lock, flags);
810 iwl_grab_nic_access(trans);
811 iwl_write32(trans, HBUS_TARG_PRPH_RADDR,
812 addr | (3 << 24));
813 for (i = 0; i < size; i += 4)
814 *(u32 *)(priv->testmode_mem.buff_addr + i) =
815 iwl_read32(trans, HBUS_TARG_PRPH_RDAT);
816 iwl_release_nic_access(trans);
817 spin_unlock_irqrestore(&trans->reg_lock, flags);
818 } else { /* target memory (SRAM) */
819 _iwl_read_targ_mem_words(trans, addr,
820 priv->testmode_mem.buff_addr,
821 priv->testmode_mem.buff_size / 4);
822 }
823
824 priv->testmode_mem.num_chunks =
825 DIV_ROUND_UP(priv->testmode_mem.buff_size, DUMP_CHUNK_SIZE);
826 priv->testmode_mem.read_in_progress = true;
827 return 0;
828
829}
830
831static int iwl_testmode_indirect_write(struct iwl_priv *priv, u32 addr,
832 u32 size, unsigned char *buf)
833{
834 struct iwl_trans *trans = priv->trans;
835 u32 val, i;
836 unsigned long flags;
837
838 if (IWL_TM_ABS_PRPH_START <= addr &&
839 addr < IWL_TM_ABS_PRPH_START + PRPH_END) {
840 /* Periphery writes can be 1-3 bytes long, or DWORDs */
841 if (size < 4) {
842 memcpy(&val, buf, size);
843 spin_lock_irqsave(&trans->reg_lock, flags);
844 iwl_grab_nic_access(trans);
845 iwl_write32(trans, HBUS_TARG_PRPH_WADDR,
846 (addr & 0x0000FFFF) |
847 ((size - 1) << 24));
848 iwl_write32(trans, HBUS_TARG_PRPH_WDAT, val);
849 iwl_release_nic_access(trans);
850 /* needed after consecutive writes w/o read */
851 mmiowb();
852 spin_unlock_irqrestore(&trans->reg_lock, flags);
853 } else {
854 if (size % 4)
855 return -EINVAL;
856 for (i = 0; i < size; i += 4)
857 iwl_write_prph(trans, addr+i,
858 *(u32 *)(buf+i));
859 }
860 } else if (iwlagn_hw_valid_rtc_data_addr(addr) ||
861 (IWLAGN_RTC_INST_LOWER_BOUND <= addr &&
862 addr < IWLAGN_RTC_INST_UPPER_BOUND)) {
863 _iwl_write_targ_mem_words(trans, addr, buf, size/4);
864 } else
865 return -EINVAL;
866 return 0;
867}
868
869/*
870 * This function handles the user application commands for SRAM data dump
871 *
872 * It retrieves the mandatory fields IWL_TM_ATTR_SRAM_ADDR and
873 * IWL_TM_ATTR_SRAM_SIZE to decide the memory area for SRAM data reading
874 *
875 * Several error will be retured, -EBUSY if the SRAM data retrieved by
876 * previous command has not been delivered to userspace, or -ENOMSG if
877 * the mandatory fields (IWL_TM_ATTR_SRAM_ADDR,IWL_TM_ATTR_SRAM_SIZE)
878 * are missing, or -ENOMEM if the buffer allocation fails.
879 *
880 * Otherwise 0 is replied indicating the success of the SRAM reading.
881 *
882 * @hw: ieee80211_hw object that represents the device
883 * @tb: gnl message fields from the user space
884 */
885static int iwl_testmode_indirect_mem(struct ieee80211_hw *hw,
886 struct nlattr **tb)
887{
888 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
889 u32 addr, size, cmd;
890 unsigned char *buf;
891
892 /* Both read and write should be blocked, for atomicity */
893 if (priv->testmode_mem.read_in_progress)
894 return -EBUSY;
895
896 cmd = nla_get_u32(tb[IWL_TM_ATTR_COMMAND]);
897 if (!tb[IWL_TM_ATTR_MEM_ADDR]) {
898 IWL_ERR(priv, "Error finding memory offset address\n");
899 return -ENOMSG;
900 }
901 addr = nla_get_u32(tb[IWL_TM_ATTR_MEM_ADDR]);
902 if (!tb[IWL_TM_ATTR_BUFFER_SIZE]) {
903 IWL_ERR(priv, "Error finding size for memory reading\n");
904 return -ENOMSG;
905 }
906 size = nla_get_u32(tb[IWL_TM_ATTR_BUFFER_SIZE]);
907
908 if (cmd == IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_READ)
909 return iwl_testmode_indirect_read(priv, addr, size);
910 else {
911 if (!tb[IWL_TM_ATTR_BUFFER_DUMP])
912 return -EINVAL;
913 buf = (unsigned char *) nla_data(tb[IWL_TM_ATTR_BUFFER_DUMP]);
914 return iwl_testmode_indirect_write(priv, addr, size, buf);
915 }
916}
917
918static int iwl_testmode_buffer_dump(struct ieee80211_hw *hw,
919 struct sk_buff *skb,
920 struct netlink_callback *cb)
921{
922 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
923 int idx, length;
924
925 if (priv->testmode_mem.read_in_progress) {
926 idx = cb->args[4];
927 if (idx >= priv->testmode_mem.num_chunks) {
928 iwl_mem_cleanup(priv);
929 return -ENOENT;
930 }
931 length = DUMP_CHUNK_SIZE;
932 if (((idx + 1) == priv->testmode_mem.num_chunks) &&
933 (priv->testmode_mem.buff_size % DUMP_CHUNK_SIZE))
934 length = priv->testmode_mem.buff_size %
935 DUMP_CHUNK_SIZE;
936
937 if (nla_put(skb, IWL_TM_ATTR_BUFFER_DUMP, length,
938 priv->testmode_mem.buff_addr +
939 (DUMP_CHUNK_SIZE * idx)))
940 goto nla_put_failure;
941 idx++;
942 cb->args[4] = idx;
943 return 0;
944 } else
945 return -EFAULT;
946
947 nla_put_failure:
948 return -ENOBUFS;
949}
950
951static int iwl_testmode_notifications(struct ieee80211_hw *hw,
952 struct nlattr **tb)
953{
954 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
955 bool enable;
956
957 enable = nla_get_flag(tb[IWL_TM_ATTR_ENABLE_NOTIFICATION]);
958 if (enable)
959 priv->pre_rx_handler = iwl_testmode_ucode_rx_pkt;
960 else
961 priv->pre_rx_handler = NULL;
962 return 0;
963}
964
965
966/* The testmode gnl message handler that takes the gnl message from the
967 * user space and parses it per the policy iwl_testmode_gnl_msg_policy, then
968 * invoke the corresponding handlers.
969 *
970 * This function is invoked when there is user space application sending
971 * gnl message through the testmode tunnel NL80211_CMD_TESTMODE regulated
972 * by nl80211.
973 *
974 * It retrieves the mandatory field, IWL_TM_ATTR_COMMAND, before
975 * dispatching it to the corresponding handler.
976 *
977 * If IWL_TM_ATTR_COMMAND is missing, -ENOMSG is replied to user application;
978 * -ENOSYS is replied to the user application if the command is unknown;
979 * Otherwise, the command is dispatched to the respective handler.
980 *
981 * @hw: ieee80211_hw object that represents the device
982 * @data: pointer to user space message
983 * @len: length in byte of @data
984 */
985int iwlagn_mac_testmode_cmd(struct ieee80211_hw *hw, void *data, int len)
986{
987 struct nlattr *tb[IWL_TM_ATTR_MAX];
988 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
989 int result;
990
991 result = nla_parse(tb, IWL_TM_ATTR_MAX - 1, data, len,
992 iwl_testmode_gnl_msg_policy);
993 if (result != 0) {
994 IWL_ERR(priv, "Error parsing the gnl message : %d\n", result);
995 return result;
996 }
997
998 /* IWL_TM_ATTR_COMMAND is absolutely mandatory */
999 if (!tb[IWL_TM_ATTR_COMMAND]) {
1000 IWL_ERR(priv, "Missing testmode command type\n");
1001 return -ENOMSG;
1002 }
1003 /* in case multiple accesses to the device happens */
1004 mutex_lock(&priv->mutex);
1005
1006 switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) {
1007 case IWL_TM_CMD_APP2DEV_UCODE:
1008 IWL_DEBUG_INFO(priv, "testmode cmd to uCode\n");
1009 result = iwl_testmode_ucode(hw, tb);
1010 break;
1011 case IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32:
1012 case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32:
1013 case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8:
1014 IWL_DEBUG_INFO(priv, "testmode cmd to register\n");
1015 result = iwl_testmode_reg(hw, tb);
1016 break;
1017 case IWL_TM_CMD_APP2DEV_GET_DEVICENAME:
1018 case IWL_TM_CMD_APP2DEV_LOAD_INIT_FW:
1019 case IWL_TM_CMD_APP2DEV_CFG_INIT_CALIB:
1020 case IWL_TM_CMD_APP2DEV_LOAD_RUNTIME_FW:
1021 case IWL_TM_CMD_APP2DEV_GET_EEPROM:
1022 case IWL_TM_CMD_APP2DEV_FIXRATE_REQ:
1023 case IWL_TM_CMD_APP2DEV_LOAD_WOWLAN_FW:
1024 case IWL_TM_CMD_APP2DEV_GET_FW_VERSION:
1025 case IWL_TM_CMD_APP2DEV_GET_DEVICE_ID:
1026 case IWL_TM_CMD_APP2DEV_GET_FW_INFO:
1027 IWL_DEBUG_INFO(priv, "testmode cmd to driver\n");
1028 result = iwl_testmode_driver(hw, tb);
1029 break;
1030
1031 case IWL_TM_CMD_APP2DEV_BEGIN_TRACE:
1032 case IWL_TM_CMD_APP2DEV_END_TRACE:
1033 case IWL_TM_CMD_APP2DEV_READ_TRACE:
1034 IWL_DEBUG_INFO(priv, "testmode uCode trace cmd to driver\n");
1035 result = iwl_testmode_trace(hw, tb);
1036 break;
1037
1038 case IWL_TM_CMD_APP2DEV_OWNERSHIP:
1039 IWL_DEBUG_INFO(priv, "testmode change uCode ownership\n");
1040 result = iwl_testmode_ownership(hw, tb);
1041 break;
1042
1043 case IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_READ:
1044 case IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_WRITE:
1045 IWL_DEBUG_INFO(priv, "testmode indirect memory cmd "
1046 "to driver\n");
1047 result = iwl_testmode_indirect_mem(hw, tb);
1048 break;
1049
1050 case IWL_TM_CMD_APP2DEV_NOTIFICATIONS:
1051 IWL_DEBUG_INFO(priv, "testmode notifications cmd "
1052 "to driver\n");
1053 result = iwl_testmode_notifications(hw, tb);
1054 break;
1055
1056 default:
1057 IWL_ERR(priv, "Unknown testmode command\n");
1058 result = -ENOSYS;
1059 break;
1060 }
1061
1062 mutex_unlock(&priv->mutex);
1063 return result;
1064}
1065
1066int iwlagn_mac_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *skb,
1067 struct netlink_callback *cb,
1068 void *data, int len)
1069{
1070 struct nlattr *tb[IWL_TM_ATTR_MAX];
1071 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
1072 int result;
1073 u32 cmd;
1074
1075 if (cb->args[3]) {
1076 /* offset by 1 since commands start at 0 */
1077 cmd = cb->args[3] - 1;
1078 } else {
1079 result = nla_parse(tb, IWL_TM_ATTR_MAX - 1, data, len,
1080 iwl_testmode_gnl_msg_policy);
1081 if (result) {
1082 IWL_ERR(priv,
1083 "Error parsing the gnl message : %d\n", result);
1084 return result;
1085 }
1086
1087 /* IWL_TM_ATTR_COMMAND is absolutely mandatory */
1088 if (!tb[IWL_TM_ATTR_COMMAND]) {
1089 IWL_ERR(priv, "Missing testmode command type\n");
1090 return -ENOMSG;
1091 }
1092 cmd = nla_get_u32(tb[IWL_TM_ATTR_COMMAND]);
1093 cb->args[3] = cmd + 1;
1094 }
1095
1096 /* in case multiple accesses to the device happens */
1097 mutex_lock(&priv->mutex);
1098 switch (cmd) {
1099 case IWL_TM_CMD_APP2DEV_READ_TRACE:
1100 IWL_DEBUG_INFO(priv, "uCode trace cmd to driver\n");
1101 result = iwl_testmode_trace_dump(hw, skb, cb);
1102 break;
1103 case IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_DUMP:
1104 IWL_DEBUG_INFO(priv, "testmode sram dump cmd to driver\n");
1105 result = iwl_testmode_buffer_dump(hw, skb, cb);
1106 break;
1107 default:
1108 result = -EINVAL;
1109 break;
1110 }
1111
1112 mutex_unlock(&priv->mutex);
1113 return result;
1114}
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.h b/drivers/net/wireless/iwlwifi/iwl-trans.h
index 79a1e7ae499..00efde8e553 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/iwlwifi/iwl-trans.h
@@ -154,6 +154,9 @@ struct iwl_cmd_header {
154 __le16 sequence; 154 __le16 sequence;
155} __packed; 155} __packed;
156 156
157/* iwl_cmd_header flags value */
158#define IWL_CMD_FAILED_MSK 0x40
159
157 160
158#define FH_RSCSR_FRAME_SIZE_MSK 0x00003FFF /* bits 0-13 */ 161#define FH_RSCSR_FRAME_SIZE_MSK 0x00003FFF /* bits 0-13 */
159#define FH_RSCSR_FRAME_INVALID 0x55550000 162#define FH_RSCSR_FRAME_INVALID 0x55550000
@@ -280,6 +283,8 @@ static inline struct page *rxb_steal_page(struct iwl_rx_cmd_buffer *r)
280 283
281#define MAX_NO_RECLAIM_CMDS 6 284#define MAX_NO_RECLAIM_CMDS 6
282 285
286#define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))
287
283/* 288/*
284 * Maximum number of HW queues the transport layer 289 * Maximum number of HW queues the transport layer
285 * currently supports 290 * currently supports
@@ -350,10 +355,10 @@ struct iwl_trans;
350 * Must be atomic 355 * Must be atomic
351 * @reclaim: free packet until ssn. Returns a list of freed packets. 356 * @reclaim: free packet until ssn. Returns a list of freed packets.
352 * Must be atomic 357 * Must be atomic
353 * @tx_agg_setup: setup a tx queue for AMPDU - will be called once the HW is 358 * @txq_enable: setup a tx queue for AMPDU - will be called once the HW is
354 * ready and a successful ADDBA response has been received. 359 * ready and a successful ADDBA response has been received.
355 * May sleep 360 * May sleep
356 * @tx_agg_disable: de-configure a Tx queue to send AMPDUs 361 * @txq_disable: de-configure a Tx queue to send AMPDUs
357 * Must be atomic 362 * Must be atomic
358 * @wait_tx_queue_empty: wait until all tx queues are empty 363 * @wait_tx_queue_empty: wait until all tx queues are empty
359 * May sleep 364 * May sleep
@@ -386,9 +391,9 @@ struct iwl_trans_ops {
386 void (*reclaim)(struct iwl_trans *trans, int queue, int ssn, 391 void (*reclaim)(struct iwl_trans *trans, int queue, int ssn,
387 struct sk_buff_head *skbs); 392 struct sk_buff_head *skbs);
388 393
389 void (*tx_agg_setup)(struct iwl_trans *trans, int queue, int fifo, 394 void (*txq_enable)(struct iwl_trans *trans, int queue, int fifo,
390 int sta_id, int tid, int frame_limit, u16 ssn); 395 int sta_id, int tid, int frame_limit, u16 ssn);
391 void (*tx_agg_disable)(struct iwl_trans *trans, int queue); 396 void (*txq_disable)(struct iwl_trans *trans, int queue);
392 397
393 int (*dbgfs_register)(struct iwl_trans *trans, struct dentry* dir); 398 int (*dbgfs_register)(struct iwl_trans *trans, struct dentry* dir);
394 int (*wait_tx_queue_empty)(struct iwl_trans *trans); 399 int (*wait_tx_queue_empty)(struct iwl_trans *trans);
@@ -428,6 +433,11 @@ enum iwl_trans_state {
428 * @hw_id_str: a string with info about HW ID. Set during transport allocation. 433 * @hw_id_str: a string with info about HW ID. Set during transport allocation.
429 * @pm_support: set to true in start_hw if link pm is supported 434 * @pm_support: set to true in start_hw if link pm is supported
430 * @wait_command_queue: the wait_queue for SYNC host commands 435 * @wait_command_queue: the wait_queue for SYNC host commands
436 * @dev_cmd_pool: pool for Tx cmd allocation - for internal use only.
437 * The user should use iwl_trans_{alloc,free}_tx_cmd.
438 * @dev_cmd_headroom: room needed for the transport's private use before the
439 * device_cmd for Tx - for internal use only
440 * The user should use iwl_trans_{alloc,free}_tx_cmd.
431 */ 441 */
432struct iwl_trans { 442struct iwl_trans {
433 const struct iwl_trans_ops *ops; 443 const struct iwl_trans_ops *ops;
@@ -445,6 +455,10 @@ struct iwl_trans {
445 455
446 wait_queue_head_t wait_command_queue; 456 wait_queue_head_t wait_command_queue;
447 457
458 /* The following fields are internal only */
459 struct kmem_cache *dev_cmd_pool;
460 size_t dev_cmd_headroom;
461
448 /* pointer to trans specific struct */ 462 /* pointer to trans specific struct */
449 /*Ensure that this pointer will always be aligned to sizeof pointer */ 463 /*Ensure that this pointer will always be aligned to sizeof pointer */
450 char trans_specific[0] __aligned(sizeof(void *)); 464 char trans_specific[0] __aligned(sizeof(void *));
@@ -520,6 +534,26 @@ static inline int iwl_trans_send_cmd(struct iwl_trans *trans,
520 return trans->ops->send_cmd(trans, cmd); 534 return trans->ops->send_cmd(trans, cmd);
521} 535}
522 536
537static inline struct iwl_device_cmd *
538iwl_trans_alloc_tx_cmd(struct iwl_trans *trans)
539{
540 u8 *dev_cmd_ptr = kmem_cache_alloc(trans->dev_cmd_pool, GFP_ATOMIC);
541
542 if (unlikely(dev_cmd_ptr == NULL))
543 return NULL;
544
545 return (struct iwl_device_cmd *)
546 (dev_cmd_ptr + trans->dev_cmd_headroom);
547}
548
549static inline void iwl_trans_free_tx_cmd(struct iwl_trans *trans,
550 struct iwl_device_cmd *dev_cmd)
551{
552 u8 *dev_cmd_ptr = (u8 *)dev_cmd - trans->dev_cmd_headroom;
553
554 kmem_cache_free(trans->dev_cmd_pool, dev_cmd_ptr);
555}
556
523static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb, 557static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
524 struct iwl_device_cmd *dev_cmd, int queue) 558 struct iwl_device_cmd *dev_cmd, int queue)
525{ 559{
@@ -538,24 +572,24 @@ static inline void iwl_trans_reclaim(struct iwl_trans *trans, int queue,
538 trans->ops->reclaim(trans, queue, ssn, skbs); 572 trans->ops->reclaim(trans, queue, ssn, skbs);
539} 573}
540 574
541static inline void iwl_trans_tx_agg_disable(struct iwl_trans *trans, int queue) 575static inline void iwl_trans_txq_disable(struct iwl_trans *trans, int queue)
542{ 576{
543 WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE, 577 WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
544 "%s bad state = %d", __func__, trans->state); 578 "%s bad state = %d", __func__, trans->state);
545 579
546 trans->ops->tx_agg_disable(trans, queue); 580 trans->ops->txq_disable(trans, queue);
547} 581}
548 582
549static inline void iwl_trans_tx_agg_setup(struct iwl_trans *trans, int queue, 583static inline void iwl_trans_txq_enable(struct iwl_trans *trans, int queue,
550 int fifo, int sta_id, int tid, 584 int fifo, int sta_id, int tid,
551 int frame_limit, u16 ssn) 585 int frame_limit, u16 ssn)
552{ 586{
553 might_sleep(); 587 might_sleep();
554 588
555 WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE, 589 WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
556 "%s bad state = %d", __func__, trans->state); 590 "%s bad state = %d", __func__, trans->state);
557 591
558 trans->ops->tx_agg_setup(trans, queue, fifo, sta_id, tid, 592 trans->ops->txq_enable(trans, queue, fifo, sta_id, tid,
559 frame_limit, ssn); 593 frame_limit, ssn);
560} 594}
561 595
diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/pcie/1000.c
index 2629a6602df..81b83f484f0 100644
--- a/drivers/net/wireless/iwlwifi/iwl-1000.c
+++ b/drivers/net/wireless/iwlwifi/pcie/1000.c
@@ -27,9 +27,9 @@
27#include <linux/module.h> 27#include <linux/module.h>
28#include <linux/stringify.h> 28#include <linux/stringify.h>
29#include "iwl-config.h" 29#include "iwl-config.h"
30#include "iwl-cfg.h"
31#include "iwl-csr.h" 30#include "iwl-csr.h"
32#include "iwl-agn-hw.h" 31#include "iwl-agn-hw.h"
32#include "cfg.h"
33 33
34/* Highest firmware API version supported */ 34/* Highest firmware API version supported */
35#define IWL1000_UCODE_API_MAX 5 35#define IWL1000_UCODE_API_MAX 5
@@ -64,13 +64,26 @@ static const struct iwl_base_params iwl1000_base_params = {
64 .support_ct_kill_exit = true, 64 .support_ct_kill_exit = true,
65 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF, 65 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF,
66 .chain_noise_scale = 1000, 66 .chain_noise_scale = 1000,
67 .wd_timeout = IWL_WATCHHDOG_DISABLED, 67 .wd_timeout = IWL_WATCHDOG_DISABLED,
68 .max_event_log_size = 128, 68 .max_event_log_size = 128,
69}; 69};
70 70
71static const struct iwl_ht_params iwl1000_ht_params = { 71static const struct iwl_ht_params iwl1000_ht_params = {
72 .ht_greenfield_support = true, 72 .ht_greenfield_support = true,
73 .use_rts_for_aggregation = true, /* use rts/cts protection */ 73 .use_rts_for_aggregation = true, /* use rts/cts protection */
74 .ht40_bands = BIT(IEEE80211_BAND_2GHZ),
75};
76
77static const struct iwl_eeprom_params iwl1000_eeprom_params = {
78 .regulatory_bands = {
79 EEPROM_REG_BAND_1_CHANNELS,
80 EEPROM_REG_BAND_2_CHANNELS,
81 EEPROM_REG_BAND_3_CHANNELS,
82 EEPROM_REG_BAND_4_CHANNELS,
83 EEPROM_REG_BAND_5_CHANNELS,
84 EEPROM_REG_BAND_24_HT40_CHANNELS,
85 EEPROM_REGULATORY_BAND_NO_HT40,
86 }
74}; 87};
75 88
76#define IWL_DEVICE_1000 \ 89#define IWL_DEVICE_1000 \
@@ -84,6 +97,7 @@ static const struct iwl_ht_params iwl1000_ht_params = {
84 .eeprom_ver = EEPROM_1000_EEPROM_VERSION, \ 97 .eeprom_ver = EEPROM_1000_EEPROM_VERSION, \
85 .eeprom_calib_ver = EEPROM_1000_TX_POWER_VERSION, \ 98 .eeprom_calib_ver = EEPROM_1000_TX_POWER_VERSION, \
86 .base_params = &iwl1000_base_params, \ 99 .base_params = &iwl1000_base_params, \
100 .eeprom_params = &iwl1000_eeprom_params, \
87 .led_mode = IWL_LED_BLINK 101 .led_mode = IWL_LED_BLINK
88 102
89const struct iwl_cfg iwl1000_bgn_cfg = { 103const struct iwl_cfg iwl1000_bgn_cfg = {
@@ -108,6 +122,7 @@ const struct iwl_cfg iwl1000_bg_cfg = {
108 .eeprom_ver = EEPROM_1000_EEPROM_VERSION, \ 122 .eeprom_ver = EEPROM_1000_EEPROM_VERSION, \
109 .eeprom_calib_ver = EEPROM_1000_TX_POWER_VERSION, \ 123 .eeprom_calib_ver = EEPROM_1000_TX_POWER_VERSION, \
110 .base_params = &iwl1000_base_params, \ 124 .base_params = &iwl1000_base_params, \
125 .eeprom_params = &iwl1000_eeprom_params, \
111 .led_mode = IWL_LED_RF_STATE, \ 126 .led_mode = IWL_LED_RF_STATE, \
112 .rx_with_siso_diversity = true 127 .rx_with_siso_diversity = true
113 128
diff --git a/drivers/net/wireless/iwlwifi/iwl-2000.c b/drivers/net/wireless/iwlwifi/pcie/2000.c
index 8133105ac64..fd4e78f56fa 100644
--- a/drivers/net/wireless/iwlwifi/iwl-2000.c
+++ b/drivers/net/wireless/iwlwifi/pcie/2000.c
@@ -27,9 +27,9 @@
27#include <linux/module.h> 27#include <linux/module.h>
28#include <linux/stringify.h> 28#include <linux/stringify.h>
29#include "iwl-config.h" 29#include "iwl-config.h"
30#include "iwl-cfg.h"
31#include "iwl-agn-hw.h" 30#include "iwl-agn-hw.h"
32#include "iwl-commands.h" /* needed for BT for now */ 31#include "cfg.h"
32#include "dvm/commands.h" /* needed for BT for now */
33 33
34/* Highest firmware API version supported */ 34/* Highest firmware API version supported */
35#define IWL2030_UCODE_API_MAX 6 35#define IWL2030_UCODE_API_MAX 6
@@ -104,6 +104,7 @@ static const struct iwl_base_params iwl2030_base_params = {
104static const struct iwl_ht_params iwl2000_ht_params = { 104static const struct iwl_ht_params iwl2000_ht_params = {
105 .ht_greenfield_support = true, 105 .ht_greenfield_support = true,
106 .use_rts_for_aggregation = true, /* use rts/cts protection */ 106 .use_rts_for_aggregation = true, /* use rts/cts protection */
107 .ht40_bands = BIT(IEEE80211_BAND_2GHZ),
107}; 108};
108 109
109static const struct iwl_bt_params iwl2030_bt_params = { 110static const struct iwl_bt_params iwl2030_bt_params = {
@@ -116,6 +117,19 @@ static const struct iwl_bt_params iwl2030_bt_params = {
116 .bt_session_2 = true, 117 .bt_session_2 = true,
117}; 118};
118 119
120static const struct iwl_eeprom_params iwl20x0_eeprom_params = {
121 .regulatory_bands = {
122 EEPROM_REG_BAND_1_CHANNELS,
123 EEPROM_REG_BAND_2_CHANNELS,
124 EEPROM_REG_BAND_3_CHANNELS,
125 EEPROM_REG_BAND_4_CHANNELS,
126 EEPROM_REG_BAND_5_CHANNELS,
127 EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
128 EEPROM_REGULATORY_BAND_NO_HT40,
129 },
130 .enhanced_txpower = true,
131};
132
119#define IWL_DEVICE_2000 \ 133#define IWL_DEVICE_2000 \
120 .fw_name_pre = IWL2000_FW_PRE, \ 134 .fw_name_pre = IWL2000_FW_PRE, \
121 .ucode_api_max = IWL2000_UCODE_API_MAX, \ 135 .ucode_api_max = IWL2000_UCODE_API_MAX, \
@@ -127,6 +141,7 @@ static const struct iwl_bt_params iwl2030_bt_params = {
127 .eeprom_ver = EEPROM_2000_EEPROM_VERSION, \ 141 .eeprom_ver = EEPROM_2000_EEPROM_VERSION, \
128 .eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \ 142 .eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
129 .base_params = &iwl2000_base_params, \ 143 .base_params = &iwl2000_base_params, \
144 .eeprom_params = &iwl20x0_eeprom_params, \
130 .need_temp_offset_calib = true, \ 145 .need_temp_offset_calib = true, \
131 .temp_offset_v2 = true, \ 146 .temp_offset_v2 = true, \
132 .led_mode = IWL_LED_RF_STATE 147 .led_mode = IWL_LED_RF_STATE
@@ -155,6 +170,7 @@ const struct iwl_cfg iwl2000_2bgn_d_cfg = {
155 .eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \ 170 .eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
156 .base_params = &iwl2030_base_params, \ 171 .base_params = &iwl2030_base_params, \
157 .bt_params = &iwl2030_bt_params, \ 172 .bt_params = &iwl2030_bt_params, \
173 .eeprom_params = &iwl20x0_eeprom_params, \
158 .need_temp_offset_calib = true, \ 174 .need_temp_offset_calib = true, \
159 .temp_offset_v2 = true, \ 175 .temp_offset_v2 = true, \
160 .led_mode = IWL_LED_RF_STATE, \ 176 .led_mode = IWL_LED_RF_STATE, \
@@ -177,6 +193,7 @@ const struct iwl_cfg iwl2030_2bgn_cfg = {
177 .eeprom_ver = EEPROM_2000_EEPROM_VERSION, \ 193 .eeprom_ver = EEPROM_2000_EEPROM_VERSION, \
178 .eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \ 194 .eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
179 .base_params = &iwl2000_base_params, \ 195 .base_params = &iwl2000_base_params, \
196 .eeprom_params = &iwl20x0_eeprom_params, \
180 .need_temp_offset_calib = true, \ 197 .need_temp_offset_calib = true, \
181 .temp_offset_v2 = true, \ 198 .temp_offset_v2 = true, \
182 .led_mode = IWL_LED_RF_STATE, \ 199 .led_mode = IWL_LED_RF_STATE, \
@@ -207,6 +224,7 @@ const struct iwl_cfg iwl105_bgn_d_cfg = {
207 .eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \ 224 .eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
208 .base_params = &iwl2030_base_params, \ 225 .base_params = &iwl2030_base_params, \
209 .bt_params = &iwl2030_bt_params, \ 226 .bt_params = &iwl2030_bt_params, \
227 .eeprom_params = &iwl20x0_eeprom_params, \
210 .need_temp_offset_calib = true, \ 228 .need_temp_offset_calib = true, \
211 .temp_offset_v2 = true, \ 229 .temp_offset_v2 = true, \
212 .led_mode = IWL_LED_RF_STATE, \ 230 .led_mode = IWL_LED_RF_STATE, \
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/pcie/5000.c
index 8e26bc825f2..d1665fa6d15 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/pcie/5000.c
@@ -27,9 +27,9 @@
27#include <linux/module.h> 27#include <linux/module.h>
28#include <linux/stringify.h> 28#include <linux/stringify.h>
29#include "iwl-config.h" 29#include "iwl-config.h"
30#include "iwl-cfg.h"
31#include "iwl-agn-hw.h" 30#include "iwl-agn-hw.h"
32#include "iwl-csr.h" 31#include "iwl-csr.h"
32#include "cfg.h"
33 33
34/* Highest firmware API version supported */ 34/* Highest firmware API version supported */
35#define IWL5000_UCODE_API_MAX 5 35#define IWL5000_UCODE_API_MAX 5
@@ -62,13 +62,26 @@ static const struct iwl_base_params iwl5000_base_params = {
62 .led_compensation = 51, 62 .led_compensation = 51,
63 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF, 63 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
64 .chain_noise_scale = 1000, 64 .chain_noise_scale = 1000,
65 .wd_timeout = IWL_WATCHHDOG_DISABLED, 65 .wd_timeout = IWL_WATCHDOG_DISABLED,
66 .max_event_log_size = 512, 66 .max_event_log_size = 512,
67 .no_idle_support = true, 67 .no_idle_support = true,
68}; 68};
69 69
70static const struct iwl_ht_params iwl5000_ht_params = { 70static const struct iwl_ht_params iwl5000_ht_params = {
71 .ht_greenfield_support = true, 71 .ht_greenfield_support = true,
72 .ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ),
73};
74
75static const struct iwl_eeprom_params iwl5000_eeprom_params = {
76 .regulatory_bands = {
77 EEPROM_REG_BAND_1_CHANNELS,
78 EEPROM_REG_BAND_2_CHANNELS,
79 EEPROM_REG_BAND_3_CHANNELS,
80 EEPROM_REG_BAND_4_CHANNELS,
81 EEPROM_REG_BAND_5_CHANNELS,
82 EEPROM_REG_BAND_24_HT40_CHANNELS,
83 EEPROM_REG_BAND_52_HT40_CHANNELS
84 },
72}; 85};
73 86
74#define IWL_DEVICE_5000 \ 87#define IWL_DEVICE_5000 \
@@ -82,6 +95,7 @@ static const struct iwl_ht_params iwl5000_ht_params = {
82 .eeprom_ver = EEPROM_5000_EEPROM_VERSION, \ 95 .eeprom_ver = EEPROM_5000_EEPROM_VERSION, \
83 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION, \ 96 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION, \
84 .base_params = &iwl5000_base_params, \ 97 .base_params = &iwl5000_base_params, \
98 .eeprom_params = &iwl5000_eeprom_params, \
85 .led_mode = IWL_LED_BLINK 99 .led_mode = IWL_LED_BLINK
86 100
87const struct iwl_cfg iwl5300_agn_cfg = { 101const struct iwl_cfg iwl5300_agn_cfg = {
@@ -128,6 +142,7 @@ const struct iwl_cfg iwl5350_agn_cfg = {
128 .eeprom_ver = EEPROM_5050_EEPROM_VERSION, 142 .eeprom_ver = EEPROM_5050_EEPROM_VERSION,
129 .eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION, 143 .eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION,
130 .base_params = &iwl5000_base_params, 144 .base_params = &iwl5000_base_params,
145 .eeprom_params = &iwl5000_eeprom_params,
131 .ht_params = &iwl5000_ht_params, 146 .ht_params = &iwl5000_ht_params,
132 .led_mode = IWL_LED_BLINK, 147 .led_mode = IWL_LED_BLINK,
133 .internal_wimax_coex = true, 148 .internal_wimax_coex = true,
@@ -144,6 +159,7 @@ const struct iwl_cfg iwl5350_agn_cfg = {
144 .eeprom_ver = EEPROM_5050_EEPROM_VERSION, \ 159 .eeprom_ver = EEPROM_5050_EEPROM_VERSION, \
145 .eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION, \ 160 .eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION, \
146 .base_params = &iwl5000_base_params, \ 161 .base_params = &iwl5000_base_params, \
162 .eeprom_params = &iwl5000_eeprom_params, \
147 .no_xtal_calib = true, \ 163 .no_xtal_calib = true, \
148 .led_mode = IWL_LED_BLINK, \ 164 .led_mode = IWL_LED_BLINK, \
149 .internal_wimax_coex = true 165 .internal_wimax_coex = true
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/pcie/6000.c
index e5e8ada4aaf..4a57624afc4 100644
--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
+++ b/drivers/net/wireless/iwlwifi/pcie/6000.c
@@ -27,9 +27,9 @@
27#include <linux/module.h> 27#include <linux/module.h>
28#include <linux/stringify.h> 28#include <linux/stringify.h>
29#include "iwl-config.h" 29#include "iwl-config.h"
30#include "iwl-cfg.h"
31#include "iwl-agn-hw.h" 30#include "iwl-agn-hw.h"
32#include "iwl-commands.h" /* needed for BT for now */ 31#include "cfg.h"
32#include "dvm/commands.h" /* needed for BT for now */
33 33
34/* Highest firmware API version supported */ 34/* Highest firmware API version supported */
35#define IWL6000_UCODE_API_MAX 6 35#define IWL6000_UCODE_API_MAX 6
@@ -127,6 +127,7 @@ static const struct iwl_base_params iwl6000_g2_base_params = {
127static const struct iwl_ht_params iwl6000_ht_params = { 127static const struct iwl_ht_params iwl6000_ht_params = {
128 .ht_greenfield_support = true, 128 .ht_greenfield_support = true,
129 .use_rts_for_aggregation = true, /* use rts/cts protection */ 129 .use_rts_for_aggregation = true, /* use rts/cts protection */
130 .ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ),
130}; 131};
131 132
132static const struct iwl_bt_params iwl6000_bt_params = { 133static const struct iwl_bt_params iwl6000_bt_params = {
@@ -138,6 +139,19 @@ static const struct iwl_bt_params iwl6000_bt_params = {
138 .bt_sco_disable = true, 139 .bt_sco_disable = true,
139}; 140};
140 141
142static const struct iwl_eeprom_params iwl6000_eeprom_params = {
143 .regulatory_bands = {
144 EEPROM_REG_BAND_1_CHANNELS,
145 EEPROM_REG_BAND_2_CHANNELS,
146 EEPROM_REG_BAND_3_CHANNELS,
147 EEPROM_REG_BAND_4_CHANNELS,
148 EEPROM_REG_BAND_5_CHANNELS,
149 EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
150 EEPROM_REG_BAND_52_HT40_CHANNELS
151 },
152 .enhanced_txpower = true,
153};
154
141#define IWL_DEVICE_6005 \ 155#define IWL_DEVICE_6005 \
142 .fw_name_pre = IWL6005_FW_PRE, \ 156 .fw_name_pre = IWL6005_FW_PRE, \
143 .ucode_api_max = IWL6000G2_UCODE_API_MAX, \ 157 .ucode_api_max = IWL6000G2_UCODE_API_MAX, \
@@ -149,6 +163,7 @@ static const struct iwl_bt_params iwl6000_bt_params = {
149 .eeprom_ver = EEPROM_6005_EEPROM_VERSION, \ 163 .eeprom_ver = EEPROM_6005_EEPROM_VERSION, \
150 .eeprom_calib_ver = EEPROM_6005_TX_POWER_VERSION, \ 164 .eeprom_calib_ver = EEPROM_6005_TX_POWER_VERSION, \
151 .base_params = &iwl6000_g2_base_params, \ 165 .base_params = &iwl6000_g2_base_params, \
166 .eeprom_params = &iwl6000_eeprom_params, \
152 .need_temp_offset_calib = true, \ 167 .need_temp_offset_calib = true, \
153 .led_mode = IWL_LED_RF_STATE 168 .led_mode = IWL_LED_RF_STATE
154 169
@@ -204,6 +219,7 @@ const struct iwl_cfg iwl6005_2agn_mow2_cfg = {
204 .eeprom_calib_ver = EEPROM_6030_TX_POWER_VERSION, \ 219 .eeprom_calib_ver = EEPROM_6030_TX_POWER_VERSION, \
205 .base_params = &iwl6000_g2_base_params, \ 220 .base_params = &iwl6000_g2_base_params, \
206 .bt_params = &iwl6000_bt_params, \ 221 .bt_params = &iwl6000_bt_params, \
222 .eeprom_params = &iwl6000_eeprom_params, \
207 .need_temp_offset_calib = true, \ 223 .need_temp_offset_calib = true, \
208 .led_mode = IWL_LED_RF_STATE, \ 224 .led_mode = IWL_LED_RF_STATE, \
209 .adv_pm = true \ 225 .adv_pm = true \
@@ -242,6 +258,7 @@ const struct iwl_cfg iwl6030_2bg_cfg = {
242 .eeprom_calib_ver = EEPROM_6030_TX_POWER_VERSION, \ 258 .eeprom_calib_ver = EEPROM_6030_TX_POWER_VERSION, \
243 .base_params = &iwl6000_g2_base_params, \ 259 .base_params = &iwl6000_g2_base_params, \
244 .bt_params = &iwl6000_bt_params, \ 260 .bt_params = &iwl6000_bt_params, \
261 .eeprom_params = &iwl6000_eeprom_params, \
245 .need_temp_offset_calib = true, \ 262 .need_temp_offset_calib = true, \
246 .led_mode = IWL_LED_RF_STATE, \ 263 .led_mode = IWL_LED_RF_STATE, \
247 .adv_pm = true 264 .adv_pm = true
@@ -292,6 +309,7 @@ const struct iwl_cfg iwl130_bg_cfg = {
292 .eeprom_ver = EEPROM_6000_EEPROM_VERSION, \ 309 .eeprom_ver = EEPROM_6000_EEPROM_VERSION, \
293 .eeprom_calib_ver = EEPROM_6000_TX_POWER_VERSION, \ 310 .eeprom_calib_ver = EEPROM_6000_TX_POWER_VERSION, \
294 .base_params = &iwl6000_base_params, \ 311 .base_params = &iwl6000_base_params, \
312 .eeprom_params = &iwl6000_eeprom_params, \
295 .led_mode = IWL_LED_BLINK 313 .led_mode = IWL_LED_BLINK
296 314
297const struct iwl_cfg iwl6000i_2agn_cfg = { 315const struct iwl_cfg iwl6000i_2agn_cfg = {
@@ -322,6 +340,7 @@ const struct iwl_cfg iwl6000i_2bg_cfg = {
322 .eeprom_ver = EEPROM_6050_EEPROM_VERSION, \ 340 .eeprom_ver = EEPROM_6050_EEPROM_VERSION, \
323 .eeprom_calib_ver = EEPROM_6050_TX_POWER_VERSION, \ 341 .eeprom_calib_ver = EEPROM_6050_TX_POWER_VERSION, \
324 .base_params = &iwl6050_base_params, \ 342 .base_params = &iwl6050_base_params, \
343 .eeprom_params = &iwl6000_eeprom_params, \
325 .led_mode = IWL_LED_BLINK, \ 344 .led_mode = IWL_LED_BLINK, \
326 .internal_wimax_coex = true 345 .internal_wimax_coex = true
327 346
@@ -346,6 +365,7 @@ const struct iwl_cfg iwl6050_2abg_cfg = {
346 .eeprom_ver = EEPROM_6150_EEPROM_VERSION, \ 365 .eeprom_ver = EEPROM_6150_EEPROM_VERSION, \
347 .eeprom_calib_ver = EEPROM_6150_TX_POWER_VERSION, \ 366 .eeprom_calib_ver = EEPROM_6150_TX_POWER_VERSION, \
348 .base_params = &iwl6050_base_params, \ 367 .base_params = &iwl6050_base_params, \
368 .eeprom_params = &iwl6000_eeprom_params, \
349 .led_mode = IWL_LED_BLINK, \ 369 .led_mode = IWL_LED_BLINK, \
350 .internal_wimax_coex = true 370 .internal_wimax_coex = true
351 371
@@ -372,6 +392,7 @@ const struct iwl_cfg iwl6000_3agn_cfg = {
372 .eeprom_ver = EEPROM_6000_EEPROM_VERSION, 392 .eeprom_ver = EEPROM_6000_EEPROM_VERSION,
373 .eeprom_calib_ver = EEPROM_6000_TX_POWER_VERSION, 393 .eeprom_calib_ver = EEPROM_6000_TX_POWER_VERSION,
374 .base_params = &iwl6000_base_params, 394 .base_params = &iwl6000_base_params,
395 .eeprom_params = &iwl6000_eeprom_params,
375 .ht_params = &iwl6000_ht_params, 396 .ht_params = &iwl6000_ht_params,
376 .led_mode = IWL_LED_BLINK, 397 .led_mode = IWL_LED_BLINK,
377}; 398};
diff --git a/drivers/net/wireless/iwlwifi/iwl-cfg.h b/drivers/net/wireless/iwlwifi/pcie/cfg.h
index 82152311d73..82152311d73 100644
--- a/drivers/net/wireless/iwlwifi/iwl-cfg.h
+++ b/drivers/net/wireless/iwlwifi/pcie/cfg.h
diff --git a/drivers/net/wireless/iwlwifi/iwl-pci.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
index 0c8a1c2d884..f4c3500b68c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-pci.c
+++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
@@ -68,10 +68,11 @@
68#include <linux/pci-aspm.h> 68#include <linux/pci-aspm.h>
69 69
70#include "iwl-trans.h" 70#include "iwl-trans.h"
71#include "iwl-cfg.h"
72#include "iwl-drv.h" 71#include "iwl-drv.h"
73#include "iwl-trans.h" 72#include "iwl-trans.h"
74#include "iwl-trans-pcie-int.h" 73
74#include "cfg.h"
75#include "internal.h"
75 76
76#define IWL_PCI_DEVICE(dev, subdev, cfg) \ 77#define IWL_PCI_DEVICE(dev, subdev, cfg) \
77 .vendor = PCI_VENDOR_ID_INTEL, .device = (dev), \ 78 .vendor = PCI_VENDOR_ID_INTEL, .device = (dev), \
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h b/drivers/net/wireless/iwlwifi/pcie/internal.h
index e959207c630..5024fb662bf 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h
+++ b/drivers/net/wireless/iwlwifi/pcie/internal.h
@@ -313,7 +313,7 @@ void iwl_bg_rx_replenish(struct work_struct *data);
313void iwl_irq_tasklet(struct iwl_trans *trans); 313void iwl_irq_tasklet(struct iwl_trans *trans);
314void iwlagn_rx_replenish(struct iwl_trans *trans); 314void iwlagn_rx_replenish(struct iwl_trans *trans);
315void iwl_rx_queue_update_write_ptr(struct iwl_trans *trans, 315void iwl_rx_queue_update_write_ptr(struct iwl_trans *trans,
316 struct iwl_rx_queue *q); 316 struct iwl_rx_queue *q);
317 317
318/***************************************************** 318/*****************************************************
319* ICT 319* ICT
@@ -328,7 +328,7 @@ irqreturn_t iwl_isr_ict(int irq, void *data);
328* TX / HCMD 328* TX / HCMD
329******************************************************/ 329******************************************************/
330void iwl_txq_update_write_ptr(struct iwl_trans *trans, 330void iwl_txq_update_write_ptr(struct iwl_trans *trans,
331 struct iwl_tx_queue *txq); 331 struct iwl_tx_queue *txq);
332int iwlagn_txq_attach_buf_to_tfd(struct iwl_trans *trans, 332int iwlagn_txq_attach_buf_to_tfd(struct iwl_trans *trans,
333 struct iwl_tx_queue *txq, 333 struct iwl_tx_queue *txq,
334 dma_addr_t addr, u16 len, u8 reset); 334 dma_addr_t addr, u16 len, u8 reset);
@@ -337,17 +337,13 @@ int iwl_trans_pcie_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
337void iwl_tx_cmd_complete(struct iwl_trans *trans, 337void iwl_tx_cmd_complete(struct iwl_trans *trans,
338 struct iwl_rx_cmd_buffer *rxb, int handler_status); 338 struct iwl_rx_cmd_buffer *rxb, int handler_status);
339void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_trans *trans, 339void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
340 struct iwl_tx_queue *txq, 340 struct iwl_tx_queue *txq,
341 u16 byte_cnt); 341 u16 byte_cnt);
342void iwl_trans_pcie_tx_agg_disable(struct iwl_trans *trans, int queue); 342void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo,
343void iwl_trans_set_wr_ptrs(struct iwl_trans *trans, int txq_id, u32 index); 343 int sta_id, int tid, int frame_limit, u16 ssn);
344void iwl_trans_tx_queue_set_status(struct iwl_trans *trans, 344void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue);
345 struct iwl_tx_queue *txq, 345void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
346 int tx_fifo_id, bool active); 346 enum dma_data_direction dma_dir);
347void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans, int queue, int fifo,
348 int sta_id, int tid, int frame_limit, u16 ssn);
349void iwlagn_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
350 enum dma_data_direction dma_dir);
351int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index, 347int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index,
352 struct sk_buff_head *skbs); 348 struct sk_buff_head *skbs);
353int iwl_queue_space(const struct iwl_queue *q); 349int iwl_queue_space(const struct iwl_queue *q);
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c b/drivers/net/wireless/iwlwifi/pcie/rx.c
index 08517d3c80b..be143eb4aa4 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/rx.c
@@ -32,7 +32,7 @@
32 32
33#include "iwl-prph.h" 33#include "iwl-prph.h"
34#include "iwl-io.h" 34#include "iwl-io.h"
35#include "iwl-trans-pcie-int.h" 35#include "internal.h"
36#include "iwl-op-mode.h" 36#include "iwl-op-mode.h"
37 37
38#ifdef CONFIG_IWLWIFI_IDI 38#ifdef CONFIG_IWLWIFI_IDI
@@ -130,7 +130,7 @@ static int iwl_rx_queue_space(const struct iwl_rx_queue *q)
130 * iwl_rx_queue_update_write_ptr - Update the write pointer for the RX queue 130 * iwl_rx_queue_update_write_ptr - Update the write pointer for the RX queue
131 */ 131 */
132void iwl_rx_queue_update_write_ptr(struct iwl_trans *trans, 132void iwl_rx_queue_update_write_ptr(struct iwl_trans *trans,
133 struct iwl_rx_queue *q) 133 struct iwl_rx_queue *q)
134{ 134{
135 unsigned long flags; 135 unsigned long flags;
136 u32 reg; 136 u32 reg;
@@ -201,9 +201,7 @@ static inline __le32 iwlagn_dma_addr2rbd_ptr(dma_addr_t dma_addr)
201 */ 201 */
202static void iwlagn_rx_queue_restock(struct iwl_trans *trans) 202static void iwlagn_rx_queue_restock(struct iwl_trans *trans)
203{ 203{
204 struct iwl_trans_pcie *trans_pcie = 204 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
205 IWL_TRANS_GET_PCIE_TRANS(trans);
206
207 struct iwl_rx_queue *rxq = &trans_pcie->rxq; 205 struct iwl_rx_queue *rxq = &trans_pcie->rxq;
208 struct list_head *element; 206 struct list_head *element;
209 struct iwl_rx_mem_buffer *rxb; 207 struct iwl_rx_mem_buffer *rxb;
@@ -253,9 +251,7 @@ static void iwlagn_rx_queue_restock(struct iwl_trans *trans)
253 */ 251 */
254static void iwlagn_rx_allocate(struct iwl_trans *trans, gfp_t priority) 252static void iwlagn_rx_allocate(struct iwl_trans *trans, gfp_t priority)
255{ 253{
256 struct iwl_trans_pcie *trans_pcie = 254 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
257 IWL_TRANS_GET_PCIE_TRANS(trans);
258
259 struct iwl_rx_queue *rxq = &trans_pcie->rxq; 255 struct iwl_rx_queue *rxq = &trans_pcie->rxq;
260 struct list_head *element; 256 struct list_head *element;
261 struct iwl_rx_mem_buffer *rxb; 257 struct iwl_rx_mem_buffer *rxb;
@@ -278,8 +274,7 @@ static void iwlagn_rx_allocate(struct iwl_trans *trans, gfp_t priority)
278 gfp_mask |= __GFP_COMP; 274 gfp_mask |= __GFP_COMP;
279 275
280 /* Alloc a new receive buffer */ 276 /* Alloc a new receive buffer */
281 page = alloc_pages(gfp_mask, 277 page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
282 trans_pcie->rx_page_order);
283 if (!page) { 278 if (!page) {
284 if (net_ratelimit()) 279 if (net_ratelimit())
285 IWL_DEBUG_INFO(trans, "alloc_pages failed, " 280 IWL_DEBUG_INFO(trans, "alloc_pages failed, "
@@ -315,9 +310,10 @@ static void iwlagn_rx_allocate(struct iwl_trans *trans, gfp_t priority)
315 BUG_ON(rxb->page); 310 BUG_ON(rxb->page);
316 rxb->page = page; 311 rxb->page = page;
317 /* Get physical address of the RB */ 312 /* Get physical address of the RB */
318 rxb->page_dma = dma_map_page(trans->dev, page, 0, 313 rxb->page_dma =
319 PAGE_SIZE << trans_pcie->rx_page_order, 314 dma_map_page(trans->dev, page, 0,
320 DMA_FROM_DEVICE); 315 PAGE_SIZE << trans_pcie->rx_page_order,
316 DMA_FROM_DEVICE);
321 /* dma address must be no more than 36 bits */ 317 /* dma address must be no more than 36 bits */
322 BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36)); 318 BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
323 /* and also 256 byte aligned! */ 319 /* and also 256 byte aligned! */
@@ -465,8 +461,8 @@ static void iwl_rx_handle_rxbuf(struct iwl_trans *trans,
465 if (rxb->page != NULL) { 461 if (rxb->page != NULL) {
466 rxb->page_dma = 462 rxb->page_dma =
467 dma_map_page(trans->dev, rxb->page, 0, 463 dma_map_page(trans->dev, rxb->page, 0,
468 PAGE_SIZE << trans_pcie->rx_page_order, 464 PAGE_SIZE << trans_pcie->rx_page_order,
469 DMA_FROM_DEVICE); 465 DMA_FROM_DEVICE);
470 list_add_tail(&rxb->list, &rxq->rx_free); 466 list_add_tail(&rxb->list, &rxq->rx_free);
471 rxq->free_count++; 467 rxq->free_count++;
472 } else 468 } else
@@ -497,7 +493,7 @@ static void iwl_rx_handle(struct iwl_trans *trans)
497 493
498 /* Rx interrupt, but nothing sent from uCode */ 494 /* Rx interrupt, but nothing sent from uCode */
499 if (i == r) 495 if (i == r)
500 IWL_DEBUG_RX(trans, "r = %d, i = %d\n", r, i); 496 IWL_DEBUG_RX(trans, "HW = SW = %d\n", r);
501 497
502 /* calculate total frames need to be restock after handling RX */ 498 /* calculate total frames need to be restock after handling RX */
503 total_empty = r - rxq->write_actual; 499 total_empty = r - rxq->write_actual;
@@ -513,8 +509,8 @@ static void iwl_rx_handle(struct iwl_trans *trans)
513 rxb = rxq->queue[i]; 509 rxb = rxq->queue[i];
514 rxq->queue[i] = NULL; 510 rxq->queue[i] = NULL;
515 511
516 IWL_DEBUG_RX(trans, "rxbuf: r = %d, i = %d (%p)\n", rxb); 512 IWL_DEBUG_RX(trans, "rxbuf: HW = %d, SW = %d (%p)\n",
517 513 r, i, rxb);
518 iwl_rx_handle_rxbuf(trans, rxb); 514 iwl_rx_handle_rxbuf(trans, rxb);
519 515
520 i = (i + 1) & RX_QUEUE_MASK; 516 i = (i + 1) & RX_QUEUE_MASK;
@@ -546,12 +542,12 @@ static void iwl_irq_handle_error(struct iwl_trans *trans)
546 /* W/A for WiFi/WiMAX coex and WiMAX own the RF */ 542 /* W/A for WiFi/WiMAX coex and WiMAX own the RF */
547 if (trans->cfg->internal_wimax_coex && 543 if (trans->cfg->internal_wimax_coex &&
548 (!(iwl_read_prph(trans, APMG_CLK_CTRL_REG) & 544 (!(iwl_read_prph(trans, APMG_CLK_CTRL_REG) &
549 APMS_CLK_VAL_MRB_FUNC_MODE) || 545 APMS_CLK_VAL_MRB_FUNC_MODE) ||
550 (iwl_read_prph(trans, APMG_PS_CTRL_REG) & 546 (iwl_read_prph(trans, APMG_PS_CTRL_REG) &
551 APMG_PS_CTRL_VAL_RESET_REQ))) { 547 APMG_PS_CTRL_VAL_RESET_REQ))) {
552 struct iwl_trans_pcie *trans_pcie; 548 struct iwl_trans_pcie *trans_pcie =
549 IWL_TRANS_GET_PCIE_TRANS(trans);
553 550
554 trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
555 clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status); 551 clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
556 iwl_op_mode_wimax_active(trans->op_mode); 552 iwl_op_mode_wimax_active(trans->op_mode);
557 wake_up(&trans->wait_command_queue); 553 wake_up(&trans->wait_command_queue);
@@ -567,6 +563,8 @@ static void iwl_irq_handle_error(struct iwl_trans *trans)
567/* tasklet for iwlagn interrupt */ 563/* tasklet for iwlagn interrupt */
568void iwl_irq_tasklet(struct iwl_trans *trans) 564void iwl_irq_tasklet(struct iwl_trans *trans)
569{ 565{
566 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
567 struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
570 u32 inta = 0; 568 u32 inta = 0;
571 u32 handled = 0; 569 u32 handled = 0;
572 unsigned long flags; 570 unsigned long flags;
@@ -575,10 +573,6 @@ void iwl_irq_tasklet(struct iwl_trans *trans)
575 u32 inta_mask; 573 u32 inta_mask;
576#endif 574#endif
577 575
578 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
579 struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
580
581
582 spin_lock_irqsave(&trans_pcie->irq_lock, flags); 576 spin_lock_irqsave(&trans_pcie->irq_lock, flags);
583 577
584 /* Ack/clear/reset pending uCode interrupts. 578 /* Ack/clear/reset pending uCode interrupts.
@@ -593,7 +587,7 @@ void iwl_irq_tasklet(struct iwl_trans *trans)
593 * interrupt coalescing can still be achieved. 587 * interrupt coalescing can still be achieved.
594 */ 588 */
595 iwl_write32(trans, CSR_INT, 589 iwl_write32(trans, CSR_INT,
596 trans_pcie->inta | ~trans_pcie->inta_mask); 590 trans_pcie->inta | ~trans_pcie->inta_mask);
597 591
598 inta = trans_pcie->inta; 592 inta = trans_pcie->inta;
599 593
@@ -602,7 +596,7 @@ void iwl_irq_tasklet(struct iwl_trans *trans)
602 /* just for debug */ 596 /* just for debug */
603 inta_mask = iwl_read32(trans, CSR_INT_MASK); 597 inta_mask = iwl_read32(trans, CSR_INT_MASK);
604 IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n", 598 IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n",
605 inta, inta_mask); 599 inta, inta_mask);
606 } 600 }
607#endif 601#endif
608 602
@@ -651,7 +645,7 @@ void iwl_irq_tasklet(struct iwl_trans *trans)
651 645
652 hw_rfkill = iwl_is_rfkill_set(trans); 646 hw_rfkill = iwl_is_rfkill_set(trans);
653 IWL_WARN(trans, "RF_KILL bit toggled to %s.\n", 647 IWL_WARN(trans, "RF_KILL bit toggled to %s.\n",
654 hw_rfkill ? "disable radio" : "enable radio"); 648 hw_rfkill ? "disable radio" : "enable radio");
655 649
656 isr_stats->rfkill++; 650 isr_stats->rfkill++;
657 651
@@ -693,7 +687,7 @@ void iwl_irq_tasklet(struct iwl_trans *trans)
693 * Rx "responses" (frame-received notification), and other 687 * Rx "responses" (frame-received notification), and other
694 * notifications from uCode come through here*/ 688 * notifications from uCode come through here*/
695 if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX | 689 if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX |
696 CSR_INT_BIT_RX_PERIODIC)) { 690 CSR_INT_BIT_RX_PERIODIC)) {
697 IWL_DEBUG_ISR(trans, "Rx interrupt\n"); 691 IWL_DEBUG_ISR(trans, "Rx interrupt\n");
698 if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) { 692 if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
699 handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX); 693 handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
@@ -733,7 +727,7 @@ void iwl_irq_tasklet(struct iwl_trans *trans)
733 */ 727 */
734 if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) 728 if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX))
735 iwl_write8(trans, CSR_INT_PERIODIC_REG, 729 iwl_write8(trans, CSR_INT_PERIODIC_REG,
736 CSR_INT_PERIODIC_ENA); 730 CSR_INT_PERIODIC_ENA);
737 731
738 isr_stats->rx++; 732 isr_stats->rx++;
739 } 733 }
@@ -782,8 +776,7 @@ void iwl_irq_tasklet(struct iwl_trans *trans)
782/* Free dram table */ 776/* Free dram table */
783void iwl_free_isr_ict(struct iwl_trans *trans) 777void iwl_free_isr_ict(struct iwl_trans *trans)
784{ 778{
785 struct iwl_trans_pcie *trans_pcie = 779 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
786 IWL_TRANS_GET_PCIE_TRANS(trans);
787 780
788 if (trans_pcie->ict_tbl) { 781 if (trans_pcie->ict_tbl) {
789 dma_free_coherent(trans->dev, ICT_SIZE, 782 dma_free_coherent(trans->dev, ICT_SIZE,
@@ -802,8 +795,7 @@ void iwl_free_isr_ict(struct iwl_trans *trans)
802 */ 795 */
803int iwl_alloc_isr_ict(struct iwl_trans *trans) 796int iwl_alloc_isr_ict(struct iwl_trans *trans)
804{ 797{
805 struct iwl_trans_pcie *trans_pcie = 798 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
806 IWL_TRANS_GET_PCIE_TRANS(trans);
807 799
808 trans_pcie->ict_tbl = 800 trans_pcie->ict_tbl =
809 dma_alloc_coherent(trans->dev, ICT_SIZE, 801 dma_alloc_coherent(trans->dev, ICT_SIZE,
@@ -837,10 +829,9 @@ int iwl_alloc_isr_ict(struct iwl_trans *trans)
837 */ 829 */
838void iwl_reset_ict(struct iwl_trans *trans) 830void iwl_reset_ict(struct iwl_trans *trans)
839{ 831{
832 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
840 u32 val; 833 u32 val;
841 unsigned long flags; 834 unsigned long flags;
842 struct iwl_trans_pcie *trans_pcie =
843 IWL_TRANS_GET_PCIE_TRANS(trans);
844 835
845 if (!trans_pcie->ict_tbl) 836 if (!trans_pcie->ict_tbl)
846 return; 837 return;
@@ -868,9 +859,7 @@ void iwl_reset_ict(struct iwl_trans *trans)
868/* Device is going down disable ict interrupt usage */ 859/* Device is going down disable ict interrupt usage */
869void iwl_disable_ict(struct iwl_trans *trans) 860void iwl_disable_ict(struct iwl_trans *trans)
870{ 861{
871 struct iwl_trans_pcie *trans_pcie = 862 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
872 IWL_TRANS_GET_PCIE_TRANS(trans);
873
874 unsigned long flags; 863 unsigned long flags;
875 864
876 spin_lock_irqsave(&trans_pcie->irq_lock, flags); 865 spin_lock_irqsave(&trans_pcie->irq_lock, flags);
@@ -878,24 +867,23 @@ void iwl_disable_ict(struct iwl_trans *trans)
878 spin_unlock_irqrestore(&trans_pcie->irq_lock, flags); 867 spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
879} 868}
880 869
870/* legacy (non-ICT) ISR. Assumes that trans_pcie->irq_lock is held */
881static irqreturn_t iwl_isr(int irq, void *data) 871static irqreturn_t iwl_isr(int irq, void *data)
882{ 872{
883 struct iwl_trans *trans = data; 873 struct iwl_trans *trans = data;
884 struct iwl_trans_pcie *trans_pcie; 874 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
885 u32 inta, inta_mask; 875 u32 inta, inta_mask;
886 unsigned long flags;
887#ifdef CONFIG_IWLWIFI_DEBUG 876#ifdef CONFIG_IWLWIFI_DEBUG
888 u32 inta_fh; 877 u32 inta_fh;
889#endif 878#endif
879
880 lockdep_assert_held(&trans_pcie->irq_lock);
881
890 if (!trans) 882 if (!trans)
891 return IRQ_NONE; 883 return IRQ_NONE;
892 884
893 trace_iwlwifi_dev_irq(trans->dev); 885 trace_iwlwifi_dev_irq(trans->dev);
894 886
895 trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
896
897 spin_lock_irqsave(&trans_pcie->irq_lock, flags);
898
899 /* Disable (but don't clear!) interrupts here to avoid 887 /* Disable (but don't clear!) interrupts here to avoid
900 * back-to-back ISRs and sporadic interrupts from our NIC. 888 * back-to-back ISRs and sporadic interrupts from our NIC.
901 * If we have something to service, the tasklet will re-enable ints. 889 * If we have something to service, the tasklet will re-enable ints.
@@ -918,7 +906,7 @@ static irqreturn_t iwl_isr(int irq, void *data)
918 /* Hardware disappeared. It might have already raised 906 /* Hardware disappeared. It might have already raised
919 * an interrupt */ 907 * an interrupt */
920 IWL_WARN(trans, "HARDWARE GONE?? INTA == 0x%08x\n", inta); 908 IWL_WARN(trans, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
921 goto unplugged; 909 return IRQ_HANDLED;
922 } 910 }
923 911
924#ifdef CONFIG_IWLWIFI_DEBUG 912#ifdef CONFIG_IWLWIFI_DEBUG
@@ -934,21 +922,16 @@ static irqreturn_t iwl_isr(int irq, void *data)
934 if (likely(inta)) 922 if (likely(inta))
935 tasklet_schedule(&trans_pcie->irq_tasklet); 923 tasklet_schedule(&trans_pcie->irq_tasklet);
936 else if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) && 924 else if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
937 !trans_pcie->inta) 925 !trans_pcie->inta)
938 iwl_enable_interrupts(trans); 926 iwl_enable_interrupts(trans);
939 927
940 unplugged: 928none:
941 spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
942 return IRQ_HANDLED;
943
944 none:
945 /* re-enable interrupts here since we don't have anything to service. */ 929 /* re-enable interrupts here since we don't have anything to service. */
946 /* only Re-enable if disabled by irq and no schedules tasklet. */ 930 /* only Re-enable if disabled by irq and no schedules tasklet. */
947 if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) && 931 if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
948 !trans_pcie->inta) 932 !trans_pcie->inta)
949 iwl_enable_interrupts(trans); 933 iwl_enable_interrupts(trans);
950 934
951 spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
952 return IRQ_NONE; 935 return IRQ_NONE;
953} 936}
954 937
@@ -974,15 +957,19 @@ irqreturn_t iwl_isr_ict(int irq, void *data)
974 957
975 trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 958 trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
976 959
960 spin_lock_irqsave(&trans_pcie->irq_lock, flags);
961
977 /* dram interrupt table not set yet, 962 /* dram interrupt table not set yet,
978 * use legacy interrupt. 963 * use legacy interrupt.
979 */ 964 */
980 if (!trans_pcie->use_ict) 965 if (unlikely(!trans_pcie->use_ict)) {
981 return iwl_isr(irq, data); 966 irqreturn_t ret = iwl_isr(irq, data);
967 spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
968 return ret;
969 }
982 970
983 trace_iwlwifi_dev_irq(trans->dev); 971 trace_iwlwifi_dev_irq(trans->dev);
984 972
985 spin_lock_irqsave(&trans_pcie->irq_lock, flags);
986 973
987 /* Disable (but don't clear!) interrupts here to avoid 974 /* Disable (but don't clear!) interrupts here to avoid
988 * back-to-back ISRs and sporadic interrupts from our NIC. 975 * back-to-back ISRs and sporadic interrupts from our NIC.
@@ -1036,7 +1023,7 @@ irqreturn_t iwl_isr_ict(int irq, void *data)
1036 1023
1037 inta = (0xff & val) | ((0xff00 & val) << 16); 1024 inta = (0xff & val) | ((0xff00 & val) << 16);
1038 IWL_DEBUG_ISR(trans, "ISR inta 0x%08x, enabled 0x%08x ict 0x%08x\n", 1025 IWL_DEBUG_ISR(trans, "ISR inta 0x%08x, enabled 0x%08x ict 0x%08x\n",
1039 inta, inta_mask, val); 1026 inta, inta_mask, val);
1040 1027
1041 inta &= trans_pcie->inta_mask; 1028 inta &= trans_pcie->inta_mask;
1042 trans_pcie->inta |= inta; 1029 trans_pcie->inta |= inta;
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index 79c6b91417f..d1950838f17 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -70,15 +70,12 @@
70 70
71#include "iwl-drv.h" 71#include "iwl-drv.h"
72#include "iwl-trans.h" 72#include "iwl-trans.h"
73#include "iwl-trans-pcie-int.h"
74#include "iwl-csr.h" 73#include "iwl-csr.h"
75#include "iwl-prph.h" 74#include "iwl-prph.h"
76#include "iwl-eeprom.h"
77#include "iwl-agn-hw.h" 75#include "iwl-agn-hw.h"
76#include "internal.h"
78/* FIXME: need to abstract out TX command (once we know what it looks like) */ 77/* FIXME: need to abstract out TX command (once we know what it looks like) */
79#include "iwl-commands.h" 78#include "dvm/commands.h"
80
81#define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))
82 79
83#define SCD_QUEUECHAIN_SEL_ALL(trans, trans_pcie) \ 80#define SCD_QUEUECHAIN_SEL_ALL(trans, trans_pcie) \
84 (((1<<trans->cfg->base_params->num_of_queues) - 1) &\ 81 (((1<<trans->cfg->base_params->num_of_queues) - 1) &\
@@ -86,8 +83,7 @@
86 83
87static int iwl_trans_rx_alloc(struct iwl_trans *trans) 84static int iwl_trans_rx_alloc(struct iwl_trans *trans)
88{ 85{
89 struct iwl_trans_pcie *trans_pcie = 86 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
90 IWL_TRANS_GET_PCIE_TRANS(trans);
91 struct iwl_rx_queue *rxq = &trans_pcie->rxq; 87 struct iwl_rx_queue *rxq = &trans_pcie->rxq;
92 struct device *dev = trans->dev; 88 struct device *dev = trans->dev;
93 89
@@ -114,7 +110,7 @@ static int iwl_trans_rx_alloc(struct iwl_trans *trans)
114 110
115err_rb_stts: 111err_rb_stts:
116 dma_free_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE, 112 dma_free_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
117 rxq->bd, rxq->bd_dma); 113 rxq->bd, rxq->bd_dma);
118 memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma)); 114 memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
119 rxq->bd = NULL; 115 rxq->bd = NULL;
120err_bd: 116err_bd:
@@ -123,8 +119,7 @@ err_bd:
123 119
124static void iwl_trans_rxq_free_rx_bufs(struct iwl_trans *trans) 120static void iwl_trans_rxq_free_rx_bufs(struct iwl_trans *trans)
125{ 121{
126 struct iwl_trans_pcie *trans_pcie = 122 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
127 IWL_TRANS_GET_PCIE_TRANS(trans);
128 struct iwl_rx_queue *rxq = &trans_pcie->rxq; 123 struct iwl_rx_queue *rxq = &trans_pcie->rxq;
129 int i; 124 int i;
130 125
@@ -134,8 +129,8 @@ static void iwl_trans_rxq_free_rx_bufs(struct iwl_trans *trans)
134 * to an SKB, so we need to unmap and free potential storage */ 129 * to an SKB, so we need to unmap and free potential storage */
135 if (rxq->pool[i].page != NULL) { 130 if (rxq->pool[i].page != NULL) {
136 dma_unmap_page(trans->dev, rxq->pool[i].page_dma, 131 dma_unmap_page(trans->dev, rxq->pool[i].page_dma,
137 PAGE_SIZE << trans_pcie->rx_page_order, 132 PAGE_SIZE << trans_pcie->rx_page_order,
138 DMA_FROM_DEVICE); 133 DMA_FROM_DEVICE);
139 __free_pages(rxq->pool[i].page, 134 __free_pages(rxq->pool[i].page,
140 trans_pcie->rx_page_order); 135 trans_pcie->rx_page_order);
141 rxq->pool[i].page = NULL; 136 rxq->pool[i].page = NULL;
@@ -193,8 +188,7 @@ static void iwl_trans_rx_hw_init(struct iwl_trans *trans,
193 188
194static int iwl_rx_init(struct iwl_trans *trans) 189static int iwl_rx_init(struct iwl_trans *trans)
195{ 190{
196 struct iwl_trans_pcie *trans_pcie = 191 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
197 IWL_TRANS_GET_PCIE_TRANS(trans);
198 struct iwl_rx_queue *rxq = &trans_pcie->rxq; 192 struct iwl_rx_queue *rxq = &trans_pcie->rxq;
199 193
200 int i, err; 194 int i, err;
@@ -236,10 +230,8 @@ static int iwl_rx_init(struct iwl_trans *trans)
236 230
237static void iwl_trans_pcie_rx_free(struct iwl_trans *trans) 231static void iwl_trans_pcie_rx_free(struct iwl_trans *trans)
238{ 232{
239 struct iwl_trans_pcie *trans_pcie = 233 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
240 IWL_TRANS_GET_PCIE_TRANS(trans);
241 struct iwl_rx_queue *rxq = &trans_pcie->rxq; 234 struct iwl_rx_queue *rxq = &trans_pcie->rxq;
242
243 unsigned long flags; 235 unsigned long flags;
244 236
245 /*if rxq->bd is NULL, it means that nothing has been allocated, 237 /*if rxq->bd is NULL, it means that nothing has been allocated,
@@ -274,11 +266,11 @@ static int iwl_trans_rx_stop(struct iwl_trans *trans)
274 /* stop Rx DMA */ 266 /* stop Rx DMA */
275 iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0); 267 iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
276 return iwl_poll_direct_bit(trans, FH_MEM_RSSR_RX_STATUS_REG, 268 return iwl_poll_direct_bit(trans, FH_MEM_RSSR_RX_STATUS_REG,
277 FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000); 269 FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
278} 270}
279 271
280static inline int iwlagn_alloc_dma_ptr(struct iwl_trans *trans, 272static int iwlagn_alloc_dma_ptr(struct iwl_trans *trans,
281 struct iwl_dma_ptr *ptr, size_t size) 273 struct iwl_dma_ptr *ptr, size_t size)
282{ 274{
283 if (WARN_ON(ptr->addr)) 275 if (WARN_ON(ptr->addr))
284 return -EINVAL; 276 return -EINVAL;
@@ -291,8 +283,8 @@ static inline int iwlagn_alloc_dma_ptr(struct iwl_trans *trans,
291 return 0; 283 return 0;
292} 284}
293 285
294static inline void iwlagn_free_dma_ptr(struct iwl_trans *trans, 286static void iwlagn_free_dma_ptr(struct iwl_trans *trans,
295 struct iwl_dma_ptr *ptr) 287 struct iwl_dma_ptr *ptr)
296{ 288{
297 if (unlikely(!ptr->addr)) 289 if (unlikely(!ptr->addr))
298 return; 290 return;
@@ -304,8 +296,13 @@ static inline void iwlagn_free_dma_ptr(struct iwl_trans *trans,
304static void iwl_trans_pcie_queue_stuck_timer(unsigned long data) 296static void iwl_trans_pcie_queue_stuck_timer(unsigned long data)
305{ 297{
306 struct iwl_tx_queue *txq = (void *)data; 298 struct iwl_tx_queue *txq = (void *)data;
299 struct iwl_queue *q = &txq->q;
307 struct iwl_trans_pcie *trans_pcie = txq->trans_pcie; 300 struct iwl_trans_pcie *trans_pcie = txq->trans_pcie;
308 struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie); 301 struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie);
302 u32 scd_sram_addr = trans_pcie->scd_base_addr +
303 SCD_TX_STTS_MEM_LOWER_BOUND + (16 * txq->q.id);
304 u8 buf[16];
305 int i;
309 306
310 spin_lock(&txq->lock); 307 spin_lock(&txq->lock);
311 /* check if triggered erroneously */ 308 /* check if triggered erroneously */
@@ -315,26 +312,59 @@ static void iwl_trans_pcie_queue_stuck_timer(unsigned long data)
315 } 312 }
316 spin_unlock(&txq->lock); 313 spin_unlock(&txq->lock);
317 314
318
319 IWL_ERR(trans, "Queue %d stuck for %u ms.\n", txq->q.id, 315 IWL_ERR(trans, "Queue %d stuck for %u ms.\n", txq->q.id,
320 jiffies_to_msecs(trans_pcie->wd_timeout)); 316 jiffies_to_msecs(trans_pcie->wd_timeout));
321 IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n", 317 IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n",
322 txq->q.read_ptr, txq->q.write_ptr); 318 txq->q.read_ptr, txq->q.write_ptr);
323 IWL_ERR(trans, "Current HW read_ptr %d write_ptr %d\n", 319
324 iwl_read_prph(trans, SCD_QUEUE_RDPTR(txq->q.id)) 320 iwl_read_targ_mem_bytes(trans, scd_sram_addr, buf, sizeof(buf));
325 & (TFD_QUEUE_SIZE_MAX - 1), 321
326 iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq->q.id))); 322 iwl_print_hex_error(trans, buf, sizeof(buf));
323
324 for (i = 0; i < FH_TCSR_CHNL_NUM; i++)
325 IWL_ERR(trans, "FH TRBs(%d) = 0x%08x\n", i,
326 iwl_read_direct32(trans, FH_TX_TRB_REG(i)));
327
328 for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
329 u32 status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(i));
330 u8 fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7;
331 bool active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE));
332 u32 tbl_dw =
333 iwl_read_targ_mem(trans,
334 trans_pcie->scd_base_addr +
335 SCD_TRANS_TBL_OFFSET_QUEUE(i));
336
337 if (i & 0x1)
338 tbl_dw = (tbl_dw & 0xFFFF0000) >> 16;
339 else
340 tbl_dw = tbl_dw & 0x0000FFFF;
341
342 IWL_ERR(trans,
343 "Q %d is %sactive and mapped to fifo %d ra_tid 0x%04x [%d,%d]\n",
344 i, active ? "" : "in", fifo, tbl_dw,
345 iwl_read_prph(trans,
346 SCD_QUEUE_RDPTR(i)) & (txq->q.n_bd - 1),
347 iwl_read_prph(trans, SCD_QUEUE_WRPTR(i)));
348 }
349
350 for (i = q->read_ptr; i != q->write_ptr;
351 i = iwl_queue_inc_wrap(i, q->n_bd)) {
352 struct iwl_tx_cmd *tx_cmd =
353 (struct iwl_tx_cmd *)txq->entries[i].cmd->payload;
354 IWL_ERR(trans, "scratch %d = 0x%08x\n", i,
355 get_unaligned_le32(&tx_cmd->scratch));
356 }
327 357
328 iwl_op_mode_nic_error(trans->op_mode); 358 iwl_op_mode_nic_error(trans->op_mode);
329} 359}
330 360
331static int iwl_trans_txq_alloc(struct iwl_trans *trans, 361static int iwl_trans_txq_alloc(struct iwl_trans *trans,
332 struct iwl_tx_queue *txq, int slots_num, 362 struct iwl_tx_queue *txq, int slots_num,
333 u32 txq_id) 363 u32 txq_id)
334{ 364{
365 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
335 size_t tfd_sz = sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX; 366 size_t tfd_sz = sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX;
336 int i; 367 int i;
337 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
338 368
339 if (WARN_ON(txq->entries || txq->tfds)) 369 if (WARN_ON(txq->entries || txq->tfds))
340 return -EINVAL; 370 return -EINVAL;
@@ -435,7 +465,7 @@ static void iwl_tx_queue_unmap(struct iwl_trans *trans, int txq_id)
435 465
436 spin_lock_bh(&txq->lock); 466 spin_lock_bh(&txq->lock);
437 while (q->write_ptr != q->read_ptr) { 467 while (q->write_ptr != q->read_ptr) {
438 iwlagn_txq_free_tfd(trans, txq, dma_dir); 468 iwl_txq_free_tfd(trans, txq, dma_dir);
439 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd); 469 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
440 } 470 }
441 spin_unlock_bh(&txq->lock); 471 spin_unlock_bh(&txq->lock);
@@ -455,6 +485,7 @@ static void iwl_tx_queue_free(struct iwl_trans *trans, int txq_id)
455 struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id]; 485 struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
456 struct device *dev = trans->dev; 486 struct device *dev = trans->dev;
457 int i; 487 int i;
488
458 if (WARN_ON(!txq)) 489 if (WARN_ON(!txq))
459 return; 490 return;
460 491
@@ -574,11 +605,11 @@ error:
574} 605}
575static int iwl_tx_init(struct iwl_trans *trans) 606static int iwl_tx_init(struct iwl_trans *trans)
576{ 607{
608 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
577 int ret; 609 int ret;
578 int txq_id, slots_num; 610 int txq_id, slots_num;
579 unsigned long flags; 611 unsigned long flags;
580 bool alloc = false; 612 bool alloc = false;
581 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
582 613
583 if (!trans_pcie->txq) { 614 if (!trans_pcie->txq) {
584 ret = iwl_trans_tx_alloc(trans); 615 ret = iwl_trans_tx_alloc(trans);
@@ -643,10 +674,9 @@ static void iwl_set_pwr_vmain(struct iwl_trans *trans)
643 674
644static u16 iwl_pciexp_link_ctrl(struct iwl_trans *trans) 675static u16 iwl_pciexp_link_ctrl(struct iwl_trans *trans)
645{ 676{
677 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
646 int pos; 678 int pos;
647 u16 pci_lnk_ctl; 679 u16 pci_lnk_ctl;
648 struct iwl_trans_pcie *trans_pcie =
649 IWL_TRANS_GET_PCIE_TRANS(trans);
650 680
651 struct pci_dev *pci_dev = trans_pcie->pci_dev; 681 struct pci_dev *pci_dev = trans_pcie->pci_dev;
652 682
@@ -700,14 +730,14 @@ static int iwl_apm_init(struct iwl_trans *trans)
700 730
701 /* Disable L0S exit timer (platform NMI Work/Around) */ 731 /* Disable L0S exit timer (platform NMI Work/Around) */
702 iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS, 732 iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
703 CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER); 733 CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
704 734
705 /* 735 /*
706 * Disable L0s without affecting L1; 736 * Disable L0s without affecting L1;
707 * don't wait for ICH L0s (ICH bug W/A) 737 * don't wait for ICH L0s (ICH bug W/A)
708 */ 738 */
709 iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS, 739 iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
710 CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX); 740 CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
711 741
712 /* Set FH wait threshold to maximum (HW error during stress W/A) */ 742 /* Set FH wait threshold to maximum (HW error during stress W/A) */
713 iwl_set_bit(trans, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL); 743 iwl_set_bit(trans, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);
@@ -717,7 +747,7 @@ static int iwl_apm_init(struct iwl_trans *trans)
717 * wake device's PCI Express link L1a -> L0s 747 * wake device's PCI Express link L1a -> L0s
718 */ 748 */
719 iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, 749 iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
720 CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A); 750 CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
721 751
722 iwl_apm_config(trans); 752 iwl_apm_config(trans);
723 753
@@ -738,8 +768,8 @@ static int iwl_apm_init(struct iwl_trans *trans)
738 * and accesses to uCode SRAM. 768 * and accesses to uCode SRAM.
739 */ 769 */
740 ret = iwl_poll_bit(trans, CSR_GP_CNTRL, 770 ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
741 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 771 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
742 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000); 772 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
743 if (ret < 0) { 773 if (ret < 0) {
744 IWL_DEBUG_INFO(trans, "Failed to init the card\n"); 774 IWL_DEBUG_INFO(trans, "Failed to init the card\n");
745 goto out; 775 goto out;
@@ -773,8 +803,8 @@ static int iwl_apm_stop_master(struct iwl_trans *trans)
773 iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER); 803 iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);
774 804
775 ret = iwl_poll_bit(trans, CSR_RESET, 805 ret = iwl_poll_bit(trans, CSR_RESET,
776 CSR_RESET_REG_FLAG_MASTER_DISABLED, 806 CSR_RESET_REG_FLAG_MASTER_DISABLED,
777 CSR_RESET_REG_FLAG_MASTER_DISABLED, 100); 807 CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
778 if (ret) 808 if (ret)
779 IWL_WARN(trans, "Master Disable Timed Out, 100 usec\n"); 809 IWL_WARN(trans, "Master Disable Timed Out, 100 usec\n");
780 810
@@ -816,8 +846,7 @@ static int iwl_nic_init(struct iwl_trans *trans)
816 iwl_apm_init(trans); 846 iwl_apm_init(trans);
817 847
818 /* Set interrupt coalescing calibration timer to default (512 usecs) */ 848 /* Set interrupt coalescing calibration timer to default (512 usecs) */
819 iwl_write8(trans, CSR_INT_COALESCING, 849 iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_CALIB_TIMEOUT_DEF);
820 IWL_HOST_INT_CALIB_TIMEOUT_DEF);
821 850
822 spin_unlock_irqrestore(&trans_pcie->irq_lock, flags); 851 spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
823 852
@@ -836,8 +865,8 @@ static int iwl_nic_init(struct iwl_trans *trans)
836 865
837 if (trans->cfg->base_params->shadow_reg_enable) { 866 if (trans->cfg->base_params->shadow_reg_enable) {
838 /* enable shadow regs in HW */ 867 /* enable shadow regs in HW */
839 iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL, 868 iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL, 0x800FFFFF);
840 0x800FFFFF); 869 IWL_DEBUG_INFO(trans, "Enabling shadow registers in device\n");
841 } 870 }
842 871
843 return 0; 872 return 0;
@@ -851,13 +880,13 @@ static int iwl_set_hw_ready(struct iwl_trans *trans)
851 int ret; 880 int ret;
852 881
853 iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, 882 iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
854 CSR_HW_IF_CONFIG_REG_BIT_NIC_READY); 883 CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);
855 884
856 /* See if we got it */ 885 /* See if we got it */
857 ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG, 886 ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
858 CSR_HW_IF_CONFIG_REG_BIT_NIC_READY, 887 CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
859 CSR_HW_IF_CONFIG_REG_BIT_NIC_READY, 888 CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
860 HW_READY_TIMEOUT); 889 HW_READY_TIMEOUT);
861 890
862 IWL_DEBUG_INFO(trans, "hardware%s ready\n", ret < 0 ? " not" : ""); 891 IWL_DEBUG_INFO(trans, "hardware%s ready\n", ret < 0 ? " not" : "");
863 return ret; 892 return ret;
@@ -877,11 +906,11 @@ static int iwl_prepare_card_hw(struct iwl_trans *trans)
877 906
878 /* If HW is not ready, prepare the conditions to check again */ 907 /* If HW is not ready, prepare the conditions to check again */
879 iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, 908 iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
880 CSR_HW_IF_CONFIG_REG_PREPARE); 909 CSR_HW_IF_CONFIG_REG_PREPARE);
881 910
882 ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG, 911 ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
883 ~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 912 ~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE,
884 CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000); 913 CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000);
885 914
886 if (ret < 0) 915 if (ret < 0)
887 return ret; 916 return ret;
@@ -908,32 +937,33 @@ static int iwl_load_section(struct iwl_trans *trans, u8 section_num,
908 trans_pcie->ucode_write_complete = false; 937 trans_pcie->ucode_write_complete = false;
909 938
910 iwl_write_direct32(trans, 939 iwl_write_direct32(trans,
911 FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL), 940 FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
912 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE); 941 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
913 942
914 iwl_write_direct32(trans, 943 iwl_write_direct32(trans,
915 FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL), dst_addr); 944 FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL),
945 dst_addr);
916 946
917 iwl_write_direct32(trans, 947 iwl_write_direct32(trans,
918 FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL), 948 FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
919 phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK); 949 phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
920 950
921 iwl_write_direct32(trans, 951 iwl_write_direct32(trans,
922 FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL), 952 FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
923 (iwl_get_dma_hi_addr(phy_addr) 953 (iwl_get_dma_hi_addr(phy_addr)
924 << FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt); 954 << FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
925 955
926 iwl_write_direct32(trans, 956 iwl_write_direct32(trans,
927 FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL), 957 FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL),
928 1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM | 958 1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
929 1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX | 959 1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
930 FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID); 960 FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
931 961
932 iwl_write_direct32(trans, 962 iwl_write_direct32(trans,
933 FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL), 963 FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
934 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE | 964 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
935 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE | 965 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
936 FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD); 966 FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
937 967
938 IWL_DEBUG_FW(trans, "[%d] uCode section being loaded...\n", 968 IWL_DEBUG_FW(trans, "[%d] uCode section being loaded...\n",
939 section_num); 969 section_num);
@@ -1016,15 +1046,12 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
1016 1046
1017/* 1047/*
1018 * Activate/Deactivate Tx DMA/FIFO channels according tx fifos mask 1048 * Activate/Deactivate Tx DMA/FIFO channels according tx fifos mask
1019 * must be called under the irq lock and with MAC access
1020 */ 1049 */
1021static void iwl_trans_txq_set_sched(struct iwl_trans *trans, u32 mask) 1050static void iwl_trans_txq_set_sched(struct iwl_trans *trans, u32 mask)
1022{ 1051{
1023 struct iwl_trans_pcie __maybe_unused *trans_pcie = 1052 struct iwl_trans_pcie __maybe_unused *trans_pcie =
1024 IWL_TRANS_GET_PCIE_TRANS(trans); 1053 IWL_TRANS_GET_PCIE_TRANS(trans);
1025 1054
1026 lockdep_assert_held(&trans_pcie->irq_lock);
1027
1028 iwl_write_prph(trans, SCD_TXFACT, mask); 1055 iwl_write_prph(trans, SCD_TXFACT, mask);
1029} 1056}
1030 1057
@@ -1032,11 +1059,12 @@ static void iwl_tx_start(struct iwl_trans *trans)
1032{ 1059{
1033 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1060 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1034 u32 a; 1061 u32 a;
1035 unsigned long flags;
1036 int i, chan; 1062 int i, chan;
1037 u32 reg_val; 1063 u32 reg_val;
1038 1064
1039 spin_lock_irqsave(&trans_pcie->irq_lock, flags); 1065 /* make sure all queue are not stopped/used */
1066 memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
1067 memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));
1040 1068
1041 trans_pcie->scd_base_addr = 1069 trans_pcie->scd_base_addr =
1042 iwl_read_prph(trans, SCD_SRAM_BASE_ADDR); 1070 iwl_read_prph(trans, SCD_SRAM_BASE_ADDR);
@@ -1063,64 +1091,30 @@ static void iwl_tx_start(struct iwl_trans *trans)
1063 */ 1091 */
1064 iwl_write_prph(trans, SCD_CHAINEXT_EN, 0); 1092 iwl_write_prph(trans, SCD_CHAINEXT_EN, 0);
1065 1093
1094 for (i = 0; i < trans_pcie->n_q_to_fifo; i++) {
1095 int fifo = trans_pcie->setup_q_to_fifo[i];
1096
1097 iwl_trans_pcie_txq_enable(trans, i, fifo, IWL_INVALID_STATION,
1098 IWL_TID_NON_QOS, SCD_FRAME_LIMIT, 0);
1099 }
1100
1101 /* Activate all Tx DMA/FIFO channels */
1102 iwl_trans_txq_set_sched(trans, IWL_MASK(0, 7));
1103
1066 /* Enable DMA channel */ 1104 /* Enable DMA channel */
1067 for (chan = 0; chan < FH_TCSR_CHNL_NUM ; chan++) 1105 for (chan = 0; chan < FH_TCSR_CHNL_NUM ; chan++)
1068 iwl_write_direct32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(chan), 1106 iwl_write_direct32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
1069 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE | 1107 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
1070 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE); 1108 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
1071 1109
1072 /* Update FH chicken bits */ 1110 /* Update FH chicken bits */
1073 reg_val = iwl_read_direct32(trans, FH_TX_CHICKEN_BITS_REG); 1111 reg_val = iwl_read_direct32(trans, FH_TX_CHICKEN_BITS_REG);
1074 iwl_write_direct32(trans, FH_TX_CHICKEN_BITS_REG, 1112 iwl_write_direct32(trans, FH_TX_CHICKEN_BITS_REG,
1075 reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN); 1113 reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
1076 1114
1077 iwl_write_prph(trans, SCD_QUEUECHAIN_SEL,
1078 SCD_QUEUECHAIN_SEL_ALL(trans, trans_pcie));
1079 iwl_write_prph(trans, SCD_AGGR_SEL, 0);
1080
1081 /* initiate the queues */
1082 for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
1083 iwl_write_prph(trans, SCD_QUEUE_RDPTR(i), 0);
1084 iwl_write_direct32(trans, HBUS_TARG_WRPTR, 0 | (i << 8));
1085 iwl_write_targ_mem(trans, trans_pcie->scd_base_addr +
1086 SCD_CONTEXT_QUEUE_OFFSET(i), 0);
1087 iwl_write_targ_mem(trans, trans_pcie->scd_base_addr +
1088 SCD_CONTEXT_QUEUE_OFFSET(i) +
1089 sizeof(u32),
1090 ((SCD_WIN_SIZE <<
1091 SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
1092 SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
1093 ((SCD_FRAME_LIMIT <<
1094 SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
1095 SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
1096 }
1097
1098 iwl_write_prph(trans, SCD_INTERRUPT_MASK,
1099 IWL_MASK(0, trans->cfg->base_params->num_of_queues));
1100
1101 /* Activate all Tx DMA/FIFO channels */
1102 iwl_trans_txq_set_sched(trans, IWL_MASK(0, 7));
1103
1104 iwl_trans_set_wr_ptrs(trans, trans_pcie->cmd_queue, 0);
1105
1106 /* make sure all queue are not stopped/used */
1107 memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
1108 memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));
1109
1110 for (i = 0; i < trans_pcie->n_q_to_fifo; i++) {
1111 int fifo = trans_pcie->setup_q_to_fifo[i];
1112
1113 set_bit(i, trans_pcie->queue_used);
1114
1115 iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[i],
1116 fifo, true);
1117 }
1118
1119 spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
1120
1121 /* Enable L1-Active */ 1115 /* Enable L1-Active */
1122 iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG, 1116 iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
1123 APMG_PCIDEV_STT_VAL_L1_ACT_DIS); 1117 APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
1124} 1118}
1125 1119
1126static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans) 1120static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans)
@@ -1134,9 +1128,9 @@ static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans)
1134 */ 1128 */
1135static int iwl_trans_tx_stop(struct iwl_trans *trans) 1129static int iwl_trans_tx_stop(struct iwl_trans *trans)
1136{ 1130{
1131 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1137 int ch, txq_id, ret; 1132 int ch, txq_id, ret;
1138 unsigned long flags; 1133 unsigned long flags;
1139 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1140 1134
1141 /* Turn off all Tx DMA fifos */ 1135 /* Turn off all Tx DMA fifos */
1142 spin_lock_irqsave(&trans_pcie->irq_lock, flags); 1136 spin_lock_irqsave(&trans_pcie->irq_lock, flags);
@@ -1148,13 +1142,13 @@ static int iwl_trans_tx_stop(struct iwl_trans *trans)
1148 iwl_write_direct32(trans, 1142 iwl_write_direct32(trans,
1149 FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0); 1143 FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
1150 ret = iwl_poll_direct_bit(trans, FH_TSSR_TX_STATUS_REG, 1144 ret = iwl_poll_direct_bit(trans, FH_TSSR_TX_STATUS_REG,
1151 FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch), 1145 FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch), 1000);
1152 1000);
1153 if (ret < 0) 1146 if (ret < 0)
1154 IWL_ERR(trans, "Failing on timeout while stopping" 1147 IWL_ERR(trans,
1155 " DMA channel %d [0x%08x]", ch, 1148 "Failing on timeout while stopping DMA channel %d [0x%08x]",
1156 iwl_read_direct32(trans, 1149 ch,
1157 FH_TSSR_TX_STATUS_REG)); 1150 iwl_read_direct32(trans,
1151 FH_TSSR_TX_STATUS_REG));
1158 } 1152 }
1159 spin_unlock_irqrestore(&trans_pcie->irq_lock, flags); 1153 spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
1160 1154
@@ -1173,8 +1167,8 @@ static int iwl_trans_tx_stop(struct iwl_trans *trans)
1173 1167
1174static void iwl_trans_pcie_stop_device(struct iwl_trans *trans) 1168static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
1175{ 1169{
1176 unsigned long flags;
1177 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1170 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1171 unsigned long flags;
1178 1172
1179 /* tell the device to stop sending interrupts */ 1173 /* tell the device to stop sending interrupts */
1180 spin_lock_irqsave(&trans_pcie->irq_lock, flags); 1174 spin_lock_irqsave(&trans_pcie->irq_lock, flags);
@@ -1204,7 +1198,7 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
1204 1198
1205 /* Make sure (redundant) we've released our request to stay awake */ 1199 /* Make sure (redundant) we've released our request to stay awake */
1206 iwl_clear_bit(trans, CSR_GP_CNTRL, 1200 iwl_clear_bit(trans, CSR_GP_CNTRL,
1207 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 1201 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1208 1202
1209 /* Stop the device, and put it in low power state */ 1203 /* Stop the device, and put it in low power state */
1210 iwl_apm_stop(trans); 1204 iwl_apm_stop(trans);
@@ -1273,13 +1267,27 @@ static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
1273 1267
1274 spin_lock(&txq->lock); 1268 spin_lock(&txq->lock);
1275 1269
1270 /* In AGG mode, the index in the ring must correspond to the WiFi
1271 * sequence number. This is a HW requirements to help the SCD to parse
1272 * the BA.
1273 * Check here that the packets are in the right place on the ring.
1274 */
1275#ifdef CONFIG_IWLWIFI_DEBUG
1276 wifi_seq = SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
1277 WARN_ONCE((iwl_read_prph(trans, SCD_AGGR_SEL) & BIT(txq_id)) &&
1278 ((wifi_seq & 0xff) != q->write_ptr),
1279 "Q: %d WiFi Seq %d tfdNum %d",
1280 txq_id, wifi_seq, q->write_ptr);
1281#endif
1282
1276 /* Set up driver data for this TFD */ 1283 /* Set up driver data for this TFD */
1277 txq->entries[q->write_ptr].skb = skb; 1284 txq->entries[q->write_ptr].skb = skb;
1278 txq->entries[q->write_ptr].cmd = dev_cmd; 1285 txq->entries[q->write_ptr].cmd = dev_cmd;
1279 1286
1280 dev_cmd->hdr.cmd = REPLY_TX; 1287 dev_cmd->hdr.cmd = REPLY_TX;
1281 dev_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) | 1288 dev_cmd->hdr.sequence =
1282 INDEX_TO_SEQ(q->write_ptr))); 1289 cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
1290 INDEX_TO_SEQ(q->write_ptr)));
1283 1291
1284 /* Set up first empty entry in queue's array of Tx/cmd buffers */ 1292 /* Set up first empty entry in queue's array of Tx/cmd buffers */
1285 out_meta = &txq->entries[q->write_ptr].meta; 1293 out_meta = &txq->entries[q->write_ptr].meta;
@@ -1344,7 +1352,7 @@ static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
1344 1352
1345 /* take back ownership of DMA buffer to enable update */ 1353 /* take back ownership of DMA buffer to enable update */
1346 dma_sync_single_for_cpu(trans->dev, txcmd_phys, firstlen, 1354 dma_sync_single_for_cpu(trans->dev, txcmd_phys, firstlen,
1347 DMA_BIDIRECTIONAL); 1355 DMA_BIDIRECTIONAL);
1348 tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys); 1356 tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
1349 tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys); 1357 tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);
1350 1358
@@ -1356,16 +1364,17 @@ static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
1356 iwl_trans_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len)); 1364 iwl_trans_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len));
1357 1365
1358 dma_sync_single_for_device(trans->dev, txcmd_phys, firstlen, 1366 dma_sync_single_for_device(trans->dev, txcmd_phys, firstlen,
1359 DMA_BIDIRECTIONAL); 1367 DMA_BIDIRECTIONAL);
1360 1368
1361 trace_iwlwifi_dev_tx(trans->dev, 1369 trace_iwlwifi_dev_tx(trans->dev,
1362 &((struct iwl_tfd *)txq->tfds)[txq->q.write_ptr], 1370 &txq->tfds[txq->q.write_ptr],
1363 sizeof(struct iwl_tfd), 1371 sizeof(struct iwl_tfd),
1364 &dev_cmd->hdr, firstlen, 1372 &dev_cmd->hdr, firstlen,
1365 skb->data + hdr_len, secondlen); 1373 skb->data + hdr_len, secondlen);
1366 1374
1367 /* start timer if queue currently empty */ 1375 /* start timer if queue currently empty */
1368 if (q->read_ptr == q->write_ptr && trans_pcie->wd_timeout) 1376 if (txq->need_update && q->read_ptr == q->write_ptr &&
1377 trans_pcie->wd_timeout)
1369 mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout); 1378 mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout);
1370 1379
1371 /* Tell device the write index *just past* this latest filled TFD */ 1380 /* Tell device the write index *just past* this latest filled TFD */
@@ -1395,8 +1404,7 @@ static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
1395 1404
1396static int iwl_trans_pcie_start_hw(struct iwl_trans *trans) 1405static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
1397{ 1406{
1398 struct iwl_trans_pcie *trans_pcie = 1407 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1399 IWL_TRANS_GET_PCIE_TRANS(trans);
1400 int err; 1408 int err;
1401 bool hw_rfkill; 1409 bool hw_rfkill;
1402 1410
@@ -1409,7 +1417,7 @@ static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
1409 iwl_alloc_isr_ict(trans); 1417 iwl_alloc_isr_ict(trans);
1410 1418
1411 err = request_irq(trans_pcie->irq, iwl_isr_ict, IRQF_SHARED, 1419 err = request_irq(trans_pcie->irq, iwl_isr_ict, IRQF_SHARED,
1412 DRV_NAME, trans); 1420 DRV_NAME, trans);
1413 if (err) { 1421 if (err) {
1414 IWL_ERR(trans, "Error allocating IRQ %d\n", 1422 IWL_ERR(trans, "Error allocating IRQ %d\n",
1415 trans_pcie->irq); 1423 trans_pcie->irq);
@@ -1447,9 +1455,9 @@ error:
1447static void iwl_trans_pcie_stop_hw(struct iwl_trans *trans, 1455static void iwl_trans_pcie_stop_hw(struct iwl_trans *trans,
1448 bool op_mode_leaving) 1456 bool op_mode_leaving)
1449{ 1457{
1458 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1450 bool hw_rfkill; 1459 bool hw_rfkill;
1451 unsigned long flags; 1460 unsigned long flags;
1452 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1453 1461
1454 iwl_apm_stop(trans); 1462 iwl_apm_stop(trans);
1455 1463
@@ -1553,8 +1561,7 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans,
1553 1561
1554void iwl_trans_pcie_free(struct iwl_trans *trans) 1562void iwl_trans_pcie_free(struct iwl_trans *trans)
1555{ 1563{
1556 struct iwl_trans_pcie *trans_pcie = 1564 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1557 IWL_TRANS_GET_PCIE_TRANS(trans);
1558 1565
1559 iwl_trans_pcie_tx_free(trans); 1566 iwl_trans_pcie_tx_free(trans);
1560#ifndef CONFIG_IWLWIFI_IDI 1567#ifndef CONFIG_IWLWIFI_IDI
@@ -1569,6 +1576,7 @@ void iwl_trans_pcie_free(struct iwl_trans *trans)
1569 iounmap(trans_pcie->hw_base); 1576 iounmap(trans_pcie->hw_base);
1570 pci_release_regions(trans_pcie->pci_dev); 1577 pci_release_regions(trans_pcie->pci_dev);
1571 pci_disable_device(trans_pcie->pci_dev); 1578 pci_disable_device(trans_pcie->pci_dev);
1579 kmem_cache_destroy(trans->dev_cmd_pool);
1572 1580
1573 kfree(trans); 1581 kfree(trans);
1574} 1582}
@@ -1816,8 +1824,8 @@ static const struct file_operations iwl_dbgfs_##name##_ops = { \
1816}; 1824};
1817 1825
1818static ssize_t iwl_dbgfs_tx_queue_read(struct file *file, 1826static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
1819 char __user *user_buf, 1827 char __user *user_buf,
1820 size_t count, loff_t *ppos) 1828 size_t count, loff_t *ppos)
1821{ 1829{
1822 struct iwl_trans *trans = file->private_data; 1830 struct iwl_trans *trans = file->private_data;
1823 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1831 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -1853,11 +1861,11 @@ static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
1853} 1861}
1854 1862
1855static ssize_t iwl_dbgfs_rx_queue_read(struct file *file, 1863static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
1856 char __user *user_buf, 1864 char __user *user_buf,
1857 size_t count, loff_t *ppos) { 1865 size_t count, loff_t *ppos)
1866{
1858 struct iwl_trans *trans = file->private_data; 1867 struct iwl_trans *trans = file->private_data;
1859 struct iwl_trans_pcie *trans_pcie = 1868 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1860 IWL_TRANS_GET_PCIE_TRANS(trans);
1861 struct iwl_rx_queue *rxq = &trans_pcie->rxq; 1869 struct iwl_rx_queue *rxq = &trans_pcie->rxq;
1862 char buf[256]; 1870 char buf[256];
1863 int pos = 0; 1871 int pos = 0;
@@ -1881,11 +1889,10 @@ static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
1881 1889
1882static ssize_t iwl_dbgfs_interrupt_read(struct file *file, 1890static ssize_t iwl_dbgfs_interrupt_read(struct file *file,
1883 char __user *user_buf, 1891 char __user *user_buf,
1884 size_t count, loff_t *ppos) { 1892 size_t count, loff_t *ppos)
1885 1893{
1886 struct iwl_trans *trans = file->private_data; 1894 struct iwl_trans *trans = file->private_data;
1887 struct iwl_trans_pcie *trans_pcie = 1895 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1888 IWL_TRANS_GET_PCIE_TRANS(trans);
1889 struct isr_statistics *isr_stats = &trans_pcie->isr_stats; 1896 struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
1890 1897
1891 int pos = 0; 1898 int pos = 0;
@@ -1943,8 +1950,7 @@ static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
1943 size_t count, loff_t *ppos) 1950 size_t count, loff_t *ppos)
1944{ 1951{
1945 struct iwl_trans *trans = file->private_data; 1952 struct iwl_trans *trans = file->private_data;
1946 struct iwl_trans_pcie *trans_pcie = 1953 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1947 IWL_TRANS_GET_PCIE_TRANS(trans);
1948 struct isr_statistics *isr_stats = &trans_pcie->isr_stats; 1954 struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
1949 1955
1950 char buf[8]; 1956 char buf[8];
@@ -1964,8 +1970,8 @@ static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
1964} 1970}
1965 1971
1966static ssize_t iwl_dbgfs_csr_write(struct file *file, 1972static ssize_t iwl_dbgfs_csr_write(struct file *file,
1967 const char __user *user_buf, 1973 const char __user *user_buf,
1968 size_t count, loff_t *ppos) 1974 size_t count, loff_t *ppos)
1969{ 1975{
1970 struct iwl_trans *trans = file->private_data; 1976 struct iwl_trans *trans = file->private_data;
1971 char buf[8]; 1977 char buf[8];
@@ -1985,8 +1991,8 @@ static ssize_t iwl_dbgfs_csr_write(struct file *file,
1985} 1991}
1986 1992
1987static ssize_t iwl_dbgfs_fh_reg_read(struct file *file, 1993static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
1988 char __user *user_buf, 1994 char __user *user_buf,
1989 size_t count, loff_t *ppos) 1995 size_t count, loff_t *ppos)
1990{ 1996{
1991 struct iwl_trans *trans = file->private_data; 1997 struct iwl_trans *trans = file->private_data;
1992 char *buf; 1998 char *buf;
@@ -2012,7 +2018,9 @@ static ssize_t iwl_dbgfs_fw_restart_write(struct file *file,
2012 if (!trans->op_mode) 2018 if (!trans->op_mode)
2013 return -EAGAIN; 2019 return -EAGAIN;
2014 2020
2021 local_bh_disable();
2015 iwl_op_mode_nic_error(trans->op_mode); 2022 iwl_op_mode_nic_error(trans->op_mode);
2023 local_bh_enable();
2016 2024
2017 return count; 2025 return count;
2018} 2026}
@@ -2029,7 +2037,7 @@ DEBUGFS_WRITE_FILE_OPS(fw_restart);
2029 * 2037 *
2030 */ 2038 */
2031static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans, 2039static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
2032 struct dentry *dir) 2040 struct dentry *dir)
2033{ 2041{
2034 DEBUGFS_ADD_FILE(rx_queue, dir, S_IRUSR); 2042 DEBUGFS_ADD_FILE(rx_queue, dir, S_IRUSR);
2035 DEBUGFS_ADD_FILE(tx_queue, dir, S_IRUSR); 2043 DEBUGFS_ADD_FILE(tx_queue, dir, S_IRUSR);
@@ -2041,9 +2049,10 @@ static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
2041} 2049}
2042#else 2050#else
2043static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans, 2051static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
2044 struct dentry *dir) 2052 struct dentry *dir)
2045{ return 0; } 2053{
2046 2054 return 0;
2055}
2047#endif /*CONFIG_IWLWIFI_DEBUGFS */ 2056#endif /*CONFIG_IWLWIFI_DEBUGFS */
2048 2057
2049static const struct iwl_trans_ops trans_ops_pcie = { 2058static const struct iwl_trans_ops trans_ops_pcie = {
@@ -2060,8 +2069,8 @@ static const struct iwl_trans_ops trans_ops_pcie = {
2060 .tx = iwl_trans_pcie_tx, 2069 .tx = iwl_trans_pcie_tx,
2061 .reclaim = iwl_trans_pcie_reclaim, 2070 .reclaim = iwl_trans_pcie_reclaim,
2062 2071
2063 .tx_agg_disable = iwl_trans_pcie_tx_agg_disable, 2072 .txq_disable = iwl_trans_pcie_txq_disable,
2064 .tx_agg_setup = iwl_trans_pcie_tx_agg_setup, 2073 .txq_enable = iwl_trans_pcie_txq_enable,
2065 2074
2066 .dbgfs_register = iwl_trans_pcie_dbgfs_register, 2075 .dbgfs_register = iwl_trans_pcie_dbgfs_register,
2067 2076
@@ -2084,11 +2093,12 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
2084{ 2093{
2085 struct iwl_trans_pcie *trans_pcie; 2094 struct iwl_trans_pcie *trans_pcie;
2086 struct iwl_trans *trans; 2095 struct iwl_trans *trans;
2096 char cmd_pool_name[100];
2087 u16 pci_cmd; 2097 u16 pci_cmd;
2088 int err; 2098 int err;
2089 2099
2090 trans = kzalloc(sizeof(struct iwl_trans) + 2100 trans = kzalloc(sizeof(struct iwl_trans) +
2091 sizeof(struct iwl_trans_pcie), GFP_KERNEL); 2101 sizeof(struct iwl_trans_pcie), GFP_KERNEL);
2092 2102
2093 if (WARN_ON(!trans)) 2103 if (WARN_ON(!trans))
2094 return NULL; 2104 return NULL;
@@ -2104,7 +2114,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
2104 /* W/A - seems to solve weird behavior. We need to remove this if we 2114 /* W/A - seems to solve weird behavior. We need to remove this if we
2105 * don't want to stay in L1 all the time. This wastes a lot of power */ 2115 * don't want to stay in L1 all the time. This wastes a lot of power */
2106 pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 | 2116 pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
2107 PCIE_LINK_STATE_CLKPM); 2117 PCIE_LINK_STATE_CLKPM);
2108 2118
2109 if (pci_enable_device(pdev)) { 2119 if (pci_enable_device(pdev)) {
2110 err = -ENODEV; 2120 err = -ENODEV;
@@ -2120,7 +2130,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
2120 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 2130 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2121 if (!err) 2131 if (!err)
2122 err = pci_set_consistent_dma_mask(pdev, 2132 err = pci_set_consistent_dma_mask(pdev,
2123 DMA_BIT_MASK(32)); 2133 DMA_BIT_MASK(32));
2124 /* both attempts failed: */ 2134 /* both attempts failed: */
2125 if (err) { 2135 if (err) {
2126 dev_printk(KERN_ERR, &pdev->dev, 2136 dev_printk(KERN_ERR, &pdev->dev,
@@ -2143,13 +2153,13 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
2143 } 2153 }
2144 2154
2145 dev_printk(KERN_INFO, &pdev->dev, 2155 dev_printk(KERN_INFO, &pdev->dev,
2146 "pci_resource_len = 0x%08llx\n", 2156 "pci_resource_len = 0x%08llx\n",
2147 (unsigned long long) pci_resource_len(pdev, 0)); 2157 (unsigned long long) pci_resource_len(pdev, 0));
2148 dev_printk(KERN_INFO, &pdev->dev, 2158 dev_printk(KERN_INFO, &pdev->dev,
2149 "pci_resource_base = %p\n", trans_pcie->hw_base); 2159 "pci_resource_base = %p\n", trans_pcie->hw_base);
2150 2160
2151 dev_printk(KERN_INFO, &pdev->dev, 2161 dev_printk(KERN_INFO, &pdev->dev,
2152 "HW Revision ID = 0x%X\n", pdev->revision); 2162 "HW Revision ID = 0x%X\n", pdev->revision);
2153 2163
2154 /* We disable the RETRY_TIMEOUT register (0x41) to keep 2164 /* We disable the RETRY_TIMEOUT register (0x41) to keep
2155 * PCI Tx retries from interfering with C3 CPU state */ 2165 * PCI Tx retries from interfering with C3 CPU state */
@@ -2158,7 +2168,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
2158 err = pci_enable_msi(pdev); 2168 err = pci_enable_msi(pdev);
2159 if (err) 2169 if (err)
2160 dev_printk(KERN_ERR, &pdev->dev, 2170 dev_printk(KERN_ERR, &pdev->dev,
2161 "pci_enable_msi failed(0X%x)", err); 2171 "pci_enable_msi failed(0X%x)", err);
2162 2172
2163 trans->dev = &pdev->dev; 2173 trans->dev = &pdev->dev;
2164 trans_pcie->irq = pdev->irq; 2174 trans_pcie->irq = pdev->irq;
@@ -2180,8 +2190,25 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
2180 init_waitqueue_head(&trans->wait_command_queue); 2190 init_waitqueue_head(&trans->wait_command_queue);
2181 spin_lock_init(&trans->reg_lock); 2191 spin_lock_init(&trans->reg_lock);
2182 2192
2193 snprintf(cmd_pool_name, sizeof(cmd_pool_name), "iwl_cmd_pool:%s",
2194 dev_name(trans->dev));
2195
2196 trans->dev_cmd_headroom = 0;
2197 trans->dev_cmd_pool =
2198 kmem_cache_create(cmd_pool_name,
2199 sizeof(struct iwl_device_cmd)
2200 + trans->dev_cmd_headroom,
2201 sizeof(void *),
2202 SLAB_HWCACHE_ALIGN,
2203 NULL);
2204
2205 if (!trans->dev_cmd_pool)
2206 goto out_pci_disable_msi;
2207
2183 return trans; 2208 return trans;
2184 2209
2210out_pci_disable_msi:
2211 pci_disable_msi(pdev);
2185out_pci_release_regions: 2212out_pci_release_regions:
2186 pci_release_regions(pdev); 2213 pci_release_regions(pdev);
2187out_pci_disable_device: 2214out_pci_disable_device:
@@ -2190,4 +2217,3 @@ out_no_pci:
2190 kfree(trans); 2217 kfree(trans);
2191 return NULL; 2218 return NULL;
2192} 2219}
2193
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c
index a8750238ee0..6baf8deef51 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/tx.c
@@ -34,11 +34,10 @@
34#include "iwl-csr.h" 34#include "iwl-csr.h"
35#include "iwl-prph.h" 35#include "iwl-prph.h"
36#include "iwl-io.h" 36#include "iwl-io.h"
37#include "iwl-agn-hw.h"
38#include "iwl-op-mode.h" 37#include "iwl-op-mode.h"
39#include "iwl-trans-pcie-int.h" 38#include "internal.h"
40/* FIXME: need to abstract out TX command (once we know what it looks like) */ 39/* FIXME: need to abstract out TX command (once we know what it looks like) */
41#include "iwl-commands.h" 40#include "dvm/commands.h"
42 41
43#define IWL_TX_CRC_SIZE 4 42#define IWL_TX_CRC_SIZE 4
44#define IWL_TX_DELIMITER_SIZE 4 43#define IWL_TX_DELIMITER_SIZE 4
@@ -47,12 +46,11 @@
47 * iwl_trans_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array 46 * iwl_trans_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
48 */ 47 */
49void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_trans *trans, 48void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
50 struct iwl_tx_queue *txq, 49 struct iwl_tx_queue *txq,
51 u16 byte_cnt) 50 u16 byte_cnt)
52{ 51{
53 struct iwlagn_scd_bc_tbl *scd_bc_tbl; 52 struct iwlagn_scd_bc_tbl *scd_bc_tbl;
54 struct iwl_trans_pcie *trans_pcie = 53 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
55 IWL_TRANS_GET_PCIE_TRANS(trans);
56 int write_ptr = txq->q.write_ptr; 54 int write_ptr = txq->q.write_ptr;
57 int txq_id = txq->q.id; 55 int txq_id = txq->q.id;
58 u8 sec_ctl = 0; 56 u8 sec_ctl = 0;
@@ -178,8 +176,8 @@ static inline u8 iwl_tfd_get_num_tbs(struct iwl_tfd *tfd)
178 return tfd->num_tbs & 0x1f; 176 return tfd->num_tbs & 0x1f;
179} 177}
180 178
181static void iwlagn_unmap_tfd(struct iwl_trans *trans, struct iwl_cmd_meta *meta, 179static void iwl_unmap_tfd(struct iwl_trans *trans, struct iwl_cmd_meta *meta,
182 struct iwl_tfd *tfd, enum dma_data_direction dma_dir) 180 struct iwl_tfd *tfd, enum dma_data_direction dma_dir)
183{ 181{
184 int i; 182 int i;
185 int num_tbs; 183 int num_tbs;
@@ -209,7 +207,7 @@ static void iwlagn_unmap_tfd(struct iwl_trans *trans, struct iwl_cmd_meta *meta,
209} 207}
210 208
211/** 209/**
212 * iwlagn_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr] 210 * iwl_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
213 * @trans - transport private data 211 * @trans - transport private data
214 * @txq - tx queue 212 * @txq - tx queue
215 * @dma_dir - the direction of the DMA mapping 213 * @dma_dir - the direction of the DMA mapping
@@ -217,8 +215,8 @@ static void iwlagn_unmap_tfd(struct iwl_trans *trans, struct iwl_cmd_meta *meta,
217 * Does NOT advance any TFD circular buffer read/write indexes 215 * Does NOT advance any TFD circular buffer read/write indexes
218 * Does NOT free the TFD itself (which is within circular buffer) 216 * Does NOT free the TFD itself (which is within circular buffer)
219 */ 217 */
220void iwlagn_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq, 218void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
221 enum dma_data_direction dma_dir) 219 enum dma_data_direction dma_dir)
222{ 220{
223 struct iwl_tfd *tfd_tmp = txq->tfds; 221 struct iwl_tfd *tfd_tmp = txq->tfds;
224 222
@@ -229,8 +227,8 @@ void iwlagn_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
229 lockdep_assert_held(&txq->lock); 227 lockdep_assert_held(&txq->lock);
230 228
231 /* We have only q->n_window txq->entries, but we use q->n_bd tfds */ 229 /* We have only q->n_window txq->entries, but we use q->n_bd tfds */
232 iwlagn_unmap_tfd(trans, &txq->entries[idx].meta, 230 iwl_unmap_tfd(trans, &txq->entries[idx].meta, &tfd_tmp[rd_ptr],
233 &tfd_tmp[rd_ptr], dma_dir); 231 dma_dir);
234 232
235 /* free SKB */ 233 /* free SKB */
236 if (txq->entries) { 234 if (txq->entries) {
@@ -270,7 +268,7 @@ int iwlagn_txq_attach_buf_to_tfd(struct iwl_trans *trans,
270 /* Each TFD can point to a maximum 20 Tx buffers */ 268 /* Each TFD can point to a maximum 20 Tx buffers */
271 if (num_tbs >= IWL_NUM_OF_TBS) { 269 if (num_tbs >= IWL_NUM_OF_TBS) {
272 IWL_ERR(trans, "Error can not send more than %d chunks\n", 270 IWL_ERR(trans, "Error can not send more than %d chunks\n",
273 IWL_NUM_OF_TBS); 271 IWL_NUM_OF_TBS);
274 return -EINVAL; 272 return -EINVAL;
275 } 273 }
276 274
@@ -279,7 +277,7 @@ int iwlagn_txq_attach_buf_to_tfd(struct iwl_trans *trans,
279 277
280 if (unlikely(addr & ~IWL_TX_DMA_MASK)) 278 if (unlikely(addr & ~IWL_TX_DMA_MASK))
281 IWL_ERR(trans, "Unaligned address = %llx\n", 279 IWL_ERR(trans, "Unaligned address = %llx\n",
282 (unsigned long long)addr); 280 (unsigned long long)addr);
283 281
284 iwl_tfd_set_tb(tfd, num_tbs, addr, len); 282 iwl_tfd_set_tb(tfd, num_tbs, addr, len);
285 283
@@ -382,16 +380,14 @@ static void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_trans *trans,
382 tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent; 380 tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent;
383} 381}
384 382
385static int iwlagn_tx_queue_set_q2ratid(struct iwl_trans *trans, u16 ra_tid, 383static int iwl_txq_set_ratid_map(struct iwl_trans *trans, u16 ra_tid,
386 u16 txq_id) 384 u16 txq_id)
387{ 385{
386 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
388 u32 tbl_dw_addr; 387 u32 tbl_dw_addr;
389 u32 tbl_dw; 388 u32 tbl_dw;
390 u16 scd_q2ratid; 389 u16 scd_q2ratid;
391 390
392 struct iwl_trans_pcie *trans_pcie =
393 IWL_TRANS_GET_PCIE_TRANS(trans);
394
395 scd_q2ratid = ra_tid & SCD_QUEUE_RA_TID_MAP_RATID_MSK; 391 scd_q2ratid = ra_tid & SCD_QUEUE_RA_TID_MAP_RATID_MSK;
396 392
397 tbl_dw_addr = trans_pcie->scd_base_addr + 393 tbl_dw_addr = trans_pcie->scd_base_addr +
@@ -409,7 +405,7 @@ static int iwlagn_tx_queue_set_q2ratid(struct iwl_trans *trans, u16 ra_tid,
409 return 0; 405 return 0;
410} 406}
411 407
412static void iwlagn_tx_queue_stop_scheduler(struct iwl_trans *trans, u16 txq_id) 408static inline void iwl_txq_set_inactive(struct iwl_trans *trans, u16 txq_id)
413{ 409{
414 /* Simply stop the queue, but don't change any configuration; 410 /* Simply stop the queue, but don't change any configuration;
415 * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */ 411 * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
@@ -419,102 +415,87 @@ static void iwlagn_tx_queue_stop_scheduler(struct iwl_trans *trans, u16 txq_id)
419 (1 << SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN)); 415 (1 << SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
420} 416}
421 417
422void iwl_trans_set_wr_ptrs(struct iwl_trans *trans, 418void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo,
423 int txq_id, u32 index) 419 int sta_id, int tid, int frame_limit, u16 ssn)
424{
425 IWL_DEBUG_TX_QUEUES(trans, "Q %d WrPtr: %d\n", txq_id, index & 0xff);
426 iwl_write_direct32(trans, HBUS_TARG_WRPTR,
427 (index & 0xff) | (txq_id << 8));
428 iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), index);
429}
430
431void iwl_trans_tx_queue_set_status(struct iwl_trans *trans,
432 struct iwl_tx_queue *txq,
433 int tx_fifo_id, bool active)
434{
435 int txq_id = txq->q.id;
436
437 iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id),
438 (active << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
439 (tx_fifo_id << SCD_QUEUE_STTS_REG_POS_TXF) |
440 (1 << SCD_QUEUE_STTS_REG_POS_WSL) |
441 SCD_QUEUE_STTS_REG_MSK);
442
443 if (active)
444 IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d on FIFO %d\n",
445 txq_id, tx_fifo_id);
446 else
447 IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id);
448}
449
450void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans, int txq_id, int fifo,
451 int sta_id, int tid, int frame_limit, u16 ssn)
452{ 420{
453 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 421 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
454 unsigned long flags;
455 u16 ra_tid = BUILD_RAxTID(sta_id, tid);
456 422
457 if (test_and_set_bit(txq_id, trans_pcie->queue_used)) 423 if (test_and_set_bit(txq_id, trans_pcie->queue_used))
458 WARN_ONCE(1, "queue %d already used - expect issues", txq_id); 424 WARN_ONCE(1, "queue %d already used - expect issues", txq_id);
459 425
460 spin_lock_irqsave(&trans_pcie->irq_lock, flags);
461
462 /* Stop this Tx queue before configuring it */ 426 /* Stop this Tx queue before configuring it */
463 iwlagn_tx_queue_stop_scheduler(trans, txq_id); 427 iwl_txq_set_inactive(trans, txq_id);
464 428
465 /* Map receiver-address / traffic-ID to this queue */ 429 /* Set this queue as a chain-building queue unless it is CMD queue */
466 iwlagn_tx_queue_set_q2ratid(trans, ra_tid, txq_id); 430 if (txq_id != trans_pcie->cmd_queue)
431 iwl_set_bits_prph(trans, SCD_QUEUECHAIN_SEL, BIT(txq_id));
467 432
468 /* Set this queue as a chain-building queue */ 433 /* If this queue is mapped to a certain station: it is an AGG queue */
469 iwl_set_bits_prph(trans, SCD_QUEUECHAIN_SEL, BIT(txq_id)); 434 if (sta_id != IWL_INVALID_STATION) {
435 u16 ra_tid = BUILD_RAxTID(sta_id, tid);
470 436
471 /* enable aggregations for the queue */ 437 /* Map receiver-address / traffic-ID to this queue */
472 iwl_set_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id)); 438 iwl_txq_set_ratid_map(trans, ra_tid, txq_id);
439
440 /* enable aggregations for the queue */
441 iwl_set_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id));
442 } else {
443 /*
444 * disable aggregations for the queue, this will also make the
445 * ra_tid mapping configuration irrelevant since it is now a
446 * non-AGG queue.
447 */
448 iwl_clear_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id));
449 }
473 450
474 /* Place first TFD at index corresponding to start sequence number. 451 /* Place first TFD at index corresponding to start sequence number.
475 * Assumes that ssn_idx is valid (!= 0xFFF) */ 452 * Assumes that ssn_idx is valid (!= 0xFFF) */
476 trans_pcie->txq[txq_id].q.read_ptr = (ssn & 0xff); 453 trans_pcie->txq[txq_id].q.read_ptr = (ssn & 0xff);
477 trans_pcie->txq[txq_id].q.write_ptr = (ssn & 0xff); 454 trans_pcie->txq[txq_id].q.write_ptr = (ssn & 0xff);
478 iwl_trans_set_wr_ptrs(trans, txq_id, ssn); 455
456 iwl_write_direct32(trans, HBUS_TARG_WRPTR,
457 (ssn & 0xff) | (txq_id << 8));
458 iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), ssn);
479 459
480 /* Set up Tx window size and frame limit for this queue */ 460 /* Set up Tx window size and frame limit for this queue */
481 iwl_write_targ_mem(trans, trans_pcie->scd_base_addr + 461 iwl_write_targ_mem(trans, trans_pcie->scd_base_addr +
462 SCD_CONTEXT_QUEUE_OFFSET(txq_id), 0);
463 iwl_write_targ_mem(trans, trans_pcie->scd_base_addr +
482 SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32), 464 SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
483 ((frame_limit << SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) & 465 ((frame_limit << SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
484 SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) | 466 SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
485 ((frame_limit << SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) & 467 ((frame_limit << SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
486 SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK)); 468 SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
487 469
488 iwl_set_bits_prph(trans, SCD_INTERRUPT_MASK, (1 << txq_id));
489
490 /* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */ 470 /* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
491 iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[txq_id], 471 iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id),
492 fifo, true); 472 (1 << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
493 473 (fifo << SCD_QUEUE_STTS_REG_POS_TXF) |
494 spin_unlock_irqrestore(&trans_pcie->irq_lock, flags); 474 (1 << SCD_QUEUE_STTS_REG_POS_WSL) |
475 SCD_QUEUE_STTS_REG_MSK);
476 IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d on FIFO %d WrPtr: %d\n",
477 txq_id, fifo, ssn & 0xff);
495} 478}
496 479
497void iwl_trans_pcie_tx_agg_disable(struct iwl_trans *trans, int txq_id) 480void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id)
498{ 481{
499 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 482 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
483 u16 rd_ptr, wr_ptr;
484 int n_bd = trans_pcie->txq[txq_id].q.n_bd;
500 485
501 if (!test_and_clear_bit(txq_id, trans_pcie->queue_used)) { 486 if (!test_and_clear_bit(txq_id, trans_pcie->queue_used)) {
502 WARN_ONCE(1, "queue %d not used", txq_id); 487 WARN_ONCE(1, "queue %d not used", txq_id);
503 return; 488 return;
504 } 489 }
505 490
506 iwlagn_tx_queue_stop_scheduler(trans, txq_id); 491 rd_ptr = iwl_read_prph(trans, SCD_QUEUE_RDPTR(txq_id)) & (n_bd - 1);
507 492 wr_ptr = iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq_id));
508 iwl_clear_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id));
509 493
510 trans_pcie->txq[txq_id].q.read_ptr = 0; 494 WARN_ONCE(rd_ptr != wr_ptr, "queue %d isn't empty: [%d,%d]",
511 trans_pcie->txq[txq_id].q.write_ptr = 0; 495 txq_id, rd_ptr, wr_ptr);
512 iwl_trans_set_wr_ptrs(trans, txq_id, 0);
513 496
514 iwl_clear_bits_prph(trans, SCD_INTERRUPT_MASK, BIT(txq_id)); 497 iwl_txq_set_inactive(trans, txq_id);
515 498 IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id);
516 iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[txq_id],
517 0, false);
518} 499}
519 500
520/*************** HOST COMMAND QUEUE FUNCTIONS *****/ 501/*************** HOST COMMAND QUEUE FUNCTIONS *****/
@@ -615,13 +596,13 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
615 } 596 }
616 597
617 IWL_DEBUG_HC(trans, 598 IWL_DEBUG_HC(trans,
618 "Sending command %s (#%x), seq: 0x%04X, %d bytes at %d[%d]:%d\n", 599 "Sending command %s (#%x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
619 trans_pcie_get_cmd_string(trans_pcie, out_cmd->hdr.cmd), 600 trans_pcie_get_cmd_string(trans_pcie, out_cmd->hdr.cmd),
620 out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence), cmd_size, 601 out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence),
621 q->write_ptr, idx, trans_pcie->cmd_queue); 602 cmd_size, q->write_ptr, idx, trans_pcie->cmd_queue);
622 603
623 phys_addr = dma_map_single(trans->dev, &out_cmd->hdr, copy_size, 604 phys_addr = dma_map_single(trans->dev, &out_cmd->hdr, copy_size,
624 DMA_BIDIRECTIONAL); 605 DMA_BIDIRECTIONAL);
625 if (unlikely(dma_mapping_error(trans->dev, phys_addr))) { 606 if (unlikely(dma_mapping_error(trans->dev, phys_addr))) {
626 idx = -ENOMEM; 607 idx = -ENOMEM;
627 goto out; 608 goto out;
@@ -630,8 +611,7 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
630 dma_unmap_addr_set(out_meta, mapping, phys_addr); 611 dma_unmap_addr_set(out_meta, mapping, phys_addr);
631 dma_unmap_len_set(out_meta, len, copy_size); 612 dma_unmap_len_set(out_meta, len, copy_size);
632 613
633 iwlagn_txq_attach_buf_to_tfd(trans, txq, 614 iwlagn_txq_attach_buf_to_tfd(trans, txq, phys_addr, copy_size, 1);
634 phys_addr, copy_size, 1);
635#ifdef CONFIG_IWLWIFI_DEVICE_TRACING 615#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
636 trace_bufs[0] = &out_cmd->hdr; 616 trace_bufs[0] = &out_cmd->hdr;
637 trace_lens[0] = copy_size; 617 trace_lens[0] = copy_size;
@@ -643,13 +623,12 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
643 continue; 623 continue;
644 if (!(cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY)) 624 if (!(cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY))
645 continue; 625 continue;
646 phys_addr = dma_map_single(trans->dev, 626 phys_addr = dma_map_single(trans->dev, (void *)cmd->data[i],
647 (void *)cmd->data[i],
648 cmd->len[i], DMA_BIDIRECTIONAL); 627 cmd->len[i], DMA_BIDIRECTIONAL);
649 if (dma_mapping_error(trans->dev, phys_addr)) { 628 if (dma_mapping_error(trans->dev, phys_addr)) {
650 iwlagn_unmap_tfd(trans, out_meta, 629 iwl_unmap_tfd(trans, out_meta,
651 &txq->tfds[q->write_ptr], 630 &txq->tfds[q->write_ptr],
652 DMA_BIDIRECTIONAL); 631 DMA_BIDIRECTIONAL);
653 idx = -ENOMEM; 632 idx = -ENOMEM;
654 goto out; 633 goto out;
655 } 634 }
@@ -723,9 +702,10 @@ static void iwl_hcmd_queue_reclaim(struct iwl_trans *trans, int txq_id,
723 lockdep_assert_held(&txq->lock); 702 lockdep_assert_held(&txq->lock);
724 703
725 if ((idx >= q->n_bd) || (iwl_queue_used(q, idx) == 0)) { 704 if ((idx >= q->n_bd) || (iwl_queue_used(q, idx) == 0)) {
726 IWL_ERR(trans, "%s: Read index for DMA queue txq id (%d), " 705 IWL_ERR(trans,
727 "index %d is out of range [0-%d] %d %d.\n", __func__, 706 "%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n",
728 txq_id, idx, q->n_bd, q->write_ptr, q->read_ptr); 707 __func__, txq_id, idx, q->n_bd,
708 q->write_ptr, q->read_ptr);
729 return; 709 return;
730 } 710 }
731 711
@@ -733,8 +713,8 @@ static void iwl_hcmd_queue_reclaim(struct iwl_trans *trans, int txq_id,
733 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) { 713 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
734 714
735 if (nfreed++ > 0) { 715 if (nfreed++ > 0) {
736 IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n", idx, 716 IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n",
737 q->write_ptr, q->read_ptr); 717 idx, q->write_ptr, q->read_ptr);
738 iwl_op_mode_nic_error(trans->op_mode); 718 iwl_op_mode_nic_error(trans->op_mode);
739 } 719 }
740 720
@@ -771,9 +751,9 @@ void iwl_tx_cmd_complete(struct iwl_trans *trans, struct iwl_rx_cmd_buffer *rxb,
771 * in the queue management code. */ 751 * in the queue management code. */
772 if (WARN(txq_id != trans_pcie->cmd_queue, 752 if (WARN(txq_id != trans_pcie->cmd_queue,
773 "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n", 753 "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
774 txq_id, trans_pcie->cmd_queue, sequence, 754 txq_id, trans_pcie->cmd_queue, sequence,
775 trans_pcie->txq[trans_pcie->cmd_queue].q.read_ptr, 755 trans_pcie->txq[trans_pcie->cmd_queue].q.read_ptr,
776 trans_pcie->txq[trans_pcie->cmd_queue].q.write_ptr)) { 756 trans_pcie->txq[trans_pcie->cmd_queue].q.write_ptr)) {
777 iwl_print_hex_error(trans, pkt, 32); 757 iwl_print_hex_error(trans, pkt, 32);
778 return; 758 return;
779 } 759 }
@@ -784,8 +764,7 @@ void iwl_tx_cmd_complete(struct iwl_trans *trans, struct iwl_rx_cmd_buffer *rxb,
784 cmd = txq->entries[cmd_index].cmd; 764 cmd = txq->entries[cmd_index].cmd;
785 meta = &txq->entries[cmd_index].meta; 765 meta = &txq->entries[cmd_index].meta;
786 766
787 iwlagn_unmap_tfd(trans, meta, &txq->tfds[index], 767 iwl_unmap_tfd(trans, meta, &txq->tfds[index], DMA_BIDIRECTIONAL);
788 DMA_BIDIRECTIONAL);
789 768
790 /* Input error checking is done when commands are added to queue. */ 769 /* Input error checking is done when commands are added to queue. */
791 if (meta->flags & CMD_WANT_SKB) { 770 if (meta->flags & CMD_WANT_SKB) {
@@ -870,8 +849,9 @@ static int iwl_send_cmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
870 } 849 }
871 850
872 ret = wait_event_timeout(trans->wait_command_queue, 851 ret = wait_event_timeout(trans->wait_command_queue,
873 !test_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status), 852 !test_bit(STATUS_HCMD_ACTIVE,
874 HOST_COMPLETE_TIMEOUT); 853 &trans_pcie->status),
854 HOST_COMPLETE_TIMEOUT);
875 if (!ret) { 855 if (!ret) {
876 if (test_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status)) { 856 if (test_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status)) {
877 struct iwl_tx_queue *txq = 857 struct iwl_tx_queue *txq =
@@ -956,10 +936,10 @@ int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index,
956 936
957 if ((index >= q->n_bd) || 937 if ((index >= q->n_bd) ||
958 (iwl_queue_used(q, last_to_free) == 0)) { 938 (iwl_queue_used(q, last_to_free) == 0)) {
959 IWL_ERR(trans, "%s: Read index for DMA queue txq id (%d), " 939 IWL_ERR(trans,
960 "last_to_free %d is out of range [0-%d] %d %d.\n", 940 "%s: Read index for DMA queue txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n",
961 __func__, txq_id, last_to_free, q->n_bd, 941 __func__, txq_id, last_to_free, q->n_bd,
962 q->write_ptr, q->read_ptr); 942 q->write_ptr, q->read_ptr);
963 return 0; 943 return 0;
964 } 944 }
965 945
@@ -979,7 +959,7 @@ int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index,
979 959
980 iwlagn_txq_inval_byte_cnt_tbl(trans, txq); 960 iwlagn_txq_inval_byte_cnt_tbl(trans, txq);
981 961
982 iwlagn_txq_free_tfd(trans, txq, DMA_TO_DEVICE); 962 iwl_txq_free_tfd(trans, txq, DMA_TO_DEVICE);
983 freed++; 963 freed++;
984 } 964 }
985 965
diff --git a/drivers/net/wireless/libertas/cfg.c b/drivers/net/wireless/libertas/cfg.c
index 2fa879b015b..f4a203049fb 100644
--- a/drivers/net/wireless/libertas/cfg.c
+++ b/drivers/net/wireless/libertas/cfg.c
@@ -435,24 +435,40 @@ static int lbs_add_wpa_tlv(u8 *tlv, const u8 *ie, u8 ie_len)
435 * Set Channel 435 * Set Channel
436 */ 436 */
437 437
438static int lbs_cfg_set_channel(struct wiphy *wiphy, 438static int lbs_cfg_set_monitor_channel(struct wiphy *wiphy,
439 struct net_device *netdev, 439 struct ieee80211_channel *channel,
440 struct ieee80211_channel *channel, 440 enum nl80211_channel_type channel_type)
441 enum nl80211_channel_type channel_type)
442{ 441{
443 struct lbs_private *priv = wiphy_priv(wiphy); 442 struct lbs_private *priv = wiphy_priv(wiphy);
444 int ret = -ENOTSUPP; 443 int ret = -ENOTSUPP;
445 444
446 lbs_deb_enter_args(LBS_DEB_CFG80211, "iface %s freq %d, type %d", 445 lbs_deb_enter_args(LBS_DEB_CFG80211, "freq %d, type %d",
447 netdev_name(netdev), channel->center_freq, channel_type); 446 channel->center_freq, channel_type);
448 447
449 if (channel_type != NL80211_CHAN_NO_HT) 448 if (channel_type != NL80211_CHAN_NO_HT)
450 goto out; 449 goto out;
451 450
452 if (netdev == priv->mesh_dev) 451 ret = lbs_set_channel(priv, channel->hw_value);
453 ret = lbs_mesh_set_channel(priv, channel->hw_value); 452
454 else 453 out:
455 ret = lbs_set_channel(priv, channel->hw_value); 454 lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret);
455 return ret;
456}
457
458static int lbs_cfg_set_mesh_channel(struct wiphy *wiphy,
459 struct net_device *netdev,
460 struct ieee80211_channel *channel)
461{
462 struct lbs_private *priv = wiphy_priv(wiphy);
463 int ret = -ENOTSUPP;
464
465 lbs_deb_enter_args(LBS_DEB_CFG80211, "iface %s freq %d",
466 netdev_name(netdev), channel->center_freq);
467
468 if (netdev != priv->mesh_dev)
469 goto out;
470
471 ret = lbs_mesh_set_channel(priv, channel->hw_value);
456 472
457 out: 473 out:
458 lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret); 474 lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret);
@@ -2029,7 +2045,8 @@ static int lbs_leave_ibss(struct wiphy *wiphy, struct net_device *dev)
2029 */ 2045 */
2030 2046
2031static struct cfg80211_ops lbs_cfg80211_ops = { 2047static struct cfg80211_ops lbs_cfg80211_ops = {
2032 .set_channel = lbs_cfg_set_channel, 2048 .set_monitor_channel = lbs_cfg_set_monitor_channel,
2049 .libertas_set_mesh_channel = lbs_cfg_set_mesh_channel,
2033 .scan = lbs_cfg_scan, 2050 .scan = lbs_cfg_scan,
2034 .connect = lbs_cfg_connect, 2051 .connect = lbs_cfg_connect,
2035 .disconnect = lbs_cfg_disconnect, 2052 .disconnect = lbs_cfg_disconnect,
diff --git a/drivers/net/wireless/libertas/debugfs.c b/drivers/net/wireless/libertas/debugfs.c
index a06cc283e23..668dd27616a 100644
--- a/drivers/net/wireless/libertas/debugfs.c
+++ b/drivers/net/wireless/libertas/debugfs.c
@@ -483,7 +483,7 @@ static ssize_t lbs_rdmac_write(struct file *file,
483 res = -EFAULT; 483 res = -EFAULT;
484 goto out_unlock; 484 goto out_unlock;
485 } 485 }
486 priv->mac_offset = simple_strtoul((char *)buf, NULL, 16); 486 priv->mac_offset = simple_strtoul(buf, NULL, 16);
487 res = count; 487 res = count;
488out_unlock: 488out_unlock:
489 free_page(addr); 489 free_page(addr);
@@ -565,7 +565,7 @@ static ssize_t lbs_rdbbp_write(struct file *file,
565 res = -EFAULT; 565 res = -EFAULT;
566 goto out_unlock; 566 goto out_unlock;
567 } 567 }
568 priv->bbp_offset = simple_strtoul((char *)buf, NULL, 16); 568 priv->bbp_offset = simple_strtoul(buf, NULL, 16);
569 res = count; 569 res = count;
570out_unlock: 570out_unlock:
571 free_page(addr); 571 free_page(addr);
diff --git a/drivers/net/wireless/libertas/dev.h b/drivers/net/wireless/libertas/dev.h
index 672005430ac..60996ce89f7 100644
--- a/drivers/net/wireless/libertas/dev.h
+++ b/drivers/net/wireless/libertas/dev.h
@@ -58,6 +58,7 @@ struct lbs_private {
58 uint16_t mesh_tlv; 58 uint16_t mesh_tlv;
59 u8 mesh_ssid[IEEE80211_MAX_SSID_LEN + 1]; 59 u8 mesh_ssid[IEEE80211_MAX_SSID_LEN + 1];
60 u8 mesh_ssid_len; 60 u8 mesh_ssid_len;
61 u8 mesh_channel;
61#endif 62#endif
62 63
63 /* Debugfs */ 64 /* Debugfs */
diff --git a/drivers/net/wireless/libertas/if_usb.c b/drivers/net/wireless/libertas/if_usb.c
index cd3b0d40061..64b7dc5de12 100644
--- a/drivers/net/wireless/libertas/if_usb.c
+++ b/drivers/net/wireless/libertas/if_usb.c
@@ -302,7 +302,7 @@ error:
302static void if_usb_disconnect(struct usb_interface *intf) 302static void if_usb_disconnect(struct usb_interface *intf)
303{ 303{
304 struct if_usb_card *cardp = usb_get_intfdata(intf); 304 struct if_usb_card *cardp = usb_get_intfdata(intf);
305 struct lbs_private *priv = (struct lbs_private *) cardp->priv; 305 struct lbs_private *priv = cardp->priv;
306 306
307 lbs_deb_enter(LBS_DEB_MAIN); 307 lbs_deb_enter(LBS_DEB_MAIN);
308 308
diff --git a/drivers/net/wireless/libertas/mesh.c b/drivers/net/wireless/libertas/mesh.c
index e87c031b298..97807751ebc 100644
--- a/drivers/net/wireless/libertas/mesh.c
+++ b/drivers/net/wireless/libertas/mesh.c
@@ -131,16 +131,13 @@ static int lbs_mesh_config(struct lbs_private *priv, uint16_t action,
131 131
132int lbs_mesh_set_channel(struct lbs_private *priv, u8 channel) 132int lbs_mesh_set_channel(struct lbs_private *priv, u8 channel)
133{ 133{
134 priv->mesh_channel = channel;
134 return lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_START, channel); 135 return lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_START, channel);
135} 136}
136 137
137static uint16_t lbs_mesh_get_channel(struct lbs_private *priv) 138static uint16_t lbs_mesh_get_channel(struct lbs_private *priv)
138{ 139{
139 struct wireless_dev *mesh_wdev = priv->mesh_dev->ieee80211_ptr; 140 return priv->mesh_channel ?: 1;
140 if (mesh_wdev->channel)
141 return mesh_wdev->channel->hw_value;
142 else
143 return 1;
144} 141}
145 142
146/*************************************************************************** 143/***************************************************************************
diff --git a/drivers/net/wireless/libertas_tf/if_usb.c b/drivers/net/wireless/libertas_tf/if_usb.c
index 19a5a92dd77..d576dd6665d 100644
--- a/drivers/net/wireless/libertas_tf/if_usb.c
+++ b/drivers/net/wireless/libertas_tf/if_usb.c
@@ -253,7 +253,7 @@ lbtf_deb_leave(LBTF_DEB_MAIN);
253static void if_usb_disconnect(struct usb_interface *intf) 253static void if_usb_disconnect(struct usb_interface *intf)
254{ 254{
255 struct if_usb_card *cardp = usb_get_intfdata(intf); 255 struct if_usb_card *cardp = usb_get_intfdata(intf);
256 struct lbtf_private *priv = (struct lbtf_private *) cardp->priv; 256 struct lbtf_private *priv = cardp->priv;
257 257
258 lbtf_deb_enter(LBTF_DEB_MAIN); 258 lbtf_deb_enter(LBTF_DEB_MAIN);
259 259
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index a0b7cfd3468..f578d0b2172 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -571,7 +571,7 @@ static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw,
571 skb_dequeue(&data->pending); 571 skb_dequeue(&data->pending);
572 } 572 }
573 573
574 skb = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC); 574 skb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_ATOMIC);
575 if (skb == NULL) 575 if (skb == NULL)
576 goto nla_put_failure; 576 goto nla_put_failure;
577 577
@@ -678,8 +678,7 @@ static bool mac80211_hwsim_tx_frame_no_nl(struct ieee80211_hw *hw,
678 continue; 678 continue;
679 679
680 if (data2->idle || !data2->started || 680 if (data2->idle || !data2->started ||
681 !hwsim_ps_rx_ok(data2, skb) || 681 !hwsim_ps_rx_ok(data2, skb) || !data2->channel ||
682 !data->channel || !data2->channel ||
683 data->channel->center_freq != data2->channel->center_freq || 682 data->channel->center_freq != data2->channel->center_freq ||
684 !(data->group & data2->group)) 683 !(data->group & data2->group))
685 continue; 684 continue;
@@ -1486,7 +1485,7 @@ static int hwsim_tx_info_frame_received_nl(struct sk_buff *skb_2,
1486 struct mac80211_hwsim_data *data2; 1485 struct mac80211_hwsim_data *data2;
1487 struct ieee80211_tx_info *txi; 1486 struct ieee80211_tx_info *txi;
1488 struct hwsim_tx_rate *tx_attempts; 1487 struct hwsim_tx_rate *tx_attempts;
1489 struct sk_buff __user *ret_skb; 1488 unsigned long ret_skb_ptr;
1490 struct sk_buff *skb, *tmp; 1489 struct sk_buff *skb, *tmp;
1491 struct mac_address *src; 1490 struct mac_address *src;
1492 unsigned int hwsim_flags; 1491 unsigned int hwsim_flags;
@@ -1504,8 +1503,7 @@ static int hwsim_tx_info_frame_received_nl(struct sk_buff *skb_2,
1504 info->attrs[HWSIM_ATTR_ADDR_TRANSMITTER]); 1503 info->attrs[HWSIM_ATTR_ADDR_TRANSMITTER]);
1505 hwsim_flags = nla_get_u32(info->attrs[HWSIM_ATTR_FLAGS]); 1504 hwsim_flags = nla_get_u32(info->attrs[HWSIM_ATTR_FLAGS]);
1506 1505
1507 ret_skb = (struct sk_buff __user *) 1506 ret_skb_ptr = nla_get_u64(info->attrs[HWSIM_ATTR_COOKIE]);
1508 (unsigned long) nla_get_u64(info->attrs[HWSIM_ATTR_COOKIE]);
1509 1507
1510 data2 = get_hwsim_data_ref_from_addr(src); 1508 data2 = get_hwsim_data_ref_from_addr(src);
1511 1509
@@ -1514,7 +1512,7 @@ static int hwsim_tx_info_frame_received_nl(struct sk_buff *skb_2,
1514 1512
1515 /* look for the skb matching the cookie passed back from user */ 1513 /* look for the skb matching the cookie passed back from user */
1516 skb_queue_walk_safe(&data2->pending, skb, tmp) { 1514 skb_queue_walk_safe(&data2->pending, skb, tmp) {
1517 if (skb == ret_skb) { 1515 if ((unsigned long)skb == ret_skb_ptr) {
1518 skb_unlink(skb, &data2->pending); 1516 skb_unlink(skb, &data2->pending);
1519 found = true; 1517 found = true;
1520 break; 1518 break;
diff --git a/drivers/net/wireless/mwifiex/11n.c b/drivers/net/wireless/mwifiex/11n.c
index fe8ebfebcc0..e535c937628 100644
--- a/drivers/net/wireless/mwifiex/11n.c
+++ b/drivers/net/wireless/mwifiex/11n.c
@@ -101,8 +101,7 @@ int mwifiex_ret_11n_delba(struct mwifiex_private *priv,
101{ 101{
102 int tid; 102 int tid;
103 struct mwifiex_tx_ba_stream_tbl *tx_ba_tbl; 103 struct mwifiex_tx_ba_stream_tbl *tx_ba_tbl;
104 struct host_cmd_ds_11n_delba *del_ba = 104 struct host_cmd_ds_11n_delba *del_ba = &resp->params.del_ba;
105 (struct host_cmd_ds_11n_delba *) &resp->params.del_ba;
106 uint16_t del_ba_param_set = le16_to_cpu(del_ba->del_ba_param_set); 105 uint16_t del_ba_param_set = le16_to_cpu(del_ba->del_ba_param_set);
107 106
108 tid = del_ba_param_set >> DELBA_TID_POS; 107 tid = del_ba_param_set >> DELBA_TID_POS;
@@ -147,8 +146,7 @@ int mwifiex_ret_11n_addba_req(struct mwifiex_private *priv,
147 struct host_cmd_ds_command *resp) 146 struct host_cmd_ds_command *resp)
148{ 147{
149 int tid; 148 int tid;
150 struct host_cmd_ds_11n_addba_rsp *add_ba_rsp = 149 struct host_cmd_ds_11n_addba_rsp *add_ba_rsp = &resp->params.add_ba_rsp;
151 (struct host_cmd_ds_11n_addba_rsp *) &resp->params.add_ba_rsp;
152 struct mwifiex_tx_ba_stream_tbl *tx_ba_tbl; 150 struct mwifiex_tx_ba_stream_tbl *tx_ba_tbl;
153 151
154 add_ba_rsp->ssn = cpu_to_le16((le16_to_cpu(add_ba_rsp->ssn)) 152 add_ba_rsp->ssn = cpu_to_le16((le16_to_cpu(add_ba_rsp->ssn))
@@ -412,7 +410,7 @@ mwifiex_cmd_append_11n_tlv(struct mwifiex_private *priv,
412 410
413 memcpy((u8 *) bss_co_2040 + 411 memcpy((u8 *) bss_co_2040 +
414 sizeof(struct mwifiex_ie_types_header), 412 sizeof(struct mwifiex_ie_types_header),
415 (u8 *) bss_desc->bcn_bss_co_2040 + 413 bss_desc->bcn_bss_co_2040 +
416 sizeof(struct ieee_types_header), 414 sizeof(struct ieee_types_header),
417 le16_to_cpu(bss_co_2040->header.len)); 415 le16_to_cpu(bss_co_2040->header.len));
418 416
@@ -426,10 +424,8 @@ mwifiex_cmd_append_11n_tlv(struct mwifiex_private *priv,
426 ext_cap->header.type = cpu_to_le16(WLAN_EID_EXT_CAPABILITY); 424 ext_cap->header.type = cpu_to_le16(WLAN_EID_EXT_CAPABILITY);
427 ext_cap->header.len = cpu_to_le16(sizeof(ext_cap->ext_cap)); 425 ext_cap->header.len = cpu_to_le16(sizeof(ext_cap->ext_cap));
428 426
429 memcpy((u8 *) ext_cap + 427 memcpy((u8 *)ext_cap + sizeof(struct mwifiex_ie_types_header),
430 sizeof(struct mwifiex_ie_types_header), 428 bss_desc->bcn_ext_cap + sizeof(struct ieee_types_header),
431 (u8 *) bss_desc->bcn_ext_cap +
432 sizeof(struct ieee_types_header),
433 le16_to_cpu(ext_cap->header.len)); 429 le16_to_cpu(ext_cap->header.len));
434 430
435 *buffer += sizeof(struct mwifiex_ie_types_extcap); 431 *buffer += sizeof(struct mwifiex_ie_types_extcap);
diff --git a/drivers/net/wireless/mwifiex/11n.h b/drivers/net/wireless/mwifiex/11n.h
index 77646d777dc..28366e9211f 100644
--- a/drivers/net/wireless/mwifiex/11n.h
+++ b/drivers/net/wireless/mwifiex/11n.h
@@ -105,8 +105,7 @@ static inline u8 mwifiex_space_avail_for_new_ba_stream(
105 priv = adapter->priv[i]; 105 priv = adapter->priv[i];
106 if (priv) 106 if (priv)
107 ba_stream_num += mwifiex_wmm_list_len( 107 ba_stream_num += mwifiex_wmm_list_len(
108 (struct list_head *) 108 &priv->tx_ba_stream_tbl_ptr);
109 &priv->tx_ba_stream_tbl_ptr);
110 } 109 }
111 110
112 return ((ba_stream_num < 111 return ((ba_stream_num <
diff --git a/drivers/net/wireless/mwifiex/11n_rxreorder.c b/drivers/net/wireless/mwifiex/11n_rxreorder.c
index 900ee129e82..591ccd33f83 100644
--- a/drivers/net/wireless/mwifiex/11n_rxreorder.c
+++ b/drivers/net/wireless/mwifiex/11n_rxreorder.c
@@ -297,9 +297,7 @@ mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta,
297 */ 297 */
298int mwifiex_cmd_11n_addba_req(struct host_cmd_ds_command *cmd, void *data_buf) 298int mwifiex_cmd_11n_addba_req(struct host_cmd_ds_command *cmd, void *data_buf)
299{ 299{
300 struct host_cmd_ds_11n_addba_req *add_ba_req = 300 struct host_cmd_ds_11n_addba_req *add_ba_req = &cmd->params.add_ba_req;
301 (struct host_cmd_ds_11n_addba_req *)
302 &cmd->params.add_ba_req;
303 301
304 cmd->command = cpu_to_le16(HostCmd_CMD_11N_ADDBA_REQ); 302 cmd->command = cpu_to_le16(HostCmd_CMD_11N_ADDBA_REQ);
305 cmd->size = cpu_to_le16(sizeof(*add_ba_req) + S_DS_GEN); 303 cmd->size = cpu_to_le16(sizeof(*add_ba_req) + S_DS_GEN);
@@ -321,9 +319,7 @@ int mwifiex_cmd_11n_addba_rsp_gen(struct mwifiex_private *priv,
321 struct host_cmd_ds_11n_addba_req 319 struct host_cmd_ds_11n_addba_req
322 *cmd_addba_req) 320 *cmd_addba_req)
323{ 321{
324 struct host_cmd_ds_11n_addba_rsp *add_ba_rsp = 322 struct host_cmd_ds_11n_addba_rsp *add_ba_rsp = &cmd->params.add_ba_rsp;
325 (struct host_cmd_ds_11n_addba_rsp *)
326 &cmd->params.add_ba_rsp;
327 u8 tid; 323 u8 tid;
328 int win_size; 324 int win_size;
329 uint16_t block_ack_param_set; 325 uint16_t block_ack_param_set;
@@ -368,8 +364,7 @@ int mwifiex_cmd_11n_addba_rsp_gen(struct mwifiex_private *priv,
368 */ 364 */
369int mwifiex_cmd_11n_delba(struct host_cmd_ds_command *cmd, void *data_buf) 365int mwifiex_cmd_11n_delba(struct host_cmd_ds_command *cmd, void *data_buf)
370{ 366{
371 struct host_cmd_ds_11n_delba *del_ba = (struct host_cmd_ds_11n_delba *) 367 struct host_cmd_ds_11n_delba *del_ba = &cmd->params.del_ba;
372 &cmd->params.del_ba;
373 368
374 cmd->command = cpu_to_le16(HostCmd_CMD_11N_DELBA); 369 cmd->command = cpu_to_le16(HostCmd_CMD_11N_DELBA);
375 cmd->size = cpu_to_le16(sizeof(*del_ba) + S_DS_GEN); 370 cmd->size = cpu_to_le16(sizeof(*del_ba) + S_DS_GEN);
@@ -399,8 +394,7 @@ int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *priv,
399 int start_win, end_win, win_size; 394 int start_win, end_win, win_size;
400 u16 pkt_index; 395 u16 pkt_index;
401 396
402 tbl = mwifiex_11n_get_rx_reorder_tbl((struct mwifiex_private *) priv, 397 tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid, ta);
403 tid, ta);
404 if (!tbl) { 398 if (!tbl) {
405 if (pkt_type != PKT_TYPE_BAR) 399 if (pkt_type != PKT_TYPE_BAR)
406 mwifiex_process_rx_packet(priv->adapter, payload); 400 mwifiex_process_rx_packet(priv->adapter, payload);
@@ -521,9 +515,7 @@ mwifiex_del_ba_tbl(struct mwifiex_private *priv, int tid, u8 *peer_mac,
521int mwifiex_ret_11n_addba_resp(struct mwifiex_private *priv, 515int mwifiex_ret_11n_addba_resp(struct mwifiex_private *priv,
522 struct host_cmd_ds_command *resp) 516 struct host_cmd_ds_command *resp)
523{ 517{
524 struct host_cmd_ds_11n_addba_rsp *add_ba_rsp = 518 struct host_cmd_ds_11n_addba_rsp *add_ba_rsp = &resp->params.add_ba_rsp;
525 (struct host_cmd_ds_11n_addba_rsp *)
526 &resp->params.add_ba_rsp;
527 int tid, win_size; 519 int tid, win_size;
528 struct mwifiex_rx_reorder_tbl *tbl; 520 struct mwifiex_rx_reorder_tbl *tbl;
529 uint16_t block_ack_param_set; 521 uint16_t block_ack_param_set;
diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c
index 5c7fd185373..3af88b8cfcb 100644
--- a/drivers/net/wireless/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/mwifiex/cfg80211.c
@@ -170,7 +170,9 @@ mwifiex_cfg80211_set_default_key(struct wiphy *wiphy, struct net_device *netdev,
170 if (!priv->sec_info.wep_enabled) 170 if (!priv->sec_info.wep_enabled)
171 return 0; 171 return 0;
172 172
173 if (mwifiex_set_encode(priv, NULL, 0, key_index, NULL, 0)) { 173 if (priv->bss_type == MWIFIEX_BSS_TYPE_UAP) {
174 priv->wep_key_curr_index = key_index;
175 } else if (mwifiex_set_encode(priv, NULL, 0, key_index, NULL, 0)) {
174 wiphy_err(wiphy, "set default Tx key index\n"); 176 wiphy_err(wiphy, "set default Tx key index\n");
175 return -EFAULT; 177 return -EFAULT;
176 } 178 }
@@ -187,9 +189,25 @@ mwifiex_cfg80211_add_key(struct wiphy *wiphy, struct net_device *netdev,
187 struct key_params *params) 189 struct key_params *params)
188{ 190{
189 struct mwifiex_private *priv = mwifiex_netdev_get_priv(netdev); 191 struct mwifiex_private *priv = mwifiex_netdev_get_priv(netdev);
192 struct mwifiex_wep_key *wep_key;
190 const u8 bc_mac[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; 193 const u8 bc_mac[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
191 const u8 *peer_mac = pairwise ? mac_addr : bc_mac; 194 const u8 *peer_mac = pairwise ? mac_addr : bc_mac;
192 195
196 if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_UAP &&
197 (params->cipher == WLAN_CIPHER_SUITE_WEP40 ||
198 params->cipher == WLAN_CIPHER_SUITE_WEP104)) {
199 if (params->key && params->key_len) {
200 wep_key = &priv->wep_key[key_index];
201 memset(wep_key, 0, sizeof(struct mwifiex_wep_key));
202 memcpy(wep_key->key_material, params->key,
203 params->key_len);
204 wep_key->key_index = key_index;
205 wep_key->key_length = params->key_len;
206 priv->sec_info.wep_enabled = 1;
207 }
208 return 0;
209 }
210
193 if (mwifiex_set_encode(priv, params->key, params->key_len, 211 if (mwifiex_set_encode(priv, params->key, params->key_len,
194 key_index, peer_mac, 0)) { 212 key_index, peer_mac, 0)) {
195 wiphy_err(wiphy, "crypto keys added\n"); 213 wiphy_err(wiphy, "crypto keys added\n");
@@ -242,13 +260,13 @@ static int mwifiex_send_domain_info_cmd_fw(struct wiphy *wiphy)
242 flag = 1; 260 flag = 1;
243 first_chan = (u32) ch->hw_value; 261 first_chan = (u32) ch->hw_value;
244 next_chan = first_chan; 262 next_chan = first_chan;
245 max_pwr = ch->max_power; 263 max_pwr = ch->max_reg_power;
246 no_of_parsed_chan = 1; 264 no_of_parsed_chan = 1;
247 continue; 265 continue;
248 } 266 }
249 267
250 if (ch->hw_value == next_chan + 1 && 268 if (ch->hw_value == next_chan + 1 &&
251 ch->max_power == max_pwr) { 269 ch->max_reg_power == max_pwr) {
252 next_chan++; 270 next_chan++;
253 no_of_parsed_chan++; 271 no_of_parsed_chan++;
254 } else { 272 } else {
@@ -259,7 +277,7 @@ static int mwifiex_send_domain_info_cmd_fw(struct wiphy *wiphy)
259 no_of_triplet++; 277 no_of_triplet++;
260 first_chan = (u32) ch->hw_value; 278 first_chan = (u32) ch->hw_value;
261 next_chan = first_chan; 279 next_chan = first_chan;
262 max_pwr = ch->max_power; 280 max_pwr = ch->max_reg_power;
263 no_of_parsed_chan = 1; 281 no_of_parsed_chan = 1;
264 } 282 }
265 } 283 }
@@ -384,13 +402,13 @@ mwifiex_set_rf_channel(struct mwifiex_private *priv,
384 cfp.freq = chan->center_freq; 402 cfp.freq = chan->center_freq;
385 cfp.channel = ieee80211_frequency_to_channel(chan->center_freq); 403 cfp.channel = ieee80211_frequency_to_channel(chan->center_freq);
386 404
387 if (mwifiex_bss_set_channel(priv, &cfp)) 405 if (priv->bss_type == MWIFIEX_BSS_TYPE_STA) {
388 return -EFAULT; 406 if (mwifiex_bss_set_channel(priv, &cfp))
389 407 return -EFAULT;
390 if (priv->bss_type == MWIFIEX_BSS_TYPE_STA)
391 return mwifiex_drv_change_adhoc_chan(priv, cfp.channel); 408 return mwifiex_drv_change_adhoc_chan(priv, cfp.channel);
392 else 409 }
393 return mwifiex_uap_set_channel(priv, cfp.channel); 410
411 return 0;
394} 412}
395 413
396/* 414/*
@@ -962,12 +980,25 @@ static int mwifiex_cfg80211_start_ap(struct wiphy *wiphy,
962 return -EINVAL; 980 return -EINVAL;
963 } 981 }
964 982
983 bss_cfg->channel =
984 (u8)ieee80211_frequency_to_channel(params->channel->center_freq);
985 bss_cfg->band_cfg = BAND_CONFIG_MANUAL;
986
987 if (mwifiex_set_rf_channel(priv, params->channel,
988 params->channel_type)) {
989 kfree(bss_cfg);
990 wiphy_err(wiphy, "Failed to set band config information!\n");
991 return -1;
992 }
993
965 if (mwifiex_set_secure_params(priv, bss_cfg, params)) { 994 if (mwifiex_set_secure_params(priv, bss_cfg, params)) {
966 kfree(bss_cfg); 995 kfree(bss_cfg);
967 wiphy_err(wiphy, "Failed to parse secuirty parameters!\n"); 996 wiphy_err(wiphy, "Failed to parse secuirty parameters!\n");
968 return -1; 997 return -1;
969 } 998 }
970 999
1000 mwifiex_set_ht_params(priv, bss_cfg, params);
1001
971 if (mwifiex_send_cmd_sync(priv, HostCmd_CMD_UAP_BSS_STOP, 1002 if (mwifiex_send_cmd_sync(priv, HostCmd_CMD_UAP_BSS_STOP,
972 HostCmd_ACT_GEN_SET, 0, NULL)) { 1003 HostCmd_ACT_GEN_SET, 0, NULL)) {
973 wiphy_err(wiphy, "Failed to stop the BSS\n"); 1004 wiphy_err(wiphy, "Failed to stop the BSS\n");
@@ -991,6 +1022,16 @@ static int mwifiex_cfg80211_start_ap(struct wiphy *wiphy,
991 return -1; 1022 return -1;
992 } 1023 }
993 1024
1025 if (priv->sec_info.wep_enabled)
1026 priv->curr_pkt_filter |= HostCmd_ACT_MAC_WEP_ENABLE;
1027 else
1028 priv->curr_pkt_filter &= ~HostCmd_ACT_MAC_WEP_ENABLE;
1029
1030 if (mwifiex_send_cmd_sync(priv, HostCmd_CMD_MAC_CONTROL,
1031 HostCmd_ACT_GEN_SET, 0,
1032 &priv->curr_pkt_filter))
1033 return -1;
1034
994 return 0; 1035 return 0;
995} 1036}
996 1037
@@ -1382,7 +1423,7 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy, struct net_device *dev,
1382 1423
1383 priv->user_scan_cfg->chan_list[i].scan_time = 0; 1424 priv->user_scan_cfg->chan_list[i].scan_time = 0;
1384 } 1425 }
1385 if (mwifiex_set_user_scan_ioctl(priv, priv->user_scan_cfg)) 1426 if (mwifiex_scan_networks(priv, priv->user_scan_cfg))
1386 return -EFAULT; 1427 return -EFAULT;
1387 1428
1388 if (request->ie && request->ie_len) { 1429 if (request->ie && request->ie_len) {
@@ -1703,7 +1744,7 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
1703 1744
1704 memcpy(wiphy->perm_addr, priv->curr_addr, ETH_ALEN); 1745 memcpy(wiphy->perm_addr, priv->curr_addr, ETH_ALEN);
1705 wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM; 1746 wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
1706 wiphy->flags |= WIPHY_FLAG_HAVE_AP_SME | WIPHY_FLAG_CUSTOM_REGULATORY; 1747 wiphy->flags |= WIPHY_FLAG_HAVE_AP_SME;
1707 1748
1708 /* Reserve space for mwifiex specific private data for BSS */ 1749 /* Reserve space for mwifiex specific private data for BSS */
1709 wiphy->bss_priv_size = sizeof(struct mwifiex_bss_priv); 1750 wiphy->bss_priv_size = sizeof(struct mwifiex_bss_priv);
@@ -1714,7 +1755,7 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
1714 wdev_priv = wiphy_priv(wiphy); 1755 wdev_priv = wiphy_priv(wiphy);
1715 *(unsigned long *)wdev_priv = (unsigned long)adapter; 1756 *(unsigned long *)wdev_priv = (unsigned long)adapter;
1716 1757
1717 set_wiphy_dev(wiphy, (struct device *)priv->adapter->dev); 1758 set_wiphy_dev(wiphy, priv->adapter->dev);
1718 1759
1719 ret = wiphy_register(wiphy); 1760 ret = wiphy_register(wiphy);
1720 if (ret < 0) { 1761 if (ret < 0) {
diff --git a/drivers/net/wireless/mwifiex/cmdevt.c b/drivers/net/wireless/mwifiex/cmdevt.c
index 51e023ec1de..c68adec3cc8 100644
--- a/drivers/net/wireless/mwifiex/cmdevt.c
+++ b/drivers/net/wireless/mwifiex/cmdevt.c
@@ -578,6 +578,7 @@ int mwifiex_send_cmd_async(struct mwifiex_private *priv, uint16_t cmd_no,
578 } else { 578 } else {
579 adapter->cmd_queued = cmd_node; 579 adapter->cmd_queued = cmd_node;
580 mwifiex_insert_cmd_to_pending_q(adapter, cmd_node, true); 580 mwifiex_insert_cmd_to_pending_q(adapter, cmd_node, true);
581 queue_work(adapter->workqueue, &adapter->main_work);
581 } 582 }
582 583
583 return ret; 584 return ret;
@@ -1102,7 +1103,8 @@ int mwifiex_ret_802_11_hs_cfg(struct mwifiex_private *priv,
1102 &resp->params.opt_hs_cfg; 1103 &resp->params.opt_hs_cfg;
1103 uint32_t conditions = le32_to_cpu(phs_cfg->params.hs_config.conditions); 1104 uint32_t conditions = le32_to_cpu(phs_cfg->params.hs_config.conditions);
1104 1105
1105 if (phs_cfg->action == cpu_to_le16(HS_ACTIVATE)) { 1106 if (phs_cfg->action == cpu_to_le16(HS_ACTIVATE) &&
1107 adapter->iface_type == MWIFIEX_SDIO) {
1106 mwifiex_hs_activated_event(priv, true); 1108 mwifiex_hs_activated_event(priv, true);
1107 return 0; 1109 return 0;
1108 } else { 1110 } else {
@@ -1114,6 +1116,9 @@ int mwifiex_ret_802_11_hs_cfg(struct mwifiex_private *priv,
1114 } 1116 }
1115 if (conditions != HOST_SLEEP_CFG_CANCEL) { 1117 if (conditions != HOST_SLEEP_CFG_CANCEL) {
1116 adapter->is_hs_configured = true; 1118 adapter->is_hs_configured = true;
1119 if (adapter->iface_type == MWIFIEX_USB ||
1120 adapter->iface_type == MWIFIEX_PCIE)
1121 mwifiex_hs_activated_event(priv, true);
1117 } else { 1122 } else {
1118 adapter->is_hs_configured = false; 1123 adapter->is_hs_configured = false;
1119 if (adapter->hs_activated) 1124 if (adapter->hs_activated)
diff --git a/drivers/net/wireless/mwifiex/fw.h b/drivers/net/wireless/mwifiex/fw.h
index 561452a5c81..ffb6cdfdb79 100644
--- a/drivers/net/wireless/mwifiex/fw.h
+++ b/drivers/net/wireless/mwifiex/fw.h
@@ -124,6 +124,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
124#define TLV_TYPE_UAP_DTIM_PERIOD (PROPRIETARY_TLV_BASE_ID + 45) 124#define TLV_TYPE_UAP_DTIM_PERIOD (PROPRIETARY_TLV_BASE_ID + 45)
125#define TLV_TYPE_UAP_BCAST_SSID (PROPRIETARY_TLV_BASE_ID + 48) 125#define TLV_TYPE_UAP_BCAST_SSID (PROPRIETARY_TLV_BASE_ID + 48)
126#define TLV_TYPE_UAP_RTS_THRESHOLD (PROPRIETARY_TLV_BASE_ID + 51) 126#define TLV_TYPE_UAP_RTS_THRESHOLD (PROPRIETARY_TLV_BASE_ID + 51)
127#define TLV_TYPE_UAP_WEP_KEY (PROPRIETARY_TLV_BASE_ID + 59)
127#define TLV_TYPE_UAP_WPA_PASSPHRASE (PROPRIETARY_TLV_BASE_ID + 60) 128#define TLV_TYPE_UAP_WPA_PASSPHRASE (PROPRIETARY_TLV_BASE_ID + 60)
128#define TLV_TYPE_UAP_ENCRY_PROTOCOL (PROPRIETARY_TLV_BASE_ID + 64) 129#define TLV_TYPE_UAP_ENCRY_PROTOCOL (PROPRIETARY_TLV_BASE_ID + 64)
129#define TLV_TYPE_UAP_AKMP (PROPRIETARY_TLV_BASE_ID + 65) 130#define TLV_TYPE_UAP_AKMP (PROPRIETARY_TLV_BASE_ID + 65)
@@ -162,6 +163,12 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
162 163
163#define ISSUPP_11NENABLED(FwCapInfo) (FwCapInfo & BIT(11)) 164#define ISSUPP_11NENABLED(FwCapInfo) (FwCapInfo & BIT(11))
164 165
166#define MWIFIEX_DEF_HT_CAP (IEEE80211_HT_CAP_DSSSCCK40 | \
167 (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT) | \
168 IEEE80211_HT_CAP_SM_PS)
169
170#define MWIFIEX_DEF_AMPDU IEEE80211_HT_AMPDU_PARM_FACTOR
171
165/* dev_cap bitmap 172/* dev_cap bitmap
166 * BIT 173 * BIT
167 * 0-16 reserved 174 * 0-16 reserved
@@ -219,6 +226,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
219#define HostCmd_CMD_RF_REG_ACCESS 0x001b 226#define HostCmd_CMD_RF_REG_ACCESS 0x001b
220#define HostCmd_CMD_PMIC_REG_ACCESS 0x00ad 227#define HostCmd_CMD_PMIC_REG_ACCESS 0x00ad
221#define HostCmd_CMD_802_11_RF_CHANNEL 0x001d 228#define HostCmd_CMD_802_11_RF_CHANNEL 0x001d
229#define HostCmd_CMD_RF_TX_PWR 0x001e
222#define HostCmd_CMD_802_11_DEAUTHENTICATE 0x0024 230#define HostCmd_CMD_802_11_DEAUTHENTICATE 0x0024
223#define HostCmd_CMD_MAC_CONTROL 0x0028 231#define HostCmd_CMD_MAC_CONTROL 0x0028
224#define HostCmd_CMD_802_11_AD_HOC_START 0x002b 232#define HostCmd_CMD_802_11_AD_HOC_START 0x002b
@@ -869,6 +877,13 @@ struct host_cmd_ds_txpwr_cfg {
869 __le32 mode; 877 __le32 mode;
870} __packed; 878} __packed;
871 879
880struct host_cmd_ds_rf_tx_pwr {
881 __le16 action;
882 __le16 cur_level;
883 u8 max_power;
884 u8 min_power;
885} __packed;
886
872struct mwifiex_bcn_param { 887struct mwifiex_bcn_param {
873 u8 bssid[ETH_ALEN]; 888 u8 bssid[ETH_ALEN];
874 u8 rssi; 889 u8 rssi;
@@ -1195,6 +1210,13 @@ struct host_cmd_tlv_passphrase {
1195 u8 passphrase[0]; 1210 u8 passphrase[0];
1196} __packed; 1211} __packed;
1197 1212
1213struct host_cmd_tlv_wep_key {
1214 struct host_cmd_tlv tlv;
1215 u8 key_index;
1216 u8 is_default;
1217 u8 key[1];
1218};
1219
1198struct host_cmd_tlv_auth_type { 1220struct host_cmd_tlv_auth_type {
1199 struct host_cmd_tlv tlv; 1221 struct host_cmd_tlv tlv;
1200 u8 auth_type; 1222 u8 auth_type;
@@ -1347,6 +1369,7 @@ struct host_cmd_ds_command {
1347 struct host_cmd_ds_tx_rate_query tx_rate; 1369 struct host_cmd_ds_tx_rate_query tx_rate;
1348 struct host_cmd_ds_tx_rate_cfg tx_rate_cfg; 1370 struct host_cmd_ds_tx_rate_cfg tx_rate_cfg;
1349 struct host_cmd_ds_txpwr_cfg txp_cfg; 1371 struct host_cmd_ds_txpwr_cfg txp_cfg;
1372 struct host_cmd_ds_rf_tx_pwr txp;
1350 struct host_cmd_ds_802_11_ps_mode_enh psmode_enh; 1373 struct host_cmd_ds_802_11_ps_mode_enh psmode_enh;
1351 struct host_cmd_ds_802_11_hs_cfg_enh opt_hs_cfg; 1374 struct host_cmd_ds_802_11_hs_cfg_enh opt_hs_cfg;
1352 struct host_cmd_ds_802_11_scan scan; 1375 struct host_cmd_ds_802_11_scan scan;
diff --git a/drivers/net/wireless/mwifiex/ie.c b/drivers/net/wireless/mwifiex/ie.c
index 383820a52be..8374e33f195 100644
--- a/drivers/net/wireless/mwifiex/ie.c
+++ b/drivers/net/wireless/mwifiex/ie.c
@@ -225,29 +225,46 @@ int mwifiex_set_mgmt_ies(struct mwifiex_private *priv,
225 struct cfg80211_ap_settings *params) 225 struct cfg80211_ap_settings *params)
226{ 226{
227 struct mwifiex_ie *beacon_ie = NULL, *pr_ie = NULL; 227 struct mwifiex_ie *beacon_ie = NULL, *pr_ie = NULL;
228 struct mwifiex_ie *ar_ie = NULL, *rsn_ie = NULL; 228 struct mwifiex_ie *ar_ie = NULL, *gen_ie = NULL;
229 struct ieee_types_header *ie = NULL; 229 struct ieee_types_header *rsn_ie = NULL, *wpa_ie = NULL;
230 u16 beacon_idx = MWIFIEX_AUTO_IDX_MASK, pr_idx = MWIFIEX_AUTO_IDX_MASK; 230 u16 beacon_idx = MWIFIEX_AUTO_IDX_MASK, pr_idx = MWIFIEX_AUTO_IDX_MASK;
231 u16 ar_idx = MWIFIEX_AUTO_IDX_MASK, rsn_idx = MWIFIEX_AUTO_IDX_MASK; 231 u16 ar_idx = MWIFIEX_AUTO_IDX_MASK, rsn_idx = MWIFIEX_AUTO_IDX_MASK;
232 u16 mask; 232 u16 mask, ie_len = 0;
233 const u8 *vendor_ie;
233 int ret = 0; 234 int ret = 0;
234 235
235 if (params->beacon.tail && params->beacon.tail_len) { 236 if (params->beacon.tail && params->beacon.tail_len) {
236 ie = (void *)cfg80211_find_ie(WLAN_EID_RSN, params->beacon.tail, 237 gen_ie = kzalloc(sizeof(struct mwifiex_ie), GFP_KERNEL);
237 params->beacon.tail_len); 238 if (!gen_ie)
238 if (ie) { 239 return -ENOMEM;
239 rsn_ie = kmalloc(sizeof(struct mwifiex_ie), GFP_KERNEL); 240 gen_ie->ie_index = cpu_to_le16(rsn_idx);
240 if (!rsn_ie) 241 mask = MGMT_MASK_BEACON | MGMT_MASK_PROBE_RESP |
241 return -ENOMEM; 242 MGMT_MASK_ASSOC_RESP;
242 243 gen_ie->mgmt_subtype_mask = cpu_to_le16(mask);
243 rsn_ie->ie_index = cpu_to_le16(rsn_idx); 244
244 mask = MGMT_MASK_BEACON | MGMT_MASK_PROBE_RESP | 245 rsn_ie = (void *)cfg80211_find_ie(WLAN_EID_RSN,
245 MGMT_MASK_ASSOC_RESP; 246 params->beacon.tail,
246 rsn_ie->mgmt_subtype_mask = cpu_to_le16(mask); 247 params->beacon.tail_len);
247 rsn_ie->ie_length = cpu_to_le16(ie->len + 2); 248 if (rsn_ie) {
248 memcpy(rsn_ie->ie_buffer, ie, ie->len + 2); 249 memcpy(gen_ie->ie_buffer, rsn_ie, rsn_ie->len + 2);
249 250 ie_len = rsn_ie->len + 2;
250 if (mwifiex_update_uap_custom_ie(priv, rsn_ie, &rsn_idx, 251 gen_ie->ie_length = cpu_to_le16(ie_len);
252 }
253
254 vendor_ie = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
255 WLAN_OUI_TYPE_MICROSOFT_WPA,
256 params->beacon.tail,
257 params->beacon.tail_len);
258 if (vendor_ie) {
259 wpa_ie = (struct ieee_types_header *)vendor_ie;
260 memcpy(gen_ie->ie_buffer + ie_len,
261 wpa_ie, wpa_ie->len + 2);
262 ie_len += wpa_ie->len + 2;
263 gen_ie->ie_length = cpu_to_le16(ie_len);
264 }
265
266 if (rsn_ie || wpa_ie) {
267 if (mwifiex_update_uap_custom_ie(priv, gen_ie, &rsn_idx,
251 NULL, NULL, 268 NULL, NULL,
252 NULL, NULL)) { 269 NULL, NULL)) {
253 ret = -1; 270 ret = -1;
@@ -320,7 +337,7 @@ done:
320 kfree(beacon_ie); 337 kfree(beacon_ie);
321 kfree(pr_ie); 338 kfree(pr_ie);
322 kfree(ar_ie); 339 kfree(ar_ie);
323 kfree(rsn_ie); 340 kfree(gen_ie);
324 341
325 return ret; 342 return ret;
326} 343}
diff --git a/drivers/net/wireless/mwifiex/init.c b/drivers/net/wireless/mwifiex/init.c
index c1cb004db91..b543a4d82ff 100644
--- a/drivers/net/wireless/mwifiex/init.c
+++ b/drivers/net/wireless/mwifiex/init.c
@@ -57,6 +57,69 @@ static int mwifiex_add_bss_prio_tbl(struct mwifiex_private *priv)
57 return 0; 57 return 0;
58} 58}
59 59
60static void scan_delay_timer_fn(unsigned long data)
61{
62 struct mwifiex_private *priv = (struct mwifiex_private *)data;
63 struct mwifiex_adapter *adapter = priv->adapter;
64 struct cmd_ctrl_node *cmd_node, *tmp_node;
65 unsigned long flags;
66
67 if (!mwifiex_wmm_lists_empty(adapter)) {
68 if (adapter->scan_delay_cnt == MWIFIEX_MAX_SCAN_DELAY_CNT) {
69 /*
70 * Abort scan operation by cancelling all pending scan
71 * command
72 */
73 spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
74 list_for_each_entry_safe(cmd_node, tmp_node,
75 &adapter->scan_pending_q,
76 list) {
77 list_del(&cmd_node->list);
78 cmd_node->wait_q_enabled = false;
79 mwifiex_insert_cmd_to_free_q(adapter, cmd_node);
80 }
81 spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
82 flags);
83
84 spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
85 adapter->scan_processing = false;
86 spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock,
87 flags);
88
89 if (priv->user_scan_cfg) {
90 dev_dbg(priv->adapter->dev,
91 "info: %s: scan aborted\n", __func__);
92 cfg80211_scan_done(priv->scan_request, 1);
93 priv->scan_request = NULL;
94 kfree(priv->user_scan_cfg);
95 priv->user_scan_cfg = NULL;
96 }
97 } else {
98 /*
99 * Tx data queue is still not empty, delay scan
100 * operation further by 20msec.
101 */
102 mod_timer(&priv->scan_delay_timer, jiffies +
103 msecs_to_jiffies(MWIFIEX_SCAN_DELAY_MSEC));
104 adapter->scan_delay_cnt++;
105 }
106 queue_work(priv->adapter->workqueue, &priv->adapter->main_work);
107 } else {
108 /*
109 * Tx data queue is empty. Get scan command from scan_pending_q
110 * and put to cmd_pending_q to resume scan operation
111 */
112 adapter->scan_delay_cnt = 0;
113 spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
114 cmd_node = list_first_entry(&adapter->scan_pending_q,
115 struct cmd_ctrl_node, list);
116 list_del(&cmd_node->list);
117 spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags);
118
119 mwifiex_insert_cmd_to_pending_q(adapter, cmd_node, true);
120 }
121}
122
60/* 123/*
61 * This function initializes the private structure and sets default 124 * This function initializes the private structure and sets default
62 * values to the members. 125 * values to the members.
@@ -136,6 +199,9 @@ static int mwifiex_init_priv(struct mwifiex_private *priv)
136 199
137 priv->scan_block = false; 200 priv->scan_block = false;
138 201
202 setup_timer(&priv->scan_delay_timer, scan_delay_timer_fn,
203 (unsigned long)priv);
204
139 return mwifiex_add_bss_prio_tbl(priv); 205 return mwifiex_add_bss_prio_tbl(priv);
140} 206}
141 207
diff --git a/drivers/net/wireless/mwifiex/ioctl.h b/drivers/net/wireless/mwifiex/ioctl.h
index e6be6ee7595..9f088fb88cb 100644
--- a/drivers/net/wireless/mwifiex/ioctl.h
+++ b/drivers/net/wireless/mwifiex/ioctl.h
@@ -21,6 +21,7 @@
21#define _MWIFIEX_IOCTL_H_ 21#define _MWIFIEX_IOCTL_H_
22 22
23#include <net/mac80211.h> 23#include <net/mac80211.h>
24#include <net/lib80211.h>
24 25
25enum { 26enum {
26 MWIFIEX_SCAN_TYPE_UNCHANGED = 0, 27 MWIFIEX_SCAN_TYPE_UNCHANGED = 0,
@@ -71,6 +72,13 @@ struct wpa_param {
71 u8 passphrase[MWIFIEX_WPA_PASSHPHRASE_LEN]; 72 u8 passphrase[MWIFIEX_WPA_PASSHPHRASE_LEN];
72}; 73};
73 74
75struct wep_key {
76 u8 key_index;
77 u8 is_default;
78 u16 length;
79 u8 key[WLAN_KEY_LEN_WEP104];
80};
81
74#define KEY_MGMT_ON_HOST 0x03 82#define KEY_MGMT_ON_HOST 0x03
75#define MWIFIEX_AUTH_MODE_AUTO 0xFF 83#define MWIFIEX_AUTH_MODE_AUTO 0xFF
76#define BAND_CONFIG_MANUAL 0x00 84#define BAND_CONFIG_MANUAL 0x00
@@ -90,6 +98,8 @@ struct mwifiex_uap_bss_param {
90 u16 key_mgmt; 98 u16 key_mgmt;
91 u16 key_mgmt_operation; 99 u16 key_mgmt_operation;
92 struct wpa_param wpa_cfg; 100 struct wpa_param wpa_cfg;
101 struct wep_key wep_cfg[NUM_WEP_KEYS];
102 struct ieee80211_ht_cap ht_cap;
93}; 103};
94 104
95enum { 105enum {
diff --git a/drivers/net/wireless/mwifiex/join.c b/drivers/net/wireless/mwifiex/join.c
index d6b4fb04011..82e63cee1e9 100644
--- a/drivers/net/wireless/mwifiex/join.c
+++ b/drivers/net/wireless/mwifiex/join.c
@@ -1349,22 +1349,16 @@ static int mwifiex_deauthenticate_infra(struct mwifiex_private *priv, u8 *mac)
1349{ 1349{
1350 u8 mac_address[ETH_ALEN]; 1350 u8 mac_address[ETH_ALEN];
1351 int ret; 1351 int ret;
1352 u8 zero_mac[ETH_ALEN] = { 0, 0, 0, 0, 0, 0 };
1353 1352
1354 if (mac) { 1353 if (!mac || is_zero_ether_addr(mac))
1355 if (!memcmp(mac, zero_mac, sizeof(zero_mac))) 1354 memcpy(mac_address,
1356 memcpy((u8 *) &mac_address, 1355 priv->curr_bss_params.bss_descriptor.mac_address,
1357 (u8 *) &priv->curr_bss_params.bss_descriptor. 1356 ETH_ALEN);
1358 mac_address, ETH_ALEN); 1357 else
1359 else 1358 memcpy(mac_address, mac, ETH_ALEN);
1360 memcpy((u8 *) &mac_address, (u8 *) mac, ETH_ALEN);
1361 } else {
1362 memcpy((u8 *) &mac_address, (u8 *) &priv->curr_bss_params.
1363 bss_descriptor.mac_address, ETH_ALEN);
1364 }
1365 1359
1366 ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_DEAUTHENTICATE, 1360 ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_DEAUTHENTICATE,
1367 HostCmd_ACT_GEN_SET, 0, &mac_address); 1361 HostCmd_ACT_GEN_SET, 0, mac_address);
1368 1362
1369 return ret; 1363 return ret;
1370} 1364}
diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c
index 3192855c31c..f0219efc895 100644
--- a/drivers/net/wireless/mwifiex/main.c
+++ b/drivers/net/wireless/mwifiex/main.c
@@ -190,7 +190,8 @@ process_start:
190 adapter->tx_lock_flag) 190 adapter->tx_lock_flag)
191 break; 191 break;
192 192
193 if (adapter->scan_processing || adapter->data_sent || 193 if ((adapter->scan_processing &&
194 !adapter->scan_delay_cnt) || adapter->data_sent ||
194 mwifiex_wmm_lists_empty(adapter)) { 195 mwifiex_wmm_lists_empty(adapter)) {
195 if (adapter->cmd_sent || adapter->curr_cmd || 196 if (adapter->cmd_sent || adapter->curr_cmd ||
196 (!is_command_pending(adapter))) 197 (!is_command_pending(adapter)))
@@ -244,8 +245,8 @@ process_start:
244 } 245 }
245 } 246 }
246 247
247 if (!adapter->scan_processing && !adapter->data_sent && 248 if ((!adapter->scan_processing || adapter->scan_delay_cnt) &&
248 !mwifiex_wmm_lists_empty(adapter)) { 249 !adapter->data_sent && !mwifiex_wmm_lists_empty(adapter)) {
249 mwifiex_wmm_process_tx(adapter); 250 mwifiex_wmm_process_tx(adapter);
250 if (adapter->hs_activated) { 251 if (adapter->hs_activated) {
251 adapter->is_hs_configured = false; 252 adapter->is_hs_configured = false;
diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h
index bd3b0bf94b9..7cd95cc99a8 100644
--- a/drivers/net/wireless/mwifiex/main.h
+++ b/drivers/net/wireless/mwifiex/main.h
@@ -79,14 +79,17 @@ enum {
79 79
80#define SCAN_BEACON_ENTRY_PAD 6 80#define SCAN_BEACON_ENTRY_PAD 6
81 81
82#define MWIFIEX_PASSIVE_SCAN_CHAN_TIME 200 82#define MWIFIEX_PASSIVE_SCAN_CHAN_TIME 110
83#define MWIFIEX_ACTIVE_SCAN_CHAN_TIME 200 83#define MWIFIEX_ACTIVE_SCAN_CHAN_TIME 30
84#define MWIFIEX_SPECIFIC_SCAN_CHAN_TIME 110 84#define MWIFIEX_SPECIFIC_SCAN_CHAN_TIME 30
85 85
86#define SCAN_RSSI(RSSI) (0x100 - ((u8)(RSSI))) 86#define SCAN_RSSI(RSSI) (0x100 - ((u8)(RSSI)))
87 87
88#define MWIFIEX_MAX_TOTAL_SCAN_TIME (MWIFIEX_TIMER_10S - MWIFIEX_TIMER_1S) 88#define MWIFIEX_MAX_TOTAL_SCAN_TIME (MWIFIEX_TIMER_10S - MWIFIEX_TIMER_1S)
89 89
90#define MWIFIEX_MAX_SCAN_DELAY_CNT 50
91#define MWIFIEX_SCAN_DELAY_MSEC 20
92
90#define RSN_GTK_OUI_OFFSET 2 93#define RSN_GTK_OUI_OFFSET 2
91 94
92#define MWIFIEX_OUI_NOT_PRESENT 0 95#define MWIFIEX_OUI_NOT_PRESENT 0
@@ -482,6 +485,7 @@ struct mwifiex_private {
482 u16 proberesp_idx; 485 u16 proberesp_idx;
483 u16 assocresp_idx; 486 u16 assocresp_idx;
484 u16 rsn_idx; 487 u16 rsn_idx;
488 struct timer_list scan_delay_timer;
485}; 489};
486 490
487enum mwifiex_ba_status { 491enum mwifiex_ba_status {
@@ -686,6 +690,7 @@ struct mwifiex_adapter {
686 struct completion fw_load; 690 struct completion fw_load;
687 u8 country_code[IEEE80211_COUNTRY_STRING_LEN]; 691 u8 country_code[IEEE80211_COUNTRY_STRING_LEN];
688 u16 max_mgmt_ie_index; 692 u16 max_mgmt_ie_index;
693 u8 scan_delay_cnt;
689}; 694};
690 695
691int mwifiex_init_lock_list(struct mwifiex_adapter *adapter); 696int mwifiex_init_lock_list(struct mwifiex_adapter *adapter);
@@ -835,6 +840,9 @@ void mwifiex_init_priv_params(struct mwifiex_private *priv,
835int mwifiex_set_secure_params(struct mwifiex_private *priv, 840int mwifiex_set_secure_params(struct mwifiex_private *priv,
836 struct mwifiex_uap_bss_param *bss_config, 841 struct mwifiex_uap_bss_param *bss_config,
837 struct cfg80211_ap_settings *params); 842 struct cfg80211_ap_settings *params);
843void mwifiex_set_ht_params(struct mwifiex_private *priv,
844 struct mwifiex_uap_bss_param *bss_cfg,
845 struct cfg80211_ap_settings *params);
838 846
839/* 847/*
840 * This function checks if the queuing is RA based or not. 848 * This function checks if the queuing is RA based or not.
@@ -941,8 +949,8 @@ int mwifiex_drv_get_data_rate(struct mwifiex_private *priv,
941 struct mwifiex_rate_cfg *rate); 949 struct mwifiex_rate_cfg *rate);
942int mwifiex_request_scan(struct mwifiex_private *priv, 950int mwifiex_request_scan(struct mwifiex_private *priv,
943 struct cfg80211_ssid *req_ssid); 951 struct cfg80211_ssid *req_ssid);
944int mwifiex_set_user_scan_ioctl(struct mwifiex_private *priv, 952int mwifiex_scan_networks(struct mwifiex_private *priv,
945 struct mwifiex_user_scan_cfg *scan_req); 953 const struct mwifiex_user_scan_cfg *user_scan_in);
946int mwifiex_set_radio(struct mwifiex_private *priv, u8 option); 954int mwifiex_set_radio(struct mwifiex_private *priv, u8 option);
947 955
948int mwifiex_drv_change_adhoc_chan(struct mwifiex_private *priv, u16 channel); 956int mwifiex_drv_change_adhoc_chan(struct mwifiex_private *priv, u16 channel);
@@ -985,7 +993,6 @@ int mwifiex_set_tx_power(struct mwifiex_private *priv,
985 993
986int mwifiex_main_process(struct mwifiex_adapter *); 994int mwifiex_main_process(struct mwifiex_adapter *);
987 995
988int mwifiex_uap_set_channel(struct mwifiex_private *priv, int channel);
989int mwifiex_bss_set_channel(struct mwifiex_private *, 996int mwifiex_bss_set_channel(struct mwifiex_private *,
990 struct mwifiex_chan_freq_power *cfp); 997 struct mwifiex_chan_freq_power *cfp);
991int mwifiex_get_bss_info(struct mwifiex_private *, 998int mwifiex_get_bss_info(struct mwifiex_private *,
diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c
index 74f04571572..04dc7ca4ac2 100644
--- a/drivers/net/wireless/mwifiex/scan.c
+++ b/drivers/net/wireless/mwifiex/scan.c
@@ -28,7 +28,10 @@
28/* The maximum number of channels the firmware can scan per command */ 28/* The maximum number of channels the firmware can scan per command */
29#define MWIFIEX_MAX_CHANNELS_PER_SPECIFIC_SCAN 14 29#define MWIFIEX_MAX_CHANNELS_PER_SPECIFIC_SCAN 14
30 30
31#define MWIFIEX_CHANNELS_PER_SCAN_CMD 4 31#define MWIFIEX_DEF_CHANNELS_PER_SCAN_CMD 4
32#define MWIFIEX_LIMIT_1_CHANNEL_PER_SCAN_CMD 15
33#define MWIFIEX_LIMIT_2_CHANNELS_PER_SCAN_CMD 27
34#define MWIFIEX_LIMIT_3_CHANNELS_PER_SCAN_CMD 35
32 35
33/* Memory needed to store a max sized Channel List TLV for a firmware scan */ 36/* Memory needed to store a max sized Channel List TLV for a firmware scan */
34#define CHAN_TLV_MAX_SIZE (sizeof(struct mwifiex_ie_types_header) \ 37#define CHAN_TLV_MAX_SIZE (sizeof(struct mwifiex_ie_types_header) \
@@ -471,7 +474,7 @@ mwifiex_is_network_compatible(struct mwifiex_private *priv,
471 * This routine is used for any scan that is not provided with a 474 * This routine is used for any scan that is not provided with a
472 * specific channel list to scan. 475 * specific channel list to scan.
473 */ 476 */
474static void 477static int
475mwifiex_scan_create_channel_list(struct mwifiex_private *priv, 478mwifiex_scan_create_channel_list(struct mwifiex_private *priv,
476 const struct mwifiex_user_scan_cfg 479 const struct mwifiex_user_scan_cfg
477 *user_scan_in, 480 *user_scan_in,
@@ -528,6 +531,7 @@ mwifiex_scan_create_channel_list(struct mwifiex_private *priv,
528 } 531 }
529 532
530 } 533 }
534 return chan_idx;
531} 535}
532 536
533/* 537/*
@@ -727,6 +731,7 @@ mwifiex_config_scan(struct mwifiex_private *priv,
727 u32 num_probes; 731 u32 num_probes;
728 u32 ssid_len; 732 u32 ssid_len;
729 u32 chan_idx; 733 u32 chan_idx;
734 u32 chan_num;
730 u32 scan_type; 735 u32 scan_type;
731 u16 scan_dur; 736 u16 scan_dur;
732 u8 channel; 737 u8 channel;
@@ -850,7 +855,7 @@ mwifiex_config_scan(struct mwifiex_private *priv,
850 if (*filtered_scan) 855 if (*filtered_scan)
851 *max_chan_per_scan = MWIFIEX_MAX_CHANNELS_PER_SPECIFIC_SCAN; 856 *max_chan_per_scan = MWIFIEX_MAX_CHANNELS_PER_SPECIFIC_SCAN;
852 else 857 else
853 *max_chan_per_scan = MWIFIEX_CHANNELS_PER_SCAN_CMD; 858 *max_chan_per_scan = MWIFIEX_DEF_CHANNELS_PER_SCAN_CMD;
854 859
855 /* If the input config or adapter has the number of Probes set, 860 /* If the input config or adapter has the number of Probes set,
856 add tlv */ 861 add tlv */
@@ -962,13 +967,28 @@ mwifiex_config_scan(struct mwifiex_private *priv,
962 dev_dbg(adapter->dev, 967 dev_dbg(adapter->dev,
963 "info: Scan: Scanning current channel only\n"); 968 "info: Scan: Scanning current channel only\n");
964 } 969 }
965 970 chan_num = chan_idx;
966 } else { 971 } else {
967 dev_dbg(adapter->dev, 972 dev_dbg(adapter->dev,
968 "info: Scan: Creating full region channel list\n"); 973 "info: Scan: Creating full region channel list\n");
969 mwifiex_scan_create_channel_list(priv, user_scan_in, 974 chan_num = mwifiex_scan_create_channel_list(priv, user_scan_in,
970 scan_chan_list, 975 scan_chan_list,
971 *filtered_scan); 976 *filtered_scan);
977 }
978
979 /*
980 * In associated state we will reduce the number of channels scanned per
981 * scan command to avoid any traffic delay/loss. This number is decided
982 * based on total number of channels to be scanned due to constraints
983 * of command buffers.
984 */
985 if (priv->media_connected) {
986 if (chan_num < MWIFIEX_LIMIT_1_CHANNEL_PER_SCAN_CMD)
987 *max_chan_per_scan = 1;
988 else if (chan_num < MWIFIEX_LIMIT_2_CHANNELS_PER_SCAN_CMD)
989 *max_chan_per_scan = 2;
990 else if (chan_num < MWIFIEX_LIMIT_3_CHANNELS_PER_SCAN_CMD)
991 *max_chan_per_scan = 3;
972 } 992 }
973} 993}
974 994
@@ -1014,14 +1034,12 @@ mwifiex_ret_802_11_scan_get_tlv_ptrs(struct mwifiex_adapter *adapter,
1014 case TLV_TYPE_TSFTIMESTAMP: 1034 case TLV_TYPE_TSFTIMESTAMP:
1015 dev_dbg(adapter->dev, "info: SCAN_RESP: TSF " 1035 dev_dbg(adapter->dev, "info: SCAN_RESP: TSF "
1016 "timestamp TLV, len = %d\n", tlv_len); 1036 "timestamp TLV, len = %d\n", tlv_len);
1017 *tlv_data = (struct mwifiex_ie_types_data *) 1037 *tlv_data = current_tlv;
1018 current_tlv;
1019 break; 1038 break;
1020 case TLV_TYPE_CHANNELBANDLIST: 1039 case TLV_TYPE_CHANNELBANDLIST:
1021 dev_dbg(adapter->dev, "info: SCAN_RESP: channel" 1040 dev_dbg(adapter->dev, "info: SCAN_RESP: channel"
1022 " band list TLV, len = %d\n", tlv_len); 1041 " band list TLV, len = %d\n", tlv_len);
1023 *tlv_data = (struct mwifiex_ie_types_data *) 1042 *tlv_data = current_tlv;
1024 current_tlv;
1025 break; 1043 break;
1026 default: 1044 default:
1027 dev_err(adapter->dev, 1045 dev_err(adapter->dev,
@@ -1226,15 +1244,15 @@ int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter,
1226 bss_entry->beacon_buf); 1244 bss_entry->beacon_buf);
1227 break; 1245 break;
1228 case WLAN_EID_BSS_COEX_2040: 1246 case WLAN_EID_BSS_COEX_2040:
1229 bss_entry->bcn_bss_co_2040 = (u8 *) (current_ptr + 1247 bss_entry->bcn_bss_co_2040 = current_ptr +
1230 sizeof(struct ieee_types_header)); 1248 sizeof(struct ieee_types_header);
1231 bss_entry->bss_co_2040_offset = (u16) (current_ptr + 1249 bss_entry->bss_co_2040_offset = (u16) (current_ptr +
1232 sizeof(struct ieee_types_header) - 1250 sizeof(struct ieee_types_header) -
1233 bss_entry->beacon_buf); 1251 bss_entry->beacon_buf);
1234 break; 1252 break;
1235 case WLAN_EID_EXT_CAPABILITY: 1253 case WLAN_EID_EXT_CAPABILITY:
1236 bss_entry->bcn_ext_cap = (u8 *) (current_ptr + 1254 bss_entry->bcn_ext_cap = current_ptr +
1237 sizeof(struct ieee_types_header)); 1255 sizeof(struct ieee_types_header);
1238 bss_entry->ext_cap_offset = (u16) (current_ptr + 1256 bss_entry->ext_cap_offset = (u16) (current_ptr +
1239 sizeof(struct ieee_types_header) - 1257 sizeof(struct ieee_types_header) -
1240 bss_entry->beacon_buf); 1258 bss_entry->beacon_buf);
@@ -1276,8 +1294,8 @@ mwifiex_radio_type_to_band(u8 radio_type)
1276 * order to send the appropriate scan commands to firmware to populate or 1294 * order to send the appropriate scan commands to firmware to populate or
1277 * update the internal driver scan table. 1295 * update the internal driver scan table.
1278 */ 1296 */
1279static int mwifiex_scan_networks(struct mwifiex_private *priv, 1297int mwifiex_scan_networks(struct mwifiex_private *priv,
1280 const struct mwifiex_user_scan_cfg *user_scan_in) 1298 const struct mwifiex_user_scan_cfg *user_scan_in)
1281{ 1299{
1282 int ret = 0; 1300 int ret = 0;
1283 struct mwifiex_adapter *adapter = priv->adapter; 1301 struct mwifiex_adapter *adapter = priv->adapter;
@@ -1342,6 +1360,7 @@ static int mwifiex_scan_networks(struct mwifiex_private *priv,
1342 adapter->cmd_queued = cmd_node; 1360 adapter->cmd_queued = cmd_node;
1343 mwifiex_insert_cmd_to_pending_q(adapter, cmd_node, 1361 mwifiex_insert_cmd_to_pending_q(adapter, cmd_node,
1344 true); 1362 true);
1363 queue_work(adapter->workqueue, &adapter->main_work);
1345 } else { 1364 } else {
1346 spin_unlock_irqrestore(&adapter->scan_pending_q_lock, 1365 spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
1347 flags); 1366 flags);
@@ -1358,26 +1377,6 @@ static int mwifiex_scan_networks(struct mwifiex_private *priv,
1358} 1377}
1359 1378
1360/* 1379/*
1361 * Sends IOCTL request to start a scan with user configurations.
1362 *
1363 * This function allocates the IOCTL request buffer, fills it
1364 * with requisite parameters and calls the IOCTL handler.
1365 *
1366 * Upon completion, it also generates a wireless event to notify
1367 * applications.
1368 */
1369int mwifiex_set_user_scan_ioctl(struct mwifiex_private *priv,
1370 struct mwifiex_user_scan_cfg *scan_req)
1371{
1372 int status;
1373
1374 status = mwifiex_scan_networks(priv, scan_req);
1375 queue_work(priv->adapter->workqueue, &priv->adapter->main_work);
1376
1377 return status;
1378}
1379
1380/*
1381 * This function prepares a scan command to be sent to the firmware. 1380 * This function prepares a scan command to be sent to the firmware.
1382 * 1381 *
1383 * This uses the scan command configuration sent to the command processing 1382 * This uses the scan command configuration sent to the command processing
@@ -1683,8 +1682,7 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
1683 goto done; 1682 goto done;
1684 } 1683 }
1685 if (element_id == WLAN_EID_DS_PARAMS) { 1684 if (element_id == WLAN_EID_DS_PARAMS) {
1686 channel = *(u8 *) (current_ptr + 1685 channel = *(current_ptr + sizeof(struct ieee_types_header));
1687 sizeof(struct ieee_types_header));
1688 break; 1686 break;
1689 } 1687 }
1690 1688
@@ -1772,14 +1770,23 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
1772 priv->user_scan_cfg = NULL; 1770 priv->user_scan_cfg = NULL;
1773 } 1771 }
1774 } else { 1772 } else {
1775 /* Get scan command from scan_pending_q and put to 1773 if (!mwifiex_wmm_lists_empty(adapter)) {
1776 cmd_pending_q */ 1774 spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
1777 cmd_node = list_first_entry(&adapter->scan_pending_q, 1775 flags);
1778 struct cmd_ctrl_node, list); 1776 adapter->scan_delay_cnt = 1;
1779 list_del(&cmd_node->list); 1777 mod_timer(&priv->scan_delay_timer, jiffies +
1780 spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags); 1778 msecs_to_jiffies(MWIFIEX_SCAN_DELAY_MSEC));
1781 1779 } else {
1782 mwifiex_insert_cmd_to_pending_q(adapter, cmd_node, true); 1780 /* Get scan command from scan_pending_q and put to
1781 cmd_pending_q */
1782 cmd_node = list_first_entry(&adapter->scan_pending_q,
1783 struct cmd_ctrl_node, list);
1784 list_del(&cmd_node->list);
1785 spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
1786 flags);
1787 mwifiex_insert_cmd_to_pending_q(adapter, cmd_node,
1788 true);
1789 }
1783 } 1790 }
1784 1791
1785done: 1792done:
@@ -2010,12 +2017,11 @@ mwifiex_save_curr_bcn(struct mwifiex_private *priv)
2010 2017
2011 if (curr_bss->bcn_bss_co_2040) 2018 if (curr_bss->bcn_bss_co_2040)
2012 curr_bss->bcn_bss_co_2040 = 2019 curr_bss->bcn_bss_co_2040 =
2013 (u8 *) (curr_bss->beacon_buf + 2020 (curr_bss->beacon_buf + curr_bss->bss_co_2040_offset);
2014 curr_bss->bss_co_2040_offset);
2015 2021
2016 if (curr_bss->bcn_ext_cap) 2022 if (curr_bss->bcn_ext_cap)
2017 curr_bss->bcn_ext_cap = (u8 *) (curr_bss->beacon_buf + 2023 curr_bss->bcn_ext_cap = curr_bss->beacon_buf +
2018 curr_bss->ext_cap_offset); 2024 curr_bss->ext_cap_offset;
2019} 2025}
2020 2026
2021/* 2027/*
diff --git a/drivers/net/wireless/mwifiex/sta_cmd.c b/drivers/net/wireless/mwifiex/sta_cmd.c
index 40e025da6bc..b9cd9ed48c4 100644
--- a/drivers/net/wireless/mwifiex/sta_cmd.c
+++ b/drivers/net/wireless/mwifiex/sta_cmd.c
@@ -260,6 +260,23 @@ static int mwifiex_cmd_tx_power_cfg(struct host_cmd_ds_command *cmd,
260} 260}
261 261
262/* 262/*
263 * This function prepares command to get RF Tx power.
264 */
265static int mwifiex_cmd_rf_tx_power(struct mwifiex_private *priv,
266 struct host_cmd_ds_command *cmd,
267 u16 cmd_action, void *data_buf)
268{
269 struct host_cmd_ds_rf_tx_pwr *txp = &cmd->params.txp;
270
271 cmd->size = cpu_to_le16(sizeof(struct host_cmd_ds_rf_tx_pwr)
272 + S_DS_GEN);
273 cmd->command = cpu_to_le16(HostCmd_CMD_RF_TX_PWR);
274 txp->action = cpu_to_le16(cmd_action);
275
276 return 0;
277}
278
279/*
263 * This function prepares command to set Host Sleep configuration. 280 * This function prepares command to set Host Sleep configuration.
264 * 281 *
265 * Preparation includes - 282 * Preparation includes -
@@ -793,8 +810,7 @@ static int mwifiex_cmd_reg_access(struct host_cmd_ds_command *cmd,
793 struct host_cmd_ds_mac_reg_access *mac_reg; 810 struct host_cmd_ds_mac_reg_access *mac_reg;
794 811
795 cmd->size = cpu_to_le16(sizeof(*mac_reg) + S_DS_GEN); 812 cmd->size = cpu_to_le16(sizeof(*mac_reg) + S_DS_GEN);
796 mac_reg = (struct host_cmd_ds_mac_reg_access *) &cmd-> 813 mac_reg = &cmd->params.mac_reg;
797 params.mac_reg;
798 mac_reg->action = cpu_to_le16(cmd_action); 814 mac_reg->action = cpu_to_le16(cmd_action);
799 mac_reg->offset = 815 mac_reg->offset =
800 cpu_to_le16((u16) le32_to_cpu(reg_rw->offset)); 816 cpu_to_le16((u16) le32_to_cpu(reg_rw->offset));
@@ -806,8 +822,7 @@ static int mwifiex_cmd_reg_access(struct host_cmd_ds_command *cmd,
806 struct host_cmd_ds_bbp_reg_access *bbp_reg; 822 struct host_cmd_ds_bbp_reg_access *bbp_reg;
807 823
808 cmd->size = cpu_to_le16(sizeof(*bbp_reg) + S_DS_GEN); 824 cmd->size = cpu_to_le16(sizeof(*bbp_reg) + S_DS_GEN);
809 bbp_reg = (struct host_cmd_ds_bbp_reg_access *) 825 bbp_reg = &cmd->params.bbp_reg;
810 &cmd->params.bbp_reg;
811 bbp_reg->action = cpu_to_le16(cmd_action); 826 bbp_reg->action = cpu_to_le16(cmd_action);
812 bbp_reg->offset = 827 bbp_reg->offset =
813 cpu_to_le16((u16) le32_to_cpu(reg_rw->offset)); 828 cpu_to_le16((u16) le32_to_cpu(reg_rw->offset));
@@ -819,8 +834,7 @@ static int mwifiex_cmd_reg_access(struct host_cmd_ds_command *cmd,
819 struct host_cmd_ds_rf_reg_access *rf_reg; 834 struct host_cmd_ds_rf_reg_access *rf_reg;
820 835
821 cmd->size = cpu_to_le16(sizeof(*rf_reg) + S_DS_GEN); 836 cmd->size = cpu_to_le16(sizeof(*rf_reg) + S_DS_GEN);
822 rf_reg = (struct host_cmd_ds_rf_reg_access *) 837 rf_reg = &cmd->params.rf_reg;
823 &cmd->params.rf_reg;
824 rf_reg->action = cpu_to_le16(cmd_action); 838 rf_reg->action = cpu_to_le16(cmd_action);
825 rf_reg->offset = cpu_to_le16((u16) le32_to_cpu(reg_rw->offset)); 839 rf_reg->offset = cpu_to_le16((u16) le32_to_cpu(reg_rw->offset));
826 rf_reg->value = (u8) le32_to_cpu(reg_rw->value); 840 rf_reg->value = (u8) le32_to_cpu(reg_rw->value);
@@ -831,8 +845,7 @@ static int mwifiex_cmd_reg_access(struct host_cmd_ds_command *cmd,
831 struct host_cmd_ds_pmic_reg_access *pmic_reg; 845 struct host_cmd_ds_pmic_reg_access *pmic_reg;
832 846
833 cmd->size = cpu_to_le16(sizeof(*pmic_reg) + S_DS_GEN); 847 cmd->size = cpu_to_le16(sizeof(*pmic_reg) + S_DS_GEN);
834 pmic_reg = (struct host_cmd_ds_pmic_reg_access *) &cmd-> 848 pmic_reg = &cmd->params.pmic_reg;
835 params.pmic_reg;
836 pmic_reg->action = cpu_to_le16(cmd_action); 849 pmic_reg->action = cpu_to_le16(cmd_action);
837 pmic_reg->offset = 850 pmic_reg->offset =
838 cpu_to_le16((u16) le32_to_cpu(reg_rw->offset)); 851 cpu_to_le16((u16) le32_to_cpu(reg_rw->offset));
@@ -844,8 +857,7 @@ static int mwifiex_cmd_reg_access(struct host_cmd_ds_command *cmd,
844 struct host_cmd_ds_rf_reg_access *cau_reg; 857 struct host_cmd_ds_rf_reg_access *cau_reg;
845 858
846 cmd->size = cpu_to_le16(sizeof(*cau_reg) + S_DS_GEN); 859 cmd->size = cpu_to_le16(sizeof(*cau_reg) + S_DS_GEN);
847 cau_reg = (struct host_cmd_ds_rf_reg_access *) 860 cau_reg = &cmd->params.rf_reg;
848 &cmd->params.rf_reg;
849 cau_reg->action = cpu_to_le16(cmd_action); 861 cau_reg->action = cpu_to_le16(cmd_action);
850 cau_reg->offset = 862 cau_reg->offset =
851 cpu_to_le16((u16) le32_to_cpu(reg_rw->offset)); 863 cpu_to_le16((u16) le32_to_cpu(reg_rw->offset));
@@ -856,7 +868,6 @@ static int mwifiex_cmd_reg_access(struct host_cmd_ds_command *cmd,
856 { 868 {
857 struct mwifiex_ds_read_eeprom *rd_eeprom = data_buf; 869 struct mwifiex_ds_read_eeprom *rd_eeprom = data_buf;
858 struct host_cmd_ds_802_11_eeprom_access *cmd_eeprom = 870 struct host_cmd_ds_802_11_eeprom_access *cmd_eeprom =
859 (struct host_cmd_ds_802_11_eeprom_access *)
860 &cmd->params.eeprom; 871 &cmd->params.eeprom;
861 872
862 cmd->size = cpu_to_le16(sizeof(*cmd_eeprom) + S_DS_GEN); 873 cmd->size = cpu_to_le16(sizeof(*cmd_eeprom) + S_DS_GEN);
@@ -1055,6 +1066,10 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no,
1055 ret = mwifiex_cmd_tx_power_cfg(cmd_ptr, cmd_action, 1066 ret = mwifiex_cmd_tx_power_cfg(cmd_ptr, cmd_action,
1056 data_buf); 1067 data_buf);
1057 break; 1068 break;
1069 case HostCmd_CMD_RF_TX_PWR:
1070 ret = mwifiex_cmd_rf_tx_power(priv, cmd_ptr, cmd_action,
1071 data_buf);
1072 break;
1058 case HostCmd_CMD_802_11_PS_MODE_ENH: 1073 case HostCmd_CMD_802_11_PS_MODE_ENH:
1059 ret = mwifiex_cmd_enh_power_mode(priv, cmd_ptr, cmd_action, 1074 ret = mwifiex_cmd_enh_power_mode(priv, cmd_ptr, cmd_action,
1060 (uint16_t)cmd_oid, data_buf); 1075 (uint16_t)cmd_oid, data_buf);
@@ -1283,7 +1298,7 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta)
1283 priv->data_rate = 0; 1298 priv->data_rate = 0;
1284 1299
1285 /* get tx power */ 1300 /* get tx power */
1286 ret = mwifiex_send_cmd_async(priv, HostCmd_CMD_TXPWR_CFG, 1301 ret = mwifiex_send_cmd_async(priv, HostCmd_CMD_RF_TX_PWR,
1287 HostCmd_ACT_GEN_GET, 0, NULL); 1302 HostCmd_ACT_GEN_GET, 0, NULL);
1288 if (ret) 1303 if (ret)
1289 return -1; 1304 return -1;
diff --git a/drivers/net/wireless/mwifiex/sta_cmdresp.c b/drivers/net/wireless/mwifiex/sta_cmdresp.c
index a79ed9bd969..78fc352c85c 100644
--- a/drivers/net/wireless/mwifiex/sta_cmdresp.c
+++ b/drivers/net/wireless/mwifiex/sta_cmdresp.c
@@ -227,7 +227,7 @@ static int mwifiex_ret_get_log(struct mwifiex_private *priv,
227 struct mwifiex_ds_get_stats *stats) 227 struct mwifiex_ds_get_stats *stats)
228{ 228{
229 struct host_cmd_ds_802_11_get_log *get_log = 229 struct host_cmd_ds_802_11_get_log *get_log =
230 (struct host_cmd_ds_802_11_get_log *) &resp->params.get_log; 230 &resp->params.get_log;
231 231
232 if (stats) { 232 if (stats) {
233 stats->mcast_tx_frame = le32_to_cpu(get_log->mcast_tx_frame); 233 stats->mcast_tx_frame = le32_to_cpu(get_log->mcast_tx_frame);
@@ -282,7 +282,7 @@ static int mwifiex_ret_tx_rate_cfg(struct mwifiex_private *priv,
282 u32 i; 282 u32 i;
283 int ret = 0; 283 int ret = 0;
284 284
285 tlv_buf = (u8 *) ((u8 *) rate_cfg) + 285 tlv_buf = ((u8 *)rate_cfg) +
286 sizeof(struct host_cmd_ds_tx_rate_cfg); 286 sizeof(struct host_cmd_ds_tx_rate_cfg);
287 tlv_buf_len = *(u16 *) (tlv_buf + sizeof(u16)); 287 tlv_buf_len = *(u16 *) (tlv_buf + sizeof(u16));
288 288
@@ -451,6 +451,30 @@ static int mwifiex_ret_tx_power_cfg(struct mwifiex_private *priv,
451} 451}
452 452
453/* 453/*
454 * This function handles the command response of get RF Tx power.
455 */
456static int mwifiex_ret_rf_tx_power(struct mwifiex_private *priv,
457 struct host_cmd_ds_command *resp)
458{
459 struct host_cmd_ds_rf_tx_pwr *txp = &resp->params.txp;
460 u16 action = le16_to_cpu(txp->action);
461
462 priv->tx_power_level = le16_to_cpu(txp->cur_level);
463
464 if (action == HostCmd_ACT_GEN_GET) {
465 priv->max_tx_power_level = txp->max_power;
466 priv->min_tx_power_level = txp->min_power;
467 }
468
469 dev_dbg(priv->adapter->dev,
470 "Current TxPower Level=%d, Max Power=%d, Min Power=%d\n",
471 priv->tx_power_level, priv->max_tx_power_level,
472 priv->min_tx_power_level);
473
474 return 0;
475}
476
477/*
454 * This function handles the command response of set/get MAC address. 478 * This function handles the command response of set/get MAC address.
455 * 479 *
456 * Handling includes saving the MAC address in driver. 480 * Handling includes saving the MAC address in driver.
@@ -679,39 +703,33 @@ static int mwifiex_ret_reg_access(u16 type, struct host_cmd_ds_command *resp,
679 eeprom = data_buf; 703 eeprom = data_buf;
680 switch (type) { 704 switch (type) {
681 case HostCmd_CMD_MAC_REG_ACCESS: 705 case HostCmd_CMD_MAC_REG_ACCESS:
682 r.mac = (struct host_cmd_ds_mac_reg_access *) 706 r.mac = &resp->params.mac_reg;
683 &resp->params.mac_reg;
684 reg_rw->offset = cpu_to_le32((u32) le16_to_cpu(r.mac->offset)); 707 reg_rw->offset = cpu_to_le32((u32) le16_to_cpu(r.mac->offset));
685 reg_rw->value = r.mac->value; 708 reg_rw->value = r.mac->value;
686 break; 709 break;
687 case HostCmd_CMD_BBP_REG_ACCESS: 710 case HostCmd_CMD_BBP_REG_ACCESS:
688 r.bbp = (struct host_cmd_ds_bbp_reg_access *) 711 r.bbp = &resp->params.bbp_reg;
689 &resp->params.bbp_reg;
690 reg_rw->offset = cpu_to_le32((u32) le16_to_cpu(r.bbp->offset)); 712 reg_rw->offset = cpu_to_le32((u32) le16_to_cpu(r.bbp->offset));
691 reg_rw->value = cpu_to_le32((u32) r.bbp->value); 713 reg_rw->value = cpu_to_le32((u32) r.bbp->value);
692 break; 714 break;
693 715
694 case HostCmd_CMD_RF_REG_ACCESS: 716 case HostCmd_CMD_RF_REG_ACCESS:
695 r.rf = (struct host_cmd_ds_rf_reg_access *) 717 r.rf = &resp->params.rf_reg;
696 &resp->params.rf_reg;
697 reg_rw->offset = cpu_to_le32((u32) le16_to_cpu(r.rf->offset)); 718 reg_rw->offset = cpu_to_le32((u32) le16_to_cpu(r.rf->offset));
698 reg_rw->value = cpu_to_le32((u32) r.bbp->value); 719 reg_rw->value = cpu_to_le32((u32) r.bbp->value);
699 break; 720 break;
700 case HostCmd_CMD_PMIC_REG_ACCESS: 721 case HostCmd_CMD_PMIC_REG_ACCESS:
701 r.pmic = (struct host_cmd_ds_pmic_reg_access *) 722 r.pmic = &resp->params.pmic_reg;
702 &resp->params.pmic_reg;
703 reg_rw->offset = cpu_to_le32((u32) le16_to_cpu(r.pmic->offset)); 723 reg_rw->offset = cpu_to_le32((u32) le16_to_cpu(r.pmic->offset));
704 reg_rw->value = cpu_to_le32((u32) r.pmic->value); 724 reg_rw->value = cpu_to_le32((u32) r.pmic->value);
705 break; 725 break;
706 case HostCmd_CMD_CAU_REG_ACCESS: 726 case HostCmd_CMD_CAU_REG_ACCESS:
707 r.rf = (struct host_cmd_ds_rf_reg_access *) 727 r.rf = &resp->params.rf_reg;
708 &resp->params.rf_reg;
709 reg_rw->offset = cpu_to_le32((u32) le16_to_cpu(r.rf->offset)); 728 reg_rw->offset = cpu_to_le32((u32) le16_to_cpu(r.rf->offset));
710 reg_rw->value = cpu_to_le32((u32) r.rf->value); 729 reg_rw->value = cpu_to_le32((u32) r.rf->value);
711 break; 730 break;
712 case HostCmd_CMD_802_11_EEPROM_ACCESS: 731 case HostCmd_CMD_802_11_EEPROM_ACCESS:
713 r.eeprom = (struct host_cmd_ds_802_11_eeprom_access *) 732 r.eeprom = &resp->params.eeprom;
714 &resp->params.eeprom;
715 pr_debug("info: EEPROM read len=%x\n", r.eeprom->byte_count); 733 pr_debug("info: EEPROM read len=%x\n", r.eeprom->byte_count);
716 if (le16_to_cpu(eeprom->byte_count) < 734 if (le16_to_cpu(eeprom->byte_count) <
717 le16_to_cpu(r.eeprom->byte_count)) { 735 le16_to_cpu(r.eeprom->byte_count)) {
@@ -787,7 +805,7 @@ static int mwifiex_ret_subsc_evt(struct mwifiex_private *priv,
787 struct mwifiex_ds_misc_subsc_evt *sub_event) 805 struct mwifiex_ds_misc_subsc_evt *sub_event)
788{ 806{
789 struct host_cmd_ds_802_11_subsc_evt *cmd_sub_event = 807 struct host_cmd_ds_802_11_subsc_evt *cmd_sub_event =
790 (struct host_cmd_ds_802_11_subsc_evt *)&resp->params.subsc_evt; 808 &resp->params.subsc_evt;
791 809
792 /* For every subscribe event command (Get/Set/Clear), FW reports the 810 /* For every subscribe event command (Get/Set/Clear), FW reports the
793 * current set of subscribed events*/ 811 * current set of subscribed events*/
@@ -847,6 +865,9 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no,
847 case HostCmd_CMD_TXPWR_CFG: 865 case HostCmd_CMD_TXPWR_CFG:
848 ret = mwifiex_ret_tx_power_cfg(priv, resp); 866 ret = mwifiex_ret_tx_power_cfg(priv, resp);
849 break; 867 break;
868 case HostCmd_CMD_RF_TX_PWR:
869 ret = mwifiex_ret_rf_tx_power(priv, resp);
870 break;
850 case HostCmd_CMD_802_11_PS_MODE_ENH: 871 case HostCmd_CMD_802_11_PS_MODE_ENH:
851 ret = mwifiex_ret_enh_power_mode(priv, resp, data_buf); 872 ret = mwifiex_ret_enh_power_mode(priv, resp, data_buf);
852 break; 873 break;
diff --git a/drivers/net/wireless/mwifiex/sta_event.c b/drivers/net/wireless/mwifiex/sta_event.c
index 11e731f3581..b8614a82546 100644
--- a/drivers/net/wireless/mwifiex/sta_event.c
+++ b/drivers/net/wireless/mwifiex/sta_event.c
@@ -422,7 +422,7 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
422 422
423 if (len != -1) { 423 if (len != -1) {
424 sinfo.filled = STATION_INFO_ASSOC_REQ_IES; 424 sinfo.filled = STATION_INFO_ASSOC_REQ_IES;
425 sinfo.assoc_req_ies = (u8 *)&event->data[len]; 425 sinfo.assoc_req_ies = &event->data[len];
426 len = (u8 *)sinfo.assoc_req_ies - 426 len = (u8 *)sinfo.assoc_req_ies -
427 (u8 *)&event->frame_control; 427 (u8 *)&event->frame_control;
428 sinfo.assoc_req_ies_len = 428 sinfo.assoc_req_ies_len =
diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c
index 106c449477b..f2fd2423214 100644
--- a/drivers/net/wireless/mwifiex/sta_ioctl.c
+++ b/drivers/net/wireless/mwifiex/sta_ioctl.c
@@ -66,9 +66,6 @@ int mwifiex_wait_queue_complete(struct mwifiex_adapter *adapter)
66 dev_dbg(adapter->dev, "cmd pending\n"); 66 dev_dbg(adapter->dev, "cmd pending\n");
67 atomic_inc(&adapter->cmd_pending); 67 atomic_inc(&adapter->cmd_pending);
68 68
69 /* Status pending, wake up main process */
70 queue_work(adapter->workqueue, &adapter->main_work);
71
72 /* Wait for completion */ 69 /* Wait for completion */
73 wait_event_interruptible(adapter->cmd_wait_q.wait, 70 wait_event_interruptible(adapter->cmd_wait_q.wait,
74 *(cmd_queued->condition)); 71 *(cmd_queued->condition));
diff --git a/drivers/net/wireless/mwifiex/uap_cmd.c b/drivers/net/wireless/mwifiex/uap_cmd.c
index 89f9a2a45de..f40e93fe894 100644
--- a/drivers/net/wireless/mwifiex/uap_cmd.c
+++ b/drivers/net/wireless/mwifiex/uap_cmd.c
@@ -26,6 +26,7 @@ int mwifiex_set_secure_params(struct mwifiex_private *priv,
26 struct mwifiex_uap_bss_param *bss_config, 26 struct mwifiex_uap_bss_param *bss_config,
27 struct cfg80211_ap_settings *params) { 27 struct cfg80211_ap_settings *params) {
28 int i; 28 int i;
29 struct mwifiex_wep_key wep_key;
29 30
30 if (!params->privacy) { 31 if (!params->privacy) {
31 bss_config->protocol = PROTOCOL_NO_SECURITY; 32 bss_config->protocol = PROTOCOL_NO_SECURITY;
@@ -65,7 +66,7 @@ int mwifiex_set_secure_params(struct mwifiex_private *priv,
65 } 66 }
66 if (params->crypto.wpa_versions & 67 if (params->crypto.wpa_versions &
67 NL80211_WPA_VERSION_2) { 68 NL80211_WPA_VERSION_2) {
68 bss_config->protocol = PROTOCOL_WPA2; 69 bss_config->protocol |= PROTOCOL_WPA2;
69 bss_config->key_mgmt = KEY_MGMT_EAP; 70 bss_config->key_mgmt = KEY_MGMT_EAP;
70 } 71 }
71 break; 72 break;
@@ -77,7 +78,7 @@ int mwifiex_set_secure_params(struct mwifiex_private *priv,
77 } 78 }
78 if (params->crypto.wpa_versions & 79 if (params->crypto.wpa_versions &
79 NL80211_WPA_VERSION_2) { 80 NL80211_WPA_VERSION_2) {
80 bss_config->protocol = PROTOCOL_WPA2; 81 bss_config->protocol |= PROTOCOL_WPA2;
81 bss_config->key_mgmt = KEY_MGMT_PSK; 82 bss_config->key_mgmt = KEY_MGMT_PSK;
82 } 83 }
83 break; 84 break;
@@ -91,10 +92,19 @@ int mwifiex_set_secure_params(struct mwifiex_private *priv,
91 case WLAN_CIPHER_SUITE_WEP104: 92 case WLAN_CIPHER_SUITE_WEP104:
92 break; 93 break;
93 case WLAN_CIPHER_SUITE_TKIP: 94 case WLAN_CIPHER_SUITE_TKIP:
94 bss_config->wpa_cfg.pairwise_cipher_wpa = CIPHER_TKIP; 95 if (params->crypto.wpa_versions & NL80211_WPA_VERSION_1)
96 bss_config->wpa_cfg.pairwise_cipher_wpa |=
97 CIPHER_TKIP;
98 if (params->crypto.wpa_versions & NL80211_WPA_VERSION_2)
99 bss_config->wpa_cfg.pairwise_cipher_wpa2 |=
100 CIPHER_TKIP;
95 break; 101 break;
96 case WLAN_CIPHER_SUITE_CCMP: 102 case WLAN_CIPHER_SUITE_CCMP:
97 bss_config->wpa_cfg.pairwise_cipher_wpa2 = 103 if (params->crypto.wpa_versions & NL80211_WPA_VERSION_1)
104 bss_config->wpa_cfg.pairwise_cipher_wpa |=
105 CIPHER_AES_CCMP;
106 if (params->crypto.wpa_versions & NL80211_WPA_VERSION_2)
107 bss_config->wpa_cfg.pairwise_cipher_wpa2 |=
98 CIPHER_AES_CCMP; 108 CIPHER_AES_CCMP;
99 default: 109 default:
100 break; 110 break;
@@ -104,6 +114,27 @@ int mwifiex_set_secure_params(struct mwifiex_private *priv,
104 switch (params->crypto.cipher_group) { 114 switch (params->crypto.cipher_group) {
105 case WLAN_CIPHER_SUITE_WEP40: 115 case WLAN_CIPHER_SUITE_WEP40:
106 case WLAN_CIPHER_SUITE_WEP104: 116 case WLAN_CIPHER_SUITE_WEP104:
117 if (priv->sec_info.wep_enabled) {
118 bss_config->protocol = PROTOCOL_STATIC_WEP;
119 bss_config->key_mgmt = KEY_MGMT_NONE;
120 bss_config->wpa_cfg.length = 0;
121
122 for (i = 0; i < NUM_WEP_KEYS; i++) {
123 wep_key = priv->wep_key[i];
124 bss_config->wep_cfg[i].key_index = i;
125
126 if (priv->wep_key_curr_index == i)
127 bss_config->wep_cfg[i].is_default = 1;
128 else
129 bss_config->wep_cfg[i].is_default = 0;
130
131 bss_config->wep_cfg[i].length =
132 wep_key.key_length;
133 memcpy(&bss_config->wep_cfg[i].key,
134 &wep_key.key_material,
135 wep_key.key_length);
136 }
137 }
107 break; 138 break;
108 case WLAN_CIPHER_SUITE_TKIP: 139 case WLAN_CIPHER_SUITE_TKIP:
109 bss_config->wpa_cfg.group_cipher = CIPHER_TKIP; 140 bss_config->wpa_cfg.group_cipher = CIPHER_TKIP;
@@ -118,6 +149,33 @@ int mwifiex_set_secure_params(struct mwifiex_private *priv,
118 return 0; 149 return 0;
119} 150}
120 151
152/* This function updates 11n related parameters from IE and sets them into
153 * bss_config structure.
154 */
155void
156mwifiex_set_ht_params(struct mwifiex_private *priv,
157 struct mwifiex_uap_bss_param *bss_cfg,
158 struct cfg80211_ap_settings *params)
159{
160 const u8 *ht_ie;
161
162 if (!ISSUPP_11NENABLED(priv->adapter->fw_cap_info))
163 return;
164
165 ht_ie = cfg80211_find_ie(WLAN_EID_HT_CAPABILITY, params->beacon.tail,
166 params->beacon.tail_len);
167 if (ht_ie) {
168 memcpy(&bss_cfg->ht_cap, ht_ie + 2,
169 sizeof(struct ieee80211_ht_cap));
170 } else {
171 memset(&bss_cfg->ht_cap , 0, sizeof(struct ieee80211_ht_cap));
172 bss_cfg->ht_cap.cap_info = cpu_to_le16(MWIFIEX_DEF_HT_CAP);
173 bss_cfg->ht_cap.ampdu_params_info = MWIFIEX_DEF_AMPDU;
174 }
175
176 return;
177}
178
121/* This function initializes some of mwifiex_uap_bss_param variables. 179/* This function initializes some of mwifiex_uap_bss_param variables.
122 * This helps FW in ignoring invalid values. These values may or may not 180 * This helps FW in ignoring invalid values. These values may or may not
123 * be get updated to valid ones at later stage. 181 * be get updated to valid ones at later stage.
@@ -135,6 +193,120 @@ void mwifiex_set_sys_config_invalid_data(struct mwifiex_uap_bss_param *config)
135} 193}
136 194
137/* This function parses BSS related parameters from structure 195/* This function parses BSS related parameters from structure
196 * and prepares TLVs specific to WPA/WPA2 security.
197 * These TLVs are appended to command buffer.
198 */
199static void
200mwifiex_uap_bss_wpa(u8 **tlv_buf, void *cmd_buf, u16 *param_size)
201{
202 struct host_cmd_tlv_pwk_cipher *pwk_cipher;
203 struct host_cmd_tlv_gwk_cipher *gwk_cipher;
204 struct host_cmd_tlv_passphrase *passphrase;
205 struct host_cmd_tlv_akmp *tlv_akmp;
206 struct mwifiex_uap_bss_param *bss_cfg = cmd_buf;
207 u16 cmd_size = *param_size;
208 u8 *tlv = *tlv_buf;
209
210 tlv_akmp = (struct host_cmd_tlv_akmp *)tlv;
211 tlv_akmp->tlv.type = cpu_to_le16(TLV_TYPE_UAP_AKMP);
212 tlv_akmp->tlv.len = cpu_to_le16(sizeof(struct host_cmd_tlv_akmp) -
213 sizeof(struct host_cmd_tlv));
214 tlv_akmp->key_mgmt_operation = cpu_to_le16(bss_cfg->key_mgmt_operation);
215 tlv_akmp->key_mgmt = cpu_to_le16(bss_cfg->key_mgmt);
216 cmd_size += sizeof(struct host_cmd_tlv_akmp);
217 tlv += sizeof(struct host_cmd_tlv_akmp);
218
219 if (bss_cfg->wpa_cfg.pairwise_cipher_wpa & VALID_CIPHER_BITMAP) {
220 pwk_cipher = (struct host_cmd_tlv_pwk_cipher *)tlv;
221 pwk_cipher->tlv.type = cpu_to_le16(TLV_TYPE_PWK_CIPHER);
222 pwk_cipher->tlv.len =
223 cpu_to_le16(sizeof(struct host_cmd_tlv_pwk_cipher) -
224 sizeof(struct host_cmd_tlv));
225 pwk_cipher->proto = cpu_to_le16(PROTOCOL_WPA);
226 pwk_cipher->cipher = bss_cfg->wpa_cfg.pairwise_cipher_wpa;
227 cmd_size += sizeof(struct host_cmd_tlv_pwk_cipher);
228 tlv += sizeof(struct host_cmd_tlv_pwk_cipher);
229 }
230
231 if (bss_cfg->wpa_cfg.pairwise_cipher_wpa2 & VALID_CIPHER_BITMAP) {
232 pwk_cipher = (struct host_cmd_tlv_pwk_cipher *)tlv;
233 pwk_cipher->tlv.type = cpu_to_le16(TLV_TYPE_PWK_CIPHER);
234 pwk_cipher->tlv.len =
235 cpu_to_le16(sizeof(struct host_cmd_tlv_pwk_cipher) -
236 sizeof(struct host_cmd_tlv));
237 pwk_cipher->proto = cpu_to_le16(PROTOCOL_WPA2);
238 pwk_cipher->cipher = bss_cfg->wpa_cfg.pairwise_cipher_wpa2;
239 cmd_size += sizeof(struct host_cmd_tlv_pwk_cipher);
240 tlv += sizeof(struct host_cmd_tlv_pwk_cipher);
241 }
242
243 if (bss_cfg->wpa_cfg.group_cipher & VALID_CIPHER_BITMAP) {
244 gwk_cipher = (struct host_cmd_tlv_gwk_cipher *)tlv;
245 gwk_cipher->tlv.type = cpu_to_le16(TLV_TYPE_GWK_CIPHER);
246 gwk_cipher->tlv.len =
247 cpu_to_le16(sizeof(struct host_cmd_tlv_gwk_cipher) -
248 sizeof(struct host_cmd_tlv));
249 gwk_cipher->cipher = bss_cfg->wpa_cfg.group_cipher;
250 cmd_size += sizeof(struct host_cmd_tlv_gwk_cipher);
251 tlv += sizeof(struct host_cmd_tlv_gwk_cipher);
252 }
253
254 if (bss_cfg->wpa_cfg.length) {
255 passphrase = (struct host_cmd_tlv_passphrase *)tlv;
256 passphrase->tlv.type = cpu_to_le16(TLV_TYPE_UAP_WPA_PASSPHRASE);
257 passphrase->tlv.len = cpu_to_le16(bss_cfg->wpa_cfg.length);
258 memcpy(passphrase->passphrase, bss_cfg->wpa_cfg.passphrase,
259 bss_cfg->wpa_cfg.length);
260 cmd_size += sizeof(struct host_cmd_tlv) +
261 bss_cfg->wpa_cfg.length;
262 tlv += sizeof(struct host_cmd_tlv) + bss_cfg->wpa_cfg.length;
263 }
264
265 *param_size = cmd_size;
266 *tlv_buf = tlv;
267
268 return;
269}
270
271/* This function parses BSS related parameters from structure
272 * and prepares TLVs specific to WEP encryption.
273 * These TLVs are appended to command buffer.
274 */
275static void
276mwifiex_uap_bss_wep(u8 **tlv_buf, void *cmd_buf, u16 *param_size)
277{
278 struct host_cmd_tlv_wep_key *wep_key;
279 u16 cmd_size = *param_size;
280 int i;
281 u8 *tlv = *tlv_buf;
282 struct mwifiex_uap_bss_param *bss_cfg = cmd_buf;
283
284 for (i = 0; i < NUM_WEP_KEYS; i++) {
285 if (bss_cfg->wep_cfg[i].length &&
286 (bss_cfg->wep_cfg[i].length == WLAN_KEY_LEN_WEP40 ||
287 bss_cfg->wep_cfg[i].length == WLAN_KEY_LEN_WEP104)) {
288 wep_key = (struct host_cmd_tlv_wep_key *)tlv;
289 wep_key->tlv.type = cpu_to_le16(TLV_TYPE_UAP_WEP_KEY);
290 wep_key->tlv.len =
291 cpu_to_le16(bss_cfg->wep_cfg[i].length + 2);
292 wep_key->key_index = bss_cfg->wep_cfg[i].key_index;
293 wep_key->is_default = bss_cfg->wep_cfg[i].is_default;
294 memcpy(wep_key->key, bss_cfg->wep_cfg[i].key,
295 bss_cfg->wep_cfg[i].length);
296 cmd_size += sizeof(struct host_cmd_tlv) + 2 +
297 bss_cfg->wep_cfg[i].length;
298 tlv += sizeof(struct host_cmd_tlv) + 2 +
299 bss_cfg->wep_cfg[i].length;
300 }
301 }
302
303 *param_size = cmd_size;
304 *tlv_buf = tlv;
305
306 return;
307}
308
309/* This function parses BSS related parameters from structure
138 * and prepares TLVs. These TLVs are appended to command buffer. 310 * and prepares TLVs. These TLVs are appended to command buffer.
139*/ 311*/
140static int 312static int
@@ -148,12 +320,9 @@ mwifiex_uap_bss_param_prepare(u8 *tlv, void *cmd_buf, u16 *param_size)
148 struct host_cmd_tlv_frag_threshold *frag_threshold; 320 struct host_cmd_tlv_frag_threshold *frag_threshold;
149 struct host_cmd_tlv_rts_threshold *rts_threshold; 321 struct host_cmd_tlv_rts_threshold *rts_threshold;
150 struct host_cmd_tlv_retry_limit *retry_limit; 322 struct host_cmd_tlv_retry_limit *retry_limit;
151 struct host_cmd_tlv_pwk_cipher *pwk_cipher;
152 struct host_cmd_tlv_gwk_cipher *gwk_cipher;
153 struct host_cmd_tlv_encrypt_protocol *encrypt_protocol; 323 struct host_cmd_tlv_encrypt_protocol *encrypt_protocol;
154 struct host_cmd_tlv_auth_type *auth_type; 324 struct host_cmd_tlv_auth_type *auth_type;
155 struct host_cmd_tlv_passphrase *passphrase; 325 struct mwifiex_ie_types_htcap *htcap;
156 struct host_cmd_tlv_akmp *tlv_akmp;
157 struct mwifiex_uap_bss_param *bss_cfg = cmd_buf; 326 struct mwifiex_uap_bss_param *bss_cfg = cmd_buf;
158 u16 cmd_size = *param_size; 327 u16 cmd_size = *param_size;
159 328
@@ -243,70 +412,11 @@ mwifiex_uap_bss_param_prepare(u8 *tlv, void *cmd_buf, u16 *param_size)
243 } 412 }
244 if ((bss_cfg->protocol & PROTOCOL_WPA) || 413 if ((bss_cfg->protocol & PROTOCOL_WPA) ||
245 (bss_cfg->protocol & PROTOCOL_WPA2) || 414 (bss_cfg->protocol & PROTOCOL_WPA2) ||
246 (bss_cfg->protocol & PROTOCOL_EAP)) { 415 (bss_cfg->protocol & PROTOCOL_EAP))
247 tlv_akmp = (struct host_cmd_tlv_akmp *)tlv; 416 mwifiex_uap_bss_wpa(&tlv, cmd_buf, &cmd_size);
248 tlv_akmp->tlv.type = cpu_to_le16(TLV_TYPE_UAP_AKMP); 417 else
249 tlv_akmp->tlv.len = 418 mwifiex_uap_bss_wep(&tlv, cmd_buf, &cmd_size);
250 cpu_to_le16(sizeof(struct host_cmd_tlv_akmp) - 419
251 sizeof(struct host_cmd_tlv));
252 tlv_akmp->key_mgmt_operation =
253 cpu_to_le16(bss_cfg->key_mgmt_operation);
254 tlv_akmp->key_mgmt = cpu_to_le16(bss_cfg->key_mgmt);
255 cmd_size += sizeof(struct host_cmd_tlv_akmp);
256 tlv += sizeof(struct host_cmd_tlv_akmp);
257
258 if (bss_cfg->wpa_cfg.pairwise_cipher_wpa &
259 VALID_CIPHER_BITMAP) {
260 pwk_cipher = (struct host_cmd_tlv_pwk_cipher *)tlv;
261 pwk_cipher->tlv.type =
262 cpu_to_le16(TLV_TYPE_PWK_CIPHER);
263 pwk_cipher->tlv.len = cpu_to_le16(
264 sizeof(struct host_cmd_tlv_pwk_cipher) -
265 sizeof(struct host_cmd_tlv));
266 pwk_cipher->proto = cpu_to_le16(PROTOCOL_WPA);
267 pwk_cipher->cipher =
268 bss_cfg->wpa_cfg.pairwise_cipher_wpa;
269 cmd_size += sizeof(struct host_cmd_tlv_pwk_cipher);
270 tlv += sizeof(struct host_cmd_tlv_pwk_cipher);
271 }
272 if (bss_cfg->wpa_cfg.pairwise_cipher_wpa2 &
273 VALID_CIPHER_BITMAP) {
274 pwk_cipher = (struct host_cmd_tlv_pwk_cipher *)tlv;
275 pwk_cipher->tlv.type = cpu_to_le16(TLV_TYPE_PWK_CIPHER);
276 pwk_cipher->tlv.len = cpu_to_le16(
277 sizeof(struct host_cmd_tlv_pwk_cipher) -
278 sizeof(struct host_cmd_tlv));
279 pwk_cipher->proto = cpu_to_le16(PROTOCOL_WPA2);
280 pwk_cipher->cipher =
281 bss_cfg->wpa_cfg.pairwise_cipher_wpa2;
282 cmd_size += sizeof(struct host_cmd_tlv_pwk_cipher);
283 tlv += sizeof(struct host_cmd_tlv_pwk_cipher);
284 }
285 if (bss_cfg->wpa_cfg.group_cipher & VALID_CIPHER_BITMAP) {
286 gwk_cipher = (struct host_cmd_tlv_gwk_cipher *)tlv;
287 gwk_cipher->tlv.type = cpu_to_le16(TLV_TYPE_GWK_CIPHER);
288 gwk_cipher->tlv.len = cpu_to_le16(
289 sizeof(struct host_cmd_tlv_gwk_cipher) -
290 sizeof(struct host_cmd_tlv));
291 gwk_cipher->cipher = bss_cfg->wpa_cfg.group_cipher;
292 cmd_size += sizeof(struct host_cmd_tlv_gwk_cipher);
293 tlv += sizeof(struct host_cmd_tlv_gwk_cipher);
294 }
295 if (bss_cfg->wpa_cfg.length) {
296 passphrase = (struct host_cmd_tlv_passphrase *)tlv;
297 passphrase->tlv.type =
298 cpu_to_le16(TLV_TYPE_UAP_WPA_PASSPHRASE);
299 passphrase->tlv.len =
300 cpu_to_le16(bss_cfg->wpa_cfg.length);
301 memcpy(passphrase->passphrase,
302 bss_cfg->wpa_cfg.passphrase,
303 bss_cfg->wpa_cfg.length);
304 cmd_size += sizeof(struct host_cmd_tlv) +
305 bss_cfg->wpa_cfg.length;
306 tlv += sizeof(struct host_cmd_tlv) +
307 bss_cfg->wpa_cfg.length;
308 }
309 }
310 if ((bss_cfg->auth_mode <= WLAN_AUTH_SHARED_KEY) || 420 if ((bss_cfg->auth_mode <= WLAN_AUTH_SHARED_KEY) ||
311 (bss_cfg->auth_mode == MWIFIEX_AUTH_MODE_AUTO)) { 421 (bss_cfg->auth_mode == MWIFIEX_AUTH_MODE_AUTO)) {
312 auth_type = (struct host_cmd_tlv_auth_type *)tlv; 422 auth_type = (struct host_cmd_tlv_auth_type *)tlv;
@@ -330,6 +440,25 @@ mwifiex_uap_bss_param_prepare(u8 *tlv, void *cmd_buf, u16 *param_size)
330 tlv += sizeof(struct host_cmd_tlv_encrypt_protocol); 440 tlv += sizeof(struct host_cmd_tlv_encrypt_protocol);
331 } 441 }
332 442
443 if (bss_cfg->ht_cap.cap_info) {
444 htcap = (struct mwifiex_ie_types_htcap *)tlv;
445 htcap->header.type = cpu_to_le16(WLAN_EID_HT_CAPABILITY);
446 htcap->header.len =
447 cpu_to_le16(sizeof(struct ieee80211_ht_cap));
448 htcap->ht_cap.cap_info = bss_cfg->ht_cap.cap_info;
449 htcap->ht_cap.ampdu_params_info =
450 bss_cfg->ht_cap.ampdu_params_info;
451 memcpy(&htcap->ht_cap.mcs, &bss_cfg->ht_cap.mcs,
452 sizeof(struct ieee80211_mcs_info));
453 htcap->ht_cap.extended_ht_cap_info =
454 bss_cfg->ht_cap.extended_ht_cap_info;
455 htcap->ht_cap.tx_BF_cap_info = bss_cfg->ht_cap.tx_BF_cap_info;
456 htcap->ht_cap.antenna_selection_info =
457 bss_cfg->ht_cap.antenna_selection_info;
458 cmd_size += sizeof(struct mwifiex_ie_types_htcap);
459 tlv += sizeof(struct mwifiex_ie_types_htcap);
460 }
461
333 *param_size = cmd_size; 462 *param_size = cmd_size;
334 463
335 return 0; 464 return 0;
@@ -421,33 +550,3 @@ int mwifiex_uap_prepare_cmd(struct mwifiex_private *priv, u16 cmd_no,
421 550
422 return 0; 551 return 0;
423} 552}
424
425/* This function sets the RF channel for AP.
426 *
427 * This function populates channel information in AP config structure
428 * and sends command to configure channel information in AP.
429 */
430int mwifiex_uap_set_channel(struct mwifiex_private *priv, int channel)
431{
432 struct mwifiex_uap_bss_param *bss_cfg;
433 struct wiphy *wiphy = priv->wdev->wiphy;
434
435 bss_cfg = kzalloc(sizeof(struct mwifiex_uap_bss_param), GFP_KERNEL);
436 if (!bss_cfg)
437 return -ENOMEM;
438
439 mwifiex_set_sys_config_invalid_data(bss_cfg);
440 bss_cfg->band_cfg = BAND_CONFIG_MANUAL;
441 bss_cfg->channel = channel;
442
443 if (mwifiex_send_cmd_async(priv, HostCmd_CMD_UAP_SYS_CONFIG,
444 HostCmd_ACT_GEN_SET,
445 UAP_BSS_PARAMS_I, bss_cfg)) {
446 wiphy_err(wiphy, "Failed to set the uAP channel\n");
447 kfree(bss_cfg);
448 return -1;
449 }
450
451 kfree(bss_cfg);
452 return 0;
453}
diff --git a/drivers/net/wireless/orinoco/cfg.c b/drivers/net/wireless/orinoco/cfg.c
index f7b15b8934f..e15675585fb 100644
--- a/drivers/net/wireless/orinoco/cfg.c
+++ b/drivers/net/wireless/orinoco/cfg.c
@@ -160,10 +160,9 @@ static int orinoco_scan(struct wiphy *wiphy, struct net_device *dev,
160 return err; 160 return err;
161} 161}
162 162
163static int orinoco_set_channel(struct wiphy *wiphy, 163static int orinoco_set_monitor_channel(struct wiphy *wiphy,
164 struct net_device *netdev, 164 struct ieee80211_channel *chan,
165 struct ieee80211_channel *chan, 165 enum nl80211_channel_type channel_type)
166 enum nl80211_channel_type channel_type)
167{ 166{
168 struct orinoco_private *priv = wiphy_priv(wiphy); 167 struct orinoco_private *priv = wiphy_priv(wiphy);
169 int err = 0; 168 int err = 0;
@@ -286,7 +285,7 @@ static int orinoco_set_wiphy_params(struct wiphy *wiphy, u32 changed)
286 285
287const struct cfg80211_ops orinoco_cfg_ops = { 286const struct cfg80211_ops orinoco_cfg_ops = {
288 .change_virtual_intf = orinoco_change_vif, 287 .change_virtual_intf = orinoco_change_vif,
289 .set_channel = orinoco_set_channel, 288 .set_monitor_channel = orinoco_set_monitor_channel,
290 .scan = orinoco_scan, 289 .scan = orinoco_scan,
291 .set_wiphy_params = orinoco_set_wiphy_params, 290 .set_wiphy_params = orinoco_set_wiphy_params,
292}; 291};
diff --git a/drivers/net/wireless/p54/eeprom.c b/drivers/net/wireless/p54/eeprom.c
index fa8ce510478..636daf2860c 100644
--- a/drivers/net/wireless/p54/eeprom.c
+++ b/drivers/net/wireless/p54/eeprom.c
@@ -905,7 +905,7 @@ int p54_read_eeprom(struct ieee80211_hw *dev)
905 905
906 while (eeprom_size) { 906 while (eeprom_size) {
907 blocksize = min(eeprom_size, maxblocksize); 907 blocksize = min(eeprom_size, maxblocksize);
908 ret = p54_download_eeprom(priv, (void *) (eeprom + offset), 908 ret = p54_download_eeprom(priv, eeprom + offset,
909 offset, blocksize); 909 offset, blocksize);
910 if (unlikely(ret)) 910 if (unlikely(ret))
911 goto free; 911 goto free;
diff --git a/drivers/net/wireless/p54/fwio.c b/drivers/net/wireless/p54/fwio.c
index 18e82b31afa..9ba85106eec 100644
--- a/drivers/net/wireless/p54/fwio.c
+++ b/drivers/net/wireless/p54/fwio.c
@@ -478,7 +478,7 @@ int p54_scan(struct p54_common *priv, u16 mode, u16 dwell)
478 478
479 if (priv->rxhw == PDR_SYNTH_FRONTEND_LONGBOW) { 479 if (priv->rxhw == PDR_SYNTH_FRONTEND_LONGBOW) {
480 memcpy(&body->longbow.curve_data, 480 memcpy(&body->longbow.curve_data,
481 (void *) entry + sizeof(__le16), 481 entry + sizeof(__le16),
482 priv->curve_data->entry_size); 482 priv->curve_data->entry_size);
483 } else { 483 } else {
484 struct p54_scan_body *chan = &body->normal; 484 struct p54_scan_body *chan = &body->normal;
diff --git a/drivers/net/wireless/prism54/islpci_eth.c b/drivers/net/wireless/prism54/islpci_eth.c
index 266d45bf86f..799e148d037 100644
--- a/drivers/net/wireless/prism54/islpci_eth.c
+++ b/drivers/net/wireless/prism54/islpci_eth.c
@@ -455,7 +455,7 @@ islpci_eth_receive(islpci_private *priv)
455 "Error mapping DMA address\n"); 455 "Error mapping DMA address\n");
456 456
457 /* free the skbuf structure before aborting */ 457 /* free the skbuf structure before aborting */
458 dev_kfree_skb_irq((struct sk_buff *) skb); 458 dev_kfree_skb_irq(skb);
459 skb = NULL; 459 skb = NULL;
460 break; 460 break;
461 } 461 }
diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c
index 86a738bf591..598ca1cafb9 100644
--- a/drivers/net/wireless/ray_cs.c
+++ b/drivers/net/wireless/ray_cs.c
@@ -1849,7 +1849,7 @@ static irqreturn_t ray_interrupt(int irq, void *dev_id)
1849 pr_debug("ray_cs: interrupt for *dev=%p\n", dev); 1849 pr_debug("ray_cs: interrupt for *dev=%p\n", dev);
1850 1850
1851 local = netdev_priv(dev); 1851 local = netdev_priv(dev);
1852 link = (struct pcmcia_device *)local->finder; 1852 link = local->finder;
1853 if (!pcmcia_dev_present(link)) { 1853 if (!pcmcia_dev_present(link)) {
1854 pr_debug( 1854 pr_debug(
1855 "ray_cs interrupt from device not present or suspended.\n"); 1855 "ray_cs interrupt from device not present or suspended.\n");
diff --git a/drivers/net/wireless/rt2x00/Kconfig b/drivers/net/wireless/rt2x00/Kconfig
index 299c3879582..c7548da6573 100644
--- a/drivers/net/wireless/rt2x00/Kconfig
+++ b/drivers/net/wireless/rt2x00/Kconfig
@@ -99,6 +99,14 @@ config RT2800PCI_RT53XX
99 rt2800pci driver. 99 rt2800pci driver.
100 Supported chips: RT5390 100 Supported chips: RT5390
101 101
102config RT2800PCI_RT3290
103 bool "rt2800pci - Include support for rt3290 devices (EXPERIMENTAL)"
104 depends on EXPERIMENTAL
105 default y
106 ---help---
107 This adds support for rt3290 wireless chipset family to the
108 rt2800pci driver.
109 Supported chips: RT3290
102endif 110endif
103 111
104config RT2500USB 112config RT2500USB
diff --git a/drivers/net/wireless/rt2x00/rt2800.h b/drivers/net/wireless/rt2x00/rt2800.h
index 9348521e083..e252e9bafd0 100644
--- a/drivers/net/wireless/rt2x00/rt2800.h
+++ b/drivers/net/wireless/rt2x00/rt2800.h
@@ -51,6 +51,7 @@
51 * RF3320 2.4G 1T1R(RT3350/RT3370/RT3390) 51 * RF3320 2.4G 1T1R(RT3350/RT3370/RT3390)
52 * RF3322 2.4G 2T2R(RT3352/RT3371/RT3372/RT3391/RT3392) 52 * RF3322 2.4G 2T2R(RT3352/RT3371/RT3372/RT3391/RT3392)
53 * RF3053 2.4G/5G 3T3R(RT3883/RT3563/RT3573/RT3593/RT3662) 53 * RF3053 2.4G/5G 3T3R(RT3883/RT3563/RT3573/RT3593/RT3662)
54 * RF5360 2.4G 1T1R
54 * RF5370 2.4G 1T1R 55 * RF5370 2.4G 1T1R
55 * RF5390 2.4G 1T1R 56 * RF5390 2.4G 1T1R
56 */ 57 */
@@ -67,9 +68,12 @@
67#define RF3320 0x000b 68#define RF3320 0x000b
68#define RF3322 0x000c 69#define RF3322 0x000c
69#define RF3053 0x000d 70#define RF3053 0x000d
71#define RF3290 0x3290
72#define RF5360 0x5360
70#define RF5370 0x5370 73#define RF5370 0x5370
71#define RF5372 0x5372 74#define RF5372 0x5372
72#define RF5390 0x5390 75#define RF5390 0x5390
76#define RF5392 0x5392
73 77
74/* 78/*
75 * Chipset revisions. 79 * Chipset revisions.
@@ -114,6 +118,12 @@
114 * Registers. 118 * Registers.
115 */ 119 */
116 120
121
122/*
123 * MAC_CSR0_3290: MAC_CSR0 for RT3290 to identity MAC version number.
124 */
125#define MAC_CSR0_3290 0x0000
126
117/* 127/*
118 * E2PROM_CSR: PCI EEPROM control register. 128 * E2PROM_CSR: PCI EEPROM control register.
119 * RELOAD: Write 1 to reload eeprom content. 129 * RELOAD: Write 1 to reload eeprom content.
@@ -130,6 +140,150 @@
130#define E2PROM_CSR_RELOAD FIELD32(0x00000080) 140#define E2PROM_CSR_RELOAD FIELD32(0x00000080)
131 141
132/* 142/*
143 * CMB_CTRL_CFG
144 */
145#define CMB_CTRL 0x0020
146#define AUX_OPT_BIT0 FIELD32(0x00000001)
147#define AUX_OPT_BIT1 FIELD32(0x00000002)
148#define AUX_OPT_BIT2 FIELD32(0x00000004)
149#define AUX_OPT_BIT3 FIELD32(0x00000008)
150#define AUX_OPT_BIT4 FIELD32(0x00000010)
151#define AUX_OPT_BIT5 FIELD32(0x00000020)
152#define AUX_OPT_BIT6 FIELD32(0x00000040)
153#define AUX_OPT_BIT7 FIELD32(0x00000080)
154#define AUX_OPT_BIT8 FIELD32(0x00000100)
155#define AUX_OPT_BIT9 FIELD32(0x00000200)
156#define AUX_OPT_BIT10 FIELD32(0x00000400)
157#define AUX_OPT_BIT11 FIELD32(0x00000800)
158#define AUX_OPT_BIT12 FIELD32(0x00001000)
159#define AUX_OPT_BIT13 FIELD32(0x00002000)
160#define AUX_OPT_BIT14 FIELD32(0x00004000)
161#define AUX_OPT_BIT15 FIELD32(0x00008000)
162#define LDO25_LEVEL FIELD32(0x00030000)
163#define LDO25_LARGEA FIELD32(0x00040000)
164#define LDO25_FRC_ON FIELD32(0x00080000)
165#define CMB_RSV FIELD32(0x00300000)
166#define XTAL_RDY FIELD32(0x00400000)
167#define PLL_LD FIELD32(0x00800000)
168#define LDO_CORE_LEVEL FIELD32(0x0F000000)
169#define LDO_BGSEL FIELD32(0x30000000)
170#define LDO3_EN FIELD32(0x40000000)
171#define LDO0_EN FIELD32(0x80000000)
172
173/*
174 * EFUSE_CSR_3290: RT3290 EEPROM
175 */
176#define EFUSE_CTRL_3290 0x0024
177
178/*
179 * EFUSE_DATA3 of 3290
180 */
181#define EFUSE_DATA3_3290 0x0028
182
183/*
184 * EFUSE_DATA2 of 3290
185 */
186#define EFUSE_DATA2_3290 0x002c
187
188/*
189 * EFUSE_DATA1 of 3290
190 */
191#define EFUSE_DATA1_3290 0x0030
192
193/*
194 * EFUSE_DATA0 of 3290
195 */
196#define EFUSE_DATA0_3290 0x0034
197
198/*
199 * OSC_CTRL_CFG
200 * Ring oscillator configuration
201 */
202#define OSC_CTRL 0x0038
203#define OSC_REF_CYCLE FIELD32(0x00001fff)
204#define OSC_RSV FIELD32(0x0000e000)
205#define OSC_CAL_CNT FIELD32(0x0fff0000)
206#define OSC_CAL_ACK FIELD32(0x10000000)
207#define OSC_CLK_32K_VLD FIELD32(0x20000000)
208#define OSC_CAL_REQ FIELD32(0x40000000)
209#define OSC_ROSC_EN FIELD32(0x80000000)
210
211/*
212 * COEX_CFG_0
213 */
214#define COEX_CFG0 0x0040
215#define COEX_CFG_ANT FIELD32(0xff000000)
216/*
217 * COEX_CFG_1
218 */
219#define COEX_CFG1 0x0044
220
221/*
222 * COEX_CFG_2
223 */
224#define COEX_CFG2 0x0048
225#define BT_COEX_CFG1 FIELD32(0xff000000)
226#define BT_COEX_CFG0 FIELD32(0x00ff0000)
227#define WL_COEX_CFG1 FIELD32(0x0000ff00)
228#define WL_COEX_CFG0 FIELD32(0x000000ff)
229/*
230 * PLL_CTRL_CFG
231 * PLL configuration register
232 */
233#define PLL_CTRL 0x0050
234#define PLL_RESERVED_INPUT1 FIELD32(0x000000ff)
235#define PLL_RESERVED_INPUT2 FIELD32(0x0000ff00)
236#define PLL_CONTROL FIELD32(0x00070000)
237#define PLL_LPF_R1 FIELD32(0x00080000)
238#define PLL_LPF_C1_CTRL FIELD32(0x00300000)
239#define PLL_LPF_C2_CTRL FIELD32(0x00c00000)
240#define PLL_CP_CURRENT_CTRL FIELD32(0x03000000)
241#define PLL_PFD_DELAY_CTRL FIELD32(0x0c000000)
242#define PLL_LOCK_CTRL FIELD32(0x70000000)
243#define PLL_VBGBK_EN FIELD32(0x80000000)
244
245
246/*
247 * WLAN_CTRL_CFG
248 * RT3290 wlan configuration
249 */
250#define WLAN_FUN_CTRL 0x0080
251#define WLAN_EN FIELD32(0x00000001)
252#define WLAN_CLK_EN FIELD32(0x00000002)
253#define WLAN_RSV1 FIELD32(0x00000004)
254#define WLAN_RESET FIELD32(0x00000008)
255#define PCIE_APP0_CLK_REQ FIELD32(0x00000010)
256#define FRC_WL_ANT_SET FIELD32(0x00000020)
257#define INV_TR_SW0 FIELD32(0x00000040)
258#define WLAN_GPIO_IN_BIT0 FIELD32(0x00000100)
259#define WLAN_GPIO_IN_BIT1 FIELD32(0x00000200)
260#define WLAN_GPIO_IN_BIT2 FIELD32(0x00000400)
261#define WLAN_GPIO_IN_BIT3 FIELD32(0x00000800)
262#define WLAN_GPIO_IN_BIT4 FIELD32(0x00001000)
263#define WLAN_GPIO_IN_BIT5 FIELD32(0x00002000)
264#define WLAN_GPIO_IN_BIT6 FIELD32(0x00004000)
265#define WLAN_GPIO_IN_BIT7 FIELD32(0x00008000)
266#define WLAN_GPIO_IN_BIT_ALL FIELD32(0x0000ff00)
267#define WLAN_GPIO_OUT_BIT0 FIELD32(0x00010000)
268#define WLAN_GPIO_OUT_BIT1 FIELD32(0x00020000)
269#define WLAN_GPIO_OUT_BIT2 FIELD32(0x00040000)
270#define WLAN_GPIO_OUT_BIT3 FIELD32(0x00050000)
271#define WLAN_GPIO_OUT_BIT4 FIELD32(0x00100000)
272#define WLAN_GPIO_OUT_BIT5 FIELD32(0x00200000)
273#define WLAN_GPIO_OUT_BIT6 FIELD32(0x00400000)
274#define WLAN_GPIO_OUT_BIT7 FIELD32(0x00800000)
275#define WLAN_GPIO_OUT_BIT_ALL FIELD32(0x00ff0000)
276#define WLAN_GPIO_OUT_OE_BIT0 FIELD32(0x01000000)
277#define WLAN_GPIO_OUT_OE_BIT1 FIELD32(0x02000000)
278#define WLAN_GPIO_OUT_OE_BIT2 FIELD32(0x04000000)
279#define WLAN_GPIO_OUT_OE_BIT3 FIELD32(0x08000000)
280#define WLAN_GPIO_OUT_OE_BIT4 FIELD32(0x10000000)
281#define WLAN_GPIO_OUT_OE_BIT5 FIELD32(0x20000000)
282#define WLAN_GPIO_OUT_OE_BIT6 FIELD32(0x40000000)
283#define WLAN_GPIO_OUT_OE_BIT7 FIELD32(0x80000000)
284#define WLAN_GPIO_OUT_OE_BIT_ALL FIELD32(0xff000000)
285
286/*
133 * AUX_CTRL: Aux/PCI-E related configuration 287 * AUX_CTRL: Aux/PCI-E related configuration
134 */ 288 */
135#define AUX_CTRL 0x10c 289#define AUX_CTRL 0x10c
@@ -1760,9 +1914,11 @@ struct mac_iveiv_entry {
1760/* 1914/*
1761 * BBP 3: RX Antenna 1915 * BBP 3: RX Antenna
1762 */ 1916 */
1763#define BBP3_RX_ADC FIELD8(0x03) 1917#define BBP3_RX_ADC FIELD8(0x03)
1764#define BBP3_RX_ANTENNA FIELD8(0x18) 1918#define BBP3_RX_ANTENNA FIELD8(0x18)
1765#define BBP3_HT40_MINUS FIELD8(0x20) 1919#define BBP3_HT40_MINUS FIELD8(0x20)
1920#define BBP3_ADC_MODE_SWITCH FIELD8(0x40)
1921#define BBP3_ADC_INIT_MODE FIELD8(0x80)
1766 1922
1767/* 1923/*
1768 * BBP 4: Bandwidth 1924 * BBP 4: Bandwidth
@@ -1772,6 +1928,14 @@ struct mac_iveiv_entry {
1772#define BBP4_MAC_IF_CTRL FIELD8(0x40) 1928#define BBP4_MAC_IF_CTRL FIELD8(0x40)
1773 1929
1774/* 1930/*
1931 * BBP 47: Bandwidth
1932 */
1933#define BBP47_TSSI_REPORT_SEL FIELD8(0x03)
1934#define BBP47_TSSI_UPDATE_REQ FIELD8(0x04)
1935#define BBP47_TSSI_TSSI_MODE FIELD8(0x18)
1936#define BBP47_TSSI_ADC6 FIELD8(0x80)
1937
1938/*
1775 * BBP 109 1939 * BBP 109
1776 */ 1940 */
1777#define BBP109_TX0_POWER FIELD8(0x0f) 1941#define BBP109_TX0_POWER FIELD8(0x0f)
@@ -1914,6 +2078,16 @@ struct mac_iveiv_entry {
1914#define RFCSR27_R4 FIELD8(0x40) 2078#define RFCSR27_R4 FIELD8(0x40)
1915 2079
1916/* 2080/*
2081 * RFCSR 29:
2082 */
2083#define RFCSR29_ADC6_TEST FIELD8(0x01)
2084#define RFCSR29_ADC6_INT_TEST FIELD8(0x02)
2085#define RFCSR29_RSSI_RESET FIELD8(0x04)
2086#define RFCSR29_RSSI_ON FIELD8(0x08)
2087#define RFCSR29_RSSI_RIP_CTRL FIELD8(0x30)
2088#define RFCSR29_RSSI_GAIN FIELD8(0xc0)
2089
2090/*
1917 * RFCSR 30: 2091 * RFCSR 30:
1918 */ 2092 */
1919#define RFCSR30_TX_H20M FIELD8(0x02) 2093#define RFCSR30_TX_H20M FIELD8(0x02)
@@ -1944,6 +2118,11 @@ struct mac_iveiv_entry {
1944#define RFCSR49_TX FIELD8(0x3f) 2118#define RFCSR49_TX FIELD8(0x3f)
1945 2119
1946/* 2120/*
2121 * RFCSR 50:
2122 */
2123#define RFCSR50_TX FIELD8(0x3f)
2124
2125/*
1947 * RF registers 2126 * RF registers
1948 */ 2127 */
1949 2128
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index dfc90d34be6..068276ee8af 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -354,16 +354,15 @@ int rt2800_check_firmware(struct rt2x00_dev *rt2x00dev,
354 * of 4kb. Certain USB chipsets however require different firmware, 354 * of 4kb. Certain USB chipsets however require different firmware,
355 * which Ralink only provides attached to the original firmware 355 * which Ralink only provides attached to the original firmware
356 * file. Thus for USB devices, firmware files have a length 356 * file. Thus for USB devices, firmware files have a length
357 * which is a multiple of 4kb. 357 * which is a multiple of 4kb. The firmware for rt3290 chip also
358 * have a length which is a multiple of 4kb.
358 */ 359 */
359 if (rt2x00_is_usb(rt2x00dev)) { 360 if (rt2x00_is_usb(rt2x00dev) || rt2x00_rt(rt2x00dev, RT3290))
360 fw_len = 4096; 361 fw_len = 4096;
361 multiple = true; 362 else
362 } else {
363 fw_len = 8192; 363 fw_len = 8192;
364 multiple = true;
365 }
366 364
365 multiple = true;
367 /* 366 /*
368 * Validate the firmware length 367 * Validate the firmware length
369 */ 368 */
@@ -415,7 +414,8 @@ int rt2800_load_firmware(struct rt2x00_dev *rt2x00dev,
415 return -EBUSY; 414 return -EBUSY;
416 415
417 if (rt2x00_is_pci(rt2x00dev)) { 416 if (rt2x00_is_pci(rt2x00dev)) {
418 if (rt2x00_rt(rt2x00dev, RT3572) || 417 if (rt2x00_rt(rt2x00dev, RT3290) ||
418 rt2x00_rt(rt2x00dev, RT3572) ||
419 rt2x00_rt(rt2x00dev, RT5390) || 419 rt2x00_rt(rt2x00dev, RT5390) ||
420 rt2x00_rt(rt2x00dev, RT5392)) { 420 rt2x00_rt(rt2x00dev, RT5392)) {
421 rt2800_register_read(rt2x00dev, AUX_CTRL, &reg); 421 rt2800_register_read(rt2x00dev, AUX_CTRL, &reg);
@@ -851,8 +851,13 @@ int rt2800_rfkill_poll(struct rt2x00_dev *rt2x00dev)
851{ 851{
852 u32 reg; 852 u32 reg;
853 853
854 rt2800_register_read(rt2x00dev, GPIO_CTRL_CFG, &reg); 854 if (rt2x00_rt(rt2x00dev, RT3290)) {
855 return rt2x00_get_field32(reg, GPIO_CTRL_CFG_BIT2); 855 rt2800_register_read(rt2x00dev, WLAN_FUN_CTRL, &reg);
856 return rt2x00_get_field32(reg, WLAN_GPIO_IN_BIT0);
857 } else {
858 rt2800_register_read(rt2x00dev, GPIO_CTRL_CFG, &reg);
859 return rt2x00_get_field32(reg, GPIO_CTRL_CFG_BIT2);
860 }
856} 861}
857EXPORT_SYMBOL_GPL(rt2800_rfkill_poll); 862EXPORT_SYMBOL_GPL(rt2800_rfkill_poll);
858 863
@@ -1935,9 +1940,54 @@ static void rt2800_config_channel_rf3052(struct rt2x00_dev *rt2x00dev,
1935 rt2800_rfcsr_write(rt2x00dev, 7, rfcsr); 1940 rt2800_rfcsr_write(rt2x00dev, 7, rfcsr);
1936} 1941}
1937 1942
1943#define RT3290_POWER_BOUND 0x27
1944#define RT3290_FREQ_OFFSET_BOUND 0x5f
1938#define RT5390_POWER_BOUND 0x27 1945#define RT5390_POWER_BOUND 0x27
1939#define RT5390_FREQ_OFFSET_BOUND 0x5f 1946#define RT5390_FREQ_OFFSET_BOUND 0x5f
1940 1947
1948static void rt2800_config_channel_rf3290(struct rt2x00_dev *rt2x00dev,
1949 struct ieee80211_conf *conf,
1950 struct rf_channel *rf,
1951 struct channel_info *info)
1952{
1953 u8 rfcsr;
1954
1955 rt2800_rfcsr_write(rt2x00dev, 8, rf->rf1);
1956 rt2800_rfcsr_write(rt2x00dev, 9, rf->rf3);
1957 rt2800_rfcsr_read(rt2x00dev, 11, &rfcsr);
1958 rt2x00_set_field8(&rfcsr, RFCSR11_R, rf->rf2);
1959 rt2800_rfcsr_write(rt2x00dev, 11, rfcsr);
1960
1961 rt2800_rfcsr_read(rt2x00dev, 49, &rfcsr);
1962 if (info->default_power1 > RT3290_POWER_BOUND)
1963 rt2x00_set_field8(&rfcsr, RFCSR49_TX, RT3290_POWER_BOUND);
1964 else
1965 rt2x00_set_field8(&rfcsr, RFCSR49_TX, info->default_power1);
1966 rt2800_rfcsr_write(rt2x00dev, 49, rfcsr);
1967
1968 rt2800_rfcsr_read(rt2x00dev, 17, &rfcsr);
1969 if (rt2x00dev->freq_offset > RT3290_FREQ_OFFSET_BOUND)
1970 rt2x00_set_field8(&rfcsr, RFCSR17_CODE,
1971 RT3290_FREQ_OFFSET_BOUND);
1972 else
1973 rt2x00_set_field8(&rfcsr, RFCSR17_CODE, rt2x00dev->freq_offset);
1974 rt2800_rfcsr_write(rt2x00dev, 17, rfcsr);
1975
1976 if (rf->channel <= 14) {
1977 if (rf->channel == 6)
1978 rt2800_bbp_write(rt2x00dev, 68, 0x0c);
1979 else
1980 rt2800_bbp_write(rt2x00dev, 68, 0x0b);
1981
1982 if (rf->channel >= 1 && rf->channel <= 6)
1983 rt2800_bbp_write(rt2x00dev, 59, 0x0f);
1984 else if (rf->channel >= 7 && rf->channel <= 11)
1985 rt2800_bbp_write(rt2x00dev, 59, 0x0e);
1986 else if (rf->channel >= 12 && rf->channel <= 14)
1987 rt2800_bbp_write(rt2x00dev, 59, 0x0d);
1988 }
1989}
1990
1941static void rt2800_config_channel_rf53xx(struct rt2x00_dev *rt2x00dev, 1991static void rt2800_config_channel_rf53xx(struct rt2x00_dev *rt2x00dev,
1942 struct ieee80211_conf *conf, 1992 struct ieee80211_conf *conf,
1943 struct rf_channel *rf, 1993 struct rf_channel *rf,
@@ -1958,7 +2008,22 @@ static void rt2800_config_channel_rf53xx(struct rt2x00_dev *rt2x00dev,
1958 rt2x00_set_field8(&rfcsr, RFCSR49_TX, info->default_power1); 2008 rt2x00_set_field8(&rfcsr, RFCSR49_TX, info->default_power1);
1959 rt2800_rfcsr_write(rt2x00dev, 49, rfcsr); 2009 rt2800_rfcsr_write(rt2x00dev, 49, rfcsr);
1960 2010
2011 if (rt2x00_rt(rt2x00dev, RT5392)) {
2012 rt2800_rfcsr_read(rt2x00dev, 50, &rfcsr);
2013 if (info->default_power1 > RT5390_POWER_BOUND)
2014 rt2x00_set_field8(&rfcsr, RFCSR50_TX,
2015 RT5390_POWER_BOUND);
2016 else
2017 rt2x00_set_field8(&rfcsr, RFCSR50_TX,
2018 info->default_power2);
2019 rt2800_rfcsr_write(rt2x00dev, 50, rfcsr);
2020 }
2021
1961 rt2800_rfcsr_read(rt2x00dev, 1, &rfcsr); 2022 rt2800_rfcsr_read(rt2x00dev, 1, &rfcsr);
2023 if (rt2x00_rt(rt2x00dev, RT5392)) {
2024 rt2x00_set_field8(&rfcsr, RFCSR1_RX1_PD, 1);
2025 rt2x00_set_field8(&rfcsr, RFCSR1_TX1_PD, 1);
2026 }
1962 rt2x00_set_field8(&rfcsr, RFCSR1_RF_BLOCK_EN, 1); 2027 rt2x00_set_field8(&rfcsr, RFCSR1_RF_BLOCK_EN, 1);
1963 rt2x00_set_field8(&rfcsr, RFCSR1_PLL_PD, 1); 2028 rt2x00_set_field8(&rfcsr, RFCSR1_PLL_PD, 1);
1964 rt2x00_set_field8(&rfcsr, RFCSR1_RX0_PD, 1); 2029 rt2x00_set_field8(&rfcsr, RFCSR1_RX0_PD, 1);
@@ -2021,15 +2086,6 @@ static void rt2800_config_channel_rf53xx(struct rt2x00_dev *rt2x00dev,
2021 } 2086 }
2022 } 2087 }
2023 } 2088 }
2024
2025 rt2800_rfcsr_read(rt2x00dev, 30, &rfcsr);
2026 rt2x00_set_field8(&rfcsr, RFCSR30_TX_H20M, 0);
2027 rt2x00_set_field8(&rfcsr, RFCSR30_RX_H20M, 0);
2028 rt2800_rfcsr_write(rt2x00dev, 30, rfcsr);
2029
2030 rt2800_rfcsr_read(rt2x00dev, 3, &rfcsr);
2031 rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 1);
2032 rt2800_rfcsr_write(rt2x00dev, 3, rfcsr);
2033} 2089}
2034 2090
2035static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev, 2091static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
@@ -2039,7 +2095,7 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
2039{ 2095{
2040 u32 reg; 2096 u32 reg;
2041 unsigned int tx_pin; 2097 unsigned int tx_pin;
2042 u8 bbp; 2098 u8 bbp, rfcsr;
2043 2099
2044 if (rf->channel <= 14) { 2100 if (rf->channel <= 14) {
2045 info->default_power1 = TXPOWER_G_TO_DEV(info->default_power1); 2101 info->default_power1 = TXPOWER_G_TO_DEV(info->default_power1);
@@ -2060,15 +2116,36 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
2060 case RF3052: 2116 case RF3052:
2061 rt2800_config_channel_rf3052(rt2x00dev, conf, rf, info); 2117 rt2800_config_channel_rf3052(rt2x00dev, conf, rf, info);
2062 break; 2118 break;
2119 case RF3290:
2120 rt2800_config_channel_rf3290(rt2x00dev, conf, rf, info);
2121 break;
2122 case RF5360:
2063 case RF5370: 2123 case RF5370:
2064 case RF5372: 2124 case RF5372:
2065 case RF5390: 2125 case RF5390:
2126 case RF5392:
2066 rt2800_config_channel_rf53xx(rt2x00dev, conf, rf, info); 2127 rt2800_config_channel_rf53xx(rt2x00dev, conf, rf, info);
2067 break; 2128 break;
2068 default: 2129 default:
2069 rt2800_config_channel_rf2xxx(rt2x00dev, conf, rf, info); 2130 rt2800_config_channel_rf2xxx(rt2x00dev, conf, rf, info);
2070 } 2131 }
2071 2132
2133 if (rt2x00_rf(rt2x00dev, RF3290) ||
2134 rt2x00_rf(rt2x00dev, RF5360) ||
2135 rt2x00_rf(rt2x00dev, RF5370) ||
2136 rt2x00_rf(rt2x00dev, RF5372) ||
2137 rt2x00_rf(rt2x00dev, RF5390) ||
2138 rt2x00_rf(rt2x00dev, RF5392)) {
2139 rt2800_rfcsr_read(rt2x00dev, 30, &rfcsr);
2140 rt2x00_set_field8(&rfcsr, RFCSR30_TX_H20M, 0);
2141 rt2x00_set_field8(&rfcsr, RFCSR30_RX_H20M, 0);
2142 rt2800_rfcsr_write(rt2x00dev, 30, rfcsr);
2143
2144 rt2800_rfcsr_read(rt2x00dev, 3, &rfcsr);
2145 rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 1);
2146 rt2800_rfcsr_write(rt2x00dev, 3, rfcsr);
2147 }
2148
2072 /* 2149 /*
2073 * Change BBP settings 2150 * Change BBP settings
2074 */ 2151 */
@@ -2549,9 +2626,12 @@ void rt2800_vco_calibration(struct rt2x00_dev *rt2x00dev)
2549 rt2x00_set_field8(&rfcsr, RFCSR7_RF_TUNING, 1); 2626 rt2x00_set_field8(&rfcsr, RFCSR7_RF_TUNING, 1);
2550 rt2800_rfcsr_write(rt2x00dev, 7, rfcsr); 2627 rt2800_rfcsr_write(rt2x00dev, 7, rfcsr);
2551 break; 2628 break;
2629 case RF3290:
2630 case RF5360:
2552 case RF5370: 2631 case RF5370:
2553 case RF5372: 2632 case RF5372:
2554 case RF5390: 2633 case RF5390:
2634 case RF5392:
2555 rt2800_rfcsr_read(rt2x00dev, 3, &rfcsr); 2635 rt2800_rfcsr_read(rt2x00dev, 3, &rfcsr);
2556 rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 1); 2636 rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 1);
2557 rt2800_rfcsr_write(rt2x00dev, 3, rfcsr); 2637 rt2800_rfcsr_write(rt2x00dev, 3, rfcsr);
@@ -2682,6 +2762,7 @@ static u8 rt2800_get_default_vgc(struct rt2x00_dev *rt2x00dev)
2682 if (rt2x00_rt(rt2x00dev, RT3070) || 2762 if (rt2x00_rt(rt2x00dev, RT3070) ||
2683 rt2x00_rt(rt2x00dev, RT3071) || 2763 rt2x00_rt(rt2x00dev, RT3071) ||
2684 rt2x00_rt(rt2x00dev, RT3090) || 2764 rt2x00_rt(rt2x00dev, RT3090) ||
2765 rt2x00_rt(rt2x00dev, RT3290) ||
2685 rt2x00_rt(rt2x00dev, RT3390) || 2766 rt2x00_rt(rt2x00dev, RT3390) ||
2686 rt2x00_rt(rt2x00dev, RT5390) || 2767 rt2x00_rt(rt2x00dev, RT5390) ||
2687 rt2x00_rt(rt2x00dev, RT5392)) 2768 rt2x00_rt(rt2x00dev, RT5392))
@@ -2778,10 +2859,54 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
2778 rt2x00_set_field32(&reg, BKOFF_SLOT_CFG_CC_DELAY_TIME, 2); 2859 rt2x00_set_field32(&reg, BKOFF_SLOT_CFG_CC_DELAY_TIME, 2);
2779 rt2800_register_write(rt2x00dev, BKOFF_SLOT_CFG, reg); 2860 rt2800_register_write(rt2x00dev, BKOFF_SLOT_CFG, reg);
2780 2861
2862 if (rt2x00_rt(rt2x00dev, RT3290)) {
2863 rt2800_register_read(rt2x00dev, WLAN_FUN_CTRL, &reg);
2864 if (rt2x00_get_field32(reg, WLAN_EN) == 1) {
2865 rt2x00_set_field32(&reg, PCIE_APP0_CLK_REQ, 1);
2866 rt2800_register_write(rt2x00dev, WLAN_FUN_CTRL, reg);
2867 }
2868
2869 rt2800_register_read(rt2x00dev, CMB_CTRL, &reg);
2870 if (!(rt2x00_get_field32(reg, LDO0_EN) == 1)) {
2871 rt2x00_set_field32(&reg, LDO0_EN, 1);
2872 rt2x00_set_field32(&reg, LDO_BGSEL, 3);
2873 rt2800_register_write(rt2x00dev, CMB_CTRL, reg);
2874 }
2875
2876 rt2800_register_read(rt2x00dev, OSC_CTRL, &reg);
2877 rt2x00_set_field32(&reg, OSC_ROSC_EN, 1);
2878 rt2x00_set_field32(&reg, OSC_CAL_REQ, 1);
2879 rt2x00_set_field32(&reg, OSC_REF_CYCLE, 0x27);
2880 rt2800_register_write(rt2x00dev, OSC_CTRL, reg);
2881
2882 rt2800_register_read(rt2x00dev, COEX_CFG0, &reg);
2883 rt2x00_set_field32(&reg, COEX_CFG_ANT, 0x5e);
2884 rt2800_register_write(rt2x00dev, COEX_CFG0, reg);
2885
2886 rt2800_register_read(rt2x00dev, COEX_CFG2, &reg);
2887 rt2x00_set_field32(&reg, BT_COEX_CFG1, 0x00);
2888 rt2x00_set_field32(&reg, BT_COEX_CFG0, 0x17);
2889 rt2x00_set_field32(&reg, WL_COEX_CFG1, 0x93);
2890 rt2x00_set_field32(&reg, WL_COEX_CFG0, 0x7f);
2891 rt2800_register_write(rt2x00dev, COEX_CFG2, reg);
2892
2893 rt2800_register_read(rt2x00dev, PLL_CTRL, &reg);
2894 rt2x00_set_field32(&reg, PLL_CONTROL, 1);
2895 rt2800_register_write(rt2x00dev, PLL_CTRL, reg);
2896 }
2897
2781 if (rt2x00_rt(rt2x00dev, RT3071) || 2898 if (rt2x00_rt(rt2x00dev, RT3071) ||
2782 rt2x00_rt(rt2x00dev, RT3090) || 2899 rt2x00_rt(rt2x00dev, RT3090) ||
2900 rt2x00_rt(rt2x00dev, RT3290) ||
2783 rt2x00_rt(rt2x00dev, RT3390)) { 2901 rt2x00_rt(rt2x00dev, RT3390)) {
2784 rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000400); 2902
2903 if (rt2x00_rt(rt2x00dev, RT3290))
2904 rt2800_register_write(rt2x00dev, TX_SW_CFG0,
2905 0x00000404);
2906 else
2907 rt2800_register_write(rt2x00dev, TX_SW_CFG0,
2908 0x00000400);
2909
2785 rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00000000); 2910 rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00000000);
2786 if (rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E) || 2911 if (rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E) ||
2787 rt2x00_rt_rev_lt(rt2x00dev, RT3090, REV_RT3090E) || 2912 rt2x00_rt_rev_lt(rt2x00dev, RT3090, REV_RT3090E) ||
@@ -3190,14 +3315,16 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
3190 rt2800_wait_bbp_ready(rt2x00dev))) 3315 rt2800_wait_bbp_ready(rt2x00dev)))
3191 return -EACCES; 3316 return -EACCES;
3192 3317
3193 if (rt2x00_rt(rt2x00dev, RT5390) || 3318 if (rt2x00_rt(rt2x00dev, RT3290) ||
3194 rt2x00_rt(rt2x00dev, RT5392)) { 3319 rt2x00_rt(rt2x00dev, RT5390) ||
3320 rt2x00_rt(rt2x00dev, RT5392)) {
3195 rt2800_bbp_read(rt2x00dev, 4, &value); 3321 rt2800_bbp_read(rt2x00dev, 4, &value);
3196 rt2x00_set_field8(&value, BBP4_MAC_IF_CTRL, 1); 3322 rt2x00_set_field8(&value, BBP4_MAC_IF_CTRL, 1);
3197 rt2800_bbp_write(rt2x00dev, 4, value); 3323 rt2800_bbp_write(rt2x00dev, 4, value);
3198 } 3324 }
3199 3325
3200 if (rt2800_is_305x_soc(rt2x00dev) || 3326 if (rt2800_is_305x_soc(rt2x00dev) ||
3327 rt2x00_rt(rt2x00dev, RT3290) ||
3201 rt2x00_rt(rt2x00dev, RT3572) || 3328 rt2x00_rt(rt2x00dev, RT3572) ||
3202 rt2x00_rt(rt2x00dev, RT5390) || 3329 rt2x00_rt(rt2x00dev, RT5390) ||
3203 rt2x00_rt(rt2x00dev, RT5392)) 3330 rt2x00_rt(rt2x00dev, RT5392))
@@ -3206,20 +3333,26 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
3206 rt2800_bbp_write(rt2x00dev, 65, 0x2c); 3333 rt2800_bbp_write(rt2x00dev, 65, 0x2c);
3207 rt2800_bbp_write(rt2x00dev, 66, 0x38); 3334 rt2800_bbp_write(rt2x00dev, 66, 0x38);
3208 3335
3209 if (rt2x00_rt(rt2x00dev, RT5390) || 3336 if (rt2x00_rt(rt2x00dev, RT3290) ||
3210 rt2x00_rt(rt2x00dev, RT5392)) 3337 rt2x00_rt(rt2x00dev, RT5390) ||
3338 rt2x00_rt(rt2x00dev, RT5392))
3211 rt2800_bbp_write(rt2x00dev, 68, 0x0b); 3339 rt2800_bbp_write(rt2x00dev, 68, 0x0b);
3212 3340
3213 if (rt2x00_rt_rev(rt2x00dev, RT2860, REV_RT2860C)) { 3341 if (rt2x00_rt_rev(rt2x00dev, RT2860, REV_RT2860C)) {
3214 rt2800_bbp_write(rt2x00dev, 69, 0x16); 3342 rt2800_bbp_write(rt2x00dev, 69, 0x16);
3215 rt2800_bbp_write(rt2x00dev, 73, 0x12); 3343 rt2800_bbp_write(rt2x00dev, 73, 0x12);
3216 } else if (rt2x00_rt(rt2x00dev, RT5390) || 3344 } else if (rt2x00_rt(rt2x00dev, RT3290) ||
3217 rt2x00_rt(rt2x00dev, RT5392)) { 3345 rt2x00_rt(rt2x00dev, RT5390) ||
3346 rt2x00_rt(rt2x00dev, RT5392)) {
3218 rt2800_bbp_write(rt2x00dev, 69, 0x12); 3347 rt2800_bbp_write(rt2x00dev, 69, 0x12);
3219 rt2800_bbp_write(rt2x00dev, 73, 0x13); 3348 rt2800_bbp_write(rt2x00dev, 73, 0x13);
3220 rt2800_bbp_write(rt2x00dev, 75, 0x46); 3349 rt2800_bbp_write(rt2x00dev, 75, 0x46);
3221 rt2800_bbp_write(rt2x00dev, 76, 0x28); 3350 rt2800_bbp_write(rt2x00dev, 76, 0x28);
3222 rt2800_bbp_write(rt2x00dev, 77, 0x59); 3351
3352 if (rt2x00_rt(rt2x00dev, RT3290))
3353 rt2800_bbp_write(rt2x00dev, 77, 0x58);
3354 else
3355 rt2800_bbp_write(rt2x00dev, 77, 0x59);
3223 } else { 3356 } else {
3224 rt2800_bbp_write(rt2x00dev, 69, 0x12); 3357 rt2800_bbp_write(rt2x00dev, 69, 0x12);
3225 rt2800_bbp_write(rt2x00dev, 73, 0x10); 3358 rt2800_bbp_write(rt2x00dev, 73, 0x10);
@@ -3244,23 +3377,33 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
3244 rt2800_bbp_write(rt2x00dev, 81, 0x37); 3377 rt2800_bbp_write(rt2x00dev, 81, 0x37);
3245 } 3378 }
3246 3379
3380 if (rt2x00_rt(rt2x00dev, RT3290)) {
3381 rt2800_bbp_write(rt2x00dev, 74, 0x0b);
3382 rt2800_bbp_write(rt2x00dev, 79, 0x18);
3383 rt2800_bbp_write(rt2x00dev, 80, 0x09);
3384 rt2800_bbp_write(rt2x00dev, 81, 0x33);
3385 }
3386
3247 rt2800_bbp_write(rt2x00dev, 82, 0x62); 3387 rt2800_bbp_write(rt2x00dev, 82, 0x62);
3248 if (rt2x00_rt(rt2x00dev, RT5390) || 3388 if (rt2x00_rt(rt2x00dev, RT3290) ||
3249 rt2x00_rt(rt2x00dev, RT5392)) 3389 rt2x00_rt(rt2x00dev, RT5390) ||
3390 rt2x00_rt(rt2x00dev, RT5392))
3250 rt2800_bbp_write(rt2x00dev, 83, 0x7a); 3391 rt2800_bbp_write(rt2x00dev, 83, 0x7a);
3251 else 3392 else
3252 rt2800_bbp_write(rt2x00dev, 83, 0x6a); 3393 rt2800_bbp_write(rt2x00dev, 83, 0x6a);
3253 3394
3254 if (rt2x00_rt_rev(rt2x00dev, RT2860, REV_RT2860D)) 3395 if (rt2x00_rt_rev(rt2x00dev, RT2860, REV_RT2860D))
3255 rt2800_bbp_write(rt2x00dev, 84, 0x19); 3396 rt2800_bbp_write(rt2x00dev, 84, 0x19);
3256 else if (rt2x00_rt(rt2x00dev, RT5390) || 3397 else if (rt2x00_rt(rt2x00dev, RT3290) ||
3257 rt2x00_rt(rt2x00dev, RT5392)) 3398 rt2x00_rt(rt2x00dev, RT5390) ||
3399 rt2x00_rt(rt2x00dev, RT5392))
3258 rt2800_bbp_write(rt2x00dev, 84, 0x9a); 3400 rt2800_bbp_write(rt2x00dev, 84, 0x9a);
3259 else 3401 else
3260 rt2800_bbp_write(rt2x00dev, 84, 0x99); 3402 rt2800_bbp_write(rt2x00dev, 84, 0x99);
3261 3403
3262 if (rt2x00_rt(rt2x00dev, RT5390) || 3404 if (rt2x00_rt(rt2x00dev, RT3290) ||
3263 rt2x00_rt(rt2x00dev, RT5392)) 3405 rt2x00_rt(rt2x00dev, RT5390) ||
3406 rt2x00_rt(rt2x00dev, RT5392))
3264 rt2800_bbp_write(rt2x00dev, 86, 0x38); 3407 rt2800_bbp_write(rt2x00dev, 86, 0x38);
3265 else 3408 else
3266 rt2800_bbp_write(rt2x00dev, 86, 0x00); 3409 rt2800_bbp_write(rt2x00dev, 86, 0x00);
@@ -3270,8 +3413,9 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
3270 3413
3271 rt2800_bbp_write(rt2x00dev, 91, 0x04); 3414 rt2800_bbp_write(rt2x00dev, 91, 0x04);
3272 3415
3273 if (rt2x00_rt(rt2x00dev, RT5390) || 3416 if (rt2x00_rt(rt2x00dev, RT3290) ||
3274 rt2x00_rt(rt2x00dev, RT5392)) 3417 rt2x00_rt(rt2x00dev, RT5390) ||
3418 rt2x00_rt(rt2x00dev, RT5392))
3275 rt2800_bbp_write(rt2x00dev, 92, 0x02); 3419 rt2800_bbp_write(rt2x00dev, 92, 0x02);
3276 else 3420 else
3277 rt2800_bbp_write(rt2x00dev, 92, 0x00); 3421 rt2800_bbp_write(rt2x00dev, 92, 0x00);
@@ -3285,6 +3429,7 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
3285 rt2x00_rt_rev_gte(rt2x00dev, RT3071, REV_RT3071E) || 3429 rt2x00_rt_rev_gte(rt2x00dev, RT3071, REV_RT3071E) ||
3286 rt2x00_rt_rev_gte(rt2x00dev, RT3090, REV_RT3090E) || 3430 rt2x00_rt_rev_gte(rt2x00dev, RT3090, REV_RT3090E) ||
3287 rt2x00_rt_rev_gte(rt2x00dev, RT3390, REV_RT3390E) || 3431 rt2x00_rt_rev_gte(rt2x00dev, RT3390, REV_RT3390E) ||
3432 rt2x00_rt(rt2x00dev, RT3290) ||
3288 rt2x00_rt(rt2x00dev, RT3572) || 3433 rt2x00_rt(rt2x00dev, RT3572) ||
3289 rt2x00_rt(rt2x00dev, RT5390) || 3434 rt2x00_rt(rt2x00dev, RT5390) ||
3290 rt2x00_rt(rt2x00dev, RT5392) || 3435 rt2x00_rt(rt2x00dev, RT5392) ||
@@ -3293,27 +3438,32 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
3293 else 3438 else
3294 rt2800_bbp_write(rt2x00dev, 103, 0x00); 3439 rt2800_bbp_write(rt2x00dev, 103, 0x00);
3295 3440
3296 if (rt2x00_rt(rt2x00dev, RT5390) || 3441 if (rt2x00_rt(rt2x00dev, RT3290) ||
3297 rt2x00_rt(rt2x00dev, RT5392)) 3442 rt2x00_rt(rt2x00dev, RT5390) ||
3443 rt2x00_rt(rt2x00dev, RT5392))
3298 rt2800_bbp_write(rt2x00dev, 104, 0x92); 3444 rt2800_bbp_write(rt2x00dev, 104, 0x92);
3299 3445
3300 if (rt2800_is_305x_soc(rt2x00dev)) 3446 if (rt2800_is_305x_soc(rt2x00dev))
3301 rt2800_bbp_write(rt2x00dev, 105, 0x01); 3447 rt2800_bbp_write(rt2x00dev, 105, 0x01);
3448 else if (rt2x00_rt(rt2x00dev, RT3290))
3449 rt2800_bbp_write(rt2x00dev, 105, 0x1c);
3302 else if (rt2x00_rt(rt2x00dev, RT5390) || 3450 else if (rt2x00_rt(rt2x00dev, RT5390) ||
3303 rt2x00_rt(rt2x00dev, RT5392)) 3451 rt2x00_rt(rt2x00dev, RT5392))
3304 rt2800_bbp_write(rt2x00dev, 105, 0x3c); 3452 rt2800_bbp_write(rt2x00dev, 105, 0x3c);
3305 else 3453 else
3306 rt2800_bbp_write(rt2x00dev, 105, 0x05); 3454 rt2800_bbp_write(rt2x00dev, 105, 0x05);
3307 3455
3308 if (rt2x00_rt(rt2x00dev, RT5390)) 3456 if (rt2x00_rt(rt2x00dev, RT3290) ||
3457 rt2x00_rt(rt2x00dev, RT5390))
3309 rt2800_bbp_write(rt2x00dev, 106, 0x03); 3458 rt2800_bbp_write(rt2x00dev, 106, 0x03);
3310 else if (rt2x00_rt(rt2x00dev, RT5392)) 3459 else if (rt2x00_rt(rt2x00dev, RT5392))
3311 rt2800_bbp_write(rt2x00dev, 106, 0x12); 3460 rt2800_bbp_write(rt2x00dev, 106, 0x12);
3312 else 3461 else
3313 rt2800_bbp_write(rt2x00dev, 106, 0x35); 3462 rt2800_bbp_write(rt2x00dev, 106, 0x35);
3314 3463
3315 if (rt2x00_rt(rt2x00dev, RT5390) || 3464 if (rt2x00_rt(rt2x00dev, RT3290) ||
3316 rt2x00_rt(rt2x00dev, RT5392)) 3465 rt2x00_rt(rt2x00dev, RT5390) ||
3466 rt2x00_rt(rt2x00dev, RT5392))
3317 rt2800_bbp_write(rt2x00dev, 128, 0x12); 3467 rt2800_bbp_write(rt2x00dev, 128, 0x12);
3318 3468
3319 if (rt2x00_rt(rt2x00dev, RT5392)) { 3469 if (rt2x00_rt(rt2x00dev, RT5392)) {
@@ -3338,6 +3488,29 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
3338 rt2800_bbp_write(rt2x00dev, 138, value); 3488 rt2800_bbp_write(rt2x00dev, 138, value);
3339 } 3489 }
3340 3490
3491 if (rt2x00_rt(rt2x00dev, RT3290)) {
3492 rt2800_bbp_write(rt2x00dev, 67, 0x24);
3493 rt2800_bbp_write(rt2x00dev, 143, 0x04);
3494 rt2800_bbp_write(rt2x00dev, 142, 0x99);
3495 rt2800_bbp_write(rt2x00dev, 150, 0x30);
3496 rt2800_bbp_write(rt2x00dev, 151, 0x2e);
3497 rt2800_bbp_write(rt2x00dev, 152, 0x20);
3498 rt2800_bbp_write(rt2x00dev, 153, 0x34);
3499 rt2800_bbp_write(rt2x00dev, 154, 0x40);
3500 rt2800_bbp_write(rt2x00dev, 155, 0x3b);
3501 rt2800_bbp_write(rt2x00dev, 253, 0x04);
3502
3503 rt2800_bbp_read(rt2x00dev, 47, &value);
3504 rt2x00_set_field8(&value, BBP47_TSSI_ADC6, 1);
3505 rt2800_bbp_write(rt2x00dev, 47, value);
3506
3507 /* Use 5-bit ADC for Acquisition and 8-bit ADC for data */
3508 rt2800_bbp_read(rt2x00dev, 3, &value);
3509 rt2x00_set_field8(&value, BBP3_ADC_MODE_SWITCH, 1);
3510 rt2x00_set_field8(&value, BBP3_ADC_INIT_MODE, 1);
3511 rt2800_bbp_write(rt2x00dev, 3, value);
3512 }
3513
3341 if (rt2x00_rt(rt2x00dev, RT5390) || 3514 if (rt2x00_rt(rt2x00dev, RT5390) ||
3342 rt2x00_rt(rt2x00dev, RT5392)) { 3515 rt2x00_rt(rt2x00dev, RT5392)) {
3343 int ant, div_mode; 3516 int ant, div_mode;
@@ -3470,6 +3643,7 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
3470 if (!rt2x00_rt(rt2x00dev, RT3070) && 3643 if (!rt2x00_rt(rt2x00dev, RT3070) &&
3471 !rt2x00_rt(rt2x00dev, RT3071) && 3644 !rt2x00_rt(rt2x00dev, RT3071) &&
3472 !rt2x00_rt(rt2x00dev, RT3090) && 3645 !rt2x00_rt(rt2x00dev, RT3090) &&
3646 !rt2x00_rt(rt2x00dev, RT3290) &&
3473 !rt2x00_rt(rt2x00dev, RT3390) && 3647 !rt2x00_rt(rt2x00dev, RT3390) &&
3474 !rt2x00_rt(rt2x00dev, RT3572) && 3648 !rt2x00_rt(rt2x00dev, RT3572) &&
3475 !rt2x00_rt(rt2x00dev, RT5390) && 3649 !rt2x00_rt(rt2x00dev, RT5390) &&
@@ -3480,8 +3654,9 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
3480 /* 3654 /*
3481 * Init RF calibration. 3655 * Init RF calibration.
3482 */ 3656 */
3483 if (rt2x00_rt(rt2x00dev, RT5390) || 3657 if (rt2x00_rt(rt2x00dev, RT3290) ||
3484 rt2x00_rt(rt2x00dev, RT5392)) { 3658 rt2x00_rt(rt2x00dev, RT5390) ||
3659 rt2x00_rt(rt2x00dev, RT5392)) {
3485 rt2800_rfcsr_read(rt2x00dev, 2, &rfcsr); 3660 rt2800_rfcsr_read(rt2x00dev, 2, &rfcsr);
3486 rt2x00_set_field8(&rfcsr, RFCSR2_RESCAL_EN, 1); 3661 rt2x00_set_field8(&rfcsr, RFCSR2_RESCAL_EN, 1);
3487 rt2800_rfcsr_write(rt2x00dev, 2, rfcsr); 3662 rt2800_rfcsr_write(rt2x00dev, 2, rfcsr);
@@ -3519,6 +3694,53 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
3519 rt2800_rfcsr_write(rt2x00dev, 24, 0x16); 3694 rt2800_rfcsr_write(rt2x00dev, 24, 0x16);
3520 rt2800_rfcsr_write(rt2x00dev, 25, 0x01); 3695 rt2800_rfcsr_write(rt2x00dev, 25, 0x01);
3521 rt2800_rfcsr_write(rt2x00dev, 29, 0x1f); 3696 rt2800_rfcsr_write(rt2x00dev, 29, 0x1f);
3697 } else if (rt2x00_rt(rt2x00dev, RT3290)) {
3698 rt2800_rfcsr_write(rt2x00dev, 1, 0x0f);
3699 rt2800_rfcsr_write(rt2x00dev, 2, 0x80);
3700 rt2800_rfcsr_write(rt2x00dev, 3, 0x08);
3701 rt2800_rfcsr_write(rt2x00dev, 4, 0x00);
3702 rt2800_rfcsr_write(rt2x00dev, 6, 0xa0);
3703 rt2800_rfcsr_write(rt2x00dev, 8, 0xf3);
3704 rt2800_rfcsr_write(rt2x00dev, 9, 0x02);
3705 rt2800_rfcsr_write(rt2x00dev, 10, 0x53);
3706 rt2800_rfcsr_write(rt2x00dev, 11, 0x4a);
3707 rt2800_rfcsr_write(rt2x00dev, 12, 0x46);
3708 rt2800_rfcsr_write(rt2x00dev, 13, 0x9f);
3709 rt2800_rfcsr_write(rt2x00dev, 18, 0x02);
3710 rt2800_rfcsr_write(rt2x00dev, 22, 0x20);
3711 rt2800_rfcsr_write(rt2x00dev, 25, 0x83);
3712 rt2800_rfcsr_write(rt2x00dev, 26, 0x82);
3713 rt2800_rfcsr_write(rt2x00dev, 27, 0x09);
3714 rt2800_rfcsr_write(rt2x00dev, 29, 0x10);
3715 rt2800_rfcsr_write(rt2x00dev, 30, 0x10);
3716 rt2800_rfcsr_write(rt2x00dev, 31, 0x80);
3717 rt2800_rfcsr_write(rt2x00dev, 32, 0x80);
3718 rt2800_rfcsr_write(rt2x00dev, 33, 0x00);
3719 rt2800_rfcsr_write(rt2x00dev, 34, 0x05);
3720 rt2800_rfcsr_write(rt2x00dev, 35, 0x12);
3721 rt2800_rfcsr_write(rt2x00dev, 36, 0x00);
3722 rt2800_rfcsr_write(rt2x00dev, 38, 0x85);
3723 rt2800_rfcsr_write(rt2x00dev, 39, 0x1b);
3724 rt2800_rfcsr_write(rt2x00dev, 40, 0x0b);
3725 rt2800_rfcsr_write(rt2x00dev, 41, 0xbb);
3726 rt2800_rfcsr_write(rt2x00dev, 42, 0xd5);
3727 rt2800_rfcsr_write(rt2x00dev, 43, 0x7b);
3728 rt2800_rfcsr_write(rt2x00dev, 44, 0x0e);
3729 rt2800_rfcsr_write(rt2x00dev, 45, 0xa2);
3730 rt2800_rfcsr_write(rt2x00dev, 46, 0x73);
3731 rt2800_rfcsr_write(rt2x00dev, 47, 0x00);
3732 rt2800_rfcsr_write(rt2x00dev, 48, 0x10);
3733 rt2800_rfcsr_write(rt2x00dev, 49, 0x98);
3734 rt2800_rfcsr_write(rt2x00dev, 52, 0x38);
3735 rt2800_rfcsr_write(rt2x00dev, 53, 0x00);
3736 rt2800_rfcsr_write(rt2x00dev, 54, 0x78);
3737 rt2800_rfcsr_write(rt2x00dev, 55, 0x43);
3738 rt2800_rfcsr_write(rt2x00dev, 56, 0x02);
3739 rt2800_rfcsr_write(rt2x00dev, 57, 0x80);
3740 rt2800_rfcsr_write(rt2x00dev, 58, 0x7f);
3741 rt2800_rfcsr_write(rt2x00dev, 59, 0x09);
3742 rt2800_rfcsr_write(rt2x00dev, 60, 0x45);
3743 rt2800_rfcsr_write(rt2x00dev, 61, 0xc1);
3522 } else if (rt2x00_rt(rt2x00dev, RT3390)) { 3744 } else if (rt2x00_rt(rt2x00dev, RT3390)) {
3523 rt2800_rfcsr_write(rt2x00dev, 0, 0xa0); 3745 rt2800_rfcsr_write(rt2x00dev, 0, 0xa0);
3524 rt2800_rfcsr_write(rt2x00dev, 1, 0xe1); 3746 rt2800_rfcsr_write(rt2x00dev, 1, 0xe1);
@@ -3927,6 +4149,12 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
3927 rt2800_rfcsr_write(rt2x00dev, 27, rfcsr); 4149 rt2800_rfcsr_write(rt2x00dev, 27, rfcsr);
3928 } 4150 }
3929 4151
4152 if (rt2x00_rt(rt2x00dev, RT3290)) {
4153 rt2800_rfcsr_read(rt2x00dev, 29, &rfcsr);
4154 rt2x00_set_field8(&rfcsr, RFCSR29_RSSI_GAIN, 3);
4155 rt2800_rfcsr_write(rt2x00dev, 29, rfcsr);
4156 }
4157
3930 if (rt2x00_rt(rt2x00dev, RT5390) || 4158 if (rt2x00_rt(rt2x00dev, RT5390) ||
3931 rt2x00_rt(rt2x00dev, RT5392)) { 4159 rt2x00_rt(rt2x00dev, RT5392)) {
3932 rt2800_rfcsr_read(rt2x00dev, 38, &rfcsr); 4160 rt2800_rfcsr_read(rt2x00dev, 38, &rfcsr);
@@ -4033,9 +4261,14 @@ EXPORT_SYMBOL_GPL(rt2800_disable_radio);
4033int rt2800_efuse_detect(struct rt2x00_dev *rt2x00dev) 4261int rt2800_efuse_detect(struct rt2x00_dev *rt2x00dev)
4034{ 4262{
4035 u32 reg; 4263 u32 reg;
4264 u16 efuse_ctrl_reg;
4036 4265
4037 rt2800_register_read(rt2x00dev, EFUSE_CTRL, &reg); 4266 if (rt2x00_rt(rt2x00dev, RT3290))
4267 efuse_ctrl_reg = EFUSE_CTRL_3290;
4268 else
4269 efuse_ctrl_reg = EFUSE_CTRL;
4038 4270
4271 rt2800_register_read(rt2x00dev, efuse_ctrl_reg, &reg);
4039 return rt2x00_get_field32(reg, EFUSE_CTRL_PRESENT); 4272 return rt2x00_get_field32(reg, EFUSE_CTRL_PRESENT);
4040} 4273}
4041EXPORT_SYMBOL_GPL(rt2800_efuse_detect); 4274EXPORT_SYMBOL_GPL(rt2800_efuse_detect);
@@ -4043,27 +4276,44 @@ EXPORT_SYMBOL_GPL(rt2800_efuse_detect);
4043static void rt2800_efuse_read(struct rt2x00_dev *rt2x00dev, unsigned int i) 4276static void rt2800_efuse_read(struct rt2x00_dev *rt2x00dev, unsigned int i)
4044{ 4277{
4045 u32 reg; 4278 u32 reg;
4046 4279 u16 efuse_ctrl_reg;
4280 u16 efuse_data0_reg;
4281 u16 efuse_data1_reg;
4282 u16 efuse_data2_reg;
4283 u16 efuse_data3_reg;
4284
4285 if (rt2x00_rt(rt2x00dev, RT3290)) {
4286 efuse_ctrl_reg = EFUSE_CTRL_3290;
4287 efuse_data0_reg = EFUSE_DATA0_3290;
4288 efuse_data1_reg = EFUSE_DATA1_3290;
4289 efuse_data2_reg = EFUSE_DATA2_3290;
4290 efuse_data3_reg = EFUSE_DATA3_3290;
4291 } else {
4292 efuse_ctrl_reg = EFUSE_CTRL;
4293 efuse_data0_reg = EFUSE_DATA0;
4294 efuse_data1_reg = EFUSE_DATA1;
4295 efuse_data2_reg = EFUSE_DATA2;
4296 efuse_data3_reg = EFUSE_DATA3;
4297 }
4047 mutex_lock(&rt2x00dev->csr_mutex); 4298 mutex_lock(&rt2x00dev->csr_mutex);
4048 4299
4049 rt2800_register_read_lock(rt2x00dev, EFUSE_CTRL, &reg); 4300 rt2800_register_read_lock(rt2x00dev, efuse_ctrl_reg, &reg);
4050 rt2x00_set_field32(&reg, EFUSE_CTRL_ADDRESS_IN, i); 4301 rt2x00_set_field32(&reg, EFUSE_CTRL_ADDRESS_IN, i);
4051 rt2x00_set_field32(&reg, EFUSE_CTRL_MODE, 0); 4302 rt2x00_set_field32(&reg, EFUSE_CTRL_MODE, 0);
4052 rt2x00_set_field32(&reg, EFUSE_CTRL_KICK, 1); 4303 rt2x00_set_field32(&reg, EFUSE_CTRL_KICK, 1);
4053 rt2800_register_write_lock(rt2x00dev, EFUSE_CTRL, reg); 4304 rt2800_register_write_lock(rt2x00dev, efuse_ctrl_reg, reg);
4054 4305
4055 /* Wait until the EEPROM has been loaded */ 4306 /* Wait until the EEPROM has been loaded */
4056 rt2800_regbusy_read(rt2x00dev, EFUSE_CTRL, EFUSE_CTRL_KICK, &reg); 4307 rt2800_regbusy_read(rt2x00dev, efuse_ctrl_reg, EFUSE_CTRL_KICK, &reg);
4057
4058 /* Apparently the data is read from end to start */ 4308 /* Apparently the data is read from end to start */
4059 rt2800_register_read_lock(rt2x00dev, EFUSE_DATA3, &reg); 4309 rt2800_register_read_lock(rt2x00dev, efuse_data3_reg, &reg);
4060 /* The returned value is in CPU order, but eeprom is le */ 4310 /* The returned value is in CPU order, but eeprom is le */
4061 *(u32 *)&rt2x00dev->eeprom[i] = cpu_to_le32(reg); 4311 *(u32 *)&rt2x00dev->eeprom[i] = cpu_to_le32(reg);
4062 rt2800_register_read_lock(rt2x00dev, EFUSE_DATA2, &reg); 4312 rt2800_register_read_lock(rt2x00dev, efuse_data2_reg, &reg);
4063 *(u32 *)&rt2x00dev->eeprom[i + 2] = cpu_to_le32(reg); 4313 *(u32 *)&rt2x00dev->eeprom[i + 2] = cpu_to_le32(reg);
4064 rt2800_register_read_lock(rt2x00dev, EFUSE_DATA1, &reg); 4314 rt2800_register_read_lock(rt2x00dev, efuse_data1_reg, &reg);
4065 *(u32 *)&rt2x00dev->eeprom[i + 4] = cpu_to_le32(reg); 4315 *(u32 *)&rt2x00dev->eeprom[i + 4] = cpu_to_le32(reg);
4066 rt2800_register_read_lock(rt2x00dev, EFUSE_DATA0, &reg); 4316 rt2800_register_read_lock(rt2x00dev, efuse_data0_reg, &reg);
4067 *(u32 *)&rt2x00dev->eeprom[i + 6] = cpu_to_le32(reg); 4317 *(u32 *)&rt2x00dev->eeprom[i + 6] = cpu_to_le32(reg);
4068 4318
4069 mutex_unlock(&rt2x00dev->csr_mutex); 4319 mutex_unlock(&rt2x00dev->csr_mutex);
@@ -4225,9 +4475,14 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
4225 * RT28xx/RT30xx: defined in "EEPROM_NIC_CONF0_RF_TYPE" field 4475 * RT28xx/RT30xx: defined in "EEPROM_NIC_CONF0_RF_TYPE" field
4226 * RT53xx: defined in "EEPROM_CHIP_ID" field 4476 * RT53xx: defined in "EEPROM_CHIP_ID" field
4227 */ 4477 */
4228 rt2800_register_read(rt2x00dev, MAC_CSR0, &reg); 4478 if (rt2x00_rt(rt2x00dev, RT3290))
4229 if (rt2x00_get_field32(reg, MAC_CSR0_CHIPSET) == RT5390 || 4479 rt2800_register_read(rt2x00dev, MAC_CSR0_3290, &reg);
4230 rt2x00_get_field32(reg, MAC_CSR0_CHIPSET) == RT5392) 4480 else
4481 rt2800_register_read(rt2x00dev, MAC_CSR0, &reg);
4482
4483 if (rt2x00_get_field32(reg, MAC_CSR0_CHIPSET) == RT3290 ||
4484 rt2x00_get_field32(reg, MAC_CSR0_CHIPSET) == RT5390 ||
4485 rt2x00_get_field32(reg, MAC_CSR0_CHIPSET) == RT5392)
4231 rt2x00_eeprom_read(rt2x00dev, EEPROM_CHIP_ID, &value); 4486 rt2x00_eeprom_read(rt2x00dev, EEPROM_CHIP_ID, &value);
4232 else 4487 else
4233 value = rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RF_TYPE); 4488 value = rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RF_TYPE);
@@ -4242,6 +4497,7 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
4242 case RT3070: 4497 case RT3070:
4243 case RT3071: 4498 case RT3071:
4244 case RT3090: 4499 case RT3090:
4500 case RT3290:
4245 case RT3390: 4501 case RT3390:
4246 case RT3572: 4502 case RT3572:
4247 case RT5390: 4503 case RT5390:
@@ -4262,10 +4518,13 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
4262 case RF3021: 4518 case RF3021:
4263 case RF3022: 4519 case RF3022:
4264 case RF3052: 4520 case RF3052:
4521 case RF3290:
4265 case RF3320: 4522 case RF3320:
4523 case RF5360:
4266 case RF5370: 4524 case RF5370:
4267 case RF5372: 4525 case RF5372:
4268 case RF5390: 4526 case RF5390:
4527 case RF5392:
4269 break; 4528 break;
4270 default: 4529 default:
4271 ERROR(rt2x00dev, "Invalid RF chipset 0x%04x detected.\n", 4530 ERROR(rt2x00dev, "Invalid RF chipset 0x%04x detected.\n",
@@ -4576,10 +4835,13 @@ int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
4576 rt2x00_rf(rt2x00dev, RF2020) || 4835 rt2x00_rf(rt2x00dev, RF2020) ||
4577 rt2x00_rf(rt2x00dev, RF3021) || 4836 rt2x00_rf(rt2x00dev, RF3021) ||
4578 rt2x00_rf(rt2x00dev, RF3022) || 4837 rt2x00_rf(rt2x00dev, RF3022) ||
4838 rt2x00_rf(rt2x00dev, RF3290) ||
4579 rt2x00_rf(rt2x00dev, RF3320) || 4839 rt2x00_rf(rt2x00dev, RF3320) ||
4840 rt2x00_rf(rt2x00dev, RF5360) ||
4580 rt2x00_rf(rt2x00dev, RF5370) || 4841 rt2x00_rf(rt2x00dev, RF5370) ||
4581 rt2x00_rf(rt2x00dev, RF5372) || 4842 rt2x00_rf(rt2x00dev, RF5372) ||
4582 rt2x00_rf(rt2x00dev, RF5390)) { 4843 rt2x00_rf(rt2x00dev, RF5390) ||
4844 rt2x00_rf(rt2x00dev, RF5392)) {
4583 spec->num_channels = 14; 4845 spec->num_channels = 14;
4584 spec->channels = rf_vals_3x; 4846 spec->channels = rf_vals_3x;
4585 } else if (rt2x00_rf(rt2x00dev, RF3052)) { 4847 } else if (rt2x00_rf(rt2x00dev, RF3052)) {
@@ -4662,9 +4924,12 @@ int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
4662 case RF3022: 4924 case RF3022:
4663 case RF3320: 4925 case RF3320:
4664 case RF3052: 4926 case RF3052:
4927 case RF3290:
4928 case RF5360:
4665 case RF5370: 4929 case RF5370:
4666 case RF5372: 4930 case RF5372:
4667 case RF5390: 4931 case RF5390:
4932 case RF5392:
4668 __set_bit(CAPABILITY_VCO_RECALIBRATION, &rt2x00dev->cap_flags); 4933 __set_bit(CAPABILITY_VCO_RECALIBRATION, &rt2x00dev->cap_flags);
4669 break; 4934 break;
4670 } 4935 }
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c
index cad25bfebd7..dd436125fe3 100644
--- a/drivers/net/wireless/rt2x00/rt2800pci.c
+++ b/drivers/net/wireless/rt2x00/rt2800pci.c
@@ -280,7 +280,13 @@ static void rt2800pci_stop_queue(struct data_queue *queue)
280 */ 280 */
281static char *rt2800pci_get_firmware_name(struct rt2x00_dev *rt2x00dev) 281static char *rt2800pci_get_firmware_name(struct rt2x00_dev *rt2x00dev)
282{ 282{
283 return FIRMWARE_RT2860; 283 /*
284 * Chip rt3290 use specific 4KB firmware named rt3290.bin.
285 */
286 if (rt2x00_rt(rt2x00dev, RT3290))
287 return FIRMWARE_RT3290;
288 else
289 return FIRMWARE_RT2860;
284} 290}
285 291
286static int rt2800pci_write_firmware(struct rt2x00_dev *rt2x00dev, 292static int rt2800pci_write_firmware(struct rt2x00_dev *rt2x00dev,
@@ -974,6 +980,66 @@ static int rt2800pci_validate_eeprom(struct rt2x00_dev *rt2x00dev)
974 return rt2800_validate_eeprom(rt2x00dev); 980 return rt2800_validate_eeprom(rt2x00dev);
975} 981}
976 982
983static int rt2800_enable_wlan_rt3290(struct rt2x00_dev *rt2x00dev)
984{
985 u32 reg;
986 int i, count;
987
988 rt2800_register_read(rt2x00dev, WLAN_FUN_CTRL, &reg);
989 if ((rt2x00_get_field32(reg, WLAN_EN) == 1))
990 return 0;
991
992 rt2x00_set_field32(&reg, WLAN_GPIO_OUT_OE_BIT_ALL, 0xff);
993 rt2x00_set_field32(&reg, FRC_WL_ANT_SET, 1);
994 rt2x00_set_field32(&reg, WLAN_CLK_EN, 0);
995 rt2x00_set_field32(&reg, WLAN_EN, 1);
996 rt2800_register_write(rt2x00dev, WLAN_FUN_CTRL, reg);
997
998 udelay(REGISTER_BUSY_DELAY);
999
1000 count = 0;
1001 do {
1002 /*
1003 * Check PLL_LD & XTAL_RDY.
1004 */
1005 for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
1006 rt2800_register_read(rt2x00dev, CMB_CTRL, &reg);
1007 if ((rt2x00_get_field32(reg, PLL_LD) == 1) &&
1008 (rt2x00_get_field32(reg, XTAL_RDY) == 1))
1009 break;
1010 udelay(REGISTER_BUSY_DELAY);
1011 }
1012
1013 if (i >= REGISTER_BUSY_COUNT) {
1014
1015 if (count >= 10)
1016 return -EIO;
1017
1018 rt2800_register_write(rt2x00dev, 0x58, 0x018);
1019 udelay(REGISTER_BUSY_DELAY);
1020 rt2800_register_write(rt2x00dev, 0x58, 0x418);
1021 udelay(REGISTER_BUSY_DELAY);
1022 rt2800_register_write(rt2x00dev, 0x58, 0x618);
1023 udelay(REGISTER_BUSY_DELAY);
1024 count++;
1025 } else {
1026 count = 0;
1027 }
1028
1029 rt2800_register_read(rt2x00dev, WLAN_FUN_CTRL, &reg);
1030 rt2x00_set_field32(&reg, PCIE_APP0_CLK_REQ, 0);
1031 rt2x00_set_field32(&reg, WLAN_CLK_EN, 1);
1032 rt2x00_set_field32(&reg, WLAN_RESET, 1);
1033 rt2800_register_write(rt2x00dev, WLAN_FUN_CTRL, reg);
1034 udelay(10);
1035 rt2x00_set_field32(&reg, WLAN_RESET, 0);
1036 rt2800_register_write(rt2x00dev, WLAN_FUN_CTRL, reg);
1037 udelay(10);
1038 rt2800_register_write(rt2x00dev, INT_SOURCE_CSR, 0x7fffffff);
1039 } while (count != 0);
1040
1041 return 0;
1042}
977static int rt2800pci_probe_hw(struct rt2x00_dev *rt2x00dev) 1043static int rt2800pci_probe_hw(struct rt2x00_dev *rt2x00dev)
978{ 1044{
979 int retval; 1045 int retval;
@@ -997,6 +1063,17 @@ static int rt2800pci_probe_hw(struct rt2x00_dev *rt2x00dev)
997 return retval; 1063 return retval;
998 1064
999 /* 1065 /*
1066 * In probe phase call rt2800_enable_wlan_rt3290 to enable wlan
1067 * clk for rt3290. That avoid the MCU fail in start phase.
1068 */
1069 if (rt2x00_rt(rt2x00dev, RT3290)) {
1070 retval = rt2800_enable_wlan_rt3290(rt2x00dev);
1071
1072 if (retval)
1073 return retval;
1074 }
1075
1076 /*
1000 * This device has multiple filters for control frames 1077 * This device has multiple filters for control frames
1001 * and has a separate filter for PS Poll frames. 1078 * and has a separate filter for PS Poll frames.
1002 */ 1079 */
@@ -1175,6 +1252,9 @@ static DEFINE_PCI_DEVICE_TABLE(rt2800pci_device_table) = {
1175 { PCI_DEVICE(0x1432, 0x7768) }, 1252 { PCI_DEVICE(0x1432, 0x7768) },
1176 { PCI_DEVICE(0x1462, 0x891a) }, 1253 { PCI_DEVICE(0x1462, 0x891a) },
1177 { PCI_DEVICE(0x1a3b, 0x1059) }, 1254 { PCI_DEVICE(0x1a3b, 0x1059) },
1255#ifdef CONFIG_RT2800PCI_RT3290
1256 { PCI_DEVICE(0x1814, 0x3290) },
1257#endif
1178#ifdef CONFIG_RT2800PCI_RT33XX 1258#ifdef CONFIG_RT2800PCI_RT33XX
1179 { PCI_DEVICE(0x1814, 0x3390) }, 1259 { PCI_DEVICE(0x1814, 0x3390) },
1180#endif 1260#endif
@@ -1188,6 +1268,7 @@ static DEFINE_PCI_DEVICE_TABLE(rt2800pci_device_table) = {
1188 { PCI_DEVICE(0x1814, 0x3593) }, 1268 { PCI_DEVICE(0x1814, 0x3593) },
1189#endif 1269#endif
1190#ifdef CONFIG_RT2800PCI_RT53XX 1270#ifdef CONFIG_RT2800PCI_RT53XX
1271 { PCI_DEVICE(0x1814, 0x5360) },
1191 { PCI_DEVICE(0x1814, 0x5362) }, 1272 { PCI_DEVICE(0x1814, 0x5362) },
1192 { PCI_DEVICE(0x1814, 0x5390) }, 1273 { PCI_DEVICE(0x1814, 0x5390) },
1193 { PCI_DEVICE(0x1814, 0x5392) }, 1274 { PCI_DEVICE(0x1814, 0x5392) },
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.h b/drivers/net/wireless/rt2x00/rt2800pci.h
index 70e050d904c..ab22a087c50 100644
--- a/drivers/net/wireless/rt2x00/rt2800pci.h
+++ b/drivers/net/wireless/rt2x00/rt2800pci.h
@@ -47,6 +47,7 @@
47 * 8051 firmware image. 47 * 8051 firmware image.
48 */ 48 */
49#define FIRMWARE_RT2860 "rt2860.bin" 49#define FIRMWARE_RT2860 "rt2860.bin"
50#define FIRMWARE_RT3290 "rt3290.bin"
50#define FIRMWARE_IMAGE_BASE 0x2000 51#define FIRMWARE_IMAGE_BASE 0x2000
51 52
52/* 53/*
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index bf78317a6ad..6cf336595e2 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -971,6 +971,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
971 { USB_DEVICE(0x0411, 0x015d) }, 971 { USB_DEVICE(0x0411, 0x015d) },
972 { USB_DEVICE(0x0411, 0x016f) }, 972 { USB_DEVICE(0x0411, 0x016f) },
973 { USB_DEVICE(0x0411, 0x01a2) }, 973 { USB_DEVICE(0x0411, 0x01a2) },
974 { USB_DEVICE(0x0411, 0x01ee) },
974 /* Corega */ 975 /* Corega */
975 { USB_DEVICE(0x07aa, 0x002f) }, 976 { USB_DEVICE(0x07aa, 0x002f) },
976 { USB_DEVICE(0x07aa, 0x003c) }, 977 { USB_DEVICE(0x07aa, 0x003c) },
@@ -1137,6 +1138,8 @@ static struct usb_device_id rt2800usb_device_table[] = {
1137#ifdef CONFIG_RT2800USB_RT33XX 1138#ifdef CONFIG_RT2800USB_RT33XX
1138 /* Belkin */ 1139 /* Belkin */
1139 { USB_DEVICE(0x050d, 0x945b) }, 1140 { USB_DEVICE(0x050d, 0x945b) },
1141 /* D-Link */
1142 { USB_DEVICE(0x2001, 0x3c17) },
1140 /* Panasonic */ 1143 /* Panasonic */
1141 { USB_DEVICE(0x083a, 0xb511) }, 1144 { USB_DEVICE(0x083a, 0xb511) },
1142 /* Philips */ 1145 /* Philips */
@@ -1237,7 +1240,6 @@ static struct usb_device_id rt2800usb_device_table[] = {
1237 /* D-Link */ 1240 /* D-Link */
1238 { USB_DEVICE(0x07d1, 0x3c0b) }, 1241 { USB_DEVICE(0x07d1, 0x3c0b) },
1239 { USB_DEVICE(0x07d1, 0x3c17) }, 1242 { USB_DEVICE(0x07d1, 0x3c17) },
1240 { USB_DEVICE(0x2001, 0x3c17) },
1241 /* Encore */ 1243 /* Encore */
1242 { USB_DEVICE(0x203d, 0x14a1) }, 1244 { USB_DEVICE(0x203d, 0x14a1) },
1243 /* Gemtek */ 1245 /* Gemtek */
diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
index 8f754025b06..8afb546c2b2 100644
--- a/drivers/net/wireless/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/rt2x00/rt2x00.h
@@ -187,6 +187,7 @@ struct rt2x00_chip {
187#define RT3070 0x3070 187#define RT3070 0x3070
188#define RT3071 0x3071 188#define RT3071 0x3071
189#define RT3090 0x3090 /* 2.4GHz PCIe */ 189#define RT3090 0x3090 /* 2.4GHz PCIe */
190#define RT3290 0x3290
190#define RT3390 0x3390 191#define RT3390 0x3390
191#define RT3572 0x3572 192#define RT3572 0x3572
192#define RT3593 0x3593 193#define RT3593 0x3593
diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
index e5404e57625..a6b88bd4a1a 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
@@ -1161,6 +1161,8 @@ int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev)
1161 BIT(NL80211_IFTYPE_MESH_POINT) | 1161 BIT(NL80211_IFTYPE_MESH_POINT) |
1162 BIT(NL80211_IFTYPE_WDS); 1162 BIT(NL80211_IFTYPE_WDS);
1163 1163
1164 rt2x00dev->hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
1165
1164 /* 1166 /*
1165 * Initialize work. 1167 * Initialize work.
1166 */ 1168 */
diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c
index dd24b2663b5..4ff26c2159b 100644
--- a/drivers/net/wireless/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/rt2x00/rt2x00mac.c
@@ -506,9 +506,19 @@ int rt2x00mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
506 506
507 if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags)) 507 if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
508 return 0; 508 return 0;
509 else if (!test_bit(CAPABILITY_HW_CRYPTO, &rt2x00dev->cap_flags)) 509
510 if (!test_bit(CAPABILITY_HW_CRYPTO, &rt2x00dev->cap_flags))
511 return -EOPNOTSUPP;
512
513 /*
514 * To support IBSS RSN, don't program group keys in IBSS, the
515 * hardware will then not attempt to decrypt the frames.
516 */
517 if (vif->type == NL80211_IFTYPE_ADHOC &&
518 !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
510 return -EOPNOTSUPP; 519 return -EOPNOTSUPP;
511 else if (key->keylen > 32) 520
521 if (key->keylen > 32)
512 return -ENOSPC; 522 return -ENOSPC;
513 523
514 memset(&crypto, 0, sizeof(crypto)); 524 memset(&crypto, 0, sizeof(crypto));
diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.c b/drivers/net/wireless/rt2x00/rt2x00pci.c
index 0a4653a92ca..a0c8caef3b0 100644
--- a/drivers/net/wireless/rt2x00/rt2x00pci.c
+++ b/drivers/net/wireless/rt2x00/rt2x00pci.c
@@ -256,6 +256,7 @@ int rt2x00pci_probe(struct pci_dev *pci_dev, const struct rt2x00_ops *ops)
256 struct ieee80211_hw *hw; 256 struct ieee80211_hw *hw;
257 struct rt2x00_dev *rt2x00dev; 257 struct rt2x00_dev *rt2x00dev;
258 int retval; 258 int retval;
259 u16 chip;
259 260
260 retval = pci_enable_device(pci_dev); 261 retval = pci_enable_device(pci_dev);
261 if (retval) { 262 if (retval) {
@@ -305,6 +306,14 @@ int rt2x00pci_probe(struct pci_dev *pci_dev, const struct rt2x00_ops *ops)
305 if (retval) 306 if (retval)
306 goto exit_free_device; 307 goto exit_free_device;
307 308
309 /*
310 * Because rt3290 chip use different efuse offset to read efuse data.
311 * So before read efuse it need to indicate it is the
312 * rt3290 or not.
313 */
314 pci_read_config_word(pci_dev, PCI_DEVICE_ID, &chip);
315 rt2x00dev->chip.rt = chip;
316
308 retval = rt2x00lib_probe_dev(rt2x00dev); 317 retval = rt2x00lib_probe_dev(rt2x00dev);
309 if (retval) 318 if (retval)
310 goto exit_free_reg; 319 goto exit_free_reg;
diff --git a/drivers/net/wireless/rtl818x/rtl8180/dev.c b/drivers/net/wireless/rtl818x/rtl8180/dev.c
index 2bebcb71a1e..3b505395d86 100644
--- a/drivers/net/wireless/rtl818x/rtl8180/dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8180/dev.c
@@ -47,6 +47,8 @@ static DEFINE_PCI_DEVICE_TABLE(rtl8180_table) = {
47 { PCI_DEVICE(0x1799, 0x6001) }, 47 { PCI_DEVICE(0x1799, 0x6001) },
48 { PCI_DEVICE(0x1799, 0x6020) }, 48 { PCI_DEVICE(0x1799, 0x6020) },
49 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x3300) }, 49 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x3300) },
50 { PCI_DEVICE(0x1186, 0x3301) },
51 { PCI_DEVICE(0x1432, 0x7106) },
50 { } 52 { }
51}; 53};
52 54
diff --git a/drivers/net/wireless/rtlwifi/base.c b/drivers/net/wireless/rtlwifi/base.c
index f4c852c6749..58e1f7bb4df 100644
--- a/drivers/net/wireless/rtlwifi/base.c
+++ b/drivers/net/wireless/rtlwifi/base.c
@@ -907,7 +907,7 @@ bool rtl_action_proc(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
907 struct ieee80211_hdr *hdr = rtl_get_hdr(skb); 907 struct ieee80211_hdr *hdr = rtl_get_hdr(skb);
908 struct rtl_priv *rtlpriv = rtl_priv(hw); 908 struct rtl_priv *rtlpriv = rtl_priv(hw);
909 __le16 fc = hdr->frame_control; 909 __le16 fc = hdr->frame_control;
910 u8 *act = (u8 *) (((u8 *) skb->data + MAC80211_3ADDR_LEN)); 910 u8 *act = (u8 *)skb->data + MAC80211_3ADDR_LEN;
911 u8 category; 911 u8 category;
912 912
913 if (!ieee80211_is_action(fc)) 913 if (!ieee80211_is_action(fc))
diff --git a/drivers/net/wireless/rtlwifi/cam.c b/drivers/net/wireless/rtlwifi/cam.c
index 3d8cc4a0c86..5b4b4d4eaf9 100644
--- a/drivers/net/wireless/rtlwifi/cam.c
+++ b/drivers/net/wireless/rtlwifi/cam.c
@@ -128,7 +128,7 @@ u8 rtl_cam_add_one_entry(struct ieee80211_hw *hw, u8 *mac_addr,
128 u32 us_config; 128 u32 us_config;
129 struct rtl_priv *rtlpriv = rtl_priv(hw); 129 struct rtl_priv *rtlpriv = rtl_priv(hw);
130 130
131 RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, 131 RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
132 "EntryNo:%x, ulKeyId=%x, ulEncAlg=%x, ulUseDK=%x MacAddr %pM\n", 132 "EntryNo:%x, ulKeyId=%x, ulEncAlg=%x, ulUseDK=%x MacAddr %pM\n",
133 ul_entry_idx, ul_key_id, ul_enc_alg, 133 ul_entry_idx, ul_key_id, ul_enc_alg,
134 ul_default_key, mac_addr); 134 ul_default_key, mac_addr);
@@ -146,7 +146,7 @@ u8 rtl_cam_add_one_entry(struct ieee80211_hw *hw, u8 *mac_addr,
146 } 146 }
147 147
148 rtl_cam_program_entry(hw, ul_entry_idx, mac_addr, 148 rtl_cam_program_entry(hw, ul_entry_idx, mac_addr,
149 (u8 *) key_content, us_config); 149 key_content, us_config);
150 150
151 RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, "<===\n"); 151 RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, "<===\n");
152 152
@@ -342,7 +342,8 @@ void rtl_cam_del_entry(struct ieee80211_hw *hw, u8 *sta_addr)
342 /* Remove from HW Security CAM */ 342 /* Remove from HW Security CAM */
343 memset(rtlpriv->sec.hwsec_cam_sta_addr[i], 0, ETH_ALEN); 343 memset(rtlpriv->sec.hwsec_cam_sta_addr[i], 0, ETH_ALEN);
344 rtlpriv->sec.hwsec_cam_bitmap &= ~(BIT(0) << i); 344 rtlpriv->sec.hwsec_cam_bitmap &= ~(BIT(0) << i);
345 pr_info("&&&&&&&&&del entry %d\n", i); 345 RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
346 "del CAM entry %d\n", i);
346 } 347 }
347 } 348 }
348 return; 349 return;
diff --git a/drivers/net/wireless/rtlwifi/core.c b/drivers/net/wireless/rtlwifi/core.c
index 278e9f957e0..a18ad2a9893 100644
--- a/drivers/net/wireless/rtlwifi/core.c
+++ b/drivers/net/wireless/rtlwifi/core.c
@@ -680,7 +680,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
680 680
681 mac->short_preamble = bss_conf->use_short_preamble; 681 mac->short_preamble = bss_conf->use_short_preamble;
682 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_ACK_PREAMBLE, 682 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_ACK_PREAMBLE,
683 (u8 *) (&mac->short_preamble)); 683 &mac->short_preamble);
684 } 684 }
685 685
686 if (changed & BSS_CHANGED_ERP_SLOT) { 686 if (changed & BSS_CHANGED_ERP_SLOT) {
@@ -693,7 +693,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
693 mac->slot_time = RTL_SLOT_TIME_20; 693 mac->slot_time = RTL_SLOT_TIME_20;
694 694
695 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SLOT_TIME, 695 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SLOT_TIME,
696 (u8 *) (&mac->slot_time)); 696 &mac->slot_time);
697 } 697 }
698 698
699 if (changed & BSS_CHANGED_HT) { 699 if (changed & BSS_CHANGED_HT) {
@@ -713,7 +713,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
713 rcu_read_unlock(); 713 rcu_read_unlock();
714 714
715 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SHORTGI_DENSITY, 715 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SHORTGI_DENSITY,
716 (u8 *) (&mac->max_mss_density)); 716 &mac->max_mss_density);
717 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AMPDU_FACTOR, 717 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AMPDU_FACTOR,
718 &mac->current_ampdu_factor); 718 &mac->current_ampdu_factor);
719 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AMPDU_MIN_SPACE, 719 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AMPDU_MIN_SPACE,
@@ -801,7 +801,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
801 u8 mstatus = RT_MEDIA_CONNECT; 801 u8 mstatus = RT_MEDIA_CONNECT;
802 rtlpriv->cfg->ops->set_hw_reg(hw, 802 rtlpriv->cfg->ops->set_hw_reg(hw,
803 HW_VAR_H2C_FW_JOINBSSRPT, 803 HW_VAR_H2C_FW_JOINBSSRPT,
804 (u8 *) (&mstatus)); 804 &mstatus);
805 ppsc->report_linked = true; 805 ppsc->report_linked = true;
806 } 806 }
807 } else { 807 } else {
@@ -809,7 +809,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
809 u8 mstatus = RT_MEDIA_DISCONNECT; 809 u8 mstatus = RT_MEDIA_DISCONNECT;
810 rtlpriv->cfg->ops->set_hw_reg(hw, 810 rtlpriv->cfg->ops->set_hw_reg(hw,
811 HW_VAR_H2C_FW_JOINBSSRPT, 811 HW_VAR_H2C_FW_JOINBSSRPT,
812 (u8 *)(&mstatus)); 812 &mstatus);
813 ppsc->report_linked = false; 813 ppsc->report_linked = false;
814 } 814 }
815 } 815 }
@@ -836,7 +836,7 @@ static void rtl_op_set_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
836 u8 bibss = (mac->opmode == NL80211_IFTYPE_ADHOC) ? 1 : 0; 836 u8 bibss = (mac->opmode == NL80211_IFTYPE_ADHOC) ? 1 : 0;
837 837
838 mac->tsf = tsf; 838 mac->tsf = tsf;
839 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_CORRECT_TSF, (u8 *) (&bibss)); 839 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_CORRECT_TSF, &bibss);
840} 840}
841 841
842static void rtl_op_reset_tsf(struct ieee80211_hw *hw, 842static void rtl_op_reset_tsf(struct ieee80211_hw *hw,
@@ -845,7 +845,7 @@ static void rtl_op_reset_tsf(struct ieee80211_hw *hw,
845 struct rtl_priv *rtlpriv = rtl_priv(hw); 845 struct rtl_priv *rtlpriv = rtl_priv(hw);
846 u8 tmp = 0; 846 u8 tmp = 0;
847 847
848 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_DUAL_TSF_RST, (u8 *) (&tmp)); 848 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_DUAL_TSF_RST, &tmp);
849} 849}
850 850
851static void rtl_op_sta_notify(struct ieee80211_hw *hw, 851static void rtl_op_sta_notify(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/rtlwifi/efuse.c b/drivers/net/wireless/rtlwifi/efuse.c
index 1f143800a8d..8e2f9afb125 100644
--- a/drivers/net/wireless/rtlwifi/efuse.c
+++ b/drivers/net/wireless/rtlwifi/efuse.c
@@ -352,7 +352,7 @@ void read_efuse(struct ieee80211_hw *hw, u16 _offset, u16 _size_byte, u8 *pbuf)
352 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_EFUSE_BYTES, 352 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_EFUSE_BYTES,
353 (u8 *)&efuse_utilized); 353 (u8 *)&efuse_utilized);
354 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_EFUSE_USAGE, 354 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_EFUSE_USAGE,
355 (u8 *)&efuse_usage); 355 &efuse_usage);
356done: 356done:
357 for (i = 0; i < EFUSE_MAX_WORD_UNIT; i++) 357 for (i = 0; i < EFUSE_MAX_WORD_UNIT; i++)
358 kfree(efuse_word[i]); 358 kfree(efuse_word[i]);
@@ -409,7 +409,7 @@ void efuse_shadow_read(struct ieee80211_hw *hw, u8 type,
409 else if (type == 2) 409 else if (type == 2)
410 efuse_shadow_read_2byte(hw, offset, (u16 *) value); 410 efuse_shadow_read_2byte(hw, offset, (u16 *) value);
411 else if (type == 4) 411 else if (type == 4)
412 efuse_shadow_read_4byte(hw, offset, (u32 *) value); 412 efuse_shadow_read_4byte(hw, offset, value);
413 413
414} 414}
415 415
diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
index 2062ea1d7c8..31138fdad1f 100644
--- a/drivers/net/wireless/rtlwifi/pci.c
+++ b/drivers/net/wireless/rtlwifi/pci.c
@@ -756,10 +756,10 @@ done:
756 if (index == rtlpci->rxringcount - 1) 756 if (index == rtlpci->rxringcount - 1)
757 rtlpriv->cfg->ops->set_desc((u8 *)pdesc, false, 757 rtlpriv->cfg->ops->set_desc((u8 *)pdesc, false,
758 HW_DESC_RXERO, 758 HW_DESC_RXERO,
759 (u8 *)&tmp_one); 759 &tmp_one);
760 760
761 rtlpriv->cfg->ops->set_desc((u8 *)pdesc, false, HW_DESC_RXOWN, 761 rtlpriv->cfg->ops->set_desc((u8 *)pdesc, false, HW_DESC_RXOWN,
762 (u8 *)&tmp_one); 762 &tmp_one);
763 763
764 index = (index + 1) % rtlpci->rxringcount; 764 index = (index + 1) % rtlpci->rxringcount;
765 } 765 }
@@ -934,7 +934,7 @@ static void _rtl_pci_prepare_bcn_tasklet(struct ieee80211_hw *hw)
934 __skb_queue_tail(&ring->queue, pskb); 934 __skb_queue_tail(&ring->queue, pskb);
935 935
936 rtlpriv->cfg->ops->set_desc((u8 *) pdesc, true, HW_DESC_OWN, 936 rtlpriv->cfg->ops->set_desc((u8 *) pdesc, true, HW_DESC_OWN,
937 (u8 *)&temp_one); 937 &temp_one);
938 938
939 return; 939 return;
940} 940}
@@ -1126,11 +1126,11 @@ static int _rtl_pci_init_rx_ring(struct ieee80211_hw *hw)
1126 rxbuffersize); 1126 rxbuffersize);
1127 rtlpriv->cfg->ops->set_desc((u8 *) entry, false, 1127 rtlpriv->cfg->ops->set_desc((u8 *) entry, false,
1128 HW_DESC_RXOWN, 1128 HW_DESC_RXOWN,
1129 (u8 *)&tmp_one); 1129 &tmp_one);
1130 } 1130 }
1131 1131
1132 rtlpriv->cfg->ops->set_desc((u8 *) entry, false, 1132 rtlpriv->cfg->ops->set_desc((u8 *) entry, false,
1133 HW_DESC_RXERO, (u8 *)&tmp_one); 1133 HW_DESC_RXERO, &tmp_one);
1134 } 1134 }
1135 return 0; 1135 return 0;
1136} 1136}
@@ -1263,7 +1263,7 @@ int rtl_pci_reset_trx_ring(struct ieee80211_hw *hw)
1263 rtlpriv->cfg->ops->set_desc((u8 *) entry, 1263 rtlpriv->cfg->ops->set_desc((u8 *) entry,
1264 false, 1264 false,
1265 HW_DESC_RXOWN, 1265 HW_DESC_RXOWN,
1266 (u8 *)&tmp_one); 1266 &tmp_one);
1267 } 1267 }
1268 rtlpci->rx_ring[rx_queue_idx].idx = 0; 1268 rtlpci->rx_ring[rx_queue_idx].idx = 0;
1269 } 1269 }
@@ -1273,17 +1273,18 @@ int rtl_pci_reset_trx_ring(struct ieee80211_hw *hw)
1273 *after reset, release previous pending packet, 1273 *after reset, release previous pending packet,
1274 *and force the tx idx to the first one 1274 *and force the tx idx to the first one
1275 */ 1275 */
1276 spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
1277 for (i = 0; i < RTL_PCI_MAX_TX_QUEUE_COUNT; i++) { 1276 for (i = 0; i < RTL_PCI_MAX_TX_QUEUE_COUNT; i++) {
1278 if (rtlpci->tx_ring[i].desc) { 1277 if (rtlpci->tx_ring[i].desc) {
1279 struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[i]; 1278 struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[i];
1280 1279
1281 while (skb_queue_len(&ring->queue)) { 1280 while (skb_queue_len(&ring->queue)) {
1282 struct rtl_tx_desc *entry = 1281 struct rtl_tx_desc *entry;
1283 &ring->desc[ring->idx]; 1282 struct sk_buff *skb;
1284 struct sk_buff *skb =
1285 __skb_dequeue(&ring->queue);
1286 1283
1284 spin_lock_irqsave(&rtlpriv->locks.irq_th_lock,
1285 flags);
1286 entry = &ring->desc[ring->idx];
1287 skb = __skb_dequeue(&ring->queue);
1287 pci_unmap_single(rtlpci->pdev, 1288 pci_unmap_single(rtlpci->pdev,
1288 rtlpriv->cfg->ops-> 1289 rtlpriv->cfg->ops->
1289 get_desc((u8 *) 1290 get_desc((u8 *)
@@ -1291,15 +1292,15 @@ int rtl_pci_reset_trx_ring(struct ieee80211_hw *hw)
1291 true, 1292 true,
1292 HW_DESC_TXBUFF_ADDR), 1293 HW_DESC_TXBUFF_ADDR),
1293 skb->len, PCI_DMA_TODEVICE); 1294 skb->len, PCI_DMA_TODEVICE);
1294 kfree_skb(skb);
1295 ring->idx = (ring->idx + 1) % ring->entries; 1295 ring->idx = (ring->idx + 1) % ring->entries;
1296 spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock,
1297 flags);
1298 kfree_skb(skb);
1296 } 1299 }
1297 ring->idx = 0; 1300 ring->idx = 0;
1298 } 1301 }
1299 } 1302 }
1300 1303
1301 spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
1302
1303 return 0; 1304 return 0;
1304} 1305}
1305 1306
@@ -1422,7 +1423,7 @@ static int rtl_pci_tx(struct ieee80211_hw *hw, struct sk_buff *skb,
1422 __skb_queue_tail(&ring->queue, skb); 1423 __skb_queue_tail(&ring->queue, skb);
1423 1424
1424 rtlpriv->cfg->ops->set_desc((u8 *)pdesc, true, 1425 rtlpriv->cfg->ops->set_desc((u8 *)pdesc, true,
1425 HW_DESC_OWN, (u8 *)&temp_one); 1426 HW_DESC_OWN, &temp_one);
1426 1427
1427 1428
1428 if ((ring->entries - skb_queue_len(&ring->queue)) < 2 && 1429 if ((ring->entries - skb_queue_len(&ring->queue)) < 2 &&
diff --git a/drivers/net/wireless/rtlwifi/ps.c b/drivers/net/wireless/rtlwifi/ps.c
index 5ae26647f34..13ad33e8557 100644
--- a/drivers/net/wireless/rtlwifi/ps.c
+++ b/drivers/net/wireless/rtlwifi/ps.c
@@ -333,10 +333,10 @@ static void rtl_lps_set_psmode(struct ieee80211_hw *hw, u8 rt_psmode)
333 rpwm_val = 0x0C; /* RF on */ 333 rpwm_val = 0x0C; /* RF on */
334 fw_pwrmode = FW_PS_ACTIVE_MODE; 334 fw_pwrmode = FW_PS_ACTIVE_MODE;
335 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SET_RPWM, 335 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SET_RPWM,
336 (u8 *) (&rpwm_val)); 336 &rpwm_val);
337 rtlpriv->cfg->ops->set_hw_reg(hw, 337 rtlpriv->cfg->ops->set_hw_reg(hw,
338 HW_VAR_H2C_FW_PWRMODE, 338 HW_VAR_H2C_FW_PWRMODE,
339 (u8 *) (&fw_pwrmode)); 339 &fw_pwrmode);
340 fw_current_inps = false; 340 fw_current_inps = false;
341 341
342 rtlpriv->cfg->ops->set_hw_reg(hw, 342 rtlpriv->cfg->ops->set_hw_reg(hw,
@@ -356,11 +356,11 @@ static void rtl_lps_set_psmode(struct ieee80211_hw *hw, u8 rt_psmode)
356 (u8 *) (&fw_current_inps)); 356 (u8 *) (&fw_current_inps));
357 rtlpriv->cfg->ops->set_hw_reg(hw, 357 rtlpriv->cfg->ops->set_hw_reg(hw,
358 HW_VAR_H2C_FW_PWRMODE, 358 HW_VAR_H2C_FW_PWRMODE,
359 (u8 *) (&ppsc->fwctrl_psmode)); 359 &ppsc->fwctrl_psmode);
360 360
361 rtlpriv->cfg->ops->set_hw_reg(hw, 361 rtlpriv->cfg->ops->set_hw_reg(hw,
362 HW_VAR_SET_RPWM, 362 HW_VAR_SET_RPWM,
363 (u8 *) (&rpwm_val)); 363 &rpwm_val);
364 } else { 364 } else {
365 /* Reset the power save related parameters. */ 365 /* Reset the power save related parameters. */
366 ppsc->dot11_psmode = EACTIVE; 366 ppsc->dot11_psmode = EACTIVE;
@@ -446,7 +446,7 @@ void rtl_swlps_beacon(struct ieee80211_hw *hw, void *data, unsigned int len)
446{ 446{
447 struct rtl_priv *rtlpriv = rtl_priv(hw); 447 struct rtl_priv *rtlpriv = rtl_priv(hw);
448 struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); 448 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
449 struct ieee80211_hdr *hdr = (void *) data; 449 struct ieee80211_hdr *hdr = data;
450 struct ieee80211_tim_ie *tim_ie; 450 struct ieee80211_tim_ie *tim_ie;
451 u8 *tim; 451 u8 *tim;
452 u8 tim_len; 452 u8 tim_len;
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
index f7f48c7ac85..a45afda8259 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
@@ -656,9 +656,8 @@ static void rtl92c_dm_check_edca_turbo(struct ieee80211_hw *hw)
656 } else { 656 } else {
657 if (rtlpriv->dm.current_turbo_edca) { 657 if (rtlpriv->dm.current_turbo_edca) {
658 u8 tmp = AC0_BE; 658 u8 tmp = AC0_BE;
659 rtlpriv->cfg->ops->set_hw_reg(hw, 659 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AC_PARAM,
660 HW_VAR_AC_PARAM, 660 &tmp);
661 (u8 *) (&tmp));
662 rtlpriv->dm.current_turbo_edca = false; 661 rtlpriv->dm.current_turbo_edca = false;
663 } 662 }
664 } 663 }
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c
index 692c8ef5ee8..44febfde949 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c
@@ -168,7 +168,7 @@ static void _rtl92c_write_fw(struct ieee80211_hw *hw,
168{ 168{
169 struct rtl_priv *rtlpriv = rtl_priv(hw); 169 struct rtl_priv *rtlpriv = rtl_priv(hw);
170 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); 170 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
171 u8 *bufferPtr = (u8 *) buffer; 171 u8 *bufferPtr = buffer;
172 172
173 RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, "FW size is %d bytes\n", size); 173 RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, "FW size is %d bytes\n", size);
174 174
@@ -262,7 +262,7 @@ int rtl92c_download_fw(struct ieee80211_hw *hw)
262 return 1; 262 return 1;
263 263
264 pfwheader = (struct rtl92c_firmware_header *)rtlhal->pfirmware; 264 pfwheader = (struct rtl92c_firmware_header *)rtlhal->pfirmware;
265 pfwdata = (u8 *) rtlhal->pfirmware; 265 pfwdata = rtlhal->pfirmware;
266 fwsize = rtlhal->fwsize; 266 fwsize = rtlhal->fwsize;
267 267
268 if (IS_FW_HEADER_EXIST(pfwheader)) { 268 if (IS_FW_HEADER_EXIST(pfwheader)) {
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
index 5c4d9bc040f..bd0da7ef290 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
@@ -214,13 +214,13 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
214 for (e_aci = 0; e_aci < AC_MAX; e_aci++) { 214 for (e_aci = 0; e_aci < AC_MAX; e_aci++) {
215 rtlpriv->cfg->ops->set_hw_reg(hw, 215 rtlpriv->cfg->ops->set_hw_reg(hw,
216 HW_VAR_AC_PARAM, 216 HW_VAR_AC_PARAM,
217 (u8 *) (&e_aci)); 217 &e_aci);
218 } 218 }
219 break; 219 break;
220 } 220 }
221 case HW_VAR_ACK_PREAMBLE:{ 221 case HW_VAR_ACK_PREAMBLE:{
222 u8 reg_tmp; 222 u8 reg_tmp;
223 u8 short_preamble = (bool) (*(u8 *) val); 223 u8 short_preamble = (bool)*val;
224 reg_tmp = (mac->cur_40_prime_sc) << 5; 224 reg_tmp = (mac->cur_40_prime_sc) << 5;
225 if (short_preamble) 225 if (short_preamble)
226 reg_tmp |= 0x80; 226 reg_tmp |= 0x80;
@@ -232,7 +232,7 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
232 u8 min_spacing_to_set; 232 u8 min_spacing_to_set;
233 u8 sec_min_space; 233 u8 sec_min_space;
234 234
235 min_spacing_to_set = *((u8 *) val); 235 min_spacing_to_set = *val;
236 if (min_spacing_to_set <= 7) { 236 if (min_spacing_to_set <= 7) {
237 sec_min_space = 0; 237 sec_min_space = 0;
238 238
@@ -257,7 +257,7 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
257 case HW_VAR_SHORTGI_DENSITY:{ 257 case HW_VAR_SHORTGI_DENSITY:{
258 u8 density_to_set; 258 u8 density_to_set;
259 259
260 density_to_set = *((u8 *) val); 260 density_to_set = *val;
261 mac->min_space_cfg |= (density_to_set << 3); 261 mac->min_space_cfg |= (density_to_set << 3);
262 262
263 RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD, 263 RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
@@ -284,7 +284,7 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
284 else 284 else
285 p_regtoset = regtoset_normal; 285 p_regtoset = regtoset_normal;
286 286
287 factor_toset = *((u8 *) val); 287 factor_toset = *(val);
288 if (factor_toset <= 3) { 288 if (factor_toset <= 3) {
289 factor_toset = (1 << (factor_toset + 2)); 289 factor_toset = (1 << (factor_toset + 2));
290 if (factor_toset > 0xf) 290 if (factor_toset > 0xf)
@@ -316,17 +316,17 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
316 break; 316 break;
317 } 317 }
318 case HW_VAR_AC_PARAM:{ 318 case HW_VAR_AC_PARAM:{
319 u8 e_aci = *((u8 *) val); 319 u8 e_aci = *(val);
320 rtl92c_dm_init_edca_turbo(hw); 320 rtl92c_dm_init_edca_turbo(hw);
321 321
322 if (rtlpci->acm_method != eAcmWay2_SW) 322 if (rtlpci->acm_method != eAcmWay2_SW)
323 rtlpriv->cfg->ops->set_hw_reg(hw, 323 rtlpriv->cfg->ops->set_hw_reg(hw,
324 HW_VAR_ACM_CTRL, 324 HW_VAR_ACM_CTRL,
325 (u8 *) (&e_aci)); 325 (&e_aci));
326 break; 326 break;
327 } 327 }
328 case HW_VAR_ACM_CTRL:{ 328 case HW_VAR_ACM_CTRL:{
329 u8 e_aci = *((u8 *) val); 329 u8 e_aci = *(val);
330 union aci_aifsn *p_aci_aifsn = 330 union aci_aifsn *p_aci_aifsn =
331 (union aci_aifsn *)(&(mac->ac[0].aifs)); 331 (union aci_aifsn *)(&(mac->ac[0].aifs));
332 u8 acm = p_aci_aifsn->f.acm; 332 u8 acm = p_aci_aifsn->f.acm;
@@ -382,7 +382,7 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
382 break; 382 break;
383 } 383 }
384 case HW_VAR_RETRY_LIMIT:{ 384 case HW_VAR_RETRY_LIMIT:{
385 u8 retry_limit = ((u8 *) (val))[0]; 385 u8 retry_limit = val[0];
386 386
387 rtl_write_word(rtlpriv, REG_RL, 387 rtl_write_word(rtlpriv, REG_RL,
388 retry_limit << RETRY_LIMIT_SHORT_SHIFT | 388 retry_limit << RETRY_LIMIT_SHORT_SHIFT |
@@ -396,13 +396,13 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
396 rtlefuse->efuse_usedbytes = *((u16 *) val); 396 rtlefuse->efuse_usedbytes = *((u16 *) val);
397 break; 397 break;
398 case HW_VAR_EFUSE_USAGE: 398 case HW_VAR_EFUSE_USAGE:
399 rtlefuse->efuse_usedpercentage = *((u8 *) val); 399 rtlefuse->efuse_usedpercentage = *val;
400 break; 400 break;
401 case HW_VAR_IO_CMD: 401 case HW_VAR_IO_CMD:
402 rtl92c_phy_set_io_cmd(hw, (*(enum io_type *)val)); 402 rtl92c_phy_set_io_cmd(hw, (*(enum io_type *)val));
403 break; 403 break;
404 case HW_VAR_WPA_CONFIG: 404 case HW_VAR_WPA_CONFIG:
405 rtl_write_byte(rtlpriv, REG_SECCFG, *((u8 *) val)); 405 rtl_write_byte(rtlpriv, REG_SECCFG, *val);
406 break; 406 break;
407 case HW_VAR_SET_RPWM:{ 407 case HW_VAR_SET_RPWM:{
408 u8 rpwm_val; 408 u8 rpwm_val;
@@ -411,31 +411,30 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
411 udelay(1); 411 udelay(1);
412 412
413 if (rpwm_val & BIT(7)) { 413 if (rpwm_val & BIT(7)) {
414 rtl_write_byte(rtlpriv, REG_PCIE_HRPWM, 414 rtl_write_byte(rtlpriv, REG_PCIE_HRPWM, *val);
415 (*(u8 *) val));
416 } else { 415 } else {
417 rtl_write_byte(rtlpriv, REG_PCIE_HRPWM, 416 rtl_write_byte(rtlpriv, REG_PCIE_HRPWM,
418 ((*(u8 *) val) | BIT(7))); 417 *val | BIT(7));
419 } 418 }
420 419
421 break; 420 break;
422 } 421 }
423 case HW_VAR_H2C_FW_PWRMODE:{ 422 case HW_VAR_H2C_FW_PWRMODE:{
424 u8 psmode = (*(u8 *) val); 423 u8 psmode = *val;
425 424
426 if ((psmode != FW_PS_ACTIVE_MODE) && 425 if ((psmode != FW_PS_ACTIVE_MODE) &&
427 (!IS_92C_SERIAL(rtlhal->version))) { 426 (!IS_92C_SERIAL(rtlhal->version))) {
428 rtl92c_dm_rf_saving(hw, true); 427 rtl92c_dm_rf_saving(hw, true);
429 } 428 }
430 429
431 rtl92c_set_fw_pwrmode_cmd(hw, (*(u8 *) val)); 430 rtl92c_set_fw_pwrmode_cmd(hw, *val);
432 break; 431 break;
433 } 432 }
434 case HW_VAR_FW_PSMODE_STATUS: 433 case HW_VAR_FW_PSMODE_STATUS:
435 ppsc->fw_current_inpsmode = *((bool *) val); 434 ppsc->fw_current_inpsmode = *((bool *) val);
436 break; 435 break;
437 case HW_VAR_H2C_FW_JOINBSSRPT:{ 436 case HW_VAR_H2C_FW_JOINBSSRPT:{
438 u8 mstatus = (*(u8 *) val); 437 u8 mstatus = *val;
439 u8 tmp_regcr, tmp_reg422; 438 u8 tmp_regcr, tmp_reg422;
440 bool recover = false; 439 bool recover = false;
441 440
@@ -472,7 +471,7 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
472 rtl_write_byte(rtlpriv, REG_CR + 1, 471 rtl_write_byte(rtlpriv, REG_CR + 1,
473 (tmp_regcr & ~(BIT(0)))); 472 (tmp_regcr & ~(BIT(0))));
474 } 473 }
475 rtl92c_set_fw_joinbss_report_cmd(hw, (*(u8 *) val)); 474 rtl92c_set_fw_joinbss_report_cmd(hw, *val);
476 475
477 break; 476 break;
478 } 477 }
@@ -486,7 +485,7 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
486 break; 485 break;
487 } 486 }
488 case HW_VAR_CORRECT_TSF:{ 487 case HW_VAR_CORRECT_TSF:{
489 u8 btype_ibss = ((u8 *) (val))[0]; 488 u8 btype_ibss = val[0];
490 489
491 if (btype_ibss) 490 if (btype_ibss)
492 _rtl92ce_stop_tx_beacon(hw); 491 _rtl92ce_stop_tx_beacon(hw);
@@ -1589,10 +1588,10 @@ static void _rtl92ce_read_adapter_info(struct ieee80211_hw *hw)
1589 rtlefuse->autoload_failflag, 1588 rtlefuse->autoload_failflag,
1590 hwinfo); 1589 hwinfo);
1591 1590
1592 rtlefuse->eeprom_channelplan = *(u8 *)&hwinfo[EEPROM_CHANNELPLAN]; 1591 rtlefuse->eeprom_channelplan = *&hwinfo[EEPROM_CHANNELPLAN];
1593 rtlefuse->eeprom_version = *(u16 *)&hwinfo[EEPROM_VERSION]; 1592 rtlefuse->eeprom_version = *(u16 *)&hwinfo[EEPROM_VERSION];
1594 rtlefuse->txpwr_fromeprom = true; 1593 rtlefuse->txpwr_fromeprom = true;
1595 rtlefuse->eeprom_oemid = *(u8 *)&hwinfo[EEPROM_CUSTOMER_ID]; 1594 rtlefuse->eeprom_oemid = *&hwinfo[EEPROM_CUSTOMER_ID];
1596 1595
1597 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, 1596 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
1598 "EEPROM Customer ID: 0x%2x\n", rtlefuse->eeprom_oemid); 1597 "EEPROM Customer ID: 0x%2x\n", rtlefuse->eeprom_oemid);
@@ -1939,7 +1938,7 @@ void rtl92ce_update_channel_access_setting(struct ieee80211_hw *hw)
1939 u16 sifs_timer; 1938 u16 sifs_timer;
1940 1939
1941 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SLOT_TIME, 1940 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SLOT_TIME,
1942 (u8 *)&mac->slot_time); 1941 &mac->slot_time);
1943 if (!mac->ht_enable) 1942 if (!mac->ht_enable)
1944 sifs_timer = 0x0a0a; 1943 sifs_timer = 0x0a0a;
1945 else 1944 else
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
index 3af874e6959..52166640f16 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
@@ -605,7 +605,7 @@ void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw,
605 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); 605 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
606 bool defaultadapter = true; 606 bool defaultadapter = true;
607 struct ieee80211_sta *sta; 607 struct ieee80211_sta *sta;
608 u8 *pdesc = (u8 *) pdesc_tx; 608 u8 *pdesc = pdesc_tx;
609 u16 seq_number; 609 u16 seq_number;
610 __le16 fc = hdr->frame_control; 610 __le16 fc = hdr->frame_control;
611 u8 fw_qsel = _rtl92ce_map_hwqueue_to_fwqueue(skb, hw_queue); 611 u8 fw_qsel = _rtl92ce_map_hwqueue_to_fwqueue(skb, hw_queue);
@@ -806,7 +806,7 @@ void rtl92ce_tx_fill_cmddesc(struct ieee80211_hw *hw,
806 806
807 SET_TX_DESC_OWN(pdesc, 1); 807 SET_TX_DESC_OWN(pdesc, 1);
808 808
809 SET_TX_DESC_PKT_SIZE((u8 *) pdesc, (u16) (skb->len)); 809 SET_TX_DESC_PKT_SIZE(pdesc, (u16) (skb->len));
810 810
811 SET_TX_DESC_FIRST_SEG(pdesc, 1); 811 SET_TX_DESC_FIRST_SEG(pdesc, 1);
812 SET_TX_DESC_LAST_SEG(pdesc, 1); 812 SET_TX_DESC_LAST_SEG(pdesc, 1);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
index 0c74d4f2eeb..4bbb711a36c 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
@@ -381,11 +381,11 @@ static void _rtl92cu_read_adapter_info(struct ieee80211_hw *hw)
381 rtlefuse->eeprom_did = le16_to_cpu(*(__le16 *)&hwinfo[EEPROM_DID]); 381 rtlefuse->eeprom_did = le16_to_cpu(*(__le16 *)&hwinfo[EEPROM_DID]);
382 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, " VID = 0x%02x PID = 0x%02x\n", 382 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, " VID = 0x%02x PID = 0x%02x\n",
383 rtlefuse->eeprom_vid, rtlefuse->eeprom_did); 383 rtlefuse->eeprom_vid, rtlefuse->eeprom_did);
384 rtlefuse->eeprom_channelplan = *(u8 *)&hwinfo[EEPROM_CHANNELPLAN]; 384 rtlefuse->eeprom_channelplan = hwinfo[EEPROM_CHANNELPLAN];
385 rtlefuse->eeprom_version = 385 rtlefuse->eeprom_version =
386 le16_to_cpu(*(__le16 *)&hwinfo[EEPROM_VERSION]); 386 le16_to_cpu(*(__le16 *)&hwinfo[EEPROM_VERSION]);
387 rtlefuse->txpwr_fromeprom = true; 387 rtlefuse->txpwr_fromeprom = true;
388 rtlefuse->eeprom_oemid = *(u8 *)&hwinfo[EEPROM_CUSTOMER_ID]; 388 rtlefuse->eeprom_oemid = hwinfo[EEPROM_CUSTOMER_ID];
389 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "EEPROM Customer ID: 0x%2x\n", 389 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "EEPROM Customer ID: 0x%2x\n",
390 rtlefuse->eeprom_oemid); 390 rtlefuse->eeprom_oemid);
391 if (rtlhal->oem_id == RT_CID_DEFAULT) { 391 if (rtlhal->oem_id == RT_CID_DEFAULT) {
@@ -1660,7 +1660,7 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
1660 for (e_aci = 0; e_aci < AC_MAX; e_aci++) 1660 for (e_aci = 0; e_aci < AC_MAX; e_aci++)
1661 rtlpriv->cfg->ops->set_hw_reg(hw, 1661 rtlpriv->cfg->ops->set_hw_reg(hw,
1662 HW_VAR_AC_PARAM, 1662 HW_VAR_AC_PARAM,
1663 (u8 *)(&e_aci)); 1663 &e_aci);
1664 } else { 1664 } else {
1665 u8 sifstime = 0; 1665 u8 sifstime = 0;
1666 u8 u1bAIFS; 1666 u8 u1bAIFS;
@@ -1685,7 +1685,7 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
1685 } 1685 }
1686 case HW_VAR_ACK_PREAMBLE:{ 1686 case HW_VAR_ACK_PREAMBLE:{
1687 u8 reg_tmp; 1687 u8 reg_tmp;
1688 u8 short_preamble = (bool) (*(u8 *) val); 1688 u8 short_preamble = (bool)*val;
1689 reg_tmp = 0; 1689 reg_tmp = 0;
1690 if (short_preamble) 1690 if (short_preamble)
1691 reg_tmp |= 0x80; 1691 reg_tmp |= 0x80;
@@ -1696,7 +1696,7 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
1696 u8 min_spacing_to_set; 1696 u8 min_spacing_to_set;
1697 u8 sec_min_space; 1697 u8 sec_min_space;
1698 1698
1699 min_spacing_to_set = *((u8 *) val); 1699 min_spacing_to_set = *val;
1700 if (min_spacing_to_set <= 7) { 1700 if (min_spacing_to_set <= 7) {
1701 switch (rtlpriv->sec.pairwise_enc_algorithm) { 1701 switch (rtlpriv->sec.pairwise_enc_algorithm) {
1702 case NO_ENCRYPTION: 1702 case NO_ENCRYPTION:
@@ -1729,7 +1729,7 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
1729 case HW_VAR_SHORTGI_DENSITY:{ 1729 case HW_VAR_SHORTGI_DENSITY:{
1730 u8 density_to_set; 1730 u8 density_to_set;
1731 1731
1732 density_to_set = *((u8 *) val); 1732 density_to_set = *val;
1733 density_to_set &= 0x1f; 1733 density_to_set &= 0x1f;
1734 mac->min_space_cfg &= 0x07; 1734 mac->min_space_cfg &= 0x07;
1735 mac->min_space_cfg |= (density_to_set << 3); 1735 mac->min_space_cfg |= (density_to_set << 3);
@@ -1747,7 +1747,7 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
1747 u8 index = 0; 1747 u8 index = 0;
1748 1748
1749 p_regtoset = regtoset_normal; 1749 p_regtoset = regtoset_normal;
1750 factor_toset = *((u8 *) val); 1750 factor_toset = *val;
1751 if (factor_toset <= 3) { 1751 if (factor_toset <= 3) {
1752 factor_toset = (1 << (factor_toset + 2)); 1752 factor_toset = (1 << (factor_toset + 2));
1753 if (factor_toset > 0xf) 1753 if (factor_toset > 0xf)
@@ -1774,7 +1774,7 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
1774 break; 1774 break;
1775 } 1775 }
1776 case HW_VAR_AC_PARAM:{ 1776 case HW_VAR_AC_PARAM:{
1777 u8 e_aci = *((u8 *) val); 1777 u8 e_aci = *val;
1778 u32 u4b_ac_param; 1778 u32 u4b_ac_param;
1779 u16 cw_min = le16_to_cpu(mac->ac[e_aci].cw_min); 1779 u16 cw_min = le16_to_cpu(mac->ac[e_aci].cw_min);
1780 u16 cw_max = le16_to_cpu(mac->ac[e_aci].cw_max); 1780 u16 cw_max = le16_to_cpu(mac->ac[e_aci].cw_max);
@@ -1814,11 +1814,11 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
1814 } 1814 }
1815 if (rtlusb->acm_method != eAcmWay2_SW) 1815 if (rtlusb->acm_method != eAcmWay2_SW)
1816 rtlpriv->cfg->ops->set_hw_reg(hw, 1816 rtlpriv->cfg->ops->set_hw_reg(hw,
1817 HW_VAR_ACM_CTRL, (u8 *)(&e_aci)); 1817 HW_VAR_ACM_CTRL, &e_aci);
1818 break; 1818 break;
1819 } 1819 }
1820 case HW_VAR_ACM_CTRL:{ 1820 case HW_VAR_ACM_CTRL:{
1821 u8 e_aci = *((u8 *) val); 1821 u8 e_aci = *val;
1822 union aci_aifsn *p_aci_aifsn = (union aci_aifsn *) 1822 union aci_aifsn *p_aci_aifsn = (union aci_aifsn *)
1823 (&(mac->ac[0].aifs)); 1823 (&(mac->ac[0].aifs));
1824 u8 acm = p_aci_aifsn->f.acm; 1824 u8 acm = p_aci_aifsn->f.acm;
@@ -1874,7 +1874,7 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
1874 break; 1874 break;
1875 } 1875 }
1876 case HW_VAR_RETRY_LIMIT:{ 1876 case HW_VAR_RETRY_LIMIT:{
1877 u8 retry_limit = ((u8 *) (val))[0]; 1877 u8 retry_limit = val[0];
1878 1878
1879 rtl_write_word(rtlpriv, REG_RL, 1879 rtl_write_word(rtlpriv, REG_RL,
1880 retry_limit << RETRY_LIMIT_SHORT_SHIFT | 1880 retry_limit << RETRY_LIMIT_SHORT_SHIFT |
@@ -1891,39 +1891,38 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
1891 rtlefuse->efuse_usedbytes = *((u16 *) val); 1891 rtlefuse->efuse_usedbytes = *((u16 *) val);
1892 break; 1892 break;
1893 case HW_VAR_EFUSE_USAGE: 1893 case HW_VAR_EFUSE_USAGE:
1894 rtlefuse->efuse_usedpercentage = *((u8 *) val); 1894 rtlefuse->efuse_usedpercentage = *val;
1895 break; 1895 break;
1896 case HW_VAR_IO_CMD: 1896 case HW_VAR_IO_CMD:
1897 rtl92c_phy_set_io_cmd(hw, (*(enum io_type *)val)); 1897 rtl92c_phy_set_io_cmd(hw, (*(enum io_type *)val));
1898 break; 1898 break;
1899 case HW_VAR_WPA_CONFIG: 1899 case HW_VAR_WPA_CONFIG:
1900 rtl_write_byte(rtlpriv, REG_SECCFG, *((u8 *) val)); 1900 rtl_write_byte(rtlpriv, REG_SECCFG, *val);
1901 break; 1901 break;
1902 case HW_VAR_SET_RPWM:{ 1902 case HW_VAR_SET_RPWM:{
1903 u8 rpwm_val = rtl_read_byte(rtlpriv, REG_USB_HRPWM); 1903 u8 rpwm_val = rtl_read_byte(rtlpriv, REG_USB_HRPWM);
1904 1904
1905 if (rpwm_val & BIT(7)) 1905 if (rpwm_val & BIT(7))
1906 rtl_write_byte(rtlpriv, REG_USB_HRPWM, 1906 rtl_write_byte(rtlpriv, REG_USB_HRPWM, *val);
1907 (*(u8 *)val));
1908 else 1907 else
1909 rtl_write_byte(rtlpriv, REG_USB_HRPWM, 1908 rtl_write_byte(rtlpriv, REG_USB_HRPWM,
1910 ((*(u8 *)val) | BIT(7))); 1909 *val | BIT(7));
1911 break; 1910 break;
1912 } 1911 }
1913 case HW_VAR_H2C_FW_PWRMODE:{ 1912 case HW_VAR_H2C_FW_PWRMODE:{
1914 u8 psmode = (*(u8 *) val); 1913 u8 psmode = *val;
1915 1914
1916 if ((psmode != FW_PS_ACTIVE_MODE) && 1915 if ((psmode != FW_PS_ACTIVE_MODE) &&
1917 (!IS_92C_SERIAL(rtlhal->version))) 1916 (!IS_92C_SERIAL(rtlhal->version)))
1918 rtl92c_dm_rf_saving(hw, true); 1917 rtl92c_dm_rf_saving(hw, true);
1919 rtl92c_set_fw_pwrmode_cmd(hw, (*(u8 *) val)); 1918 rtl92c_set_fw_pwrmode_cmd(hw, (*val));
1920 break; 1919 break;
1921 } 1920 }
1922 case HW_VAR_FW_PSMODE_STATUS: 1921 case HW_VAR_FW_PSMODE_STATUS:
1923 ppsc->fw_current_inpsmode = *((bool *) val); 1922 ppsc->fw_current_inpsmode = *((bool *) val);
1924 break; 1923 break;
1925 case HW_VAR_H2C_FW_JOINBSSRPT:{ 1924 case HW_VAR_H2C_FW_JOINBSSRPT:{
1926 u8 mstatus = (*(u8 *) val); 1925 u8 mstatus = *val;
1927 u8 tmp_reg422; 1926 u8 tmp_reg422;
1928 bool recover = false; 1927 bool recover = false;
1929 1928
@@ -1948,7 +1947,7 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
1948 tmp_reg422 | BIT(6)); 1947 tmp_reg422 | BIT(6));
1949 rtl_write_byte(rtlpriv, REG_CR + 1, 0x02); 1948 rtl_write_byte(rtlpriv, REG_CR + 1, 0x02);
1950 } 1949 }
1951 rtl92c_set_fw_joinbss_report_cmd(hw, (*(u8 *) val)); 1950 rtl92c_set_fw_joinbss_report_cmd(hw, (*val));
1952 break; 1951 break;
1953 } 1952 }
1954 case HW_VAR_AID:{ 1953 case HW_VAR_AID:{
@@ -1961,7 +1960,7 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
1961 break; 1960 break;
1962 } 1961 }
1963 case HW_VAR_CORRECT_TSF:{ 1962 case HW_VAR_CORRECT_TSF:{
1964 u8 btype_ibss = ((u8 *) (val))[0]; 1963 u8 btype_ibss = val[0];
1965 1964
1966 if (btype_ibss) 1965 if (btype_ibss)
1967 _rtl92cu_stop_tx_beacon(hw); 1966 _rtl92cu_stop_tx_beacon(hw);
@@ -2184,7 +2183,7 @@ void rtl92cu_update_channel_access_setting(struct ieee80211_hw *hw)
2184 u16 sifs_timer; 2183 u16 sifs_timer;
2185 2184
2186 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SLOT_TIME, 2185 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SLOT_TIME,
2187 (u8 *)&mac->slot_time); 2186 &mac->slot_time);
2188 if (!mac->ht_enable) 2187 if (!mac->ht_enable)
2189 sifs_timer = 0x0a0a; 2188 sifs_timer = 0x0a0a;
2190 else 2189 else
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
index 21bc827c5fa..2e6eb356a93 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
@@ -668,7 +668,7 @@ void rtl92cu_tx_fill_cmddesc(struct ieee80211_hw *hw,
668 SET_TX_DESC_RATE_ID(pdesc, 7); 668 SET_TX_DESC_RATE_ID(pdesc, 7);
669 SET_TX_DESC_MACID(pdesc, 0); 669 SET_TX_DESC_MACID(pdesc, 0);
670 SET_TX_DESC_OWN(pdesc, 1); 670 SET_TX_DESC_OWN(pdesc, 1);
671 SET_TX_DESC_PKT_SIZE((u8 *) pdesc, (u16) (skb->len)); 671 SET_TX_DESC_PKT_SIZE(pdesc, (u16)skb->len);
672 SET_TX_DESC_FIRST_SEG(pdesc, 1); 672 SET_TX_DESC_FIRST_SEG(pdesc, 1);
673 SET_TX_DESC_LAST_SEG(pdesc, 1); 673 SET_TX_DESC_LAST_SEG(pdesc, 1);
674 SET_TX_DESC_OFFSET(pdesc, 0x20); 674 SET_TX_DESC_OFFSET(pdesc, 0x20);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/dm.c b/drivers/net/wireless/rtlwifi/rtl8192de/dm.c
index a7d63a84551..c0201ed69dd 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/dm.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/dm.c
@@ -696,7 +696,7 @@ static void rtl92d_dm_check_edca_turbo(struct ieee80211_hw *hw)
696 if (rtlpriv->dm.current_turbo_edca) { 696 if (rtlpriv->dm.current_turbo_edca) {
697 u8 tmp = AC0_BE; 697 u8 tmp = AC0_BE;
698 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AC_PARAM, 698 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AC_PARAM,
699 (u8 *) (&tmp)); 699 &tmp);
700 rtlpriv->dm.current_turbo_edca = false; 700 rtlpriv->dm.current_turbo_edca = false;
701 } 701 }
702 } 702 }
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/fw.c b/drivers/net/wireless/rtlwifi/rtl8192de/fw.c
index f548a8d0068..895ae6c1f35 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/fw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/fw.c
@@ -120,7 +120,7 @@ static void _rtl92d_write_fw(struct ieee80211_hw *hw,
120{ 120{
121 struct rtl_priv *rtlpriv = rtl_priv(hw); 121 struct rtl_priv *rtlpriv = rtl_priv(hw);
122 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); 122 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
123 u8 *bufferPtr = (u8 *) buffer; 123 u8 *bufferPtr = buffer;
124 u32 pagenums, remainSize; 124 u32 pagenums, remainSize;
125 u32 page, offset; 125 u32 page, offset;
126 126
@@ -256,8 +256,8 @@ int rtl92d_download_fw(struct ieee80211_hw *hw)
256 if (rtlpriv->max_fw_size == 0 || !rtlhal->pfirmware) 256 if (rtlpriv->max_fw_size == 0 || !rtlhal->pfirmware)
257 return 1; 257 return 1;
258 fwsize = rtlhal->fwsize; 258 fwsize = rtlhal->fwsize;
259 pfwheader = (u8 *) rtlhal->pfirmware; 259 pfwheader = rtlhal->pfirmware;
260 pfwdata = (u8 *) rtlhal->pfirmware; 260 pfwdata = rtlhal->pfirmware;
261 rtlhal->fw_version = (u16) GET_FIRMWARE_HDR_VERSION(pfwheader); 261 rtlhal->fw_version = (u16) GET_FIRMWARE_HDR_VERSION(pfwheader);
262 rtlhal->fw_subversion = (u16) GET_FIRMWARE_HDR_SUB_VER(pfwheader); 262 rtlhal->fw_subversion = (u16) GET_FIRMWARE_HDR_SUB_VER(pfwheader);
263 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, 263 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/hw.c b/drivers/net/wireless/rtlwifi/rtl8192de/hw.c
index b338d526c42..f4051f4f039 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/hw.c
@@ -235,12 +235,12 @@ void rtl92de_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
235 for (e_aci = 0; e_aci < AC_MAX; e_aci++) 235 for (e_aci = 0; e_aci < AC_MAX; e_aci++)
236 rtlpriv->cfg->ops->set_hw_reg(hw, 236 rtlpriv->cfg->ops->set_hw_reg(hw,
237 HW_VAR_AC_PARAM, 237 HW_VAR_AC_PARAM,
238 (u8 *) (&e_aci)); 238 (&e_aci));
239 break; 239 break;
240 } 240 }
241 case HW_VAR_ACK_PREAMBLE: { 241 case HW_VAR_ACK_PREAMBLE: {
242 u8 reg_tmp; 242 u8 reg_tmp;
243 u8 short_preamble = (bool) (*(u8 *) val); 243 u8 short_preamble = (bool) (*val);
244 244
245 reg_tmp = (mac->cur_40_prime_sc) << 5; 245 reg_tmp = (mac->cur_40_prime_sc) << 5;
246 if (short_preamble) 246 if (short_preamble)
@@ -252,7 +252,7 @@ void rtl92de_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
252 u8 min_spacing_to_set; 252 u8 min_spacing_to_set;
253 u8 sec_min_space; 253 u8 sec_min_space;
254 254
255 min_spacing_to_set = *((u8 *) val); 255 min_spacing_to_set = *val;
256 if (min_spacing_to_set <= 7) { 256 if (min_spacing_to_set <= 7) {
257 sec_min_space = 0; 257 sec_min_space = 0;
258 if (min_spacing_to_set < sec_min_space) 258 if (min_spacing_to_set < sec_min_space)
@@ -271,7 +271,7 @@ void rtl92de_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
271 case HW_VAR_SHORTGI_DENSITY: { 271 case HW_VAR_SHORTGI_DENSITY: {
272 u8 density_to_set; 272 u8 density_to_set;
273 273
274 density_to_set = *((u8 *) val); 274 density_to_set = *val;
275 mac->min_space_cfg = rtlpriv->rtlhal.minspace_cfg; 275 mac->min_space_cfg = rtlpriv->rtlhal.minspace_cfg;
276 mac->min_space_cfg |= (density_to_set << 3); 276 mac->min_space_cfg |= (density_to_set << 3);
277 RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD, 277 RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
@@ -293,7 +293,7 @@ void rtl92de_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
293 regtoSet = 0x66626641; 293 regtoSet = 0x66626641;
294 else 294 else
295 regtoSet = 0xb972a841; 295 regtoSet = 0xb972a841;
296 factor_toset = *((u8 *) val); 296 factor_toset = *val;
297 if (factor_toset <= 3) { 297 if (factor_toset <= 3) {
298 factor_toset = (1 << (factor_toset + 2)); 298 factor_toset = (1 << (factor_toset + 2));
299 if (factor_toset > 0xf) 299 if (factor_toset > 0xf)
@@ -316,15 +316,15 @@ void rtl92de_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
316 break; 316 break;
317 } 317 }
318 case HW_VAR_AC_PARAM: { 318 case HW_VAR_AC_PARAM: {
319 u8 e_aci = *((u8 *) val); 319 u8 e_aci = *val;
320 rtl92d_dm_init_edca_turbo(hw); 320 rtl92d_dm_init_edca_turbo(hw);
321 if (rtlpci->acm_method != eAcmWay2_SW) 321 if (rtlpci->acm_method != eAcmWay2_SW)
322 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_ACM_CTRL, 322 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_ACM_CTRL,
323 (u8 *) (&e_aci)); 323 &e_aci);
324 break; 324 break;
325 } 325 }
326 case HW_VAR_ACM_CTRL: { 326 case HW_VAR_ACM_CTRL: {
327 u8 e_aci = *((u8 *) val); 327 u8 e_aci = *val;
328 union aci_aifsn *p_aci_aifsn = 328 union aci_aifsn *p_aci_aifsn =
329 (union aci_aifsn *)(&(mac->ac[0].aifs)); 329 (union aci_aifsn *)(&(mac->ac[0].aifs));
330 u8 acm = p_aci_aifsn->f.acm; 330 u8 acm = p_aci_aifsn->f.acm;
@@ -376,7 +376,7 @@ void rtl92de_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
376 rtlpci->receive_config = ((u32 *) (val))[0]; 376 rtlpci->receive_config = ((u32 *) (val))[0];
377 break; 377 break;
378 case HW_VAR_RETRY_LIMIT: { 378 case HW_VAR_RETRY_LIMIT: {
379 u8 retry_limit = ((u8 *) (val))[0]; 379 u8 retry_limit = val[0];
380 380
381 rtl_write_word(rtlpriv, REG_RL, 381 rtl_write_word(rtlpriv, REG_RL,
382 retry_limit << RETRY_LIMIT_SHORT_SHIFT | 382 retry_limit << RETRY_LIMIT_SHORT_SHIFT |
@@ -390,16 +390,16 @@ void rtl92de_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
390 rtlefuse->efuse_usedbytes = *((u16 *) val); 390 rtlefuse->efuse_usedbytes = *((u16 *) val);
391 break; 391 break;
392 case HW_VAR_EFUSE_USAGE: 392 case HW_VAR_EFUSE_USAGE:
393 rtlefuse->efuse_usedpercentage = *((u8 *) val); 393 rtlefuse->efuse_usedpercentage = *val;
394 break; 394 break;
395 case HW_VAR_IO_CMD: 395 case HW_VAR_IO_CMD:
396 rtl92d_phy_set_io_cmd(hw, (*(enum io_type *)val)); 396 rtl92d_phy_set_io_cmd(hw, (*(enum io_type *)val));
397 break; 397 break;
398 case HW_VAR_WPA_CONFIG: 398 case HW_VAR_WPA_CONFIG:
399 rtl_write_byte(rtlpriv, REG_SECCFG, *((u8 *) val)); 399 rtl_write_byte(rtlpriv, REG_SECCFG, *val);
400 break; 400 break;
401 case HW_VAR_SET_RPWM: 401 case HW_VAR_SET_RPWM:
402 rtl92d_fill_h2c_cmd(hw, H2C_PWRM, 1, (u8 *) (val)); 402 rtl92d_fill_h2c_cmd(hw, H2C_PWRM, 1, (val));
403 break; 403 break;
404 case HW_VAR_H2C_FW_PWRMODE: 404 case HW_VAR_H2C_FW_PWRMODE:
405 break; 405 break;
@@ -407,7 +407,7 @@ void rtl92de_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
407 ppsc->fw_current_inpsmode = *((bool *) val); 407 ppsc->fw_current_inpsmode = *((bool *) val);
408 break; 408 break;
409 case HW_VAR_H2C_FW_JOINBSSRPT: { 409 case HW_VAR_H2C_FW_JOINBSSRPT: {
410 u8 mstatus = (*(u8 *) val); 410 u8 mstatus = (*val);
411 u8 tmp_regcr, tmp_reg422; 411 u8 tmp_regcr, tmp_reg422;
412 bool recover = false; 412 bool recover = false;
413 413
@@ -435,7 +435,7 @@ void rtl92de_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
435 rtl_write_byte(rtlpriv, REG_CR + 1, 435 rtl_write_byte(rtlpriv, REG_CR + 1,
436 (tmp_regcr & ~(BIT(0)))); 436 (tmp_regcr & ~(BIT(0))));
437 } 437 }
438 rtl92d_set_fw_joinbss_report_cmd(hw, (*(u8 *) val)); 438 rtl92d_set_fw_joinbss_report_cmd(hw, (*val));
439 break; 439 break;
440 } 440 }
441 case HW_VAR_AID: { 441 case HW_VAR_AID: {
@@ -447,7 +447,7 @@ void rtl92de_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
447 break; 447 break;
448 } 448 }
449 case HW_VAR_CORRECT_TSF: { 449 case HW_VAR_CORRECT_TSF: {
450 u8 btype_ibss = ((u8 *) (val))[0]; 450 u8 btype_ibss = val[0];
451 451
452 if (btype_ibss) 452 if (btype_ibss)
453 _rtl92de_stop_tx_beacon(hw); 453 _rtl92de_stop_tx_beacon(hw);
@@ -1794,7 +1794,7 @@ static void _rtl92de_read_adapter_info(struct ieee80211_hw *hw)
1794 "RTL819X Not boot from eeprom, check it !!\n"); 1794 "RTL819X Not boot from eeprom, check it !!\n");
1795 return; 1795 return;
1796 } 1796 }
1797 rtlefuse->eeprom_oemid = *(u8 *)&hwinfo[EEPROM_CUSTOMER_ID]; 1797 rtlefuse->eeprom_oemid = hwinfo[EEPROM_CUSTOMER_ID];
1798 _rtl92de_read_macphymode_and_bandtype(hw, hwinfo); 1798 _rtl92de_read_macphymode_and_bandtype(hw, hwinfo);
1799 1799
1800 /* VID, DID SE 0xA-D */ 1800 /* VID, DID SE 0xA-D */
@@ -2115,7 +2115,7 @@ void rtl92de_update_channel_access_setting(struct ieee80211_hw *hw)
2115 u16 sifs_timer; 2115 u16 sifs_timer;
2116 2116
2117 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SLOT_TIME, 2117 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SLOT_TIME,
2118 (u8 *)&mac->slot_time); 2118 &mac->slot_time);
2119 if (!mac->ht_enable) 2119 if (!mac->ht_enable)
2120 sifs_timer = 0x0a0a; 2120 sifs_timer = 0x0a0a;
2121 else 2121 else
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/trx.c b/drivers/net/wireless/rtlwifi/rtl8192de/trx.c
index 1666ef7fd87..f80690d82c1 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/trx.c
@@ -560,7 +560,7 @@ void rtl92de_tx_fill_desc(struct ieee80211_hw *hw,
560 struct rtl_hal *rtlhal = rtl_hal(rtlpriv); 560 struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
561 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); 561 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
562 struct ieee80211_sta *sta = info->control.sta; 562 struct ieee80211_sta *sta = info->control.sta;
563 u8 *pdesc = (u8 *) pdesc_tx; 563 u8 *pdesc = pdesc_tx;
564 u16 seq_number; 564 u16 seq_number;
565 __le16 fc = hdr->frame_control; 565 __le16 fc = hdr->frame_control;
566 unsigned int buf_len = 0; 566 unsigned int buf_len = 0;
@@ -761,11 +761,11 @@ void rtl92de_tx_fill_cmddesc(struct ieee80211_hw *hw,
761 SET_TX_DESC_QUEUE_SEL(pdesc, fw_queue); 761 SET_TX_DESC_QUEUE_SEL(pdesc, fw_queue);
762 SET_TX_DESC_FIRST_SEG(pdesc, 1); 762 SET_TX_DESC_FIRST_SEG(pdesc, 1);
763 SET_TX_DESC_LAST_SEG(pdesc, 1); 763 SET_TX_DESC_LAST_SEG(pdesc, 1);
764 SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16) (skb->len)); 764 SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16)skb->len);
765 SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, mapping); 765 SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, mapping);
766 SET_TX_DESC_RATE_ID(pdesc, 7); 766 SET_TX_DESC_RATE_ID(pdesc, 7);
767 SET_TX_DESC_MACID(pdesc, 0); 767 SET_TX_DESC_MACID(pdesc, 0);
768 SET_TX_DESC_PKT_SIZE((u8 *) pdesc, (u16) (skb->len)); 768 SET_TX_DESC_PKT_SIZE(pdesc, (u16) (skb->len));
769 SET_TX_DESC_FIRST_SEG(pdesc, 1); 769 SET_TX_DESC_FIRST_SEG(pdesc, 1);
770 SET_TX_DESC_LAST_SEG(pdesc, 1); 770 SET_TX_DESC_LAST_SEG(pdesc, 1);
771 SET_TX_DESC_OFFSET(pdesc, 0x20); 771 SET_TX_DESC_OFFSET(pdesc, 0x20);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/dm.c b/drivers/net/wireless/rtlwifi/rtl8192se/dm.c
index 2e1158026fb..465f5815710 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/dm.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/dm.c
@@ -146,7 +146,7 @@ static void _rtl92s_dm_check_edca_turbo(struct ieee80211_hw *hw)
146 if (rtlpriv->dm.current_turbo_edca) { 146 if (rtlpriv->dm.current_turbo_edca) {
147 u8 tmp = AC0_BE; 147 u8 tmp = AC0_BE;
148 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AC_PARAM, 148 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AC_PARAM,
149 (u8 *)(&tmp)); 149 &tmp);
150 rtlpriv->dm.current_turbo_edca = false; 150 rtlpriv->dm.current_turbo_edca = false;
151 } 151 }
152 } 152 }
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/hw.c b/drivers/net/wireless/rtlwifi/rtl8192se/hw.c
index b141c35bf92..4542e6952b9 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/hw.c
@@ -145,13 +145,13 @@ void rtl92se_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
145 for (e_aci = 0; e_aci < AC_MAX; e_aci++) { 145 for (e_aci = 0; e_aci < AC_MAX; e_aci++) {
146 rtlpriv->cfg->ops->set_hw_reg(hw, 146 rtlpriv->cfg->ops->set_hw_reg(hw,
147 HW_VAR_AC_PARAM, 147 HW_VAR_AC_PARAM,
148 (u8 *)(&e_aci)); 148 (&e_aci));
149 } 149 }
150 break; 150 break;
151 } 151 }
152 case HW_VAR_ACK_PREAMBLE:{ 152 case HW_VAR_ACK_PREAMBLE:{
153 u8 reg_tmp; 153 u8 reg_tmp;
154 u8 short_preamble = (bool) (*(u8 *) val); 154 u8 short_preamble = (bool) (*val);
155 reg_tmp = (mac->cur_40_prime_sc) << 5; 155 reg_tmp = (mac->cur_40_prime_sc) << 5;
156 if (short_preamble) 156 if (short_preamble)
157 reg_tmp |= 0x80; 157 reg_tmp |= 0x80;
@@ -163,7 +163,7 @@ void rtl92se_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
163 u8 min_spacing_to_set; 163 u8 min_spacing_to_set;
164 u8 sec_min_space; 164 u8 sec_min_space;
165 165
166 min_spacing_to_set = *((u8 *)val); 166 min_spacing_to_set = *val;
167 if (min_spacing_to_set <= 7) { 167 if (min_spacing_to_set <= 7) {
168 if (rtlpriv->sec.pairwise_enc_algorithm == 168 if (rtlpriv->sec.pairwise_enc_algorithm ==
169 NO_ENCRYPTION) 169 NO_ENCRYPTION)
@@ -194,7 +194,7 @@ void rtl92se_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
194 case HW_VAR_SHORTGI_DENSITY:{ 194 case HW_VAR_SHORTGI_DENSITY:{
195 u8 density_to_set; 195 u8 density_to_set;
196 196
197 density_to_set = *((u8 *) val); 197 density_to_set = *val;
198 mac->min_space_cfg = rtlpriv->rtlhal.minspace_cfg; 198 mac->min_space_cfg = rtlpriv->rtlhal.minspace_cfg;
199 mac->min_space_cfg |= (density_to_set << 3); 199 mac->min_space_cfg |= (density_to_set << 3);
200 200
@@ -216,7 +216,7 @@ void rtl92se_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
216 15, 15, 15, 15, 0}; 216 15, 15, 15, 15, 0};
217 u8 index = 0; 217 u8 index = 0;
218 218
219 factor_toset = *((u8 *) val); 219 factor_toset = *val;
220 if (factor_toset <= 3) { 220 if (factor_toset <= 3) {
221 factor_toset = (1 << (factor_toset + 2)); 221 factor_toset = (1 << (factor_toset + 2));
222 if (factor_toset > 0xf) 222 if (factor_toset > 0xf)
@@ -248,17 +248,17 @@ void rtl92se_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
248 break; 248 break;
249 } 249 }
250 case HW_VAR_AC_PARAM:{ 250 case HW_VAR_AC_PARAM:{
251 u8 e_aci = *((u8 *) val); 251 u8 e_aci = *val;
252 rtl92s_dm_init_edca_turbo(hw); 252 rtl92s_dm_init_edca_turbo(hw);
253 253
254 if (rtlpci->acm_method != eAcmWay2_SW) 254 if (rtlpci->acm_method != eAcmWay2_SW)
255 rtlpriv->cfg->ops->set_hw_reg(hw, 255 rtlpriv->cfg->ops->set_hw_reg(hw,
256 HW_VAR_ACM_CTRL, 256 HW_VAR_ACM_CTRL,
257 (u8 *)(&e_aci)); 257 &e_aci);
258 break; 258 break;
259 } 259 }
260 case HW_VAR_ACM_CTRL:{ 260 case HW_VAR_ACM_CTRL:{
261 u8 e_aci = *((u8 *) val); 261 u8 e_aci = *val;
262 union aci_aifsn *p_aci_aifsn = (union aci_aifsn *)(&( 262 union aci_aifsn *p_aci_aifsn = (union aci_aifsn *)(&(
263 mac->ac[0].aifs)); 263 mac->ac[0].aifs));
264 u8 acm = p_aci_aifsn->f.acm; 264 u8 acm = p_aci_aifsn->f.acm;
@@ -313,7 +313,7 @@ void rtl92se_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
313 break; 313 break;
314 } 314 }
315 case HW_VAR_RETRY_LIMIT:{ 315 case HW_VAR_RETRY_LIMIT:{
316 u8 retry_limit = ((u8 *) (val))[0]; 316 u8 retry_limit = val[0];
317 317
318 rtl_write_word(rtlpriv, RETRY_LIMIT, 318 rtl_write_word(rtlpriv, RETRY_LIMIT,
319 retry_limit << RETRY_LIMIT_SHORT_SHIFT | 319 retry_limit << RETRY_LIMIT_SHORT_SHIFT |
@@ -328,14 +328,14 @@ void rtl92se_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
328 break; 328 break;
329 } 329 }
330 case HW_VAR_EFUSE_USAGE: { 330 case HW_VAR_EFUSE_USAGE: {
331 rtlefuse->efuse_usedpercentage = *((u8 *) val); 331 rtlefuse->efuse_usedpercentage = *val;
332 break; 332 break;
333 } 333 }
334 case HW_VAR_IO_CMD: { 334 case HW_VAR_IO_CMD: {
335 break; 335 break;
336 } 336 }
337 case HW_VAR_WPA_CONFIG: { 337 case HW_VAR_WPA_CONFIG: {
338 rtl_write_byte(rtlpriv, REG_SECR, *((u8 *) val)); 338 rtl_write_byte(rtlpriv, REG_SECR, *val);
339 break; 339 break;
340 } 340 }
341 case HW_VAR_SET_RPWM:{ 341 case HW_VAR_SET_RPWM:{
@@ -1813,8 +1813,7 @@ static void _rtl92se_read_adapter_info(struct ieee80211_hw *hw)
1813 else 1813 else
1814 index = 2; 1814 index = 2;
1815 1815
1816 tempval = (*(u8 *)&hwinfo[EEPROM_TX_PWR_HT20_DIFF + 1816 tempval = hwinfo[EEPROM_TX_PWR_HT20_DIFF + index] & 0xff;
1817 index]) & 0xff;
1818 rtlefuse->txpwr_ht20diff[RF90_PATH_A][i] = (tempval & 0xF); 1817 rtlefuse->txpwr_ht20diff[RF90_PATH_A][i] = (tempval & 0xF);
1819 rtlefuse->txpwr_ht20diff[RF90_PATH_B][i] = 1818 rtlefuse->txpwr_ht20diff[RF90_PATH_B][i] =
1820 ((tempval >> 4) & 0xF); 1819 ((tempval >> 4) & 0xF);
@@ -1830,14 +1829,13 @@ static void _rtl92se_read_adapter_info(struct ieee80211_hw *hw)
1830 else 1829 else
1831 index = 1; 1830 index = 1;
1832 1831
1833 tempval = (*(u8 *)&hwinfo[EEPROM_TX_PWR_OFDM_DIFF + index]) 1832 tempval = hwinfo[EEPROM_TX_PWR_OFDM_DIFF + index] & 0xff;
1834 & 0xff;
1835 rtlefuse->txpwr_legacyhtdiff[RF90_PATH_A][i] = 1833 rtlefuse->txpwr_legacyhtdiff[RF90_PATH_A][i] =
1836 (tempval & 0xF); 1834 (tempval & 0xF);
1837 rtlefuse->txpwr_legacyhtdiff[RF90_PATH_B][i] = 1835 rtlefuse->txpwr_legacyhtdiff[RF90_PATH_B][i] =
1838 ((tempval >> 4) & 0xF); 1836 ((tempval >> 4) & 0xF);
1839 1837
1840 tempval = (*(u8 *)&hwinfo[TX_PWR_SAFETY_CHK]); 1838 tempval = hwinfo[TX_PWR_SAFETY_CHK];
1841 rtlefuse->txpwr_safetyflag = (tempval & 0x01); 1839 rtlefuse->txpwr_safetyflag = (tempval & 0x01);
1842 } 1840 }
1843 1841
@@ -1876,7 +1874,7 @@ static void _rtl92se_read_adapter_info(struct ieee80211_hw *hw)
1876 1874
1877 /* Read RF-indication and Tx Power gain 1875 /* Read RF-indication and Tx Power gain
1878 * index diff of legacy to HT OFDM rate. */ 1876 * index diff of legacy to HT OFDM rate. */
1879 tempval = (*(u8 *)&hwinfo[EEPROM_RFIND_POWERDIFF]) & 0xff; 1877 tempval = hwinfo[EEPROM_RFIND_POWERDIFF] & 0xff;
1880 rtlefuse->eeprom_txpowerdiff = tempval; 1878 rtlefuse->eeprom_txpowerdiff = tempval;
1881 rtlefuse->legacy_httxpowerdiff = 1879 rtlefuse->legacy_httxpowerdiff =
1882 rtlefuse->txpwr_legacyhtdiff[RF90_PATH_A][0]; 1880 rtlefuse->txpwr_legacyhtdiff[RF90_PATH_A][0];
@@ -1887,7 +1885,7 @@ static void _rtl92se_read_adapter_info(struct ieee80211_hw *hw)
1887 /* Get TSSI value for each path. */ 1885 /* Get TSSI value for each path. */
1888 usvalue = *(u16 *)&hwinfo[EEPROM_TSSI_A]; 1886 usvalue = *(u16 *)&hwinfo[EEPROM_TSSI_A];
1889 rtlefuse->eeprom_tssi[RF90_PATH_A] = (u8)((usvalue & 0xff00) >> 8); 1887 rtlefuse->eeprom_tssi[RF90_PATH_A] = (u8)((usvalue & 0xff00) >> 8);
1890 usvalue = *(u8 *)&hwinfo[EEPROM_TSSI_B]; 1888 usvalue = hwinfo[EEPROM_TSSI_B];
1891 rtlefuse->eeprom_tssi[RF90_PATH_B] = (u8)(usvalue & 0xff); 1889 rtlefuse->eeprom_tssi[RF90_PATH_B] = (u8)(usvalue & 0xff);
1892 1890
1893 RTPRINT(rtlpriv, FINIT, INIT_TxPower, "TSSI_A = 0x%x, TSSI_B = 0x%x\n", 1891 RTPRINT(rtlpriv, FINIT, INIT_TxPower, "TSSI_A = 0x%x, TSSI_B = 0x%x\n",
@@ -1896,7 +1894,7 @@ static void _rtl92se_read_adapter_info(struct ieee80211_hw *hw)
1896 1894
1897 /* Read antenna tx power offset of B/C/D to A from EEPROM */ 1895 /* Read antenna tx power offset of B/C/D to A from EEPROM */
1898 /* and read ThermalMeter from EEPROM */ 1896 /* and read ThermalMeter from EEPROM */
1899 tempval = *(u8 *)&hwinfo[EEPROM_THERMALMETER]; 1897 tempval = hwinfo[EEPROM_THERMALMETER];
1900 rtlefuse->eeprom_thermalmeter = tempval; 1898 rtlefuse->eeprom_thermalmeter = tempval;
1901 RTPRINT(rtlpriv, FINIT, INIT_TxPower, 1899 RTPRINT(rtlpriv, FINIT, INIT_TxPower,
1902 "thermalmeter = 0x%x\n", rtlefuse->eeprom_thermalmeter); 1900 "thermalmeter = 0x%x\n", rtlefuse->eeprom_thermalmeter);
@@ -1906,20 +1904,20 @@ static void _rtl92se_read_adapter_info(struct ieee80211_hw *hw)
1906 rtlefuse->tssi_13dbm = rtlefuse->eeprom_thermalmeter * 100; 1904 rtlefuse->tssi_13dbm = rtlefuse->eeprom_thermalmeter * 100;
1907 1905
1908 /* Read CrystalCap from EEPROM */ 1906 /* Read CrystalCap from EEPROM */
1909 tempval = (*(u8 *)&hwinfo[EEPROM_CRYSTALCAP]) >> 4; 1907 tempval = hwinfo[EEPROM_CRYSTALCAP] >> 4;
1910 rtlefuse->eeprom_crystalcap = tempval; 1908 rtlefuse->eeprom_crystalcap = tempval;
1911 /* CrystalCap, BIT(12)~15 */ 1909 /* CrystalCap, BIT(12)~15 */
1912 rtlefuse->crystalcap = rtlefuse->eeprom_crystalcap; 1910 rtlefuse->crystalcap = rtlefuse->eeprom_crystalcap;
1913 1911
1914 /* Read IC Version && Channel Plan */ 1912 /* Read IC Version && Channel Plan */
1915 /* Version ID, Channel plan */ 1913 /* Version ID, Channel plan */
1916 rtlefuse->eeprom_channelplan = *(u8 *)&hwinfo[EEPROM_CHANNELPLAN]; 1914 rtlefuse->eeprom_channelplan = hwinfo[EEPROM_CHANNELPLAN];
1917 rtlefuse->txpwr_fromeprom = true; 1915 rtlefuse->txpwr_fromeprom = true;
1918 RTPRINT(rtlpriv, FINIT, INIT_TxPower, 1916 RTPRINT(rtlpriv, FINIT, INIT_TxPower,
1919 "EEPROM ChannelPlan = 0x%4x\n", rtlefuse->eeprom_channelplan); 1917 "EEPROM ChannelPlan = 0x%4x\n", rtlefuse->eeprom_channelplan);
1920 1918
1921 /* Read Customer ID or Board Type!!! */ 1919 /* Read Customer ID or Board Type!!! */
1922 tempval = *(u8 *)&hwinfo[EEPROM_BOARDTYPE]; 1920 tempval = hwinfo[EEPROM_BOARDTYPE];
1923 /* Change RF type definition */ 1921 /* Change RF type definition */
1924 if (tempval == 0) 1922 if (tempval == 0)
1925 rtlphy->rf_type = RF_2T2R; 1923 rtlphy->rf_type = RF_2T2R;
@@ -1941,7 +1939,7 @@ static void _rtl92se_read_adapter_info(struct ieee80211_hw *hw)
1941 } 1939 }
1942 } 1940 }
1943 rtlefuse->b1ss_support = rtlefuse->b1x1_recvcombine; 1941 rtlefuse->b1ss_support = rtlefuse->b1x1_recvcombine;
1944 rtlefuse->eeprom_oemid = *(u8 *)&hwinfo[EEPROM_CUSTOMID]; 1942 rtlefuse->eeprom_oemid = *&hwinfo[EEPROM_CUSTOMID];
1945 1943
1946 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "EEPROM Customer ID: 0x%2x", 1944 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "EEPROM Customer ID: 0x%2x",
1947 rtlefuse->eeprom_oemid); 1945 rtlefuse->eeprom_oemid);
@@ -2251,7 +2249,7 @@ void rtl92se_update_channel_access_setting(struct ieee80211_hw *hw)
2251 u16 sifs_timer; 2249 u16 sifs_timer;
2252 2250
2253 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SLOT_TIME, 2251 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SLOT_TIME,
2254 (u8 *)&mac->slot_time); 2252 &mac->slot_time);
2255 sifs_timer = 0x0e0e; 2253 sifs_timer = 0x0e0e;
2256 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SIFS, (u8 *)&sifs_timer); 2254 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SIFS, (u8 *)&sifs_timer);
2257 2255
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/phy.c b/drivers/net/wireless/rtlwifi/rtl8192se/phy.c
index 8d7099bc472..b917a2a3caf 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/phy.c
@@ -1247,6 +1247,9 @@ static void _rtl92s_phy_get_txpower_index(struct ieee80211_hw *hw, u8 channel,
1247 /* Read HT 40 OFDM TX power */ 1247 /* Read HT 40 OFDM TX power */
1248 ofdmpowerLevel[0] = rtlefuse->txpwrlevel_ht40_2s[0][index]; 1248 ofdmpowerLevel[0] = rtlefuse->txpwrlevel_ht40_2s[0][index];
1249 ofdmpowerLevel[1] = rtlefuse->txpwrlevel_ht40_2s[1][index]; 1249 ofdmpowerLevel[1] = rtlefuse->txpwrlevel_ht40_2s[1][index];
1250 } else {
1251 ofdmpowerLevel[0] = 0;
1252 ofdmpowerLevel[1] = 0;
1250 } 1253 }
1251} 1254}
1252 1255
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/sw.c b/drivers/net/wireless/rtlwifi/rtl8192se/sw.c
index 730bcc91952..ad4b4803482 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/sw.c
@@ -29,7 +29,6 @@
29 29
30#include "../wifi.h" 30#include "../wifi.h"
31#include "../core.h" 31#include "../core.h"
32#include "../pci.h"
33#include "../base.h" 32#include "../base.h"
34#include "../pci.h" 33#include "../pci.h"
35#include "reg.h" 34#include "reg.h"
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/trx.c b/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
index 812b5858f14..36d1cb3aef8 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
@@ -599,7 +599,7 @@ void rtl92se_tx_fill_desc(struct ieee80211_hw *hw,
599 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); 599 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
600 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); 600 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
601 struct ieee80211_sta *sta = info->control.sta; 601 struct ieee80211_sta *sta = info->control.sta;
602 u8 *pdesc = (u8 *) pdesc_tx; 602 u8 *pdesc = pdesc_tx;
603 u16 seq_number; 603 u16 seq_number;
604 __le16 fc = hdr->frame_control; 604 __le16 fc = hdr->frame_control;
605 u8 reserved_macid = 0; 605 u8 reserved_macid = 0;
diff --git a/drivers/net/wireless/ti/Kconfig b/drivers/net/wireless/ti/Kconfig
index 1a72932e221..be800119d0a 100644
--- a/drivers/net/wireless/ti/Kconfig
+++ b/drivers/net/wireless/ti/Kconfig
@@ -8,6 +8,7 @@ menuconfig WL_TI
8if WL_TI 8if WL_TI
9source "drivers/net/wireless/ti/wl1251/Kconfig" 9source "drivers/net/wireless/ti/wl1251/Kconfig"
10source "drivers/net/wireless/ti/wl12xx/Kconfig" 10source "drivers/net/wireless/ti/wl12xx/Kconfig"
11source "drivers/net/wireless/ti/wl18xx/Kconfig"
11 12
12# keep last for automatic dependencies 13# keep last for automatic dependencies
13source "drivers/net/wireless/ti/wlcore/Kconfig" 14source "drivers/net/wireless/ti/wlcore/Kconfig"
diff --git a/drivers/net/wireless/ti/Makefile b/drivers/net/wireless/ti/Makefile
index 0a565622d4a..4d6823983c0 100644
--- a/drivers/net/wireless/ti/Makefile
+++ b/drivers/net/wireless/ti/Makefile
@@ -2,3 +2,4 @@ obj-$(CONFIG_WLCORE) += wlcore/
2obj-$(CONFIG_WL12XX) += wl12xx/ 2obj-$(CONFIG_WL12XX) += wl12xx/
3obj-$(CONFIG_WL12XX_PLATFORM_DATA) += wlcore/ 3obj-$(CONFIG_WL12XX_PLATFORM_DATA) += wlcore/
4obj-$(CONFIG_WL1251) += wl1251/ 4obj-$(CONFIG_WL1251) += wl1251/
5obj-$(CONFIG_WL18XX) += wl18xx/
diff --git a/drivers/net/wireless/ti/wl1251/cmd.c b/drivers/net/wireless/ti/wl1251/cmd.c
index d14d69d733a..6822b845efc 100644
--- a/drivers/net/wireless/ti/wl1251/cmd.c
+++ b/drivers/net/wireless/ti/wl1251/cmd.c
@@ -277,15 +277,6 @@ int wl1251_cmd_join(struct wl1251 *wl, u8 bss_type, u8 channel,
277 join->rx_config_options = wl->rx_config; 277 join->rx_config_options = wl->rx_config;
278 join->rx_filter_options = wl->rx_filter; 278 join->rx_filter_options = wl->rx_filter;
279 279
280 /*
281 * FIXME: disable temporarily all filters because after commit
282 * 9cef8737 "mac80211: fix managed mode BSSID handling" broke
283 * association. The filter logic needs to be implemented properly
284 * and once that is done, this hack can be removed.
285 */
286 join->rx_config_options = 0;
287 join->rx_filter_options = WL1251_DEFAULT_RX_FILTER;
288
289 join->basic_rate_set = RATE_MASK_1MBPS | RATE_MASK_2MBPS | 280 join->basic_rate_set = RATE_MASK_1MBPS | RATE_MASK_2MBPS |
290 RATE_MASK_5_5MBPS | RATE_MASK_11MBPS; 281 RATE_MASK_5_5MBPS | RATE_MASK_11MBPS;
291 282
diff --git a/drivers/net/wireless/ti/wl1251/main.c b/drivers/net/wireless/ti/wl1251/main.c
index d1afb8e3b2e..3118c425bcf 100644
--- a/drivers/net/wireless/ti/wl1251/main.c
+++ b/drivers/net/wireless/ti/wl1251/main.c
@@ -334,6 +334,12 @@ static int wl1251_join(struct wl1251 *wl, u8 bss_type, u8 channel,
334 if (ret < 0) 334 if (ret < 0)
335 goto out; 335 goto out;
336 336
337 /*
338 * Join command applies filters, and if we are not associated,
339 * BSSID filter must be disabled for association to work.
340 */
341 if (is_zero_ether_addr(wl->bssid))
342 wl->rx_config &= ~CFG_BSSID_FILTER_EN;
337 343
338 ret = wl1251_cmd_join(wl, bss_type, channel, beacon_interval, 344 ret = wl1251_cmd_join(wl, bss_type, channel, beacon_interval,
339 dtim_period); 345 dtim_period);
@@ -348,33 +354,6 @@ out:
348 return ret; 354 return ret;
349} 355}
350 356
351static void wl1251_filter_work(struct work_struct *work)
352{
353 struct wl1251 *wl =
354 container_of(work, struct wl1251, filter_work);
355 int ret;
356
357 mutex_lock(&wl->mutex);
358
359 if (wl->state == WL1251_STATE_OFF)
360 goto out;
361
362 ret = wl1251_ps_elp_wakeup(wl);
363 if (ret < 0)
364 goto out;
365
366 ret = wl1251_join(wl, wl->bss_type, wl->channel, wl->beacon_int,
367 wl->dtim_period);
368 if (ret < 0)
369 goto out_sleep;
370
371out_sleep:
372 wl1251_ps_elp_sleep(wl);
373
374out:
375 mutex_unlock(&wl->mutex);
376}
377
378static void wl1251_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb) 357static void wl1251_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
379{ 358{
380 struct wl1251 *wl = hw->priv; 359 struct wl1251 *wl = hw->priv;
@@ -478,7 +457,6 @@ static void wl1251_op_stop(struct ieee80211_hw *hw)
478 457
479 cancel_work_sync(&wl->irq_work); 458 cancel_work_sync(&wl->irq_work);
480 cancel_work_sync(&wl->tx_work); 459 cancel_work_sync(&wl->tx_work);
481 cancel_work_sync(&wl->filter_work);
482 cancel_delayed_work_sync(&wl->elp_work); 460 cancel_delayed_work_sync(&wl->elp_work);
483 461
484 mutex_lock(&wl->mutex); 462 mutex_lock(&wl->mutex);
@@ -681,13 +659,15 @@ out:
681 FIF_FCSFAIL | \ 659 FIF_FCSFAIL | \
682 FIF_BCN_PRBRESP_PROMISC | \ 660 FIF_BCN_PRBRESP_PROMISC | \
683 FIF_CONTROL | \ 661 FIF_CONTROL | \
684 FIF_OTHER_BSS) 662 FIF_OTHER_BSS | \
663 FIF_PROBE_REQ)
685 664
686static void wl1251_op_configure_filter(struct ieee80211_hw *hw, 665static void wl1251_op_configure_filter(struct ieee80211_hw *hw,
687 unsigned int changed, 666 unsigned int changed,
688 unsigned int *total,u64 multicast) 667 unsigned int *total,u64 multicast)
689{ 668{
690 struct wl1251 *wl = hw->priv; 669 struct wl1251 *wl = hw->priv;
670 int ret;
691 671
692 wl1251_debug(DEBUG_MAC80211, "mac80211 configure filter"); 672 wl1251_debug(DEBUG_MAC80211, "mac80211 configure filter");
693 673
@@ -698,7 +678,7 @@ static void wl1251_op_configure_filter(struct ieee80211_hw *hw,
698 /* no filters which we support changed */ 678 /* no filters which we support changed */
699 return; 679 return;
700 680
701 /* FIXME: wl->rx_config and wl->rx_filter are not protected */ 681 mutex_lock(&wl->mutex);
702 682
703 wl->rx_config = WL1251_DEFAULT_RX_CONFIG; 683 wl->rx_config = WL1251_DEFAULT_RX_CONFIG;
704 wl->rx_filter = WL1251_DEFAULT_RX_FILTER; 684 wl->rx_filter = WL1251_DEFAULT_RX_FILTER;
@@ -721,15 +701,25 @@ static void wl1251_op_configure_filter(struct ieee80211_hw *hw,
721 } 701 }
722 if (*total & FIF_CONTROL) 702 if (*total & FIF_CONTROL)
723 wl->rx_filter |= CFG_RX_CTL_EN; 703 wl->rx_filter |= CFG_RX_CTL_EN;
724 if (*total & FIF_OTHER_BSS) 704 if (*total & FIF_OTHER_BSS || is_zero_ether_addr(wl->bssid))
725 wl->rx_filter &= ~CFG_BSSID_FILTER_EN; 705 wl->rx_config &= ~CFG_BSSID_FILTER_EN;
706 if (*total & FIF_PROBE_REQ)
707 wl->rx_filter |= CFG_RX_PREQ_EN;
726 708
727 /* 709 if (wl->state == WL1251_STATE_OFF)
728 * FIXME: workqueues need to be properly cancelled on stop(), for 710 goto out;
729 * now let's just disable changing the filter settings. They will 711
730 * be updated any on config(). 712 ret = wl1251_ps_elp_wakeup(wl);
731 */ 713 if (ret < 0)
732 /* schedule_work(&wl->filter_work); */ 714 goto out;
715
716 /* send filters to firmware */
717 wl1251_acx_rx_config(wl, wl->rx_config, wl->rx_filter);
718
719 wl1251_ps_elp_sleep(wl);
720
721out:
722 mutex_unlock(&wl->mutex);
733} 723}
734 724
735/* HW encryption */ 725/* HW encryption */
@@ -1390,7 +1380,6 @@ struct ieee80211_hw *wl1251_alloc_hw(void)
1390 1380
1391 skb_queue_head_init(&wl->tx_queue); 1381 skb_queue_head_init(&wl->tx_queue);
1392 1382
1393 INIT_WORK(&wl->filter_work, wl1251_filter_work);
1394 INIT_DELAYED_WORK(&wl->elp_work, wl1251_elp_work); 1383 INIT_DELAYED_WORK(&wl->elp_work, wl1251_elp_work);
1395 wl->channel = WL1251_DEFAULT_CHANNEL; 1384 wl->channel = WL1251_DEFAULT_CHANNEL;
1396 wl->scanning = false; 1385 wl->scanning = false;
diff --git a/drivers/net/wireless/ti/wl1251/wl1251.h b/drivers/net/wireless/ti/wl1251/wl1251.h
index 9d8f5816c6f..fd02060038d 100644
--- a/drivers/net/wireless/ti/wl1251/wl1251.h
+++ b/drivers/net/wireless/ti/wl1251/wl1251.h
@@ -315,7 +315,6 @@ struct wl1251 {
315 bool tx_queue_stopped; 315 bool tx_queue_stopped;
316 316
317 struct work_struct tx_work; 317 struct work_struct tx_work;
318 struct work_struct filter_work;
319 318
320 /* Pending TX frames */ 319 /* Pending TX frames */
321 struct sk_buff *tx_frames[16]; 320 struct sk_buff *tx_frames[16];
diff --git a/drivers/net/wireless/ti/wl12xx/Makefile b/drivers/net/wireless/ti/wl12xx/Makefile
index 87f64b14db3..da509aa7d00 100644
--- a/drivers/net/wireless/ti/wl12xx/Makefile
+++ b/drivers/net/wireless/ti/wl12xx/Makefile
@@ -1,3 +1,3 @@
1wl12xx-objs = main.o cmd.o acx.o 1wl12xx-objs = main.o cmd.o acx.o debugfs.o
2 2
3obj-$(CONFIG_WL12XX) += wl12xx.o 3obj-$(CONFIG_WL12XX) += wl12xx.o
diff --git a/drivers/net/wireless/ti/wl12xx/acx.h b/drivers/net/wireless/ti/wl12xx/acx.h
index d1f5aba0afc..2a26868b837 100644
--- a/drivers/net/wireless/ti/wl12xx/acx.h
+++ b/drivers/net/wireless/ti/wl12xx/acx.h
@@ -24,6 +24,21 @@
24#define __WL12XX_ACX_H__ 24#define __WL12XX_ACX_H__
25 25
26#include "../wlcore/wlcore.h" 26#include "../wlcore/wlcore.h"
27#include "../wlcore/acx.h"
28
29#define WL12XX_ACX_ALL_EVENTS_VECTOR (WL1271_ACX_INTR_WATCHDOG | \
30 WL1271_ACX_INTR_INIT_COMPLETE | \
31 WL1271_ACX_INTR_EVENT_A | \
32 WL1271_ACX_INTR_EVENT_B | \
33 WL1271_ACX_INTR_CMD_COMPLETE | \
34 WL1271_ACX_INTR_HW_AVAILABLE | \
35 WL1271_ACX_INTR_DATA)
36
37#define WL12XX_INTR_MASK (WL1271_ACX_INTR_WATCHDOG | \
38 WL1271_ACX_INTR_EVENT_A | \
39 WL1271_ACX_INTR_EVENT_B | \
40 WL1271_ACX_INTR_HW_AVAILABLE | \
41 WL1271_ACX_INTR_DATA)
27 42
28struct wl1271_acx_host_config_bitmap { 43struct wl1271_acx_host_config_bitmap {
29 struct acx_header header; 44 struct acx_header header;
@@ -31,6 +46,228 @@ struct wl1271_acx_host_config_bitmap {
31 __le32 host_cfg_bitmap; 46 __le32 host_cfg_bitmap;
32} __packed; 47} __packed;
33 48
49struct wl12xx_acx_tx_statistics {
50 __le32 internal_desc_overflow;
51} __packed;
52
53struct wl12xx_acx_rx_statistics {
54 __le32 out_of_mem;
55 __le32 hdr_overflow;
56 __le32 hw_stuck;
57 __le32 dropped;
58 __le32 fcs_err;
59 __le32 xfr_hint_trig;
60 __le32 path_reset;
61 __le32 reset_counter;
62} __packed;
63
64struct wl12xx_acx_dma_statistics {
65 __le32 rx_requested;
66 __le32 rx_errors;
67 __le32 tx_requested;
68 __le32 tx_errors;
69} __packed;
70
71struct wl12xx_acx_isr_statistics {
72 /* host command complete */
73 __le32 cmd_cmplt;
74
75 /* fiqisr() */
76 __le32 fiqs;
77
78 /* (INT_STS_ND & INT_TRIG_RX_HEADER) */
79 __le32 rx_headers;
80
81 /* (INT_STS_ND & INT_TRIG_RX_CMPLT) */
82 __le32 rx_completes;
83
84 /* (INT_STS_ND & INT_TRIG_NO_RX_BUF) */
85 __le32 rx_mem_overflow;
86
87 /* (INT_STS_ND & INT_TRIG_S_RX_RDY) */
88 __le32 rx_rdys;
89
90 /* irqisr() */
91 __le32 irqs;
92
93 /* (INT_STS_ND & INT_TRIG_TX_PROC) */
94 __le32 tx_procs;
95
96 /* (INT_STS_ND & INT_TRIG_DECRYPT_DONE) */
97 __le32 decrypt_done;
98
99 /* (INT_STS_ND & INT_TRIG_DMA0) */
100 __le32 dma0_done;
101
102 /* (INT_STS_ND & INT_TRIG_DMA1) */
103 __le32 dma1_done;
104
105 /* (INT_STS_ND & INT_TRIG_TX_EXC_CMPLT) */
106 __le32 tx_exch_complete;
107
108 /* (INT_STS_ND & INT_TRIG_COMMAND) */
109 __le32 commands;
110
111 /* (INT_STS_ND & INT_TRIG_RX_PROC) */
112 __le32 rx_procs;
113
114 /* (INT_STS_ND & INT_TRIG_PM_802) */
115 __le32 hw_pm_mode_changes;
116
117 /* (INT_STS_ND & INT_TRIG_ACKNOWLEDGE) */
118 __le32 host_acknowledges;
119
120 /* (INT_STS_ND & INT_TRIG_PM_PCI) */
121 __le32 pci_pm;
122
123 /* (INT_STS_ND & INT_TRIG_ACM_WAKEUP) */
124 __le32 wakeups;
125
126 /* (INT_STS_ND & INT_TRIG_LOW_RSSI) */
127 __le32 low_rssi;
128} __packed;
129
130struct wl12xx_acx_wep_statistics {
131 /* WEP address keys configured */
132 __le32 addr_key_count;
133
134 /* default keys configured */
135 __le32 default_key_count;
136
137 __le32 reserved;
138
139 /* number of times that WEP key not found on lookup */
140 __le32 key_not_found;
141
142 /* number of times that WEP key decryption failed */
143 __le32 decrypt_fail;
144
145 /* WEP packets decrypted */
146 __le32 packets;
147
148 /* WEP decrypt interrupts */
149 __le32 interrupt;
150} __packed;
151
152#define ACX_MISSED_BEACONS_SPREAD 10
153
154struct wl12xx_acx_pwr_statistics {
155 /* the amount of enters into power save mode (both PD & ELP) */
156 __le32 ps_enter;
157
158 /* the amount of enters into ELP mode */
159 __le32 elp_enter;
160
161 /* the amount of missing beacon interrupts to the host */
162 __le32 missing_bcns;
163
164 /* the amount of wake on host-access times */
165 __le32 wake_on_host;
166
167 /* the amount of wake on timer-expire */
168 __le32 wake_on_timer_exp;
169
170 /* the number of packets that were transmitted with PS bit set */
171 __le32 tx_with_ps;
172
173 /* the number of packets that were transmitted with PS bit clear */
174 __le32 tx_without_ps;
175
176 /* the number of received beacons */
177 __le32 rcvd_beacons;
178
179 /* the number of entering into PowerOn (power save off) */
180 __le32 power_save_off;
181
182 /* the number of entries into power save mode */
183 __le16 enable_ps;
184
185 /*
186 * the number of exits from power save, not including failed PS
187 * transitions
188 */
189 __le16 disable_ps;
190
191 /*
192 * the number of times the TSF counter was adjusted because
193 * of drift
194 */
195 __le32 fix_tsf_ps;
196
197 /* Gives statistics about the spread continuous missed beacons.
198 * The 16 LSB are dedicated for the PS mode.
199 * The 16 MSB are dedicated for the PS mode.
200 * cont_miss_bcns_spread[0] - single missed beacon.
201 * cont_miss_bcns_spread[1] - two continuous missed beacons.
202 * cont_miss_bcns_spread[2] - three continuous missed beacons.
203 * ...
204 * cont_miss_bcns_spread[9] - ten and more continuous missed beacons.
205 */
206 __le32 cont_miss_bcns_spread[ACX_MISSED_BEACONS_SPREAD];
207
208 /* the number of beacons in awake mode */
209 __le32 rcvd_awake_beacons;
210} __packed;
211
212struct wl12xx_acx_mic_statistics {
213 __le32 rx_pkts;
214 __le32 calc_failure;
215} __packed;
216
217struct wl12xx_acx_aes_statistics {
218 __le32 encrypt_fail;
219 __le32 decrypt_fail;
220 __le32 encrypt_packets;
221 __le32 decrypt_packets;
222 __le32 encrypt_interrupt;
223 __le32 decrypt_interrupt;
224} __packed;
225
226struct wl12xx_acx_event_statistics {
227 __le32 heart_beat;
228 __le32 calibration;
229 __le32 rx_mismatch;
230 __le32 rx_mem_empty;
231 __le32 rx_pool;
232 __le32 oom_late;
233 __le32 phy_transmit_error;
234 __le32 tx_stuck;
235} __packed;
236
237struct wl12xx_acx_ps_statistics {
238 __le32 pspoll_timeouts;
239 __le32 upsd_timeouts;
240 __le32 upsd_max_sptime;
241 __le32 upsd_max_apturn;
242 __le32 pspoll_max_apturn;
243 __le32 pspoll_utilization;
244 __le32 upsd_utilization;
245} __packed;
246
247struct wl12xx_acx_rxpipe_statistics {
248 __le32 rx_prep_beacon_drop;
249 __le32 descr_host_int_trig_rx_data;
250 __le32 beacon_buffer_thres_host_int_trig_rx_data;
251 __le32 missed_beacon_host_int_trig_rx_data;
252 __le32 tx_xfr_host_int_trig_rx_data;
253} __packed;
254
255struct wl12xx_acx_statistics {
256 struct acx_header header;
257
258 struct wl12xx_acx_tx_statistics tx;
259 struct wl12xx_acx_rx_statistics rx;
260 struct wl12xx_acx_dma_statistics dma;
261 struct wl12xx_acx_isr_statistics isr;
262 struct wl12xx_acx_wep_statistics wep;
263 struct wl12xx_acx_pwr_statistics pwr;
264 struct wl12xx_acx_aes_statistics aes;
265 struct wl12xx_acx_mic_statistics mic;
266 struct wl12xx_acx_event_statistics event;
267 struct wl12xx_acx_ps_statistics ps;
268 struct wl12xx_acx_rxpipe_statistics rxpipe;
269} __packed;
270
34int wl1271_acx_host_if_cfg_bitmap(struct wl1271 *wl, u32 host_cfg_bitmap); 271int wl1271_acx_host_if_cfg_bitmap(struct wl1271 *wl, u32 host_cfg_bitmap);
35 272
36#endif /* __WL12XX_ACX_H__ */ 273#endif /* __WL12XX_ACX_H__ */
diff --git a/drivers/net/wireless/ti/wl12xx/cmd.c b/drivers/net/wireless/ti/wl12xx/cmd.c
index 8ffaeb5f214..30be784a40d 100644
--- a/drivers/net/wireless/ti/wl12xx/cmd.c
+++ b/drivers/net/wireless/ti/wl12xx/cmd.c
@@ -65,6 +65,7 @@ int wl1271_cmd_general_parms(struct wl1271 *wl)
65 struct wl1271_general_parms_cmd *gen_parms; 65 struct wl1271_general_parms_cmd *gen_parms;
66 struct wl1271_ini_general_params *gp = 66 struct wl1271_ini_general_params *gp =
67 &((struct wl1271_nvs_file *)wl->nvs)->general_params; 67 &((struct wl1271_nvs_file *)wl->nvs)->general_params;
68 struct wl12xx_priv *priv = wl->priv;
68 bool answer = false; 69 bool answer = false;
69 int ret; 70 int ret;
70 71
@@ -88,7 +89,7 @@ int wl1271_cmd_general_parms(struct wl1271 *wl)
88 answer = true; 89 answer = true;
89 90
90 /* Override the REF CLK from the NVS with the one from platform data */ 91 /* Override the REF CLK from the NVS with the one from platform data */
91 gen_parms->general_params.ref_clock = wl->ref_clock; 92 gen_parms->general_params.ref_clock = priv->ref_clock;
92 93
93 ret = wl1271_cmd_test(wl, gen_parms, sizeof(*gen_parms), answer); 94 ret = wl1271_cmd_test(wl, gen_parms, sizeof(*gen_parms), answer);
94 if (ret < 0) { 95 if (ret < 0) {
@@ -118,6 +119,7 @@ int wl128x_cmd_general_parms(struct wl1271 *wl)
118 struct wl128x_general_parms_cmd *gen_parms; 119 struct wl128x_general_parms_cmd *gen_parms;
119 struct wl128x_ini_general_params *gp = 120 struct wl128x_ini_general_params *gp =
120 &((struct wl128x_nvs_file *)wl->nvs)->general_params; 121 &((struct wl128x_nvs_file *)wl->nvs)->general_params;
122 struct wl12xx_priv *priv = wl->priv;
121 bool answer = false; 123 bool answer = false;
122 int ret; 124 int ret;
123 125
@@ -141,8 +143,8 @@ int wl128x_cmd_general_parms(struct wl1271 *wl)
141 answer = true; 143 answer = true;
142 144
143 /* Replace REF and TCXO CLKs with the ones from platform data */ 145 /* Replace REF and TCXO CLKs with the ones from platform data */
144 gen_parms->general_params.ref_clock = wl->ref_clock; 146 gen_parms->general_params.ref_clock = priv->ref_clock;
145 gen_parms->general_params.tcxo_ref_clock = wl->tcxo_clock; 147 gen_parms->general_params.tcxo_ref_clock = priv->tcxo_clock;
146 148
147 ret = wl1271_cmd_test(wl, gen_parms, sizeof(*gen_parms), answer); 149 ret = wl1271_cmd_test(wl, gen_parms, sizeof(*gen_parms), answer);
148 if (ret < 0) { 150 if (ret < 0) {
@@ -172,7 +174,7 @@ int wl1271_cmd_radio_parms(struct wl1271 *wl)
172 struct wl1271_nvs_file *nvs = (struct wl1271_nvs_file *)wl->nvs; 174 struct wl1271_nvs_file *nvs = (struct wl1271_nvs_file *)wl->nvs;
173 struct wl1271_radio_parms_cmd *radio_parms; 175 struct wl1271_radio_parms_cmd *radio_parms;
174 struct wl1271_ini_general_params *gp = &nvs->general_params; 176 struct wl1271_ini_general_params *gp = &nvs->general_params;
175 int ret; 177 int ret, fem_idx;
176 178
177 if (!wl->nvs) 179 if (!wl->nvs)
178 return -ENODEV; 180 return -ENODEV;
@@ -183,11 +185,13 @@ int wl1271_cmd_radio_parms(struct wl1271 *wl)
183 185
184 radio_parms->test.id = TEST_CMD_INI_FILE_RADIO_PARAM; 186 radio_parms->test.id = TEST_CMD_INI_FILE_RADIO_PARAM;
185 187
188 fem_idx = WL12XX_FEM_TO_NVS_ENTRY(gp->tx_bip_fem_manufacturer);
189
186 /* 2.4GHz parameters */ 190 /* 2.4GHz parameters */
187 memcpy(&radio_parms->static_params_2, &nvs->stat_radio_params_2, 191 memcpy(&radio_parms->static_params_2, &nvs->stat_radio_params_2,
188 sizeof(struct wl1271_ini_band_params_2)); 192 sizeof(struct wl1271_ini_band_params_2));
189 memcpy(&radio_parms->dyn_params_2, 193 memcpy(&radio_parms->dyn_params_2,
190 &nvs->dyn_radio_params_2[gp->tx_bip_fem_manufacturer].params, 194 &nvs->dyn_radio_params_2[fem_idx].params,
191 sizeof(struct wl1271_ini_fem_params_2)); 195 sizeof(struct wl1271_ini_fem_params_2));
192 196
193 /* 5GHz parameters */ 197 /* 5GHz parameters */
@@ -195,7 +199,7 @@ int wl1271_cmd_radio_parms(struct wl1271 *wl)
195 &nvs->stat_radio_params_5, 199 &nvs->stat_radio_params_5,
196 sizeof(struct wl1271_ini_band_params_5)); 200 sizeof(struct wl1271_ini_band_params_5));
197 memcpy(&radio_parms->dyn_params_5, 201 memcpy(&radio_parms->dyn_params_5,
198 &nvs->dyn_radio_params_5[gp->tx_bip_fem_manufacturer].params, 202 &nvs->dyn_radio_params_5[fem_idx].params,
199 sizeof(struct wl1271_ini_fem_params_5)); 203 sizeof(struct wl1271_ini_fem_params_5));
200 204
201 wl1271_dump(DEBUG_CMD, "TEST_CMD_INI_FILE_RADIO_PARAM: ", 205 wl1271_dump(DEBUG_CMD, "TEST_CMD_INI_FILE_RADIO_PARAM: ",
@@ -214,7 +218,7 @@ int wl128x_cmd_radio_parms(struct wl1271 *wl)
214 struct wl128x_nvs_file *nvs = (struct wl128x_nvs_file *)wl->nvs; 218 struct wl128x_nvs_file *nvs = (struct wl128x_nvs_file *)wl->nvs;
215 struct wl128x_radio_parms_cmd *radio_parms; 219 struct wl128x_radio_parms_cmd *radio_parms;
216 struct wl128x_ini_general_params *gp = &nvs->general_params; 220 struct wl128x_ini_general_params *gp = &nvs->general_params;
217 int ret; 221 int ret, fem_idx;
218 222
219 if (!wl->nvs) 223 if (!wl->nvs)
220 return -ENODEV; 224 return -ENODEV;
@@ -225,11 +229,13 @@ int wl128x_cmd_radio_parms(struct wl1271 *wl)
225 229
226 radio_parms->test.id = TEST_CMD_INI_FILE_RADIO_PARAM; 230 radio_parms->test.id = TEST_CMD_INI_FILE_RADIO_PARAM;
227 231
232 fem_idx = WL12XX_FEM_TO_NVS_ENTRY(gp->tx_bip_fem_manufacturer);
233
228 /* 2.4GHz parameters */ 234 /* 2.4GHz parameters */
229 memcpy(&radio_parms->static_params_2, &nvs->stat_radio_params_2, 235 memcpy(&radio_parms->static_params_2, &nvs->stat_radio_params_2,
230 sizeof(struct wl128x_ini_band_params_2)); 236 sizeof(struct wl128x_ini_band_params_2));
231 memcpy(&radio_parms->dyn_params_2, 237 memcpy(&radio_parms->dyn_params_2,
232 &nvs->dyn_radio_params_2[gp->tx_bip_fem_manufacturer].params, 238 &nvs->dyn_radio_params_2[fem_idx].params,
233 sizeof(struct wl128x_ini_fem_params_2)); 239 sizeof(struct wl128x_ini_fem_params_2));
234 240
235 /* 5GHz parameters */ 241 /* 5GHz parameters */
@@ -237,7 +243,7 @@ int wl128x_cmd_radio_parms(struct wl1271 *wl)
237 &nvs->stat_radio_params_5, 243 &nvs->stat_radio_params_5,
238 sizeof(struct wl128x_ini_band_params_5)); 244 sizeof(struct wl128x_ini_band_params_5));
239 memcpy(&radio_parms->dyn_params_5, 245 memcpy(&radio_parms->dyn_params_5,
240 &nvs->dyn_radio_params_5[gp->tx_bip_fem_manufacturer].params, 246 &nvs->dyn_radio_params_5[fem_idx].params,
241 sizeof(struct wl128x_ini_fem_params_5)); 247 sizeof(struct wl128x_ini_fem_params_5));
242 248
243 radio_parms->fem_vendor_and_options = nvs->fem_vendor_and_options; 249 radio_parms->fem_vendor_and_options = nvs->fem_vendor_and_options;
diff --git a/drivers/net/wireless/ti/wl12xx/debugfs.c b/drivers/net/wireless/ti/wl12xx/debugfs.c
new file mode 100644
index 00000000000..0521cbf858c
--- /dev/null
+++ b/drivers/net/wireless/ti/wl12xx/debugfs.c
@@ -0,0 +1,243 @@
1/*
2 * This file is part of wl12xx
3 *
4 * Copyright (C) 2009 Nokia Corporation
5 * Copyright (C) 2011-2012 Texas Instruments
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * version 2 as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
19 * 02110-1301 USA
20 *
21 */
22
23#include "../wlcore/debugfs.h"
24#include "../wlcore/wlcore.h"
25
26#include "wl12xx.h"
27#include "acx.h"
28#include "debugfs.h"
29
30#define WL12XX_DEBUGFS_FWSTATS_FILE(a, b, c) \
31 DEBUGFS_FWSTATS_FILE(a, b, c, wl12xx_acx_statistics)
32
33WL12XX_DEBUGFS_FWSTATS_FILE(tx, internal_desc_overflow, "%u");
34
35WL12XX_DEBUGFS_FWSTATS_FILE(rx, out_of_mem, "%u");
36WL12XX_DEBUGFS_FWSTATS_FILE(rx, hdr_overflow, "%u");
37WL12XX_DEBUGFS_FWSTATS_FILE(rx, hw_stuck, "%u");
38WL12XX_DEBUGFS_FWSTATS_FILE(rx, dropped, "%u");
39WL12XX_DEBUGFS_FWSTATS_FILE(rx, fcs_err, "%u");
40WL12XX_DEBUGFS_FWSTATS_FILE(rx, xfr_hint_trig, "%u");
41WL12XX_DEBUGFS_FWSTATS_FILE(rx, path_reset, "%u");
42WL12XX_DEBUGFS_FWSTATS_FILE(rx, reset_counter, "%u");
43
44WL12XX_DEBUGFS_FWSTATS_FILE(dma, rx_requested, "%u");
45WL12XX_DEBUGFS_FWSTATS_FILE(dma, rx_errors, "%u");
46WL12XX_DEBUGFS_FWSTATS_FILE(dma, tx_requested, "%u");
47WL12XX_DEBUGFS_FWSTATS_FILE(dma, tx_errors, "%u");
48
49WL12XX_DEBUGFS_FWSTATS_FILE(isr, cmd_cmplt, "%u");
50WL12XX_DEBUGFS_FWSTATS_FILE(isr, fiqs, "%u");
51WL12XX_DEBUGFS_FWSTATS_FILE(isr, rx_headers, "%u");
52WL12XX_DEBUGFS_FWSTATS_FILE(isr, rx_mem_overflow, "%u");
53WL12XX_DEBUGFS_FWSTATS_FILE(isr, rx_rdys, "%u");
54WL12XX_DEBUGFS_FWSTATS_FILE(isr, irqs, "%u");
55WL12XX_DEBUGFS_FWSTATS_FILE(isr, tx_procs, "%u");
56WL12XX_DEBUGFS_FWSTATS_FILE(isr, decrypt_done, "%u");
57WL12XX_DEBUGFS_FWSTATS_FILE(isr, dma0_done, "%u");
58WL12XX_DEBUGFS_FWSTATS_FILE(isr, dma1_done, "%u");
59WL12XX_DEBUGFS_FWSTATS_FILE(isr, tx_exch_complete, "%u");
60WL12XX_DEBUGFS_FWSTATS_FILE(isr, commands, "%u");
61WL12XX_DEBUGFS_FWSTATS_FILE(isr, rx_procs, "%u");
62WL12XX_DEBUGFS_FWSTATS_FILE(isr, hw_pm_mode_changes, "%u");
63WL12XX_DEBUGFS_FWSTATS_FILE(isr, host_acknowledges, "%u");
64WL12XX_DEBUGFS_FWSTATS_FILE(isr, pci_pm, "%u");
65WL12XX_DEBUGFS_FWSTATS_FILE(isr, wakeups, "%u");
66WL12XX_DEBUGFS_FWSTATS_FILE(isr, low_rssi, "%u");
67
68WL12XX_DEBUGFS_FWSTATS_FILE(wep, addr_key_count, "%u");
69WL12XX_DEBUGFS_FWSTATS_FILE(wep, default_key_count, "%u");
70/* skipping wep.reserved */
71WL12XX_DEBUGFS_FWSTATS_FILE(wep, key_not_found, "%u");
72WL12XX_DEBUGFS_FWSTATS_FILE(wep, decrypt_fail, "%u");
73WL12XX_DEBUGFS_FWSTATS_FILE(wep, packets, "%u");
74WL12XX_DEBUGFS_FWSTATS_FILE(wep, interrupt, "%u");
75
76WL12XX_DEBUGFS_FWSTATS_FILE(pwr, ps_enter, "%u");
77WL12XX_DEBUGFS_FWSTATS_FILE(pwr, elp_enter, "%u");
78WL12XX_DEBUGFS_FWSTATS_FILE(pwr, missing_bcns, "%u");
79WL12XX_DEBUGFS_FWSTATS_FILE(pwr, wake_on_host, "%u");
80WL12XX_DEBUGFS_FWSTATS_FILE(pwr, wake_on_timer_exp, "%u");
81WL12XX_DEBUGFS_FWSTATS_FILE(pwr, tx_with_ps, "%u");
82WL12XX_DEBUGFS_FWSTATS_FILE(pwr, tx_without_ps, "%u");
83WL12XX_DEBUGFS_FWSTATS_FILE(pwr, rcvd_beacons, "%u");
84WL12XX_DEBUGFS_FWSTATS_FILE(pwr, power_save_off, "%u");
85WL12XX_DEBUGFS_FWSTATS_FILE(pwr, enable_ps, "%u");
86WL12XX_DEBUGFS_FWSTATS_FILE(pwr, disable_ps, "%u");
87WL12XX_DEBUGFS_FWSTATS_FILE(pwr, fix_tsf_ps, "%u");
88/* skipping cont_miss_bcns_spread for now */
89WL12XX_DEBUGFS_FWSTATS_FILE(pwr, rcvd_awake_beacons, "%u");
90
91WL12XX_DEBUGFS_FWSTATS_FILE(mic, rx_pkts, "%u");
92WL12XX_DEBUGFS_FWSTATS_FILE(mic, calc_failure, "%u");
93
94WL12XX_DEBUGFS_FWSTATS_FILE(aes, encrypt_fail, "%u");
95WL12XX_DEBUGFS_FWSTATS_FILE(aes, decrypt_fail, "%u");
96WL12XX_DEBUGFS_FWSTATS_FILE(aes, encrypt_packets, "%u");
97WL12XX_DEBUGFS_FWSTATS_FILE(aes, decrypt_packets, "%u");
98WL12XX_DEBUGFS_FWSTATS_FILE(aes, encrypt_interrupt, "%u");
99WL12XX_DEBUGFS_FWSTATS_FILE(aes, decrypt_interrupt, "%u");
100
101WL12XX_DEBUGFS_FWSTATS_FILE(event, heart_beat, "%u");
102WL12XX_DEBUGFS_FWSTATS_FILE(event, calibration, "%u");
103WL12XX_DEBUGFS_FWSTATS_FILE(event, rx_mismatch, "%u");
104WL12XX_DEBUGFS_FWSTATS_FILE(event, rx_mem_empty, "%u");
105WL12XX_DEBUGFS_FWSTATS_FILE(event, rx_pool, "%u");
106WL12XX_DEBUGFS_FWSTATS_FILE(event, oom_late, "%u");
107WL12XX_DEBUGFS_FWSTATS_FILE(event, phy_transmit_error, "%u");
108WL12XX_DEBUGFS_FWSTATS_FILE(event, tx_stuck, "%u");
109
110WL12XX_DEBUGFS_FWSTATS_FILE(ps, pspoll_timeouts, "%u");
111WL12XX_DEBUGFS_FWSTATS_FILE(ps, upsd_timeouts, "%u");
112WL12XX_DEBUGFS_FWSTATS_FILE(ps, upsd_max_sptime, "%u");
113WL12XX_DEBUGFS_FWSTATS_FILE(ps, upsd_max_apturn, "%u");
114WL12XX_DEBUGFS_FWSTATS_FILE(ps, pspoll_max_apturn, "%u");
115WL12XX_DEBUGFS_FWSTATS_FILE(ps, pspoll_utilization, "%u");
116WL12XX_DEBUGFS_FWSTATS_FILE(ps, upsd_utilization, "%u");
117
118WL12XX_DEBUGFS_FWSTATS_FILE(rxpipe, rx_prep_beacon_drop, "%u");
119WL12XX_DEBUGFS_FWSTATS_FILE(rxpipe, descr_host_int_trig_rx_data, "%u");
120WL12XX_DEBUGFS_FWSTATS_FILE(rxpipe, beacon_buffer_thres_host_int_trig_rx_data,
121 "%u");
122WL12XX_DEBUGFS_FWSTATS_FILE(rxpipe, missed_beacon_host_int_trig_rx_data, "%u");
123WL12XX_DEBUGFS_FWSTATS_FILE(rxpipe, tx_xfr_host_int_trig_rx_data, "%u");
124
125int wl12xx_debugfs_add_files(struct wl1271 *wl,
126 struct dentry *rootdir)
127{
128 int ret = 0;
129 struct dentry *entry, *stats, *moddir;
130
131 moddir = debugfs_create_dir(KBUILD_MODNAME, rootdir);
132 if (!moddir || IS_ERR(moddir)) {
133 entry = moddir;
134 goto err;
135 }
136
137 stats = debugfs_create_dir("fw_stats", moddir);
138 if (!stats || IS_ERR(stats)) {
139 entry = stats;
140 goto err;
141 }
142
143 DEBUGFS_FWSTATS_ADD(tx, internal_desc_overflow);
144
145 DEBUGFS_FWSTATS_ADD(rx, out_of_mem);
146 DEBUGFS_FWSTATS_ADD(rx, hdr_overflow);
147 DEBUGFS_FWSTATS_ADD(rx, hw_stuck);
148 DEBUGFS_FWSTATS_ADD(rx, dropped);
149 DEBUGFS_FWSTATS_ADD(rx, fcs_err);
150 DEBUGFS_FWSTATS_ADD(rx, xfr_hint_trig);
151 DEBUGFS_FWSTATS_ADD(rx, path_reset);
152 DEBUGFS_FWSTATS_ADD(rx, reset_counter);
153
154 DEBUGFS_FWSTATS_ADD(dma, rx_requested);
155 DEBUGFS_FWSTATS_ADD(dma, rx_errors);
156 DEBUGFS_FWSTATS_ADD(dma, tx_requested);
157 DEBUGFS_FWSTATS_ADD(dma, tx_errors);
158
159 DEBUGFS_FWSTATS_ADD(isr, cmd_cmplt);
160 DEBUGFS_FWSTATS_ADD(isr, fiqs);
161 DEBUGFS_FWSTATS_ADD(isr, rx_headers);
162 DEBUGFS_FWSTATS_ADD(isr, rx_mem_overflow);
163 DEBUGFS_FWSTATS_ADD(isr, rx_rdys);
164 DEBUGFS_FWSTATS_ADD(isr, irqs);
165 DEBUGFS_FWSTATS_ADD(isr, tx_procs);
166 DEBUGFS_FWSTATS_ADD(isr, decrypt_done);
167 DEBUGFS_FWSTATS_ADD(isr, dma0_done);
168 DEBUGFS_FWSTATS_ADD(isr, dma1_done);
169 DEBUGFS_FWSTATS_ADD(isr, tx_exch_complete);
170 DEBUGFS_FWSTATS_ADD(isr, commands);
171 DEBUGFS_FWSTATS_ADD(isr, rx_procs);
172 DEBUGFS_FWSTATS_ADD(isr, hw_pm_mode_changes);
173 DEBUGFS_FWSTATS_ADD(isr, host_acknowledges);
174 DEBUGFS_FWSTATS_ADD(isr, pci_pm);
175 DEBUGFS_FWSTATS_ADD(isr, wakeups);
176 DEBUGFS_FWSTATS_ADD(isr, low_rssi);
177
178 DEBUGFS_FWSTATS_ADD(wep, addr_key_count);
179 DEBUGFS_FWSTATS_ADD(wep, default_key_count);
180 /* skipping wep.reserved */
181 DEBUGFS_FWSTATS_ADD(wep, key_not_found);
182 DEBUGFS_FWSTATS_ADD(wep, decrypt_fail);
183 DEBUGFS_FWSTATS_ADD(wep, packets);
184 DEBUGFS_FWSTATS_ADD(wep, interrupt);
185
186 DEBUGFS_FWSTATS_ADD(pwr, ps_enter);
187 DEBUGFS_FWSTATS_ADD(pwr, elp_enter);
188 DEBUGFS_FWSTATS_ADD(pwr, missing_bcns);
189 DEBUGFS_FWSTATS_ADD(pwr, wake_on_host);
190 DEBUGFS_FWSTATS_ADD(pwr, wake_on_timer_exp);
191 DEBUGFS_FWSTATS_ADD(pwr, tx_with_ps);
192 DEBUGFS_FWSTATS_ADD(pwr, tx_without_ps);
193 DEBUGFS_FWSTATS_ADD(pwr, rcvd_beacons);
194 DEBUGFS_FWSTATS_ADD(pwr, power_save_off);
195 DEBUGFS_FWSTATS_ADD(pwr, enable_ps);
196 DEBUGFS_FWSTATS_ADD(pwr, disable_ps);
197 DEBUGFS_FWSTATS_ADD(pwr, fix_tsf_ps);
198 /* skipping cont_miss_bcns_spread for now */
199 DEBUGFS_FWSTATS_ADD(pwr, rcvd_awake_beacons);
200
201 DEBUGFS_FWSTATS_ADD(mic, rx_pkts);
202 DEBUGFS_FWSTATS_ADD(mic, calc_failure);
203
204 DEBUGFS_FWSTATS_ADD(aes, encrypt_fail);
205 DEBUGFS_FWSTATS_ADD(aes, decrypt_fail);
206 DEBUGFS_FWSTATS_ADD(aes, encrypt_packets);
207 DEBUGFS_FWSTATS_ADD(aes, decrypt_packets);
208 DEBUGFS_FWSTATS_ADD(aes, encrypt_interrupt);
209 DEBUGFS_FWSTATS_ADD(aes, decrypt_interrupt);
210
211 DEBUGFS_FWSTATS_ADD(event, heart_beat);
212 DEBUGFS_FWSTATS_ADD(event, calibration);
213 DEBUGFS_FWSTATS_ADD(event, rx_mismatch);
214 DEBUGFS_FWSTATS_ADD(event, rx_mem_empty);
215 DEBUGFS_FWSTATS_ADD(event, rx_pool);
216 DEBUGFS_FWSTATS_ADD(event, oom_late);
217 DEBUGFS_FWSTATS_ADD(event, phy_transmit_error);
218 DEBUGFS_FWSTATS_ADD(event, tx_stuck);
219
220 DEBUGFS_FWSTATS_ADD(ps, pspoll_timeouts);
221 DEBUGFS_FWSTATS_ADD(ps, upsd_timeouts);
222 DEBUGFS_FWSTATS_ADD(ps, upsd_max_sptime);
223 DEBUGFS_FWSTATS_ADD(ps, upsd_max_apturn);
224 DEBUGFS_FWSTATS_ADD(ps, pspoll_max_apturn);
225 DEBUGFS_FWSTATS_ADD(ps, pspoll_utilization);
226 DEBUGFS_FWSTATS_ADD(ps, upsd_utilization);
227
228 DEBUGFS_FWSTATS_ADD(rxpipe, rx_prep_beacon_drop);
229 DEBUGFS_FWSTATS_ADD(rxpipe, descr_host_int_trig_rx_data);
230 DEBUGFS_FWSTATS_ADD(rxpipe, beacon_buffer_thres_host_int_trig_rx_data);
231 DEBUGFS_FWSTATS_ADD(rxpipe, missed_beacon_host_int_trig_rx_data);
232 DEBUGFS_FWSTATS_ADD(rxpipe, tx_xfr_host_int_trig_rx_data);
233
234 return 0;
235
236err:
237 if (IS_ERR(entry))
238 ret = PTR_ERR(entry);
239 else
240 ret = -ENOMEM;
241
242 return ret;
243}
diff --git a/drivers/net/wireless/ti/wl12xx/debugfs.h b/drivers/net/wireless/ti/wl12xx/debugfs.h
new file mode 100644
index 00000000000..96898e291b7
--- /dev/null
+++ b/drivers/net/wireless/ti/wl12xx/debugfs.h
@@ -0,0 +1,28 @@
1/*
2 * This file is part of wl12xx
3 *
4 * Copyright (C) 2012 Texas Instruments. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
18 * 02110-1301 USA
19 *
20 */
21
22#ifndef __WL12XX_DEBUGFS_H__
23#define __WL12XX_DEBUGFS_H__
24
25int wl12xx_debugfs_add_files(struct wl1271 *wl,
26 struct dentry *rootdir);
27
28#endif /* __WL12XX_DEBUGFS_H__ */
diff --git a/drivers/net/wireless/ti/wl12xx/main.c b/drivers/net/wireless/ti/wl12xx/main.c
index d7dd3def07b..47ba2e0017f 100644
--- a/drivers/net/wireless/ti/wl12xx/main.c
+++ b/drivers/net/wireless/ti/wl12xx/main.c
@@ -39,6 +39,10 @@
39#include "reg.h" 39#include "reg.h"
40#include "cmd.h" 40#include "cmd.h"
41#include "acx.h" 41#include "acx.h"
42#include "debugfs.h"
43
44static char *fref_param;
45static char *tcxo_param;
42 46
43static struct wlcore_conf wl12xx_conf = { 47static struct wlcore_conf wl12xx_conf = {
44 .sg = { 48 .sg = {
@@ -212,7 +216,7 @@ static struct wlcore_conf wl12xx_conf = {
212 .suspend_wake_up_event = CONF_WAKE_UP_EVENT_N_DTIM, 216 .suspend_wake_up_event = CONF_WAKE_UP_EVENT_N_DTIM,
213 .suspend_listen_interval = 3, 217 .suspend_listen_interval = 3,
214 .bcn_filt_mode = CONF_BCN_FILT_MODE_ENABLED, 218 .bcn_filt_mode = CONF_BCN_FILT_MODE_ENABLED,
215 .bcn_filt_ie_count = 2, 219 .bcn_filt_ie_count = 3,
216 .bcn_filt_ie = { 220 .bcn_filt_ie = {
217 [0] = { 221 [0] = {
218 .ie = WLAN_EID_CHANNEL_SWITCH, 222 .ie = WLAN_EID_CHANNEL_SWITCH,
@@ -222,9 +226,13 @@ static struct wlcore_conf wl12xx_conf = {
222 .ie = WLAN_EID_HT_OPERATION, 226 .ie = WLAN_EID_HT_OPERATION,
223 .rule = CONF_BCN_RULE_PASS_ON_CHANGE, 227 .rule = CONF_BCN_RULE_PASS_ON_CHANGE,
224 }, 228 },
229 [2] = {
230 .ie = WLAN_EID_ERP_INFO,
231 .rule = CONF_BCN_RULE_PASS_ON_CHANGE,
232 },
225 }, 233 },
226 .synch_fail_thold = 10, 234 .synch_fail_thold = 12,
227 .bss_lose_timeout = 100, 235 .bss_lose_timeout = 400,
228 .beacon_rx_timeout = 10000, 236 .beacon_rx_timeout = 10000,
229 .broadcast_timeout = 20000, 237 .broadcast_timeout = 20000,
230 .rx_broadcast_in_ps = 1, 238 .rx_broadcast_in_ps = 1,
@@ -234,10 +242,11 @@ static struct wlcore_conf wl12xx_conf = {
234 .psm_entry_retries = 8, 242 .psm_entry_retries = 8,
235 .psm_exit_retries = 16, 243 .psm_exit_retries = 16,
236 .psm_entry_nullfunc_retries = 3, 244 .psm_entry_nullfunc_retries = 3,
237 .dynamic_ps_timeout = 40, 245 .dynamic_ps_timeout = 200,
238 .forced_ps = false, 246 .forced_ps = false,
239 .keep_alive_interval = 55000, 247 .keep_alive_interval = 55000,
240 .max_listen_interval = 20, 248 .max_listen_interval = 20,
249 .sta_sleep_auth = WL1271_PSM_ILLEGAL,
241 }, 250 },
242 .itrim = { 251 .itrim = {
243 .enable = false, 252 .enable = false,
@@ -245,7 +254,7 @@ static struct wlcore_conf wl12xx_conf = {
245 }, 254 },
246 .pm_config = { 255 .pm_config = {
247 .host_clk_settling_time = 5000, 256 .host_clk_settling_time = 5000,
248 .host_fast_wakeup_support = false 257 .host_fast_wakeup_support = CONF_FAST_WAKEUP_DISABLE,
249 }, 258 },
250 .roam_trigger = { 259 .roam_trigger = {
251 .trigger_pacing = 1, 260 .trigger_pacing = 1,
@@ -305,8 +314,8 @@ static struct wlcore_conf wl12xx_conf = {
305 .swallow_period = 5, 314 .swallow_period = 5,
306 .n_divider_fref_set_1 = 0xff, /* default */ 315 .n_divider_fref_set_1 = 0xff, /* default */
307 .n_divider_fref_set_2 = 12, 316 .n_divider_fref_set_2 = 12,
308 .m_divider_fref_set_1 = 148, 317 .m_divider_fref_set_1 = 0xffff,
309 .m_divider_fref_set_2 = 0xffff, /* default */ 318 .m_divider_fref_set_2 = 148, /* default */
310 .coex_pll_stabilization_time = 0xffffffff, /* default */ 319 .coex_pll_stabilization_time = 0xffffffff, /* default */
311 .ldo_stabilization_time = 0xffff, /* default */ 320 .ldo_stabilization_time = 0xffff, /* default */
312 .fm_disturbed_band_margin = 0xff, /* default */ 321 .fm_disturbed_band_margin = 0xff, /* default */
@@ -589,11 +598,13 @@ static const int wl12xx_rtable[REG_TABLE_LEN] = {
589#define WL128X_FW_NAME_SINGLE "ti-connectivity/wl128x-fw-4-sr.bin" 598#define WL128X_FW_NAME_SINGLE "ti-connectivity/wl128x-fw-4-sr.bin"
590#define WL128X_PLT_FW_NAME "ti-connectivity/wl128x-fw-4-plt.bin" 599#define WL128X_PLT_FW_NAME "ti-connectivity/wl128x-fw-4-plt.bin"
591 600
592static void wl127x_prepare_read(struct wl1271 *wl, u32 rx_desc, u32 len) 601static int wl127x_prepare_read(struct wl1271 *wl, u32 rx_desc, u32 len)
593{ 602{
603 int ret;
604
594 if (wl->chip.id != CHIP_ID_1283_PG20) { 605 if (wl->chip.id != CHIP_ID_1283_PG20) {
595 struct wl1271_acx_mem_map *wl_mem_map = wl->target_mem_map; 606 struct wl1271_acx_mem_map *wl_mem_map = wl->target_mem_map;
596 struct wl1271_rx_mem_pool_addr rx_mem_addr; 607 struct wl127x_rx_mem_pool_addr rx_mem_addr;
597 608
598 /* 609 /*
599 * Choose the block we want to read 610 * Choose the block we want to read
@@ -607,9 +618,13 @@ static void wl127x_prepare_read(struct wl1271 *wl, u32 rx_desc, u32 len)
607 618
608 rx_mem_addr.addr_extra = rx_mem_addr.addr + 4; 619 rx_mem_addr.addr_extra = rx_mem_addr.addr + 4;
609 620
610 wl1271_write(wl, WL1271_SLV_REG_DATA, 621 ret = wlcore_write(wl, WL1271_SLV_REG_DATA, &rx_mem_addr,
611 &rx_mem_addr, sizeof(rx_mem_addr), false); 622 sizeof(rx_mem_addr), false);
623 if (ret < 0)
624 return ret;
612 } 625 }
626
627 return 0;
613} 628}
614 629
615static int wl12xx_identify_chip(struct wl1271 *wl) 630static int wl12xx_identify_chip(struct wl1271 *wl)
@@ -621,10 +636,8 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
621 wl1271_warning("chip id 0x%x (1271 PG10) support is obsolete", 636 wl1271_warning("chip id 0x%x (1271 PG10) support is obsolete",
622 wl->chip.id); 637 wl->chip.id);
623 638
624 /* clear the alignment quirk, since we don't support it */ 639 wl->quirks |= WLCORE_QUIRK_LEGACY_NVS |
625 wl->quirks &= ~WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN; 640 WLCORE_QUIRK_TKIP_HEADER_SPACE;
626
627 wl->quirks |= WLCORE_QUIRK_LEGACY_NVS;
628 wl->sr_fw_name = WL127X_FW_NAME_SINGLE; 641 wl->sr_fw_name = WL127X_FW_NAME_SINGLE;
629 wl->mr_fw_name = WL127X_FW_NAME_MULTI; 642 wl->mr_fw_name = WL127X_FW_NAME_MULTI;
630 memcpy(&wl->conf.mem, &wl12xx_default_priv_conf.mem_wl127x, 643 memcpy(&wl->conf.mem, &wl12xx_default_priv_conf.mem_wl127x,
@@ -639,10 +652,8 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
639 wl1271_debug(DEBUG_BOOT, "chip id 0x%x (1271 PG20)", 652 wl1271_debug(DEBUG_BOOT, "chip id 0x%x (1271 PG20)",
640 wl->chip.id); 653 wl->chip.id);
641 654
642 /* clear the alignment quirk, since we don't support it */ 655 wl->quirks |= WLCORE_QUIRK_LEGACY_NVS |
643 wl->quirks &= ~WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN; 656 WLCORE_QUIRK_TKIP_HEADER_SPACE;
644
645 wl->quirks |= WLCORE_QUIRK_LEGACY_NVS;
646 wl->plt_fw_name = WL127X_PLT_FW_NAME; 657 wl->plt_fw_name = WL127X_PLT_FW_NAME;
647 wl->sr_fw_name = WL127X_FW_NAME_SINGLE; 658 wl->sr_fw_name = WL127X_FW_NAME_SINGLE;
648 wl->mr_fw_name = WL127X_FW_NAME_MULTI; 659 wl->mr_fw_name = WL127X_FW_NAME_MULTI;
@@ -660,6 +671,11 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
660 wl->plt_fw_name = WL128X_PLT_FW_NAME; 671 wl->plt_fw_name = WL128X_PLT_FW_NAME;
661 wl->sr_fw_name = WL128X_FW_NAME_SINGLE; 672 wl->sr_fw_name = WL128X_FW_NAME_SINGLE;
662 wl->mr_fw_name = WL128X_FW_NAME_MULTI; 673 wl->mr_fw_name = WL128X_FW_NAME_MULTI;
674
675 /* wl128x requires TX blocksize alignment */
676 wl->quirks |= WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN |
677 WLCORE_QUIRK_TKIP_HEADER_SPACE;
678
663 break; 679 break;
664 case CHIP_ID_1283_PG10: 680 case CHIP_ID_1283_PG10:
665 default: 681 default:
@@ -672,64 +688,95 @@ out:
672 return ret; 688 return ret;
673} 689}
674 690
675static void wl12xx_top_reg_write(struct wl1271 *wl, int addr, u16 val) 691static int __must_check wl12xx_top_reg_write(struct wl1271 *wl, int addr,
692 u16 val)
676{ 693{
694 int ret;
695
677 /* write address >> 1 + 0x30000 to OCP_POR_CTR */ 696 /* write address >> 1 + 0x30000 to OCP_POR_CTR */
678 addr = (addr >> 1) + 0x30000; 697 addr = (addr >> 1) + 0x30000;
679 wl1271_write32(wl, WL12XX_OCP_POR_CTR, addr); 698 ret = wlcore_write32(wl, WL12XX_OCP_POR_CTR, addr);
699 if (ret < 0)
700 goto out;
680 701
681 /* write value to OCP_POR_WDATA */ 702 /* write value to OCP_POR_WDATA */
682 wl1271_write32(wl, WL12XX_OCP_DATA_WRITE, val); 703 ret = wlcore_write32(wl, WL12XX_OCP_DATA_WRITE, val);
704 if (ret < 0)
705 goto out;
683 706
684 /* write 1 to OCP_CMD */ 707 /* write 1 to OCP_CMD */
685 wl1271_write32(wl, WL12XX_OCP_CMD, OCP_CMD_WRITE); 708 ret = wlcore_write32(wl, WL12XX_OCP_CMD, OCP_CMD_WRITE);
709 if (ret < 0)
710 goto out;
711
712out:
713 return ret;
686} 714}
687 715
688static u16 wl12xx_top_reg_read(struct wl1271 *wl, int addr) 716static int __must_check wl12xx_top_reg_read(struct wl1271 *wl, int addr,
717 u16 *out)
689{ 718{
690 u32 val; 719 u32 val;
691 int timeout = OCP_CMD_LOOP; 720 int timeout = OCP_CMD_LOOP;
721 int ret;
692 722
693 /* write address >> 1 + 0x30000 to OCP_POR_CTR */ 723 /* write address >> 1 + 0x30000 to OCP_POR_CTR */
694 addr = (addr >> 1) + 0x30000; 724 addr = (addr >> 1) + 0x30000;
695 wl1271_write32(wl, WL12XX_OCP_POR_CTR, addr); 725 ret = wlcore_write32(wl, WL12XX_OCP_POR_CTR, addr);
726 if (ret < 0)
727 return ret;
696 728
697 /* write 2 to OCP_CMD */ 729 /* write 2 to OCP_CMD */
698 wl1271_write32(wl, WL12XX_OCP_CMD, OCP_CMD_READ); 730 ret = wlcore_write32(wl, WL12XX_OCP_CMD, OCP_CMD_READ);
731 if (ret < 0)
732 return ret;
699 733
700 /* poll for data ready */ 734 /* poll for data ready */
701 do { 735 do {
702 val = wl1271_read32(wl, WL12XX_OCP_DATA_READ); 736 ret = wlcore_read32(wl, WL12XX_OCP_DATA_READ, &val);
737 if (ret < 0)
738 return ret;
703 } while (!(val & OCP_READY_MASK) && --timeout); 739 } while (!(val & OCP_READY_MASK) && --timeout);
704 740
705 if (!timeout) { 741 if (!timeout) {
706 wl1271_warning("Top register access timed out."); 742 wl1271_warning("Top register access timed out.");
707 return 0xffff; 743 return -ETIMEDOUT;
708 } 744 }
709 745
710 /* check data status and return if OK */ 746 /* check data status and return if OK */
711 if ((val & OCP_STATUS_MASK) == OCP_STATUS_OK) 747 if ((val & OCP_STATUS_MASK) != OCP_STATUS_OK) {
712 return val & 0xffff;
713 else {
714 wl1271_warning("Top register access returned error."); 748 wl1271_warning("Top register access returned error.");
715 return 0xffff; 749 return -EIO;
716 } 750 }
751
752 if (out)
753 *out = val & 0xffff;
754
755 return 0;
717} 756}
718 757
719static int wl128x_switch_tcxo_to_fref(struct wl1271 *wl) 758static int wl128x_switch_tcxo_to_fref(struct wl1271 *wl)
720{ 759{
721 u16 spare_reg; 760 u16 spare_reg;
761 int ret;
722 762
723 /* Mask bits [2] & [8:4] in the sys_clk_cfg register */ 763 /* Mask bits [2] & [8:4] in the sys_clk_cfg register */
724 spare_reg = wl12xx_top_reg_read(wl, WL_SPARE_REG); 764 ret = wl12xx_top_reg_read(wl, WL_SPARE_REG, &spare_reg);
765 if (ret < 0)
766 return ret;
767
725 if (spare_reg == 0xFFFF) 768 if (spare_reg == 0xFFFF)
726 return -EFAULT; 769 return -EFAULT;
727 spare_reg |= (BIT(3) | BIT(5) | BIT(6)); 770 spare_reg |= (BIT(3) | BIT(5) | BIT(6));
728 wl12xx_top_reg_write(wl, WL_SPARE_REG, spare_reg); 771 ret = wl12xx_top_reg_write(wl, WL_SPARE_REG, spare_reg);
772 if (ret < 0)
773 return ret;
729 774
730 /* Enable FREF_CLK_REQ & mux MCS and coex PLLs to FREF */ 775 /* Enable FREF_CLK_REQ & mux MCS and coex PLLs to FREF */
731 wl12xx_top_reg_write(wl, SYS_CLK_CFG_REG, 776 ret = wl12xx_top_reg_write(wl, SYS_CLK_CFG_REG,
732 WL_CLK_REQ_TYPE_PG2 | MCS_PLL_CLK_SEL_FREF); 777 WL_CLK_REQ_TYPE_PG2 | MCS_PLL_CLK_SEL_FREF);
778 if (ret < 0)
779 return ret;
733 780
734 /* Delay execution for 15msec, to let the HW settle */ 781 /* Delay execution for 15msec, to let the HW settle */
735 mdelay(15); 782 mdelay(15);
@@ -740,8 +787,12 @@ static int wl128x_switch_tcxo_to_fref(struct wl1271 *wl)
740static bool wl128x_is_tcxo_valid(struct wl1271 *wl) 787static bool wl128x_is_tcxo_valid(struct wl1271 *wl)
741{ 788{
742 u16 tcxo_detection; 789 u16 tcxo_detection;
790 int ret;
791
792 ret = wl12xx_top_reg_read(wl, TCXO_CLK_DETECT_REG, &tcxo_detection);
793 if (ret < 0)
794 return false;
743 795
744 tcxo_detection = wl12xx_top_reg_read(wl, TCXO_CLK_DETECT_REG);
745 if (tcxo_detection & TCXO_DET_FAILED) 796 if (tcxo_detection & TCXO_DET_FAILED)
746 return false; 797 return false;
747 798
@@ -751,8 +802,12 @@ static bool wl128x_is_tcxo_valid(struct wl1271 *wl)
751static bool wl128x_is_fref_valid(struct wl1271 *wl) 802static bool wl128x_is_fref_valid(struct wl1271 *wl)
752{ 803{
753 u16 fref_detection; 804 u16 fref_detection;
805 int ret;
806
807 ret = wl12xx_top_reg_read(wl, FREF_CLK_DETECT_REG, &fref_detection);
808 if (ret < 0)
809 return false;
754 810
755 fref_detection = wl12xx_top_reg_read(wl, FREF_CLK_DETECT_REG);
756 if (fref_detection & FREF_CLK_DETECT_FAIL) 811 if (fref_detection & FREF_CLK_DETECT_FAIL)
757 return false; 812 return false;
758 813
@@ -761,11 +816,21 @@ static bool wl128x_is_fref_valid(struct wl1271 *wl)
761 816
762static int wl128x_manually_configure_mcs_pll(struct wl1271 *wl) 817static int wl128x_manually_configure_mcs_pll(struct wl1271 *wl)
763{ 818{
764 wl12xx_top_reg_write(wl, MCS_PLL_M_REG, MCS_PLL_M_REG_VAL); 819 int ret;
765 wl12xx_top_reg_write(wl, MCS_PLL_N_REG, MCS_PLL_N_REG_VAL);
766 wl12xx_top_reg_write(wl, MCS_PLL_CONFIG_REG, MCS_PLL_CONFIG_REG_VAL);
767 820
768 return 0; 821 ret = wl12xx_top_reg_write(wl, MCS_PLL_M_REG, MCS_PLL_M_REG_VAL);
822 if (ret < 0)
823 goto out;
824
825 ret = wl12xx_top_reg_write(wl, MCS_PLL_N_REG, MCS_PLL_N_REG_VAL);
826 if (ret < 0)
827 goto out;
828
829 ret = wl12xx_top_reg_write(wl, MCS_PLL_CONFIG_REG,
830 MCS_PLL_CONFIG_REG_VAL);
831
832out:
833 return ret;
769} 834}
770 835
771static int wl128x_configure_mcs_pll(struct wl1271 *wl, int clk) 836static int wl128x_configure_mcs_pll(struct wl1271 *wl, int clk)
@@ -773,30 +838,40 @@ static int wl128x_configure_mcs_pll(struct wl1271 *wl, int clk)
773 u16 spare_reg; 838 u16 spare_reg;
774 u16 pll_config; 839 u16 pll_config;
775 u8 input_freq; 840 u8 input_freq;
841 struct wl12xx_priv *priv = wl->priv;
842 int ret;
776 843
777 /* Mask bits [3:1] in the sys_clk_cfg register */ 844 /* Mask bits [3:1] in the sys_clk_cfg register */
778 spare_reg = wl12xx_top_reg_read(wl, WL_SPARE_REG); 845 ret = wl12xx_top_reg_read(wl, WL_SPARE_REG, &spare_reg);
846 if (ret < 0)
847 return ret;
848
779 if (spare_reg == 0xFFFF) 849 if (spare_reg == 0xFFFF)
780 return -EFAULT; 850 return -EFAULT;
781 spare_reg |= BIT(2); 851 spare_reg |= BIT(2);
782 wl12xx_top_reg_write(wl, WL_SPARE_REG, spare_reg); 852 ret = wl12xx_top_reg_write(wl, WL_SPARE_REG, spare_reg);
853 if (ret < 0)
854 return ret;
783 855
784 /* Handle special cases of the TCXO clock */ 856 /* Handle special cases of the TCXO clock */
785 if (wl->tcxo_clock == WL12XX_TCXOCLOCK_16_8 || 857 if (priv->tcxo_clock == WL12XX_TCXOCLOCK_16_8 ||
786 wl->tcxo_clock == WL12XX_TCXOCLOCK_33_6) 858 priv->tcxo_clock == WL12XX_TCXOCLOCK_33_6)
787 return wl128x_manually_configure_mcs_pll(wl); 859 return wl128x_manually_configure_mcs_pll(wl);
788 860
789 /* Set the input frequency according to the selected clock source */ 861 /* Set the input frequency according to the selected clock source */
790 input_freq = (clk & 1) + 1; 862 input_freq = (clk & 1) + 1;
791 863
792 pll_config = wl12xx_top_reg_read(wl, MCS_PLL_CONFIG_REG); 864 ret = wl12xx_top_reg_read(wl, MCS_PLL_CONFIG_REG, &pll_config);
865 if (ret < 0)
866 return ret;
867
793 if (pll_config == 0xFFFF) 868 if (pll_config == 0xFFFF)
794 return -EFAULT; 869 return -EFAULT;
795 pll_config |= (input_freq << MCS_SEL_IN_FREQ_SHIFT); 870 pll_config |= (input_freq << MCS_SEL_IN_FREQ_SHIFT);
796 pll_config |= MCS_PLL_ENABLE_HP; 871 pll_config |= MCS_PLL_ENABLE_HP;
797 wl12xx_top_reg_write(wl, MCS_PLL_CONFIG_REG, pll_config); 872 ret = wl12xx_top_reg_write(wl, MCS_PLL_CONFIG_REG, pll_config);
798 873
799 return 0; 874 return ret;
800} 875}
801 876
802/* 877/*
@@ -808,26 +883,31 @@ static int wl128x_configure_mcs_pll(struct wl1271 *wl, int clk)
808 */ 883 */
809static int wl128x_boot_clk(struct wl1271 *wl, int *selected_clock) 884static int wl128x_boot_clk(struct wl1271 *wl, int *selected_clock)
810{ 885{
886 struct wl12xx_priv *priv = wl->priv;
811 u16 sys_clk_cfg; 887 u16 sys_clk_cfg;
888 int ret;
812 889
813 /* For XTAL-only modes, FREF will be used after switching from TCXO */ 890 /* For XTAL-only modes, FREF will be used after switching from TCXO */
814 if (wl->ref_clock == WL12XX_REFCLOCK_26_XTAL || 891 if (priv->ref_clock == WL12XX_REFCLOCK_26_XTAL ||
815 wl->ref_clock == WL12XX_REFCLOCK_38_XTAL) { 892 priv->ref_clock == WL12XX_REFCLOCK_38_XTAL) {
816 if (!wl128x_switch_tcxo_to_fref(wl)) 893 if (!wl128x_switch_tcxo_to_fref(wl))
817 return -EINVAL; 894 return -EINVAL;
818 goto fref_clk; 895 goto fref_clk;
819 } 896 }
820 897
821 /* Query the HW, to determine which clock source we should use */ 898 /* Query the HW, to determine which clock source we should use */
822 sys_clk_cfg = wl12xx_top_reg_read(wl, SYS_CLK_CFG_REG); 899 ret = wl12xx_top_reg_read(wl, SYS_CLK_CFG_REG, &sys_clk_cfg);
900 if (ret < 0)
901 return ret;
902
823 if (sys_clk_cfg == 0xFFFF) 903 if (sys_clk_cfg == 0xFFFF)
824 return -EINVAL; 904 return -EINVAL;
825 if (sys_clk_cfg & PRCM_CM_EN_MUX_WLAN_FREF) 905 if (sys_clk_cfg & PRCM_CM_EN_MUX_WLAN_FREF)
826 goto fref_clk; 906 goto fref_clk;
827 907
828 /* If TCXO is either 32.736MHz or 16.368MHz, switch to FREF */ 908 /* If TCXO is either 32.736MHz or 16.368MHz, switch to FREF */
829 if (wl->tcxo_clock == WL12XX_TCXOCLOCK_16_368 || 909 if (priv->tcxo_clock == WL12XX_TCXOCLOCK_16_368 ||
830 wl->tcxo_clock == WL12XX_TCXOCLOCK_32_736) { 910 priv->tcxo_clock == WL12XX_TCXOCLOCK_32_736) {
831 if (!wl128x_switch_tcxo_to_fref(wl)) 911 if (!wl128x_switch_tcxo_to_fref(wl))
832 return -EINVAL; 912 return -EINVAL;
833 goto fref_clk; 913 goto fref_clk;
@@ -836,14 +916,14 @@ static int wl128x_boot_clk(struct wl1271 *wl, int *selected_clock)
836 /* TCXO clock is selected */ 916 /* TCXO clock is selected */
837 if (!wl128x_is_tcxo_valid(wl)) 917 if (!wl128x_is_tcxo_valid(wl))
838 return -EINVAL; 918 return -EINVAL;
839 *selected_clock = wl->tcxo_clock; 919 *selected_clock = priv->tcxo_clock;
840 goto config_mcs_pll; 920 goto config_mcs_pll;
841 921
842fref_clk: 922fref_clk:
843 /* FREF clock is selected */ 923 /* FREF clock is selected */
844 if (!wl128x_is_fref_valid(wl)) 924 if (!wl128x_is_fref_valid(wl))
845 return -EINVAL; 925 return -EINVAL;
846 *selected_clock = wl->ref_clock; 926 *selected_clock = priv->ref_clock;
847 927
848config_mcs_pll: 928config_mcs_pll:
849 return wl128x_configure_mcs_pll(wl, *selected_clock); 929 return wl128x_configure_mcs_pll(wl, *selected_clock);
@@ -851,69 +931,98 @@ config_mcs_pll:
851 931
852static int wl127x_boot_clk(struct wl1271 *wl) 932static int wl127x_boot_clk(struct wl1271 *wl)
853{ 933{
934 struct wl12xx_priv *priv = wl->priv;
854 u32 pause; 935 u32 pause;
855 u32 clk; 936 u32 clk;
937 int ret;
856 938
857 if (WL127X_PG_GET_MAJOR(wl->hw_pg_ver) < 3) 939 if (WL127X_PG_GET_MAJOR(wl->hw_pg_ver) < 3)
858 wl->quirks |= WLCORE_QUIRK_END_OF_TRANSACTION; 940 wl->quirks |= WLCORE_QUIRK_END_OF_TRANSACTION;
859 941
860 if (wl->ref_clock == CONF_REF_CLK_19_2_E || 942 if (priv->ref_clock == CONF_REF_CLK_19_2_E ||
861 wl->ref_clock == CONF_REF_CLK_38_4_E || 943 priv->ref_clock == CONF_REF_CLK_38_4_E ||
862 wl->ref_clock == CONF_REF_CLK_38_4_M_XTAL) 944 priv->ref_clock == CONF_REF_CLK_38_4_M_XTAL)
863 /* ref clk: 19.2/38.4/38.4-XTAL */ 945 /* ref clk: 19.2/38.4/38.4-XTAL */
864 clk = 0x3; 946 clk = 0x3;
865 else if (wl->ref_clock == CONF_REF_CLK_26_E || 947 else if (priv->ref_clock == CONF_REF_CLK_26_E ||
866 wl->ref_clock == CONF_REF_CLK_52_E) 948 priv->ref_clock == CONF_REF_CLK_26_M_XTAL ||
949 priv->ref_clock == CONF_REF_CLK_52_E)
867 /* ref clk: 26/52 */ 950 /* ref clk: 26/52 */
868 clk = 0x5; 951 clk = 0x5;
869 else 952 else
870 return -EINVAL; 953 return -EINVAL;
871 954
872 if (wl->ref_clock != CONF_REF_CLK_19_2_E) { 955 if (priv->ref_clock != CONF_REF_CLK_19_2_E) {
873 u16 val; 956 u16 val;
874 /* Set clock type (open drain) */ 957 /* Set clock type (open drain) */
875 val = wl12xx_top_reg_read(wl, OCP_REG_CLK_TYPE); 958 ret = wl12xx_top_reg_read(wl, OCP_REG_CLK_TYPE, &val);
959 if (ret < 0)
960 goto out;
961
876 val &= FREF_CLK_TYPE_BITS; 962 val &= FREF_CLK_TYPE_BITS;
877 wl12xx_top_reg_write(wl, OCP_REG_CLK_TYPE, val); 963 ret = wl12xx_top_reg_write(wl, OCP_REG_CLK_TYPE, val);
964 if (ret < 0)
965 goto out;
878 966
879 /* Set clock pull mode (no pull) */ 967 /* Set clock pull mode (no pull) */
880 val = wl12xx_top_reg_read(wl, OCP_REG_CLK_PULL); 968 ret = wl12xx_top_reg_read(wl, OCP_REG_CLK_PULL, &val);
969 if (ret < 0)
970 goto out;
971
881 val |= NO_PULL; 972 val |= NO_PULL;
882 wl12xx_top_reg_write(wl, OCP_REG_CLK_PULL, val); 973 ret = wl12xx_top_reg_write(wl, OCP_REG_CLK_PULL, val);
974 if (ret < 0)
975 goto out;
883 } else { 976 } else {
884 u16 val; 977 u16 val;
885 /* Set clock polarity */ 978 /* Set clock polarity */
886 val = wl12xx_top_reg_read(wl, OCP_REG_CLK_POLARITY); 979 ret = wl12xx_top_reg_read(wl, OCP_REG_CLK_POLARITY, &val);
980 if (ret < 0)
981 goto out;
982
887 val &= FREF_CLK_POLARITY_BITS; 983 val &= FREF_CLK_POLARITY_BITS;
888 val |= CLK_REQ_OUTN_SEL; 984 val |= CLK_REQ_OUTN_SEL;
889 wl12xx_top_reg_write(wl, OCP_REG_CLK_POLARITY, val); 985 ret = wl12xx_top_reg_write(wl, OCP_REG_CLK_POLARITY, val);
986 if (ret < 0)
987 goto out;
890 } 988 }
891 989
892 wl1271_write32(wl, WL12XX_PLL_PARAMETERS, clk); 990 ret = wlcore_write32(wl, WL12XX_PLL_PARAMETERS, clk);
991 if (ret < 0)
992 goto out;
893 993
894 pause = wl1271_read32(wl, WL12XX_PLL_PARAMETERS); 994 ret = wlcore_read32(wl, WL12XX_PLL_PARAMETERS, &pause);
995 if (ret < 0)
996 goto out;
895 997
896 wl1271_debug(DEBUG_BOOT, "pause1 0x%x", pause); 998 wl1271_debug(DEBUG_BOOT, "pause1 0x%x", pause);
897 999
898 pause &= ~(WU_COUNTER_PAUSE_VAL); 1000 pause &= ~(WU_COUNTER_PAUSE_VAL);
899 pause |= WU_COUNTER_PAUSE_VAL; 1001 pause |= WU_COUNTER_PAUSE_VAL;
900 wl1271_write32(wl, WL12XX_WU_COUNTER_PAUSE, pause); 1002 ret = wlcore_write32(wl, WL12XX_WU_COUNTER_PAUSE, pause);
901 1003
902 return 0; 1004out:
1005 return ret;
903} 1006}
904 1007
905static int wl1271_boot_soft_reset(struct wl1271 *wl) 1008static int wl1271_boot_soft_reset(struct wl1271 *wl)
906{ 1009{
907 unsigned long timeout; 1010 unsigned long timeout;
908 u32 boot_data; 1011 u32 boot_data;
1012 int ret = 0;
909 1013
910 /* perform soft reset */ 1014 /* perform soft reset */
911 wl1271_write32(wl, WL12XX_SLV_SOFT_RESET, ACX_SLV_SOFT_RESET_BIT); 1015 ret = wlcore_write32(wl, WL12XX_SLV_SOFT_RESET, ACX_SLV_SOFT_RESET_BIT);
1016 if (ret < 0)
1017 goto out;
912 1018
913 /* SOFT_RESET is self clearing */ 1019 /* SOFT_RESET is self clearing */
914 timeout = jiffies + usecs_to_jiffies(SOFT_RESET_MAX_TIME); 1020 timeout = jiffies + usecs_to_jiffies(SOFT_RESET_MAX_TIME);
915 while (1) { 1021 while (1) {
916 boot_data = wl1271_read32(wl, WL12XX_SLV_SOFT_RESET); 1022 ret = wlcore_read32(wl, WL12XX_SLV_SOFT_RESET, &boot_data);
1023 if (ret < 0)
1024 goto out;
1025
917 wl1271_debug(DEBUG_BOOT, "soft reset bootdata 0x%x", boot_data); 1026 wl1271_debug(DEBUG_BOOT, "soft reset bootdata 0x%x", boot_data);
918 if ((boot_data & ACX_SLV_SOFT_RESET_BIT) == 0) 1027 if ((boot_data & ACX_SLV_SOFT_RESET_BIT) == 0)
919 break; 1028 break;
@@ -929,16 +1038,20 @@ static int wl1271_boot_soft_reset(struct wl1271 *wl)
929 } 1038 }
930 1039
931 /* disable Rx/Tx */ 1040 /* disable Rx/Tx */
932 wl1271_write32(wl, WL12XX_ENABLE, 0x0); 1041 ret = wlcore_write32(wl, WL12XX_ENABLE, 0x0);
1042 if (ret < 0)
1043 goto out;
933 1044
934 /* disable auto calibration on start*/ 1045 /* disable auto calibration on start*/
935 wl1271_write32(wl, WL12XX_SPARE_A2, 0xffff); 1046 ret = wlcore_write32(wl, WL12XX_SPARE_A2, 0xffff);
936 1047
937 return 0; 1048out:
1049 return ret;
938} 1050}
939 1051
940static int wl12xx_pre_boot(struct wl1271 *wl) 1052static int wl12xx_pre_boot(struct wl1271 *wl)
941{ 1053{
1054 struct wl12xx_priv *priv = wl->priv;
942 int ret = 0; 1055 int ret = 0;
943 u32 clk; 1056 u32 clk;
944 int selected_clock = -1; 1057 int selected_clock = -1;
@@ -954,30 +1067,43 @@ static int wl12xx_pre_boot(struct wl1271 *wl)
954 } 1067 }
955 1068
956 /* Continue the ELP wake up sequence */ 1069 /* Continue the ELP wake up sequence */
957 wl1271_write32(wl, WL12XX_WELP_ARM_COMMAND, WELP_ARM_COMMAND_VAL); 1070 ret = wlcore_write32(wl, WL12XX_WELP_ARM_COMMAND, WELP_ARM_COMMAND_VAL);
1071 if (ret < 0)
1072 goto out;
1073
958 udelay(500); 1074 udelay(500);
959 1075
960 wlcore_set_partition(wl, &wl->ptable[PART_DRPW]); 1076 ret = wlcore_set_partition(wl, &wl->ptable[PART_DRPW]);
1077 if (ret < 0)
1078 goto out;
961 1079
962 /* Read-modify-write DRPW_SCRATCH_START register (see next state) 1080 /* Read-modify-write DRPW_SCRATCH_START register (see next state)
963 to be used by DRPw FW. The RTRIM value will be added by the FW 1081 to be used by DRPw FW. The RTRIM value will be added by the FW
964 before taking DRPw out of reset */ 1082 before taking DRPw out of reset */
965 1083
966 clk = wl1271_read32(wl, WL12XX_DRPW_SCRATCH_START); 1084 ret = wlcore_read32(wl, WL12XX_DRPW_SCRATCH_START, &clk);
1085 if (ret < 0)
1086 goto out;
967 1087
968 wl1271_debug(DEBUG_BOOT, "clk2 0x%x", clk); 1088 wl1271_debug(DEBUG_BOOT, "clk2 0x%x", clk);
969 1089
970 if (wl->chip.id == CHIP_ID_1283_PG20) 1090 if (wl->chip.id == CHIP_ID_1283_PG20)
971 clk |= ((selected_clock & 0x3) << 1) << 4; 1091 clk |= ((selected_clock & 0x3) << 1) << 4;
972 else 1092 else
973 clk |= (wl->ref_clock << 1) << 4; 1093 clk |= (priv->ref_clock << 1) << 4;
974 1094
975 wl1271_write32(wl, WL12XX_DRPW_SCRATCH_START, clk); 1095 ret = wlcore_write32(wl, WL12XX_DRPW_SCRATCH_START, clk);
1096 if (ret < 0)
1097 goto out;
976 1098
977 wlcore_set_partition(wl, &wl->ptable[PART_WORK]); 1099 ret = wlcore_set_partition(wl, &wl->ptable[PART_WORK]);
1100 if (ret < 0)
1101 goto out;
978 1102
979 /* Disable interrupts */ 1103 /* Disable interrupts */
980 wlcore_write_reg(wl, REG_INTERRUPT_MASK, WL1271_ACX_INTR_ALL); 1104 ret = wlcore_write_reg(wl, REG_INTERRUPT_MASK, WL1271_ACX_INTR_ALL);
1105 if (ret < 0)
1106 goto out;
981 1107
982 ret = wl1271_boot_soft_reset(wl); 1108 ret = wl1271_boot_soft_reset(wl);
983 if (ret < 0) 1109 if (ret < 0)
@@ -987,47 +1113,72 @@ out:
987 return ret; 1113 return ret;
988} 1114}
989 1115
990static void wl12xx_pre_upload(struct wl1271 *wl) 1116static int wl12xx_pre_upload(struct wl1271 *wl)
991{ 1117{
992 u32 tmp; 1118 u32 tmp;
1119 u16 polarity;
1120 int ret;
993 1121
994 /* write firmware's last address (ie. it's length) to 1122 /* write firmware's last address (ie. it's length) to
995 * ACX_EEPROMLESS_IND_REG */ 1123 * ACX_EEPROMLESS_IND_REG */
996 wl1271_debug(DEBUG_BOOT, "ACX_EEPROMLESS_IND_REG"); 1124 wl1271_debug(DEBUG_BOOT, "ACX_EEPROMLESS_IND_REG");
997 1125
998 wl1271_write32(wl, WL12XX_EEPROMLESS_IND, WL12XX_EEPROMLESS_IND); 1126 ret = wlcore_write32(wl, WL12XX_EEPROMLESS_IND, WL12XX_EEPROMLESS_IND);
1127 if (ret < 0)
1128 goto out;
999 1129
1000 tmp = wlcore_read_reg(wl, REG_CHIP_ID_B); 1130 ret = wlcore_read_reg(wl, REG_CHIP_ID_B, &tmp);
1131 if (ret < 0)
1132 goto out;
1001 1133
1002 wl1271_debug(DEBUG_BOOT, "chip id 0x%x", tmp); 1134 wl1271_debug(DEBUG_BOOT, "chip id 0x%x", tmp);
1003 1135
1004 /* 6. read the EEPROM parameters */ 1136 /* 6. read the EEPROM parameters */
1005 tmp = wl1271_read32(wl, WL12XX_SCR_PAD2); 1137 ret = wlcore_read32(wl, WL12XX_SCR_PAD2, &tmp);
1138 if (ret < 0)
1139 goto out;
1006 1140
1007 /* WL1271: The reference driver skips steps 7 to 10 (jumps directly 1141 /* WL1271: The reference driver skips steps 7 to 10 (jumps directly
1008 * to upload_fw) */ 1142 * to upload_fw) */
1009 1143
1010 if (wl->chip.id == CHIP_ID_1283_PG20) 1144 if (wl->chip.id == CHIP_ID_1283_PG20) {
1011 wl12xx_top_reg_write(wl, SDIO_IO_DS, HCI_IO_DS_6MA); 1145 ret = wl12xx_top_reg_write(wl, SDIO_IO_DS, HCI_IO_DS_6MA);
1012} 1146 if (ret < 0)
1013 1147 goto out;
1014static void wl12xx_enable_interrupts(struct wl1271 *wl) 1148 }
1015{
1016 u32 polarity;
1017 1149
1018 polarity = wl12xx_top_reg_read(wl, OCP_REG_POLARITY); 1150 /* polarity must be set before the firmware is loaded */
1151 ret = wl12xx_top_reg_read(wl, OCP_REG_POLARITY, &polarity);
1152 if (ret < 0)
1153 goto out;
1019 1154
1020 /* We use HIGH polarity, so unset the LOW bit */ 1155 /* We use HIGH polarity, so unset the LOW bit */
1021 polarity &= ~POLARITY_LOW; 1156 polarity &= ~POLARITY_LOW;
1022 wl12xx_top_reg_write(wl, OCP_REG_POLARITY, polarity); 1157 ret = wl12xx_top_reg_write(wl, OCP_REG_POLARITY, polarity);
1023 1158
1024 wlcore_write_reg(wl, REG_INTERRUPT_MASK, WL1271_ACX_ALL_EVENTS_VECTOR); 1159out:
1160 return ret;
1161}
1162
1163static int wl12xx_enable_interrupts(struct wl1271 *wl)
1164{
1165 int ret;
1166
1167 ret = wlcore_write_reg(wl, REG_INTERRUPT_MASK,
1168 WL12XX_ACX_ALL_EVENTS_VECTOR);
1169 if (ret < 0)
1170 goto out;
1025 1171
1026 wlcore_enable_interrupts(wl); 1172 wlcore_enable_interrupts(wl);
1027 wlcore_write_reg(wl, REG_INTERRUPT_MASK, 1173 ret = wlcore_write_reg(wl, REG_INTERRUPT_MASK,
1028 WL1271_ACX_INTR_ALL & ~(WL1271_INTR_MASK)); 1174 WL1271_ACX_INTR_ALL & ~(WL12XX_INTR_MASK));
1175 if (ret < 0)
1176 goto out;
1177
1178 ret = wlcore_write32(wl, WL12XX_HI_CFG, HI_CFG_DEF_VAL);
1029 1179
1030 wl1271_write32(wl, WL12XX_HI_CFG, HI_CFG_DEF_VAL); 1180out:
1181 return ret;
1031} 1182}
1032 1183
1033static int wl12xx_boot(struct wl1271 *wl) 1184static int wl12xx_boot(struct wl1271 *wl)
@@ -1042,7 +1193,9 @@ static int wl12xx_boot(struct wl1271 *wl)
1042 if (ret < 0) 1193 if (ret < 0)
1043 goto out; 1194 goto out;
1044 1195
1045 wl12xx_pre_upload(wl); 1196 ret = wl12xx_pre_upload(wl);
1197 if (ret < 0)
1198 goto out;
1046 1199
1047 ret = wlcore_boot_upload_firmware(wl); 1200 ret = wlcore_boot_upload_firmware(wl);
1048 if (ret < 0) 1201 if (ret < 0)
@@ -1052,22 +1205,30 @@ static int wl12xx_boot(struct wl1271 *wl)
1052 if (ret < 0) 1205 if (ret < 0)
1053 goto out; 1206 goto out;
1054 1207
1055 wl12xx_enable_interrupts(wl); 1208 ret = wl12xx_enable_interrupts(wl);
1056 1209
1057out: 1210out:
1058 return ret; 1211 return ret;
1059} 1212}
1060 1213
1061static void wl12xx_trigger_cmd(struct wl1271 *wl, int cmd_box_addr, 1214static int wl12xx_trigger_cmd(struct wl1271 *wl, int cmd_box_addr,
1062 void *buf, size_t len) 1215 void *buf, size_t len)
1063{ 1216{
1064 wl1271_write(wl, cmd_box_addr, buf, len, false); 1217 int ret;
1065 wlcore_write_reg(wl, REG_INTERRUPT_TRIG, WL12XX_INTR_TRIG_CMD); 1218
1219 ret = wlcore_write(wl, cmd_box_addr, buf, len, false);
1220 if (ret < 0)
1221 return ret;
1222
1223 ret = wlcore_write_reg(wl, REG_INTERRUPT_TRIG, WL12XX_INTR_TRIG_CMD);
1224
1225 return ret;
1066} 1226}
1067 1227
1068static void wl12xx_ack_event(struct wl1271 *wl) 1228static int wl12xx_ack_event(struct wl1271 *wl)
1069{ 1229{
1070 wlcore_write_reg(wl, REG_INTERRUPT_TRIG, WL12XX_INTR_TRIG_EVENT_ACK); 1230 return wlcore_write_reg(wl, REG_INTERRUPT_TRIG,
1231 WL12XX_INTR_TRIG_EVENT_ACK);
1071} 1232}
1072 1233
1073static u32 wl12xx_calc_tx_blocks(struct wl1271 *wl, u32 len, u32 spare_blks) 1234static u32 wl12xx_calc_tx_blocks(struct wl1271 *wl, u32 len, u32 spare_blks)
@@ -1147,12 +1308,13 @@ static u32 wl12xx_get_rx_packet_len(struct wl1271 *wl, void *rx_data,
1147 return data_len - sizeof(*desc) - desc->pad_len; 1308 return data_len - sizeof(*desc) - desc->pad_len;
1148} 1309}
1149 1310
1150static void wl12xx_tx_delayed_compl(struct wl1271 *wl) 1311static int wl12xx_tx_delayed_compl(struct wl1271 *wl)
1151{ 1312{
1152 if (wl->fw_status->tx_results_counter == (wl->tx_results_count & 0xff)) 1313 if (wl->fw_status_1->tx_results_counter ==
1153 return; 1314 (wl->tx_results_count & 0xff))
1315 return 0;
1154 1316
1155 wl1271_tx_complete(wl); 1317 return wlcore_tx_complete(wl);
1156} 1318}
1157 1319
1158static int wl12xx_hw_init(struct wl1271 *wl) 1320static int wl12xx_hw_init(struct wl1271 *wl)
@@ -1253,45 +1415,144 @@ static bool wl12xx_mac_in_fuse(struct wl1271 *wl)
1253 return supported; 1415 return supported;
1254} 1416}
1255 1417
1256static void wl12xx_get_fuse_mac(struct wl1271 *wl) 1418static int wl12xx_get_fuse_mac(struct wl1271 *wl)
1257{ 1419{
1258 u32 mac1, mac2; 1420 u32 mac1, mac2;
1421 int ret;
1259 1422
1260 wlcore_set_partition(wl, &wl->ptable[PART_DRPW]); 1423 ret = wlcore_set_partition(wl, &wl->ptable[PART_DRPW]);
1424 if (ret < 0)
1425 goto out;
1261 1426
1262 mac1 = wl1271_read32(wl, WL12XX_REG_FUSE_BD_ADDR_1); 1427 ret = wlcore_read32(wl, WL12XX_REG_FUSE_BD_ADDR_1, &mac1);
1263 mac2 = wl1271_read32(wl, WL12XX_REG_FUSE_BD_ADDR_2); 1428 if (ret < 0)
1429 goto out;
1430
1431 ret = wlcore_read32(wl, WL12XX_REG_FUSE_BD_ADDR_2, &mac2);
1432 if (ret < 0)
1433 goto out;
1264 1434
1265 /* these are the two parts of the BD_ADDR */ 1435 /* these are the two parts of the BD_ADDR */
1266 wl->fuse_oui_addr = ((mac2 & 0xffff) << 8) + 1436 wl->fuse_oui_addr = ((mac2 & 0xffff) << 8) +
1267 ((mac1 & 0xff000000) >> 24); 1437 ((mac1 & 0xff000000) >> 24);
1268 wl->fuse_nic_addr = mac1 & 0xffffff; 1438 wl->fuse_nic_addr = mac1 & 0xffffff;
1269 1439
1270 wlcore_set_partition(wl, &wl->ptable[PART_DOWN]); 1440 ret = wlcore_set_partition(wl, &wl->ptable[PART_DOWN]);
1441
1442out:
1443 return ret;
1271} 1444}
1272 1445
1273static s8 wl12xx_get_pg_ver(struct wl1271 *wl) 1446static int wl12xx_get_pg_ver(struct wl1271 *wl, s8 *ver)
1274{ 1447{
1275 u32 die_info; 1448 u16 die_info;
1449 int ret;
1276 1450
1277 if (wl->chip.id == CHIP_ID_1283_PG20) 1451 if (wl->chip.id == CHIP_ID_1283_PG20)
1278 die_info = wl12xx_top_reg_read(wl, WL128X_REG_FUSE_DATA_2_1); 1452 ret = wl12xx_top_reg_read(wl, WL128X_REG_FUSE_DATA_2_1,
1453 &die_info);
1279 else 1454 else
1280 die_info = wl12xx_top_reg_read(wl, WL127X_REG_FUSE_DATA_2_1); 1455 ret = wl12xx_top_reg_read(wl, WL127X_REG_FUSE_DATA_2_1,
1456 &die_info);
1457
1458 if (ret >= 0 && ver)
1459 *ver = (s8)((die_info & PG_VER_MASK) >> PG_VER_OFFSET);
1281 1460
1282 return (s8) (die_info & PG_VER_MASK) >> PG_VER_OFFSET; 1461 return ret;
1283} 1462}
1284 1463
1285static void wl12xx_get_mac(struct wl1271 *wl) 1464static int wl12xx_get_mac(struct wl1271 *wl)
1286{ 1465{
1287 if (wl12xx_mac_in_fuse(wl)) 1466 if (wl12xx_mac_in_fuse(wl))
1288 wl12xx_get_fuse_mac(wl); 1467 return wl12xx_get_fuse_mac(wl);
1468
1469 return 0;
1470}
1471
1472static void wl12xx_set_tx_desc_csum(struct wl1271 *wl,
1473 struct wl1271_tx_hw_descr *desc,
1474 struct sk_buff *skb)
1475{
1476 desc->wl12xx_reserved = 0;
1477}
1478
1479static int wl12xx_plt_init(struct wl1271 *wl)
1480{
1481 int ret;
1482
1483 ret = wl->ops->boot(wl);
1484 if (ret < 0)
1485 goto out;
1486
1487 ret = wl->ops->hw_init(wl);
1488 if (ret < 0)
1489 goto out_irq_disable;
1490
1491 ret = wl1271_acx_init_mem_config(wl);
1492 if (ret < 0)
1493 goto out_irq_disable;
1494
1495 ret = wl12xx_acx_mem_cfg(wl);
1496 if (ret < 0)
1497 goto out_free_memmap;
1498
1499 /* Enable data path */
1500 ret = wl1271_cmd_data_path(wl, 1);
1501 if (ret < 0)
1502 goto out_free_memmap;
1503
1504 /* Configure for CAM power saving (ie. always active) */
1505 ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_CAM);
1506 if (ret < 0)
1507 goto out_free_memmap;
1508
1509 /* configure PM */
1510 ret = wl1271_acx_pm_config(wl);
1511 if (ret < 0)
1512 goto out_free_memmap;
1513
1514 goto out;
1515
1516out_free_memmap:
1517 kfree(wl->target_mem_map);
1518 wl->target_mem_map = NULL;
1519
1520out_irq_disable:
1521 mutex_unlock(&wl->mutex);
1522 /* Unlocking the mutex in the middle of handling is
1523 inherently unsafe. In this case we deem it safe to do,
1524 because we need to let any possibly pending IRQ out of
1525 the system (and while we are WL1271_STATE_OFF the IRQ
1526 work function will not do anything.) Also, any other
1527 possible concurrent operations will fail due to the
1528 current state, hence the wl1271 struct should be safe. */
1529 wlcore_disable_interrupts(wl);
1530 mutex_lock(&wl->mutex);
1531out:
1532 return ret;
1533}
1534
1535static int wl12xx_get_spare_blocks(struct wl1271 *wl, bool is_gem)
1536{
1537 if (is_gem)
1538 return WL12XX_TX_HW_BLOCK_GEM_SPARE;
1539
1540 return WL12XX_TX_HW_BLOCK_SPARE_DEFAULT;
1541}
1542
1543static int wl12xx_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
1544 struct ieee80211_vif *vif,
1545 struct ieee80211_sta *sta,
1546 struct ieee80211_key_conf *key_conf)
1547{
1548 return wlcore_set_key(wl, cmd, vif, sta, key_conf);
1289} 1549}
1290 1550
1291static struct wlcore_ops wl12xx_ops = { 1551static struct wlcore_ops wl12xx_ops = {
1292 .identify_chip = wl12xx_identify_chip, 1552 .identify_chip = wl12xx_identify_chip,
1293 .identify_fw = wl12xx_identify_fw, 1553 .identify_fw = wl12xx_identify_fw,
1294 .boot = wl12xx_boot, 1554 .boot = wl12xx_boot,
1555 .plt_init = wl12xx_plt_init,
1295 .trigger_cmd = wl12xx_trigger_cmd, 1556 .trigger_cmd = wl12xx_trigger_cmd,
1296 .ack_event = wl12xx_ack_event, 1557 .ack_event = wl12xx_ack_event,
1297 .calc_tx_blocks = wl12xx_calc_tx_blocks, 1558 .calc_tx_blocks = wl12xx_calc_tx_blocks,
@@ -1306,6 +1567,13 @@ static struct wlcore_ops wl12xx_ops = {
1306 .sta_get_ap_rate_mask = wl12xx_sta_get_ap_rate_mask, 1567 .sta_get_ap_rate_mask = wl12xx_sta_get_ap_rate_mask,
1307 .get_pg_ver = wl12xx_get_pg_ver, 1568 .get_pg_ver = wl12xx_get_pg_ver,
1308 .get_mac = wl12xx_get_mac, 1569 .get_mac = wl12xx_get_mac,
1570 .set_tx_desc_csum = wl12xx_set_tx_desc_csum,
1571 .set_rx_csum = NULL,
1572 .ap_get_mimo_wide_rate_mask = NULL,
1573 .debugfs_init = wl12xx_debugfs_add_files,
1574 .get_spare_blocks = wl12xx_get_spare_blocks,
1575 .set_key = wl12xx_set_key,
1576 .pre_pkt_send = NULL,
1309}; 1577};
1310 1578
1311static struct ieee80211_sta_ht_cap wl12xx_ht_cap = { 1579static struct ieee80211_sta_ht_cap wl12xx_ht_cap = {
@@ -1323,6 +1591,7 @@ static struct ieee80211_sta_ht_cap wl12xx_ht_cap = {
1323 1591
1324static int __devinit wl12xx_probe(struct platform_device *pdev) 1592static int __devinit wl12xx_probe(struct platform_device *pdev)
1325{ 1593{
1594 struct wl12xx_platform_data *pdata = pdev->dev.platform_data;
1326 struct wl1271 *wl; 1595 struct wl1271 *wl;
1327 struct ieee80211_hw *hw; 1596 struct ieee80211_hw *hw;
1328 struct wl12xx_priv *priv; 1597 struct wl12xx_priv *priv;
@@ -1334,19 +1603,63 @@ static int __devinit wl12xx_probe(struct platform_device *pdev)
1334 } 1603 }
1335 1604
1336 wl = hw->priv; 1605 wl = hw->priv;
1606 priv = wl->priv;
1337 wl->ops = &wl12xx_ops; 1607 wl->ops = &wl12xx_ops;
1338 wl->ptable = wl12xx_ptable; 1608 wl->ptable = wl12xx_ptable;
1339 wl->rtable = wl12xx_rtable; 1609 wl->rtable = wl12xx_rtable;
1340 wl->num_tx_desc = 16; 1610 wl->num_tx_desc = 16;
1341 wl->normal_tx_spare = WL12XX_TX_HW_BLOCK_SPARE_DEFAULT; 1611 wl->num_rx_desc = 8;
1342 wl->gem_tx_spare = WL12XX_TX_HW_BLOCK_GEM_SPARE;
1343 wl->band_rate_to_idx = wl12xx_band_rate_to_idx; 1612 wl->band_rate_to_idx = wl12xx_band_rate_to_idx;
1344 wl->hw_tx_rate_tbl_size = WL12XX_CONF_HW_RXTX_RATE_MAX; 1613 wl->hw_tx_rate_tbl_size = WL12XX_CONF_HW_RXTX_RATE_MAX;
1345 wl->hw_min_ht_rate = WL12XX_CONF_HW_RXTX_RATE_MCS0; 1614 wl->hw_min_ht_rate = WL12XX_CONF_HW_RXTX_RATE_MCS0;
1346 wl->fw_status_priv_len = 0; 1615 wl->fw_status_priv_len = 0;
1347 memcpy(&wl->ht_cap, &wl12xx_ht_cap, sizeof(wl12xx_ht_cap)); 1616 wl->stats.fw_stats_len = sizeof(struct wl12xx_acx_statistics);
1617 wlcore_set_ht_cap(wl, IEEE80211_BAND_2GHZ, &wl12xx_ht_cap);
1618 wlcore_set_ht_cap(wl, IEEE80211_BAND_5GHZ, &wl12xx_ht_cap);
1348 wl12xx_conf_init(wl); 1619 wl12xx_conf_init(wl);
1349 1620
1621 if (!fref_param) {
1622 priv->ref_clock = pdata->board_ref_clock;
1623 } else {
1624 if (!strcmp(fref_param, "19.2"))
1625 priv->ref_clock = WL12XX_REFCLOCK_19;
1626 else if (!strcmp(fref_param, "26"))
1627 priv->ref_clock = WL12XX_REFCLOCK_26;
1628 else if (!strcmp(fref_param, "26x"))
1629 priv->ref_clock = WL12XX_REFCLOCK_26_XTAL;
1630 else if (!strcmp(fref_param, "38.4"))
1631 priv->ref_clock = WL12XX_REFCLOCK_38;
1632 else if (!strcmp(fref_param, "38.4x"))
1633 priv->ref_clock = WL12XX_REFCLOCK_38_XTAL;
1634 else if (!strcmp(fref_param, "52"))
1635 priv->ref_clock = WL12XX_REFCLOCK_52;
1636 else
1637 wl1271_error("Invalid fref parameter %s", fref_param);
1638 }
1639
1640 if (!tcxo_param) {
1641 priv->tcxo_clock = pdata->board_tcxo_clock;
1642 } else {
1643 if (!strcmp(tcxo_param, "19.2"))
1644 priv->tcxo_clock = WL12XX_TCXOCLOCK_19_2;
1645 else if (!strcmp(tcxo_param, "26"))
1646 priv->tcxo_clock = WL12XX_TCXOCLOCK_26;
1647 else if (!strcmp(tcxo_param, "38.4"))
1648 priv->tcxo_clock = WL12XX_TCXOCLOCK_38_4;
1649 else if (!strcmp(tcxo_param, "52"))
1650 priv->tcxo_clock = WL12XX_TCXOCLOCK_52;
1651 else if (!strcmp(tcxo_param, "16.368"))
1652 priv->tcxo_clock = WL12XX_TCXOCLOCK_16_368;
1653 else if (!strcmp(tcxo_param, "32.736"))
1654 priv->tcxo_clock = WL12XX_TCXOCLOCK_32_736;
1655 else if (!strcmp(tcxo_param, "16.8"))
1656 priv->tcxo_clock = WL12XX_TCXOCLOCK_16_8;
1657 else if (!strcmp(tcxo_param, "33.6"))
1658 priv->tcxo_clock = WL12XX_TCXOCLOCK_33_6;
1659 else
1660 wl1271_error("Invalid tcxo parameter %s", tcxo_param);
1661 }
1662
1350 return wlcore_probe(wl, pdev); 1663 return wlcore_probe(wl, pdev);
1351} 1664}
1352 1665
@@ -1378,6 +1691,13 @@ static void __exit wl12xx_exit(void)
1378} 1691}
1379module_exit(wl12xx_exit); 1692module_exit(wl12xx_exit);
1380 1693
1694module_param_named(fref, fref_param, charp, 0);
1695MODULE_PARM_DESC(fref, "FREF clock: 19.2, 26, 26x, 38.4, 38.4x, 52");
1696
1697module_param_named(tcxo, tcxo_param, charp, 0);
1698MODULE_PARM_DESC(tcxo,
1699 "TCXO clock: 19.2, 26, 38.4, 52, 16.368, 32.736, 16.8, 33.6");
1700
1381MODULE_LICENSE("GPL v2"); 1701MODULE_LICENSE("GPL v2");
1382MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>"); 1702MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
1383MODULE_FIRMWARE(WL127X_FW_NAME_SINGLE); 1703MODULE_FIRMWARE(WL127X_FW_NAME_SINGLE);
diff --git a/drivers/net/wireless/ti/wl12xx/wl12xx.h b/drivers/net/wireless/ti/wl12xx/wl12xx.h
index 74cd332e23e..de113241087 100644
--- a/drivers/net/wireless/ti/wl12xx/wl12xx.h
+++ b/drivers/net/wireless/ti/wl12xx/wl12xx.h
@@ -24,8 +24,16 @@
24 24
25#include "conf.h" 25#include "conf.h"
26 26
27struct wl127x_rx_mem_pool_addr {
28 u32 addr;
29 u32 addr_extra;
30};
31
27struct wl12xx_priv { 32struct wl12xx_priv {
28 struct wl12xx_priv_conf conf; 33 struct wl12xx_priv_conf conf;
34
35 int ref_clock;
36 int tcxo_clock;
29}; 37};
30 38
31#endif /* __WL12XX_PRIV_H__ */ 39#endif /* __WL12XX_PRIV_H__ */
diff --git a/drivers/net/wireless/ti/wl18xx/Kconfig b/drivers/net/wireless/ti/wl18xx/Kconfig
new file mode 100644
index 00000000000..1cfdb254882
--- /dev/null
+++ b/drivers/net/wireless/ti/wl18xx/Kconfig
@@ -0,0 +1,7 @@
1config WL18XX
2 tristate "TI wl18xx support"
3 depends on MAC80211
4 select WLCORE
5 ---help---
6 This module adds support for wireless adapters based on TI
7 WiLink 8 chipsets.
diff --git a/drivers/net/wireless/ti/wl18xx/Makefile b/drivers/net/wireless/ti/wl18xx/Makefile
new file mode 100644
index 00000000000..67c098734c7
--- /dev/null
+++ b/drivers/net/wireless/ti/wl18xx/Makefile
@@ -0,0 +1,3 @@
1wl18xx-objs = main.o acx.o tx.o io.o debugfs.o
2
3obj-$(CONFIG_WL18XX) += wl18xx.o
diff --git a/drivers/net/wireless/ti/wl18xx/acx.c b/drivers/net/wireless/ti/wl18xx/acx.c
new file mode 100644
index 00000000000..72840e23bf5
--- /dev/null
+++ b/drivers/net/wireless/ti/wl18xx/acx.c
@@ -0,0 +1,111 @@
1/*
2 * This file is part of wl18xx
3 *
4 * Copyright (C) 2011 Texas Instruments Inc.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
18 * 02110-1301 USA
19 *
20 */
21
22#include "../wlcore/cmd.h"
23#include "../wlcore/debug.h"
24#include "../wlcore/acx.h"
25
26#include "acx.h"
27
28int wl18xx_acx_host_if_cfg_bitmap(struct wl1271 *wl, u32 host_cfg_bitmap,
29 u32 sdio_blk_size, u32 extra_mem_blks,
30 u32 len_field_size)
31{
32 struct wl18xx_acx_host_config_bitmap *bitmap_conf;
33 int ret;
34
35 wl1271_debug(DEBUG_ACX, "acx cfg bitmap %d blk %d spare %d field %d",
36 host_cfg_bitmap, sdio_blk_size, extra_mem_blks,
37 len_field_size);
38
39 bitmap_conf = kzalloc(sizeof(*bitmap_conf), GFP_KERNEL);
40 if (!bitmap_conf) {
41 ret = -ENOMEM;
42 goto out;
43 }
44
45 bitmap_conf->host_cfg_bitmap = cpu_to_le32(host_cfg_bitmap);
46 bitmap_conf->host_sdio_block_size = cpu_to_le32(sdio_blk_size);
47 bitmap_conf->extra_mem_blocks = cpu_to_le32(extra_mem_blks);
48 bitmap_conf->length_field_size = cpu_to_le32(len_field_size);
49
50 ret = wl1271_cmd_configure(wl, ACX_HOST_IF_CFG_BITMAP,
51 bitmap_conf, sizeof(*bitmap_conf));
52 if (ret < 0) {
53 wl1271_warning("wl1271 bitmap config opt failed: %d", ret);
54 goto out;
55 }
56
57out:
58 kfree(bitmap_conf);
59
60 return ret;
61}
62
63int wl18xx_acx_set_checksum_state(struct wl1271 *wl)
64{
65 struct wl18xx_acx_checksum_state *acx;
66 int ret;
67
68 wl1271_debug(DEBUG_ACX, "acx checksum state");
69
70 acx = kzalloc(sizeof(*acx), GFP_KERNEL);
71 if (!acx) {
72 ret = -ENOMEM;
73 goto out;
74 }
75
76 acx->checksum_state = CHECKSUM_OFFLOAD_ENABLED;
77
78 ret = wl1271_cmd_configure(wl, ACX_CHECKSUM_CONFIG, acx, sizeof(*acx));
79 if (ret < 0) {
80 wl1271_warning("failed to set Tx checksum state: %d", ret);
81 goto out;
82 }
83
84out:
85 kfree(acx);
86 return ret;
87}
88
89int wl18xx_acx_clear_statistics(struct wl1271 *wl)
90{
91 struct wl18xx_acx_clear_statistics *acx;
92 int ret = 0;
93
94 wl1271_debug(DEBUG_ACX, "acx clear statistics");
95
96 acx = kzalloc(sizeof(*acx), GFP_KERNEL);
97 if (!acx) {
98 ret = -ENOMEM;
99 goto out;
100 }
101
102 ret = wl1271_cmd_configure(wl, ACX_CLEAR_STATISTICS, acx, sizeof(*acx));
103 if (ret < 0) {
104 wl1271_warning("failed to clear firmware statistics: %d", ret);
105 goto out;
106 }
107
108out:
109 kfree(acx);
110 return ret;
111}
diff --git a/drivers/net/wireless/ti/wl18xx/acx.h b/drivers/net/wireless/ti/wl18xx/acx.h
new file mode 100644
index 00000000000..e2609a6b734
--- /dev/null
+++ b/drivers/net/wireless/ti/wl18xx/acx.h
@@ -0,0 +1,287 @@
1/*
2 * This file is part of wl18xx
3 *
4 * Copyright (C) 2011 Texas Instruments. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
18 * 02110-1301 USA
19 *
20 */
21
22#ifndef __WL18XX_ACX_H__
23#define __WL18XX_ACX_H__
24
25#include "../wlcore/wlcore.h"
26#include "../wlcore/acx.h"
27
28enum {
29 ACX_CLEAR_STATISTICS = 0x0047,
30};
31
32/* numbers of bits the length field takes (add 1 for the actual number) */
33#define WL18XX_HOST_IF_LEN_SIZE_FIELD 15
34
35#define WL18XX_ACX_EVENTS_VECTOR (WL1271_ACX_INTR_WATCHDOG | \
36 WL1271_ACX_INTR_INIT_COMPLETE | \
37 WL1271_ACX_INTR_EVENT_A | \
38 WL1271_ACX_INTR_EVENT_B | \
39 WL1271_ACX_INTR_CMD_COMPLETE | \
40 WL1271_ACX_INTR_HW_AVAILABLE | \
41 WL1271_ACX_INTR_DATA | \
42 WL1271_ACX_SW_INTR_WATCHDOG)
43
44#define WL18XX_INTR_MASK (WL1271_ACX_INTR_WATCHDOG | \
45 WL1271_ACX_INTR_EVENT_A | \
46 WL1271_ACX_INTR_EVENT_B | \
47 WL1271_ACX_INTR_HW_AVAILABLE | \
48 WL1271_ACX_INTR_DATA | \
49 WL1271_ACX_SW_INTR_WATCHDOG)
50
51struct wl18xx_acx_host_config_bitmap {
52 struct acx_header header;
53
54 __le32 host_cfg_bitmap;
55
56 __le32 host_sdio_block_size;
57
58 /* extra mem blocks per frame in TX. */
59 __le32 extra_mem_blocks;
60
61 /*
62 * number of bits of the length field in the first TX word
63 * (up to 15 - for using the entire 16 bits).
64 */
65 __le32 length_field_size;
66
67} __packed;
68
69enum {
70 CHECKSUM_OFFLOAD_DISABLED = 0,
71 CHECKSUM_OFFLOAD_ENABLED = 1,
72 CHECKSUM_OFFLOAD_FAKE_RX = 2,
73 CHECKSUM_OFFLOAD_INVALID = 0xFF
74};
75
76struct wl18xx_acx_checksum_state {
77 struct acx_header header;
78
79 /* enum acx_checksum_state */
80 u8 checksum_state;
81 u8 pad[3];
82} __packed;
83
84
85struct wl18xx_acx_error_stats {
86 u32 error_frame;
87 u32 error_null_Frame_tx_start;
88 u32 error_numll_frame_cts_start;
89 u32 error_bar_retry;
90 u32 error_frame_cts_nul_flid;
91} __packed;
92
93struct wl18xx_acx_debug_stats {
94 u32 debug1;
95 u32 debug2;
96 u32 debug3;
97 u32 debug4;
98 u32 debug5;
99 u32 debug6;
100} __packed;
101
102struct wl18xx_acx_ring_stats {
103 u32 prepared_descs;
104 u32 tx_cmplt;
105} __packed;
106
107struct wl18xx_acx_tx_stats {
108 u32 tx_prepared_descs;
109 u32 tx_cmplt;
110 u32 tx_template_prepared;
111 u32 tx_data_prepared;
112 u32 tx_template_programmed;
113 u32 tx_data_programmed;
114 u32 tx_burst_programmed;
115 u32 tx_starts;
116 u32 tx_imm_resp;
117 u32 tx_start_templates;
118 u32 tx_start_int_templates;
119 u32 tx_start_fw_gen;
120 u32 tx_start_data;
121 u32 tx_start_null_frame;
122 u32 tx_exch;
123 u32 tx_retry_template;
124 u32 tx_retry_data;
125 u32 tx_exch_pending;
126 u32 tx_exch_expiry;
127 u32 tx_done_template;
128 u32 tx_done_data;
129 u32 tx_done_int_template;
130 u32 tx_frame_checksum;
131 u32 tx_checksum_result;
132 u32 frag_called;
133 u32 frag_mpdu_alloc_failed;
134 u32 frag_init_called;
135 u32 frag_in_process_called;
136 u32 frag_tkip_called;
137 u32 frag_key_not_found;
138 u32 frag_need_fragmentation;
139 u32 frag_bad_mblk_num;
140 u32 frag_failed;
141 u32 frag_cache_hit;
142 u32 frag_cache_miss;
143} __packed;
144
145struct wl18xx_acx_rx_stats {
146 u32 rx_beacon_early_term;
147 u32 rx_out_of_mpdu_nodes;
148 u32 rx_hdr_overflow;
149 u32 rx_dropped_frame;
150 u32 rx_done_stage;
151 u32 rx_done;
152 u32 rx_defrag;
153 u32 rx_defrag_end;
154 u32 rx_cmplt;
155 u32 rx_pre_complt;
156 u32 rx_cmplt_task;
157 u32 rx_phy_hdr;
158 u32 rx_timeout;
159 u32 rx_timeout_wa;
160 u32 rx_wa_density_dropped_frame;
161 u32 rx_wa_ba_not_expected;
162 u32 rx_frame_checksum;
163 u32 rx_checksum_result;
164 u32 defrag_called;
165 u32 defrag_init_called;
166 u32 defrag_in_process_called;
167 u32 defrag_tkip_called;
168 u32 defrag_need_defrag;
169 u32 defrag_decrypt_failed;
170 u32 decrypt_key_not_found;
171 u32 defrag_need_decrypt;
172 u32 rx_tkip_replays;
173} __packed;
174
175struct wl18xx_acx_isr_stats {
176 u32 irqs;
177} __packed;
178
179#define PWR_STAT_MAX_CONT_MISSED_BCNS_SPREAD 10
180
181struct wl18xx_acx_pwr_stats {
182 u32 missing_bcns_cnt;
183 u32 rcvd_bcns_cnt;
184 u32 connection_out_of_sync;
185 u32 cont_miss_bcns_spread[PWR_STAT_MAX_CONT_MISSED_BCNS_SPREAD];
186 u32 rcvd_awake_bcns_cnt;
187} __packed;
188
189struct wl18xx_acx_event_stats {
190 u32 calibration;
191 u32 rx_mismatch;
192 u32 rx_mem_empty;
193} __packed;
194
195struct wl18xx_acx_ps_poll_stats {
196 u32 ps_poll_timeouts;
197 u32 upsd_timeouts;
198 u32 upsd_max_ap_turn;
199 u32 ps_poll_max_ap_turn;
200 u32 ps_poll_utilization;
201 u32 upsd_utilization;
202} __packed;
203
204struct wl18xx_acx_rx_filter_stats {
205 u32 beacon_filter;
206 u32 arp_filter;
207 u32 mc_filter;
208 u32 dup_filter;
209 u32 data_filter;
210 u32 ibss_filter;
211 u32 protection_filter;
212 u32 accum_arp_pend_requests;
213 u32 max_arp_queue_dep;
214} __packed;
215
216struct wl18xx_acx_rx_rate_stats {
217 u32 rx_frames_per_rates[50];
218} __packed;
219
220#define AGGR_STATS_TX_AGG 16
221#define AGGR_STATS_TX_RATE 16
222#define AGGR_STATS_RX_SIZE_LEN 16
223
224struct wl18xx_acx_aggr_stats {
225 u32 tx_agg_vs_rate[AGGR_STATS_TX_AGG * AGGR_STATS_TX_RATE];
226 u32 rx_size[AGGR_STATS_RX_SIZE_LEN];
227} __packed;
228
229#define PIPE_STATS_HW_FIFO 11
230
231struct wl18xx_acx_pipeline_stats {
232 u32 hs_tx_stat_fifo_int;
233 u32 hs_rx_stat_fifo_int;
234 u32 tcp_tx_stat_fifo_int;
235 u32 tcp_rx_stat_fifo_int;
236 u32 enc_tx_stat_fifo_int;
237 u32 enc_rx_stat_fifo_int;
238 u32 rx_complete_stat_fifo_int;
239 u32 pre_proc_swi;
240 u32 post_proc_swi;
241 u32 sec_frag_swi;
242 u32 pre_to_defrag_swi;
243 u32 defrag_to_csum_swi;
244 u32 csum_to_rx_xfer_swi;
245 u32 dec_packet_in;
246 u32 dec_packet_in_fifo_full;
247 u32 dec_packet_out;
248 u32 cs_rx_packet_in;
249 u32 cs_rx_packet_out;
250 u16 pipeline_fifo_full[PIPE_STATS_HW_FIFO];
251} __packed;
252
253struct wl18xx_acx_mem_stats {
254 u32 rx_free_mem_blks;
255 u32 tx_free_mem_blks;
256 u32 fwlog_free_mem_blks;
257 u32 fw_gen_free_mem_blks;
258} __packed;
259
260struct wl18xx_acx_statistics {
261 struct acx_header header;
262
263 struct wl18xx_acx_error_stats error;
264 struct wl18xx_acx_debug_stats debug;
265 struct wl18xx_acx_tx_stats tx;
266 struct wl18xx_acx_rx_stats rx;
267 struct wl18xx_acx_isr_stats isr;
268 struct wl18xx_acx_pwr_stats pwr;
269 struct wl18xx_acx_ps_poll_stats ps_poll;
270 struct wl18xx_acx_rx_filter_stats rx_filter;
271 struct wl18xx_acx_rx_rate_stats rx_rate;
272 struct wl18xx_acx_aggr_stats aggr_size;
273 struct wl18xx_acx_pipeline_stats pipeline;
274 struct wl18xx_acx_mem_stats mem;
275} __packed;
276
277struct wl18xx_acx_clear_statistics {
278 struct acx_header header;
279};
280
281int wl18xx_acx_host_if_cfg_bitmap(struct wl1271 *wl, u32 host_cfg_bitmap,
282 u32 sdio_blk_size, u32 extra_mem_blks,
283 u32 len_field_size);
284int wl18xx_acx_set_checksum_state(struct wl1271 *wl);
285int wl18xx_acx_clear_statistics(struct wl1271 *wl);
286
287#endif /* __WL18XX_ACX_H__ */
diff --git a/drivers/net/wireless/ti/wl18xx/conf.h b/drivers/net/wireless/ti/wl18xx/conf.h
new file mode 100644
index 00000000000..fac0b7e87e7
--- /dev/null
+++ b/drivers/net/wireless/ti/wl18xx/conf.h
@@ -0,0 +1,92 @@
1/*
2 * This file is part of wl18xx
3 *
4 * Copyright (C) 2011 Texas Instruments Inc.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
18 * 02110-1301 USA
19 *
20 */
21
22#ifndef __WL18XX_CONF_H__
23#define __WL18XX_CONF_H__
24
25#define WL18XX_CONF_MAGIC 0x10e100ca
26#define WL18XX_CONF_VERSION (WLCORE_CONF_VERSION | 0x0002)
27#define WL18XX_CONF_MASK 0x0000ffff
28#define WL18XX_CONF_SIZE (WLCORE_CONF_SIZE + \
29 sizeof(struct wl18xx_priv_conf))
30
31#define NUM_OF_CHANNELS_11_ABG 150
32#define NUM_OF_CHANNELS_11_P 7
33#define WL18XX_NUM_OF_SUB_BANDS 9
34#define SRF_TABLE_LEN 16
35#define PIN_MUXING_SIZE 2
36
37struct wl18xx_mac_and_phy_params {
38 u8 phy_standalone;
39 u8 rdl;
40 u8 enable_clpc;
41 u8 enable_tx_low_pwr_on_siso_rdl;
42 u8 auto_detect;
43 u8 dedicated_fem;
44
45 u8 low_band_component;
46
47 /* Bit 0: One Hot, Bit 1: Control Enable, Bit 2: 1.8V, Bit 3: 3V */
48 u8 low_band_component_type;
49
50 u8 high_band_component;
51
52 /* Bit 0: One Hot, Bit 1: Control Enable, Bit 2: 1.8V, Bit 3: 3V */
53 u8 high_band_component_type;
54 u8 number_of_assembled_ant2_4;
55 u8 number_of_assembled_ant5;
56 u8 pin_muxing_platform_options[PIN_MUXING_SIZE];
57 u8 external_pa_dc2dc;
58 u8 tcxo_ldo_voltage;
59 u8 xtal_itrim_val;
60 u8 srf_state;
61 u8 srf1[SRF_TABLE_LEN];
62 u8 srf2[SRF_TABLE_LEN];
63 u8 srf3[SRF_TABLE_LEN];
64 u8 io_configuration;
65 u8 sdio_configuration;
66 u8 settings;
67 u8 rx_profile;
68 u8 per_chan_pwr_limit_arr_11abg[NUM_OF_CHANNELS_11_ABG];
69 u8 pwr_limit_reference_11_abg;
70 u8 per_chan_pwr_limit_arr_11p[NUM_OF_CHANNELS_11_P];
71 u8 pwr_limit_reference_11p;
72 u8 per_sub_band_tx_trace_loss[WL18XX_NUM_OF_SUB_BANDS];
73 u8 per_sub_band_rx_trace_loss[WL18XX_NUM_OF_SUB_BANDS];
74 u8 primary_clock_setting_time;
75 u8 clock_valid_on_wake_up;
76 u8 secondary_clock_setting_time;
77 u8 board_type;
78 /* enable point saturation */
79 u8 psat;
80 /* low/medium/high Tx power in dBm */
81 s8 low_power_val;
82 s8 med_power_val;
83 s8 high_power_val;
84 u8 padding[1];
85} __packed;
86
87struct wl18xx_priv_conf {
88 /* this structure is copied wholesale to FW */
89 struct wl18xx_mac_and_phy_params phy;
90} __packed;
91
92#endif /* __WL18XX_CONF_H__ */
diff --git a/drivers/net/wireless/ti/wl18xx/debugfs.c b/drivers/net/wireless/ti/wl18xx/debugfs.c
new file mode 100644
index 00000000000..3ce6f1039af
--- /dev/null
+++ b/drivers/net/wireless/ti/wl18xx/debugfs.c
@@ -0,0 +1,403 @@
1/*
2 * This file is part of wl18xx
3 *
4 * Copyright (C) 2009 Nokia Corporation
5 * Copyright (C) 2011-2012 Texas Instruments
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * version 2 as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
19 * 02110-1301 USA
20 *
21 */
22
23#include "../wlcore/debugfs.h"
24#include "../wlcore/wlcore.h"
25
26#include "wl18xx.h"
27#include "acx.h"
28#include "debugfs.h"
29
30#define WL18XX_DEBUGFS_FWSTATS_FILE(a, b, c) \
31 DEBUGFS_FWSTATS_FILE(a, b, c, wl18xx_acx_statistics)
32#define WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(a, b, c) \
33 DEBUGFS_FWSTATS_FILE_ARRAY(a, b, c, wl18xx_acx_statistics)
34
35
36WL18XX_DEBUGFS_FWSTATS_FILE(debug, debug1, "%u");
37WL18XX_DEBUGFS_FWSTATS_FILE(debug, debug2, "%u");
38WL18XX_DEBUGFS_FWSTATS_FILE(debug, debug3, "%u");
39WL18XX_DEBUGFS_FWSTATS_FILE(debug, debug4, "%u");
40WL18XX_DEBUGFS_FWSTATS_FILE(debug, debug5, "%u");
41WL18XX_DEBUGFS_FWSTATS_FILE(debug, debug6, "%u");
42
43WL18XX_DEBUGFS_FWSTATS_FILE(error, error_frame, "%u");
44WL18XX_DEBUGFS_FWSTATS_FILE(error, error_null_Frame_tx_start, "%u");
45WL18XX_DEBUGFS_FWSTATS_FILE(error, error_numll_frame_cts_start, "%u");
46WL18XX_DEBUGFS_FWSTATS_FILE(error, error_bar_retry, "%u");
47WL18XX_DEBUGFS_FWSTATS_FILE(error, error_frame_cts_nul_flid, "%u");
48
49WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_prepared_descs, "%u");
50WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_cmplt, "%u");
51WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_template_prepared, "%u");
52WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_data_prepared, "%u");
53WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_template_programmed, "%u");
54WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_data_programmed, "%u");
55WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_burst_programmed, "%u");
56WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_starts, "%u");
57WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_imm_resp, "%u");
58WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_start_templates, "%u");
59WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_start_int_templates, "%u");
60WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_start_fw_gen, "%u");
61WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_start_data, "%u");
62WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_start_null_frame, "%u");
63WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_exch, "%u");
64WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_retry_template, "%u");
65WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_retry_data, "%u");
66WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_exch_pending, "%u");
67WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_exch_expiry, "%u");
68WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_done_template, "%u");
69WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_done_data, "%u");
70WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_done_int_template, "%u");
71WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_frame_checksum, "%u");
72WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_checksum_result, "%u");
73WL18XX_DEBUGFS_FWSTATS_FILE(tx, frag_called, "%u");
74WL18XX_DEBUGFS_FWSTATS_FILE(tx, frag_mpdu_alloc_failed, "%u");
75WL18XX_DEBUGFS_FWSTATS_FILE(tx, frag_init_called, "%u");
76WL18XX_DEBUGFS_FWSTATS_FILE(tx, frag_in_process_called, "%u");
77WL18XX_DEBUGFS_FWSTATS_FILE(tx, frag_tkip_called, "%u");
78WL18XX_DEBUGFS_FWSTATS_FILE(tx, frag_key_not_found, "%u");
79WL18XX_DEBUGFS_FWSTATS_FILE(tx, frag_need_fragmentation, "%u");
80WL18XX_DEBUGFS_FWSTATS_FILE(tx, frag_bad_mblk_num, "%u");
81WL18XX_DEBUGFS_FWSTATS_FILE(tx, frag_failed, "%u");
82WL18XX_DEBUGFS_FWSTATS_FILE(tx, frag_cache_hit, "%u");
83WL18XX_DEBUGFS_FWSTATS_FILE(tx, frag_cache_miss, "%u");
84
85WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_beacon_early_term, "%u");
86WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_out_of_mpdu_nodes, "%u");
87WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_hdr_overflow, "%u");
88WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_dropped_frame, "%u");
89WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_done, "%u");
90WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_defrag, "%u");
91WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_defrag_end, "%u");
92WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_cmplt, "%u");
93WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_pre_complt, "%u");
94WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_cmplt_task, "%u");
95WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_phy_hdr, "%u");
96WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_timeout, "%u");
97WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_timeout_wa, "%u");
98WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_wa_density_dropped_frame, "%u");
99WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_wa_ba_not_expected, "%u");
100WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_frame_checksum, "%u");
101WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_checksum_result, "%u");
102WL18XX_DEBUGFS_FWSTATS_FILE(rx, defrag_called, "%u");
103WL18XX_DEBUGFS_FWSTATS_FILE(rx, defrag_init_called, "%u");
104WL18XX_DEBUGFS_FWSTATS_FILE(rx, defrag_in_process_called, "%u");
105WL18XX_DEBUGFS_FWSTATS_FILE(rx, defrag_tkip_called, "%u");
106WL18XX_DEBUGFS_FWSTATS_FILE(rx, defrag_need_defrag, "%u");
107WL18XX_DEBUGFS_FWSTATS_FILE(rx, defrag_decrypt_failed, "%u");
108WL18XX_DEBUGFS_FWSTATS_FILE(rx, decrypt_key_not_found, "%u");
109WL18XX_DEBUGFS_FWSTATS_FILE(rx, defrag_need_decrypt, "%u");
110WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_tkip_replays, "%u");
111
112WL18XX_DEBUGFS_FWSTATS_FILE(isr, irqs, "%u");
113
114WL18XX_DEBUGFS_FWSTATS_FILE(pwr, missing_bcns_cnt, "%u");
115WL18XX_DEBUGFS_FWSTATS_FILE(pwr, rcvd_bcns_cnt, "%u");
116WL18XX_DEBUGFS_FWSTATS_FILE(pwr, connection_out_of_sync, "%u");
117WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(pwr, cont_miss_bcns_spread,
118 PWR_STAT_MAX_CONT_MISSED_BCNS_SPREAD);
119WL18XX_DEBUGFS_FWSTATS_FILE(pwr, rcvd_awake_bcns_cnt, "%u");
120
121
122WL18XX_DEBUGFS_FWSTATS_FILE(ps_poll, ps_poll_timeouts, "%u");
123WL18XX_DEBUGFS_FWSTATS_FILE(ps_poll, upsd_timeouts, "%u");
124WL18XX_DEBUGFS_FWSTATS_FILE(ps_poll, upsd_max_ap_turn, "%u");
125WL18XX_DEBUGFS_FWSTATS_FILE(ps_poll, ps_poll_max_ap_turn, "%u");
126WL18XX_DEBUGFS_FWSTATS_FILE(ps_poll, ps_poll_utilization, "%u");
127WL18XX_DEBUGFS_FWSTATS_FILE(ps_poll, upsd_utilization, "%u");
128
129WL18XX_DEBUGFS_FWSTATS_FILE(rx_filter, beacon_filter, "%u");
130WL18XX_DEBUGFS_FWSTATS_FILE(rx_filter, arp_filter, "%u");
131WL18XX_DEBUGFS_FWSTATS_FILE(rx_filter, mc_filter, "%u");
132WL18XX_DEBUGFS_FWSTATS_FILE(rx_filter, dup_filter, "%u");
133WL18XX_DEBUGFS_FWSTATS_FILE(rx_filter, data_filter, "%u");
134WL18XX_DEBUGFS_FWSTATS_FILE(rx_filter, ibss_filter, "%u");
135WL18XX_DEBUGFS_FWSTATS_FILE(rx_filter, protection_filter, "%u");
136WL18XX_DEBUGFS_FWSTATS_FILE(rx_filter, accum_arp_pend_requests, "%u");
137WL18XX_DEBUGFS_FWSTATS_FILE(rx_filter, max_arp_queue_dep, "%u");
138
139WL18XX_DEBUGFS_FWSTATS_FILE(rx_rate, rx_frames_per_rates, "%u");
140
141WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(aggr_size, tx_agg_vs_rate,
142 AGGR_STATS_TX_AGG*AGGR_STATS_TX_RATE);
143WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(aggr_size, rx_size,
144 AGGR_STATS_RX_SIZE_LEN);
145
146WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, hs_tx_stat_fifo_int, "%u");
147WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, tcp_tx_stat_fifo_int, "%u");
148WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, tcp_rx_stat_fifo_int, "%u");
149WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, enc_tx_stat_fifo_int, "%u");
150WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, enc_rx_stat_fifo_int, "%u");
151WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, rx_complete_stat_fifo_int, "%u");
152WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, pre_proc_swi, "%u");
153WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, post_proc_swi, "%u");
154WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, sec_frag_swi, "%u");
155WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, pre_to_defrag_swi, "%u");
156WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, defrag_to_csum_swi, "%u");
157WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, csum_to_rx_xfer_swi, "%u");
158WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, dec_packet_in, "%u");
159WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, dec_packet_in_fifo_full, "%u");
160WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, dec_packet_out, "%u");
161WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, cs_rx_packet_in, "%u");
162WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, cs_rx_packet_out, "%u");
163
164WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(pipeline, pipeline_fifo_full,
165 PIPE_STATS_HW_FIFO);
166
167WL18XX_DEBUGFS_FWSTATS_FILE(mem, rx_free_mem_blks, "%u");
168WL18XX_DEBUGFS_FWSTATS_FILE(mem, tx_free_mem_blks, "%u");
169WL18XX_DEBUGFS_FWSTATS_FILE(mem, fwlog_free_mem_blks, "%u");
170WL18XX_DEBUGFS_FWSTATS_FILE(mem, fw_gen_free_mem_blks, "%u");
171
172static ssize_t conf_read(struct file *file, char __user *user_buf,
173 size_t count, loff_t *ppos)
174{
175 struct wl1271 *wl = file->private_data;
176 struct wl18xx_priv *priv = wl->priv;
177 struct wlcore_conf_header header;
178 char *buf, *pos;
179 size_t len;
180 int ret;
181
182 len = WL18XX_CONF_SIZE;
183 buf = kmalloc(len, GFP_KERNEL);
184 if (!buf)
185 return -ENOMEM;
186
187 header.magic = cpu_to_le32(WL18XX_CONF_MAGIC);
188 header.version = cpu_to_le32(WL18XX_CONF_VERSION);
189 header.checksum = 0;
190
191 mutex_lock(&wl->mutex);
192
193 pos = buf;
194 memcpy(pos, &header, sizeof(header));
195 pos += sizeof(header);
196 memcpy(pos, &wl->conf, sizeof(wl->conf));
197 pos += sizeof(wl->conf);
198 memcpy(pos, &priv->conf, sizeof(priv->conf));
199
200 mutex_unlock(&wl->mutex);
201
202 ret = simple_read_from_buffer(user_buf, count, ppos, buf, len);
203
204 kfree(buf);
205 return ret;
206}
207
208static const struct file_operations conf_ops = {
209 .read = conf_read,
210 .open = simple_open,
211 .llseek = default_llseek,
212};
213
214static ssize_t clear_fw_stats_write(struct file *file,
215 const char __user *user_buf,
216 size_t count, loff_t *ppos)
217{
218 struct wl1271 *wl = file->private_data;
219 int ret;
220
221 mutex_lock(&wl->mutex);
222
223 if (wl->state == WL1271_STATE_OFF)
224 goto out;
225
226 ret = wl18xx_acx_clear_statistics(wl);
227 if (ret < 0) {
228 count = ret;
229 goto out;
230 }
231out:
232 mutex_unlock(&wl->mutex);
233 return count;
234}
235
236static const struct file_operations clear_fw_stats_ops = {
237 .write = clear_fw_stats_write,
238 .open = simple_open,
239 .llseek = default_llseek,
240};
241
242int wl18xx_debugfs_add_files(struct wl1271 *wl,
243 struct dentry *rootdir)
244{
245 int ret = 0;
246 struct dentry *entry, *stats, *moddir;
247
248 moddir = debugfs_create_dir(KBUILD_MODNAME, rootdir);
249 if (!moddir || IS_ERR(moddir)) {
250 entry = moddir;
251 goto err;
252 }
253
254 stats = debugfs_create_dir("fw_stats", moddir);
255 if (!stats || IS_ERR(stats)) {
256 entry = stats;
257 goto err;
258 }
259
260 DEBUGFS_ADD(clear_fw_stats, stats);
261
262 DEBUGFS_FWSTATS_ADD(debug, debug1);
263 DEBUGFS_FWSTATS_ADD(debug, debug2);
264 DEBUGFS_FWSTATS_ADD(debug, debug3);
265 DEBUGFS_FWSTATS_ADD(debug, debug4);
266 DEBUGFS_FWSTATS_ADD(debug, debug5);
267 DEBUGFS_FWSTATS_ADD(debug, debug6);
268
269 DEBUGFS_FWSTATS_ADD(error, error_frame);
270 DEBUGFS_FWSTATS_ADD(error, error_null_Frame_tx_start);
271 DEBUGFS_FWSTATS_ADD(error, error_numll_frame_cts_start);
272 DEBUGFS_FWSTATS_ADD(error, error_bar_retry);
273 DEBUGFS_FWSTATS_ADD(error, error_frame_cts_nul_flid);
274
275 DEBUGFS_FWSTATS_ADD(tx, tx_prepared_descs);
276 DEBUGFS_FWSTATS_ADD(tx, tx_cmplt);
277 DEBUGFS_FWSTATS_ADD(tx, tx_template_prepared);
278 DEBUGFS_FWSTATS_ADD(tx, tx_data_prepared);
279 DEBUGFS_FWSTATS_ADD(tx, tx_template_programmed);
280 DEBUGFS_FWSTATS_ADD(tx, tx_data_programmed);
281 DEBUGFS_FWSTATS_ADD(tx, tx_burst_programmed);
282 DEBUGFS_FWSTATS_ADD(tx, tx_starts);
283 DEBUGFS_FWSTATS_ADD(tx, tx_imm_resp);
284 DEBUGFS_FWSTATS_ADD(tx, tx_start_templates);
285 DEBUGFS_FWSTATS_ADD(tx, tx_start_int_templates);
286 DEBUGFS_FWSTATS_ADD(tx, tx_start_fw_gen);
287 DEBUGFS_FWSTATS_ADD(tx, tx_start_data);
288 DEBUGFS_FWSTATS_ADD(tx, tx_start_null_frame);
289 DEBUGFS_FWSTATS_ADD(tx, tx_exch);
290 DEBUGFS_FWSTATS_ADD(tx, tx_retry_template);
291 DEBUGFS_FWSTATS_ADD(tx, tx_retry_data);
292 DEBUGFS_FWSTATS_ADD(tx, tx_exch_pending);
293 DEBUGFS_FWSTATS_ADD(tx, tx_exch_expiry);
294 DEBUGFS_FWSTATS_ADD(tx, tx_done_template);
295 DEBUGFS_FWSTATS_ADD(tx, tx_done_data);
296 DEBUGFS_FWSTATS_ADD(tx, tx_done_int_template);
297 DEBUGFS_FWSTATS_ADD(tx, tx_frame_checksum);
298 DEBUGFS_FWSTATS_ADD(tx, tx_checksum_result);
299 DEBUGFS_FWSTATS_ADD(tx, frag_called);
300 DEBUGFS_FWSTATS_ADD(tx, frag_mpdu_alloc_failed);
301 DEBUGFS_FWSTATS_ADD(tx, frag_init_called);
302 DEBUGFS_FWSTATS_ADD(tx, frag_in_process_called);
303 DEBUGFS_FWSTATS_ADD(tx, frag_tkip_called);
304 DEBUGFS_FWSTATS_ADD(tx, frag_key_not_found);
305 DEBUGFS_FWSTATS_ADD(tx, frag_need_fragmentation);
306 DEBUGFS_FWSTATS_ADD(tx, frag_bad_mblk_num);
307 DEBUGFS_FWSTATS_ADD(tx, frag_failed);
308 DEBUGFS_FWSTATS_ADD(tx, frag_cache_hit);
309 DEBUGFS_FWSTATS_ADD(tx, frag_cache_miss);
310
311 DEBUGFS_FWSTATS_ADD(rx, rx_beacon_early_term);
312 DEBUGFS_FWSTATS_ADD(rx, rx_out_of_mpdu_nodes);
313 DEBUGFS_FWSTATS_ADD(rx, rx_hdr_overflow);
314 DEBUGFS_FWSTATS_ADD(rx, rx_dropped_frame);
315 DEBUGFS_FWSTATS_ADD(rx, rx_done);
316 DEBUGFS_FWSTATS_ADD(rx, rx_defrag);
317 DEBUGFS_FWSTATS_ADD(rx, rx_defrag_end);
318 DEBUGFS_FWSTATS_ADD(rx, rx_cmplt);
319 DEBUGFS_FWSTATS_ADD(rx, rx_pre_complt);
320 DEBUGFS_FWSTATS_ADD(rx, rx_cmplt_task);
321 DEBUGFS_FWSTATS_ADD(rx, rx_phy_hdr);
322 DEBUGFS_FWSTATS_ADD(rx, rx_timeout);
323 DEBUGFS_FWSTATS_ADD(rx, rx_timeout_wa);
324 DEBUGFS_FWSTATS_ADD(rx, rx_wa_density_dropped_frame);
325 DEBUGFS_FWSTATS_ADD(rx, rx_wa_ba_not_expected);
326 DEBUGFS_FWSTATS_ADD(rx, rx_frame_checksum);
327 DEBUGFS_FWSTATS_ADD(rx, rx_checksum_result);
328 DEBUGFS_FWSTATS_ADD(rx, defrag_called);
329 DEBUGFS_FWSTATS_ADD(rx, defrag_init_called);
330 DEBUGFS_FWSTATS_ADD(rx, defrag_in_process_called);
331 DEBUGFS_FWSTATS_ADD(rx, defrag_tkip_called);
332 DEBUGFS_FWSTATS_ADD(rx, defrag_need_defrag);
333 DEBUGFS_FWSTATS_ADD(rx, defrag_decrypt_failed);
334 DEBUGFS_FWSTATS_ADD(rx, decrypt_key_not_found);
335 DEBUGFS_FWSTATS_ADD(rx, defrag_need_decrypt);
336 DEBUGFS_FWSTATS_ADD(rx, rx_tkip_replays);
337
338 DEBUGFS_FWSTATS_ADD(isr, irqs);
339
340 DEBUGFS_FWSTATS_ADD(pwr, missing_bcns_cnt);
341 DEBUGFS_FWSTATS_ADD(pwr, rcvd_bcns_cnt);
342 DEBUGFS_FWSTATS_ADD(pwr, connection_out_of_sync);
343 DEBUGFS_FWSTATS_ADD(pwr, cont_miss_bcns_spread);
344 DEBUGFS_FWSTATS_ADD(pwr, rcvd_awake_bcns_cnt);
345
346 DEBUGFS_FWSTATS_ADD(ps_poll, ps_poll_timeouts);
347 DEBUGFS_FWSTATS_ADD(ps_poll, upsd_timeouts);
348 DEBUGFS_FWSTATS_ADD(ps_poll, upsd_max_ap_turn);
349 DEBUGFS_FWSTATS_ADD(ps_poll, ps_poll_max_ap_turn);
350 DEBUGFS_FWSTATS_ADD(ps_poll, ps_poll_utilization);
351 DEBUGFS_FWSTATS_ADD(ps_poll, upsd_utilization);
352
353 DEBUGFS_FWSTATS_ADD(rx_filter, beacon_filter);
354 DEBUGFS_FWSTATS_ADD(rx_filter, arp_filter);
355 DEBUGFS_FWSTATS_ADD(rx_filter, mc_filter);
356 DEBUGFS_FWSTATS_ADD(rx_filter, dup_filter);
357 DEBUGFS_FWSTATS_ADD(rx_filter, data_filter);
358 DEBUGFS_FWSTATS_ADD(rx_filter, ibss_filter);
359 DEBUGFS_FWSTATS_ADD(rx_filter, protection_filter);
360 DEBUGFS_FWSTATS_ADD(rx_filter, accum_arp_pend_requests);
361 DEBUGFS_FWSTATS_ADD(rx_filter, max_arp_queue_dep);
362
363 DEBUGFS_FWSTATS_ADD(rx_rate, rx_frames_per_rates);
364
365 DEBUGFS_FWSTATS_ADD(aggr_size, tx_agg_vs_rate);
366 DEBUGFS_FWSTATS_ADD(aggr_size, rx_size);
367
368 DEBUGFS_FWSTATS_ADD(pipeline, hs_tx_stat_fifo_int);
369 DEBUGFS_FWSTATS_ADD(pipeline, tcp_tx_stat_fifo_int);
370 DEBUGFS_FWSTATS_ADD(pipeline, tcp_rx_stat_fifo_int);
371 DEBUGFS_FWSTATS_ADD(pipeline, enc_tx_stat_fifo_int);
372 DEBUGFS_FWSTATS_ADD(pipeline, enc_rx_stat_fifo_int);
373 DEBUGFS_FWSTATS_ADD(pipeline, rx_complete_stat_fifo_int);
374 DEBUGFS_FWSTATS_ADD(pipeline, pre_proc_swi);
375 DEBUGFS_FWSTATS_ADD(pipeline, post_proc_swi);
376 DEBUGFS_FWSTATS_ADD(pipeline, sec_frag_swi);
377 DEBUGFS_FWSTATS_ADD(pipeline, pre_to_defrag_swi);
378 DEBUGFS_FWSTATS_ADD(pipeline, defrag_to_csum_swi);
379 DEBUGFS_FWSTATS_ADD(pipeline, csum_to_rx_xfer_swi);
380 DEBUGFS_FWSTATS_ADD(pipeline, dec_packet_in);
381 DEBUGFS_FWSTATS_ADD(pipeline, dec_packet_in_fifo_full);
382 DEBUGFS_FWSTATS_ADD(pipeline, dec_packet_out);
383 DEBUGFS_FWSTATS_ADD(pipeline, cs_rx_packet_in);
384 DEBUGFS_FWSTATS_ADD(pipeline, cs_rx_packet_out);
385 DEBUGFS_FWSTATS_ADD(pipeline, pipeline_fifo_full);
386
387 DEBUGFS_FWSTATS_ADD(mem, rx_free_mem_blks);
388 DEBUGFS_FWSTATS_ADD(mem, tx_free_mem_blks);
389 DEBUGFS_FWSTATS_ADD(mem, fwlog_free_mem_blks);
390 DEBUGFS_FWSTATS_ADD(mem, fw_gen_free_mem_blks);
391
392 DEBUGFS_ADD(conf, moddir);
393
394 return 0;
395
396err:
397 if (IS_ERR(entry))
398 ret = PTR_ERR(entry);
399 else
400 ret = -ENOMEM;
401
402 return ret;
403}
diff --git a/drivers/net/wireless/ti/wl18xx/debugfs.h b/drivers/net/wireless/ti/wl18xx/debugfs.h
new file mode 100644
index 00000000000..ed679bebf62
--- /dev/null
+++ b/drivers/net/wireless/ti/wl18xx/debugfs.h
@@ -0,0 +1,28 @@
1/*
2 * This file is part of wl18xx
3 *
4 * Copyright (C) 2012 Texas Instruments. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
18 * 02110-1301 USA
19 *
20 */
21
22#ifndef __WL18XX_DEBUGFS_H__
23#define __WL18XX_DEBUGFS_H__
24
25int wl18xx_debugfs_add_files(struct wl1271 *wl,
26 struct dentry *rootdir);
27
28#endif /* __WL18XX_DEBUGFS_H__ */
diff --git a/drivers/net/wireless/ti/wl18xx/io.c b/drivers/net/wireless/ti/wl18xx/io.c
new file mode 100644
index 00000000000..0c06ccfd1b8
--- /dev/null
+++ b/drivers/net/wireless/ti/wl18xx/io.c
@@ -0,0 +1,75 @@
1/*
2 * This file is part of wl18xx
3 *
4 * Copyright (C) 2011 Texas Instruments
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
18 * 02110-1301 USA
19 *
20 */
21
22#include "../wlcore/wlcore.h"
23#include "../wlcore/io.h"
24
25#include "io.h"
26
27int wl18xx_top_reg_write(struct wl1271 *wl, int addr, u16 val)
28{
29 u32 tmp;
30 int ret;
31
32 if (WARN_ON(addr % 2))
33 return -EINVAL;
34
35 if ((addr % 4) == 0) {
36 ret = wlcore_read32(wl, addr, &tmp);
37 if (ret < 0)
38 goto out;
39
40 tmp = (tmp & 0xffff0000) | val;
41 ret = wlcore_write32(wl, addr, tmp);
42 } else {
43 ret = wlcore_read32(wl, addr - 2, &tmp);
44 if (ret < 0)
45 goto out;
46
47 tmp = (tmp & 0xffff) | (val << 16);
48 ret = wlcore_write32(wl, addr - 2, tmp);
49 }
50
51out:
52 return ret;
53}
54
55int wl18xx_top_reg_read(struct wl1271 *wl, int addr, u16 *out)
56{
57 u32 val;
58 int ret;
59
60 if (WARN_ON(addr % 2))
61 return -EINVAL;
62
63 if ((addr % 4) == 0) {
64 /* address is 4-bytes aligned */
65 ret = wlcore_read32(wl, addr, &val);
66 if (ret >= 0 && out)
67 *out = val & 0xffff;
68 } else {
69 ret = wlcore_read32(wl, addr - 2, &val);
70 if (ret >= 0 && out)
71 *out = (val & 0xffff0000) >> 16;
72 }
73
74 return ret;
75}
diff --git a/drivers/net/wireless/ti/wl18xx/io.h b/drivers/net/wireless/ti/wl18xx/io.h
new file mode 100644
index 00000000000..c32ae30277d
--- /dev/null
+++ b/drivers/net/wireless/ti/wl18xx/io.h
@@ -0,0 +1,28 @@
1/*
2 * This file is part of wl18xx
3 *
4 * Copyright (C) 2011 Texas Instruments
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
18 * 02110-1301 USA
19 *
20 */
21
22#ifndef __WL18XX_IO_H__
23#define __WL18XX_IO_H__
24
25int __must_check wl18xx_top_reg_write(struct wl1271 *wl, int addr, u16 val);
26int __must_check wl18xx_top_reg_read(struct wl1271 *wl, int addr, u16 *out);
27
28#endif /* __WL18XX_IO_H__ */
diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c
new file mode 100644
index 00000000000..5e583be8f67
--- /dev/null
+++ b/drivers/net/wireless/ti/wl18xx/main.c
@@ -0,0 +1,1542 @@
1/*
2 * This file is part of wl18xx
3 *
4 * Copyright (C) 2011 Texas Instruments
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
18 * 02110-1301 USA
19 *
20 */
21
22#include <linux/module.h>
23#include <linux/platform_device.h>
24#include <linux/ip.h>
25#include <linux/firmware.h>
26
27#include "../wlcore/wlcore.h"
28#include "../wlcore/debug.h"
29#include "../wlcore/io.h"
30#include "../wlcore/acx.h"
31#include "../wlcore/tx.h"
32#include "../wlcore/rx.h"
33#include "../wlcore/io.h"
34#include "../wlcore/boot.h"
35
36#include "reg.h"
37#include "conf.h"
38#include "acx.h"
39#include "tx.h"
40#include "wl18xx.h"
41#include "io.h"
42#include "debugfs.h"
43
44#define WL18XX_RX_CHECKSUM_MASK 0x40
45
46static char *ht_mode_param = "default";
47static char *board_type_param = "hdk";
48static bool checksum_param = false;
49static bool enable_11a_param = true;
50static int num_rx_desc_param = -1;
51
52/* phy paramters */
53static int dc2dc_param = -1;
54static int n_antennas_2_param = -1;
55static int n_antennas_5_param = -1;
56static int low_band_component_param = -1;
57static int low_band_component_type_param = -1;
58static int high_band_component_param = -1;
59static int high_band_component_type_param = -1;
60static int pwr_limit_reference_11_abg_param = -1;
61
62static const u8 wl18xx_rate_to_idx_2ghz[] = {
63 /* MCS rates are used only with 11n */
64 15, /* WL18XX_CONF_HW_RXTX_RATE_MCS15 */
65 14, /* WL18XX_CONF_HW_RXTX_RATE_MCS14 */
66 13, /* WL18XX_CONF_HW_RXTX_RATE_MCS13 */
67 12, /* WL18XX_CONF_HW_RXTX_RATE_MCS12 */
68 11, /* WL18XX_CONF_HW_RXTX_RATE_MCS11 */
69 10, /* WL18XX_CONF_HW_RXTX_RATE_MCS10 */
70 9, /* WL18XX_CONF_HW_RXTX_RATE_MCS9 */
71 8, /* WL18XX_CONF_HW_RXTX_RATE_MCS8 */
72 7, /* WL18XX_CONF_HW_RXTX_RATE_MCS7 */
73 6, /* WL18XX_CONF_HW_RXTX_RATE_MCS6 */
74 5, /* WL18XX_CONF_HW_RXTX_RATE_MCS5 */
75 4, /* WL18XX_CONF_HW_RXTX_RATE_MCS4 */
76 3, /* WL18XX_CONF_HW_RXTX_RATE_MCS3 */
77 2, /* WL18XX_CONF_HW_RXTX_RATE_MCS2 */
78 1, /* WL18XX_CONF_HW_RXTX_RATE_MCS1 */
79 0, /* WL18XX_CONF_HW_RXTX_RATE_MCS0 */
80
81 11, /* WL18XX_CONF_HW_RXTX_RATE_54 */
82 10, /* WL18XX_CONF_HW_RXTX_RATE_48 */
83 9, /* WL18XX_CONF_HW_RXTX_RATE_36 */
84 8, /* WL18XX_CONF_HW_RXTX_RATE_24 */
85
86 /* TI-specific rate */
87 CONF_HW_RXTX_RATE_UNSUPPORTED, /* WL18XX_CONF_HW_RXTX_RATE_22 */
88
89 7, /* WL18XX_CONF_HW_RXTX_RATE_18 */
90 6, /* WL18XX_CONF_HW_RXTX_RATE_12 */
91 3, /* WL18XX_CONF_HW_RXTX_RATE_11 */
92 5, /* WL18XX_CONF_HW_RXTX_RATE_9 */
93 4, /* WL18XX_CONF_HW_RXTX_RATE_6 */
94 2, /* WL18XX_CONF_HW_RXTX_RATE_5_5 */
95 1, /* WL18XX_CONF_HW_RXTX_RATE_2 */
96 0 /* WL18XX_CONF_HW_RXTX_RATE_1 */
97};
98
99static const u8 wl18xx_rate_to_idx_5ghz[] = {
100 /* MCS rates are used only with 11n */
101 15, /* WL18XX_CONF_HW_RXTX_RATE_MCS15 */
102 14, /* WL18XX_CONF_HW_RXTX_RATE_MCS14 */
103 13, /* WL18XX_CONF_HW_RXTX_RATE_MCS13 */
104 12, /* WL18XX_CONF_HW_RXTX_RATE_MCS12 */
105 11, /* WL18XX_CONF_HW_RXTX_RATE_MCS11 */
106 10, /* WL18XX_CONF_HW_RXTX_RATE_MCS10 */
107 9, /* WL18XX_CONF_HW_RXTX_RATE_MCS9 */
108 8, /* WL18XX_CONF_HW_RXTX_RATE_MCS8 */
109 7, /* WL18XX_CONF_HW_RXTX_RATE_MCS7 */
110 6, /* WL18XX_CONF_HW_RXTX_RATE_MCS6 */
111 5, /* WL18XX_CONF_HW_RXTX_RATE_MCS5 */
112 4, /* WL18XX_CONF_HW_RXTX_RATE_MCS4 */
113 3, /* WL18XX_CONF_HW_RXTX_RATE_MCS3 */
114 2, /* WL18XX_CONF_HW_RXTX_RATE_MCS2 */
115 1, /* WL18XX_CONF_HW_RXTX_RATE_MCS1 */
116 0, /* WL18XX_CONF_HW_RXTX_RATE_MCS0 */
117
118 7, /* WL18XX_CONF_HW_RXTX_RATE_54 */
119 6, /* WL18XX_CONF_HW_RXTX_RATE_48 */
120 5, /* WL18XX_CONF_HW_RXTX_RATE_36 */
121 4, /* WL18XX_CONF_HW_RXTX_RATE_24 */
122
123 /* TI-specific rate */
124 CONF_HW_RXTX_RATE_UNSUPPORTED, /* WL18XX_CONF_HW_RXTX_RATE_22 */
125
126 3, /* WL18XX_CONF_HW_RXTX_RATE_18 */
127 2, /* WL18XX_CONF_HW_RXTX_RATE_12 */
128 CONF_HW_RXTX_RATE_UNSUPPORTED, /* WL18XX_CONF_HW_RXTX_RATE_11 */
129 1, /* WL18XX_CONF_HW_RXTX_RATE_9 */
130 0, /* WL18XX_CONF_HW_RXTX_RATE_6 */
131 CONF_HW_RXTX_RATE_UNSUPPORTED, /* WL18XX_CONF_HW_RXTX_RATE_5_5 */
132 CONF_HW_RXTX_RATE_UNSUPPORTED, /* WL18XX_CONF_HW_RXTX_RATE_2 */
133 CONF_HW_RXTX_RATE_UNSUPPORTED, /* WL18XX_CONF_HW_RXTX_RATE_1 */
134};
135
136static const u8 *wl18xx_band_rate_to_idx[] = {
137 [IEEE80211_BAND_2GHZ] = wl18xx_rate_to_idx_2ghz,
138 [IEEE80211_BAND_5GHZ] = wl18xx_rate_to_idx_5ghz
139};
140
141enum wl18xx_hw_rates {
142 WL18XX_CONF_HW_RXTX_RATE_MCS15 = 0,
143 WL18XX_CONF_HW_RXTX_RATE_MCS14,
144 WL18XX_CONF_HW_RXTX_RATE_MCS13,
145 WL18XX_CONF_HW_RXTX_RATE_MCS12,
146 WL18XX_CONF_HW_RXTX_RATE_MCS11,
147 WL18XX_CONF_HW_RXTX_RATE_MCS10,
148 WL18XX_CONF_HW_RXTX_RATE_MCS9,
149 WL18XX_CONF_HW_RXTX_RATE_MCS8,
150 WL18XX_CONF_HW_RXTX_RATE_MCS7,
151 WL18XX_CONF_HW_RXTX_RATE_MCS6,
152 WL18XX_CONF_HW_RXTX_RATE_MCS5,
153 WL18XX_CONF_HW_RXTX_RATE_MCS4,
154 WL18XX_CONF_HW_RXTX_RATE_MCS3,
155 WL18XX_CONF_HW_RXTX_RATE_MCS2,
156 WL18XX_CONF_HW_RXTX_RATE_MCS1,
157 WL18XX_CONF_HW_RXTX_RATE_MCS0,
158 WL18XX_CONF_HW_RXTX_RATE_54,
159 WL18XX_CONF_HW_RXTX_RATE_48,
160 WL18XX_CONF_HW_RXTX_RATE_36,
161 WL18XX_CONF_HW_RXTX_RATE_24,
162 WL18XX_CONF_HW_RXTX_RATE_22,
163 WL18XX_CONF_HW_RXTX_RATE_18,
164 WL18XX_CONF_HW_RXTX_RATE_12,
165 WL18XX_CONF_HW_RXTX_RATE_11,
166 WL18XX_CONF_HW_RXTX_RATE_9,
167 WL18XX_CONF_HW_RXTX_RATE_6,
168 WL18XX_CONF_HW_RXTX_RATE_5_5,
169 WL18XX_CONF_HW_RXTX_RATE_2,
170 WL18XX_CONF_HW_RXTX_RATE_1,
171 WL18XX_CONF_HW_RXTX_RATE_MAX,
172};
173
174static struct wlcore_conf wl18xx_conf = {
175 .sg = {
176 .params = {
177 [CONF_SG_ACL_BT_MASTER_MIN_BR] = 10,
178 [CONF_SG_ACL_BT_MASTER_MAX_BR] = 180,
179 [CONF_SG_ACL_BT_SLAVE_MIN_BR] = 10,
180 [CONF_SG_ACL_BT_SLAVE_MAX_BR] = 180,
181 [CONF_SG_ACL_BT_MASTER_MIN_EDR] = 10,
182 [CONF_SG_ACL_BT_MASTER_MAX_EDR] = 80,
183 [CONF_SG_ACL_BT_SLAVE_MIN_EDR] = 10,
184 [CONF_SG_ACL_BT_SLAVE_MAX_EDR] = 80,
185 [CONF_SG_ACL_WLAN_PS_MASTER_BR] = 8,
186 [CONF_SG_ACL_WLAN_PS_SLAVE_BR] = 8,
187 [CONF_SG_ACL_WLAN_PS_MASTER_EDR] = 20,
188 [CONF_SG_ACL_WLAN_PS_SLAVE_EDR] = 20,
189 [CONF_SG_ACL_WLAN_ACTIVE_MASTER_MIN_BR] = 20,
190 [CONF_SG_ACL_WLAN_ACTIVE_MASTER_MAX_BR] = 35,
191 [CONF_SG_ACL_WLAN_ACTIVE_SLAVE_MIN_BR] = 16,
192 [CONF_SG_ACL_WLAN_ACTIVE_SLAVE_MAX_BR] = 35,
193 [CONF_SG_ACL_WLAN_ACTIVE_MASTER_MIN_EDR] = 32,
194 [CONF_SG_ACL_WLAN_ACTIVE_MASTER_MAX_EDR] = 50,
195 [CONF_SG_ACL_WLAN_ACTIVE_SLAVE_MIN_EDR] = 28,
196 [CONF_SG_ACL_WLAN_ACTIVE_SLAVE_MAX_EDR] = 50,
197 [CONF_SG_ACL_ACTIVE_SCAN_WLAN_BR] = 10,
198 [CONF_SG_ACL_ACTIVE_SCAN_WLAN_EDR] = 20,
199 [CONF_SG_ACL_PASSIVE_SCAN_BT_BR] = 75,
200 [CONF_SG_ACL_PASSIVE_SCAN_WLAN_BR] = 15,
201 [CONF_SG_ACL_PASSIVE_SCAN_BT_EDR] = 27,
202 [CONF_SG_ACL_PASSIVE_SCAN_WLAN_EDR] = 17,
203 /* active scan params */
204 [CONF_SG_AUTO_SCAN_PROBE_REQ] = 170,
205 [CONF_SG_ACTIVE_SCAN_DURATION_FACTOR_HV3] = 50,
206 [CONF_SG_ACTIVE_SCAN_DURATION_FACTOR_A2DP] = 100,
207 /* passive scan params */
208 [CONF_SG_PASSIVE_SCAN_DURATION_FACTOR_A2DP_BR] = 800,
209 [CONF_SG_PASSIVE_SCAN_DURATION_FACTOR_A2DP_EDR] = 200,
210 [CONF_SG_PASSIVE_SCAN_DURATION_FACTOR_HV3] = 200,
211 /* passive scan in dual antenna params */
212 [CONF_SG_CONSECUTIVE_HV3_IN_PASSIVE_SCAN] = 0,
213 [CONF_SG_BCN_HV3_COLLISION_THRESH_IN_PASSIVE_SCAN] = 0,
214 [CONF_SG_TX_RX_PROTECTION_BWIDTH_IN_PASSIVE_SCAN] = 0,
215 /* general params */
216 [CONF_SG_STA_FORCE_PS_IN_BT_SCO] = 1,
217 [CONF_SG_ANTENNA_CONFIGURATION] = 0,
218 [CONF_SG_BEACON_MISS_PERCENT] = 60,
219 [CONF_SG_DHCP_TIME] = 5000,
220 [CONF_SG_RXT] = 1200,
221 [CONF_SG_TXT] = 1000,
222 [CONF_SG_ADAPTIVE_RXT_TXT] = 1,
223 [CONF_SG_GENERAL_USAGE_BIT_MAP] = 3,
224 [CONF_SG_HV3_MAX_SERVED] = 6,
225 [CONF_SG_PS_POLL_TIMEOUT] = 10,
226 [CONF_SG_UPSD_TIMEOUT] = 10,
227 [CONF_SG_CONSECUTIVE_CTS_THRESHOLD] = 2,
228 [CONF_SG_STA_RX_WINDOW_AFTER_DTIM] = 5,
229 [CONF_SG_STA_CONNECTION_PROTECTION_TIME] = 30,
230 /* AP params */
231 [CONF_AP_BEACON_MISS_TX] = 3,
232 [CONF_AP_RX_WINDOW_AFTER_BEACON] = 10,
233 [CONF_AP_BEACON_WINDOW_INTERVAL] = 2,
234 [CONF_AP_CONNECTION_PROTECTION_TIME] = 0,
235 [CONF_AP_BT_ACL_VAL_BT_SERVE_TIME] = 25,
236 [CONF_AP_BT_ACL_VAL_WL_SERVE_TIME] = 25,
237 /* CTS Diluting params */
238 [CONF_SG_CTS_DILUTED_BAD_RX_PACKETS_TH] = 0,
239 [CONF_SG_CTS_CHOP_IN_DUAL_ANT_SCO_MASTER] = 0,
240 },
241 .state = CONF_SG_PROTECTIVE,
242 },
243 .rx = {
244 .rx_msdu_life_time = 512000,
245 .packet_detection_threshold = 0,
246 .ps_poll_timeout = 15,
247 .upsd_timeout = 15,
248 .rts_threshold = IEEE80211_MAX_RTS_THRESHOLD,
249 .rx_cca_threshold = 0,
250 .irq_blk_threshold = 0xFFFF,
251 .irq_pkt_threshold = 0,
252 .irq_timeout = 600,
253 .queue_type = CONF_RX_QUEUE_TYPE_LOW_PRIORITY,
254 },
255 .tx = {
256 .tx_energy_detection = 0,
257 .sta_rc_conf = {
258 .enabled_rates = 0,
259 .short_retry_limit = 10,
260 .long_retry_limit = 10,
261 .aflags = 0,
262 },
263 .ac_conf_count = 4,
264 .ac_conf = {
265 [CONF_TX_AC_BE] = {
266 .ac = CONF_TX_AC_BE,
267 .cw_min = 15,
268 .cw_max = 63,
269 .aifsn = 3,
270 .tx_op_limit = 0,
271 },
272 [CONF_TX_AC_BK] = {
273 .ac = CONF_TX_AC_BK,
274 .cw_min = 15,
275 .cw_max = 63,
276 .aifsn = 7,
277 .tx_op_limit = 0,
278 },
279 [CONF_TX_AC_VI] = {
280 .ac = CONF_TX_AC_VI,
281 .cw_min = 15,
282 .cw_max = 63,
283 .aifsn = CONF_TX_AIFS_PIFS,
284 .tx_op_limit = 3008,
285 },
286 [CONF_TX_AC_VO] = {
287 .ac = CONF_TX_AC_VO,
288 .cw_min = 15,
289 .cw_max = 63,
290 .aifsn = CONF_TX_AIFS_PIFS,
291 .tx_op_limit = 1504,
292 },
293 },
294 .max_tx_retries = 100,
295 .ap_aging_period = 300,
296 .tid_conf_count = 4,
297 .tid_conf = {
298 [CONF_TX_AC_BE] = {
299 .queue_id = CONF_TX_AC_BE,
300 .channel_type = CONF_CHANNEL_TYPE_EDCF,
301 .tsid = CONF_TX_AC_BE,
302 .ps_scheme = CONF_PS_SCHEME_LEGACY,
303 .ack_policy = CONF_ACK_POLICY_LEGACY,
304 .apsd_conf = {0, 0},
305 },
306 [CONF_TX_AC_BK] = {
307 .queue_id = CONF_TX_AC_BK,
308 .channel_type = CONF_CHANNEL_TYPE_EDCF,
309 .tsid = CONF_TX_AC_BK,
310 .ps_scheme = CONF_PS_SCHEME_LEGACY,
311 .ack_policy = CONF_ACK_POLICY_LEGACY,
312 .apsd_conf = {0, 0},
313 },
314 [CONF_TX_AC_VI] = {
315 .queue_id = CONF_TX_AC_VI,
316 .channel_type = CONF_CHANNEL_TYPE_EDCF,
317 .tsid = CONF_TX_AC_VI,
318 .ps_scheme = CONF_PS_SCHEME_LEGACY,
319 .ack_policy = CONF_ACK_POLICY_LEGACY,
320 .apsd_conf = {0, 0},
321 },
322 [CONF_TX_AC_VO] = {
323 .queue_id = CONF_TX_AC_VO,
324 .channel_type = CONF_CHANNEL_TYPE_EDCF,
325 .tsid = CONF_TX_AC_VO,
326 .ps_scheme = CONF_PS_SCHEME_LEGACY,
327 .ack_policy = CONF_ACK_POLICY_LEGACY,
328 .apsd_conf = {0, 0},
329 },
330 },
331 .frag_threshold = IEEE80211_MAX_FRAG_THRESHOLD,
332 .tx_compl_timeout = 350,
333 .tx_compl_threshold = 10,
334 .basic_rate = CONF_HW_BIT_RATE_1MBPS,
335 .basic_rate_5 = CONF_HW_BIT_RATE_6MBPS,
336 .tmpl_short_retry_limit = 10,
337 .tmpl_long_retry_limit = 10,
338 .tx_watchdog_timeout = 5000,
339 },
340 .conn = {
341 .wake_up_event = CONF_WAKE_UP_EVENT_DTIM,
342 .listen_interval = 1,
343 .suspend_wake_up_event = CONF_WAKE_UP_EVENT_N_DTIM,
344 .suspend_listen_interval = 3,
345 .bcn_filt_mode = CONF_BCN_FILT_MODE_ENABLED,
346 .bcn_filt_ie_count = 3,
347 .bcn_filt_ie = {
348 [0] = {
349 .ie = WLAN_EID_CHANNEL_SWITCH,
350 .rule = CONF_BCN_RULE_PASS_ON_APPEARANCE,
351 },
352 [1] = {
353 .ie = WLAN_EID_HT_OPERATION,
354 .rule = CONF_BCN_RULE_PASS_ON_CHANGE,
355 },
356 [2] = {
357 .ie = WLAN_EID_ERP_INFO,
358 .rule = CONF_BCN_RULE_PASS_ON_CHANGE,
359 },
360 },
361 .synch_fail_thold = 12,
362 .bss_lose_timeout = 400,
363 .beacon_rx_timeout = 10000,
364 .broadcast_timeout = 20000,
365 .rx_broadcast_in_ps = 1,
366 .ps_poll_threshold = 10,
367 .bet_enable = CONF_BET_MODE_ENABLE,
368 .bet_max_consecutive = 50,
369 .psm_entry_retries = 8,
370 .psm_exit_retries = 16,
371 .psm_entry_nullfunc_retries = 3,
372 .dynamic_ps_timeout = 200,
373 .forced_ps = false,
374 .keep_alive_interval = 55000,
375 .max_listen_interval = 20,
376 .sta_sleep_auth = WL1271_PSM_ILLEGAL,
377 },
378 .itrim = {
379 .enable = false,
380 .timeout = 50000,
381 },
382 .pm_config = {
383 .host_clk_settling_time = 5000,
384 .host_fast_wakeup_support = CONF_FAST_WAKEUP_DISABLE,
385 },
386 .roam_trigger = {
387 .trigger_pacing = 1,
388 .avg_weight_rssi_beacon = 20,
389 .avg_weight_rssi_data = 10,
390 .avg_weight_snr_beacon = 20,
391 .avg_weight_snr_data = 10,
392 },
393 .scan = {
394 .min_dwell_time_active = 7500,
395 .max_dwell_time_active = 30000,
396 .min_dwell_time_passive = 100000,
397 .max_dwell_time_passive = 100000,
398 .num_probe_reqs = 2,
399 .split_scan_timeout = 50000,
400 },
401 .sched_scan = {
402 /*
403 * Values are in TU/1000 but since sched scan FW command
404 * params are in TUs rounding up may occur.
405 */
406 .base_dwell_time = 7500,
407 .max_dwell_time_delta = 22500,
408 /* based on 250bits per probe @1Mbps */
409 .dwell_time_delta_per_probe = 2000,
410 /* based on 250bits per probe @6Mbps (plus a bit more) */
411 .dwell_time_delta_per_probe_5 = 350,
412 .dwell_time_passive = 100000,
413 .dwell_time_dfs = 150000,
414 .num_probe_reqs = 2,
415 .rssi_threshold = -90,
416 .snr_threshold = 0,
417 },
418 .ht = {
419 .rx_ba_win_size = 10,
420 .tx_ba_win_size = 64,
421 .inactivity_timeout = 10000,
422 .tx_ba_tid_bitmap = CONF_TX_BA_ENABLED_TID_BITMAP,
423 },
424 .mem = {
425 .num_stations = 1,
426 .ssid_profiles = 1,
427 .rx_block_num = 40,
428 .tx_min_block_num = 40,
429 .dynamic_memory = 1,
430 .min_req_tx_blocks = 45,
431 .min_req_rx_blocks = 22,
432 .tx_min = 27,
433 },
434 .fm_coex = {
435 .enable = true,
436 .swallow_period = 5,
437 .n_divider_fref_set_1 = 0xff, /* default */
438 .n_divider_fref_set_2 = 12,
439 .m_divider_fref_set_1 = 0xffff,
440 .m_divider_fref_set_2 = 148, /* default */
441 .coex_pll_stabilization_time = 0xffffffff, /* default */
442 .ldo_stabilization_time = 0xffff, /* default */
443 .fm_disturbed_band_margin = 0xff, /* default */
444 .swallow_clk_diff = 0xff, /* default */
445 },
446 .rx_streaming = {
447 .duration = 150,
448 .queues = 0x1,
449 .interval = 20,
450 .always = 0,
451 },
452 .fwlog = {
453 .mode = WL12XX_FWLOG_ON_DEMAND,
454 .mem_blocks = 2,
455 .severity = 0,
456 .timestamp = WL12XX_FWLOG_TIMESTAMP_DISABLED,
457 .output = WL12XX_FWLOG_OUTPUT_HOST,
458 .threshold = 0,
459 },
460 .rate = {
461 .rate_retry_score = 32000,
462 .per_add = 8192,
463 .per_th1 = 2048,
464 .per_th2 = 4096,
465 .max_per = 8100,
466 .inverse_curiosity_factor = 5,
467 .tx_fail_low_th = 4,
468 .tx_fail_high_th = 10,
469 .per_alpha_shift = 4,
470 .per_add_shift = 13,
471 .per_beta1_shift = 10,
472 .per_beta2_shift = 8,
473 .rate_check_up = 2,
474 .rate_check_down = 12,
475 .rate_retry_policy = {
476 0x00, 0x00, 0x00, 0x00, 0x00,
477 0x00, 0x00, 0x00, 0x00, 0x00,
478 0x00, 0x00, 0x00,
479 },
480 },
481 .hangover = {
482 .recover_time = 0,
483 .hangover_period = 20,
484 .dynamic_mode = 1,
485 .early_termination_mode = 1,
486 .max_period = 20,
487 .min_period = 1,
488 .increase_delta = 1,
489 .decrease_delta = 2,
490 .quiet_time = 4,
491 .increase_time = 1,
492 .window_size = 16,
493 },
494};
495
/*
 * Built-in fallback for the wl18xx private (PHY) configuration, used when
 * the configuration binary cannot be loaded (see wl18xx_conf_init()).
 * Individual fields may later be overridden by module parameters in
 * wl18xx_probe().
 */
static struct wl18xx_priv_conf wl18xx_default_priv_conf = {
	.phy = {
		.phy_standalone = 0x00,
		.primary_clock_setting_time = 0x05,
		.clock_valid_on_wake_up = 0x00,
		.secondary_clock_setting_time = 0x05,
		.rdl = 0x01,
		.auto_detect = 0x00,
		.dedicated_fem = FEM_NONE,
		/* default RF front-end: 2-way switches on both bands */
		.low_band_component = COMPONENT_2_WAY_SWITCH,
		.low_band_component_type = 0x05,
		.high_band_component = COMPONENT_2_WAY_SWITCH,
		.high_band_component_type = 0x09,
		.tcxo_ldo_voltage = 0x00,
		.xtal_itrim_val = 0x04,
		.srf_state = 0x00,
		.io_configuration = 0x01,
		.sdio_configuration = 0x00,
		.settings = 0x00,
		.enable_clpc = 0x00,
		.enable_tx_low_pwr_on_siso_rdl = 0x00,
		.rx_profile = 0x00,
		.pwr_limit_reference_11_abg = 0xc8,
		.psat = 0,
		.low_power_val = 0x00,
		.med_power_val = 0x0a,
		.high_power_val = 0x1e,
		.external_pa_dc2dc = 0,
		/* single antenna assembled on each band by default */
		.number_of_assembled_ant2_4 = 1,
		.number_of_assembled_ant5 = 1,
	},
};
528
/*
 * Memory partition table: maps the chip's address space windows (mem/reg
 * plus two auxiliary ranges) for each access mode. Selected at runtime via
 * wlcore_set_partition().
 */
static const struct wlcore_partition_set wl18xx_ptable[PART_TABLE_LEN] = {
	/* ELP/PRCM top registers, used e.g. for clock setup and fuse reads */
	[PART_TOP_PRCM_ELP_SOC] = {
		.mem  = { .start = 0x00A02000, .size  = 0x00010000 },
		.reg  = { .start = 0x00807000, .size  = 0x00005000 },
		.mem2 = { .start = 0x00800000, .size  = 0x0000B000 },
		.mem3 = { .start = 0x00000000, .size  = 0x00000000 },
	},
	[PART_DOWN] = {
		.mem  = { .start = 0x00000000, .size  = 0x00014000 },
		.reg  = { .start = 0x00810000, .size  = 0x0000BFFF },
		.mem2 = { .start = 0x00000000, .size  = 0x00000000 },
		.mem3 = { .start = 0x00000000, .size  = 0x00000000 },
	},
	/* windows used during firmware boot */
	[PART_BOOT] = {
		.mem  = { .start = 0x00700000, .size = 0x0000030c },
		.reg  = { .start = 0x00802000, .size = 0x00014578 },
		.mem2 = { .start = 0x00B00404, .size = 0x00001000 },
		.mem3 = { .start = 0x00C00000, .size = 0x00000400 },
	},
	[PART_WORK] = {
		.mem  = { .start = 0x00800000, .size  = 0x000050FC },
		.reg  = { .start = 0x00B00404, .size  = 0x00001000 },
		.mem2 = { .start = 0x00C00000, .size  = 0x00000400 },
		.mem3 = { .start = 0x00000000, .size  = 0x00000000 },
	},
	/* window sized exactly for the MAC/PHY parameter upload */
	[PART_PHY_INIT] = {
		.mem  = { .start = 0x80926000,
			  .size = sizeof(struct wl18xx_mac_and_phy_params) },
		.reg  = { .start = 0x00000000, .size = 0x00000000 },
		.mem2 = { .start = 0x00000000, .size = 0x00000000 },
		.mem3 = { .start = 0x00000000, .size = 0x00000000 },
	},
};
562
/*
 * Register translation table: maps wlcore's generic register identifiers
 * to the wl18xx-specific addresses, consumed through wlcore_read_reg()/
 * wlcore_write_reg().
 */
static const int wl18xx_rtable[REG_TABLE_LEN] = {
	[REG_ECPU_CONTROL]		= WL18XX_REG_ECPU_CONTROL,
	[REG_INTERRUPT_NO_CLEAR]	= WL18XX_REG_INTERRUPT_NO_CLEAR,
	[REG_INTERRUPT_ACK]		= WL18XX_REG_INTERRUPT_ACK,
	[REG_COMMAND_MAILBOX_PTR]	= WL18XX_REG_COMMAND_MAILBOX_PTR,
	[REG_EVENT_MAILBOX_PTR]		= WL18XX_REG_EVENT_MAILBOX_PTR,
	[REG_INTERRUPT_TRIG]		= WL18XX_REG_INTERRUPT_TRIG_H,
	[REG_INTERRUPT_MASK]		= WL18XX_REG_INTERRUPT_MASK,
	[REG_PC_ON_RECOVERY]		= WL18XX_SCR_PAD4,
	[REG_CHIP_ID_B]			= WL18XX_REG_CHIP_ID_B,
	[REG_CMD_MBOX_ADDRESS]		= WL18XX_CMD_MBOX_ADDRESS,

	/* data access memory addresses, used with partition translation */
	[REG_SLV_MEM_DATA]		= WL18XX_SLV_MEM_DATA,
	[REG_SLV_REG_DATA]		= WL18XX_SLV_REG_DATA,

	/* raw data access memory addresses */
	[REG_RAW_FW_STATUS_ADDR]	= WL18XX_FW_STATUS_ADDR,
};
582
/*
 * PLL settings per detected reference clock frequency; indexed by the
 * value read from PRIMARY_CLK_DETECT in wl18xx_set_clk(). Fields carry
 * the N/M dividers, the fractional P/Q factors and the swallow-enable
 * flag (exact field order defined by struct wl18xx_clk_cfg — see header).
 * Entries with swallow == false only program N and M.
 */
static const struct wl18xx_clk_cfg wl18xx_clk_table[NUM_CLOCK_CONFIGS] = {
	[CLOCK_CONFIG_16_2_M]	= { 7,	104,  801, 4,  true },
	[CLOCK_CONFIG_16_368_M]	= { 9,	132, 3751, 4,  true },
	[CLOCK_CONFIG_16_8_M]	= { 7,	100,    0, 0, false },
	[CLOCK_CONFIG_19_2_M]	= { 8,	100,    0, 0, false },
	[CLOCK_CONFIG_26_M]	= { 13, 120,    0, 0, false },
	[CLOCK_CONFIG_32_736_M]	= { 9,	132, 3751, 4,  true },
	[CLOCK_CONFIG_33_6_M]	= { 7,	100,    0, 0, false },
	[CLOCK_CONFIG_38_468_M]	= { 8,	100,    0, 0, false },
	[CLOCK_CONFIG_52_M]	= { 13, 120,    0, 0, false },
};
594
/* firmware image path (relative to the firmware search path) */
/* TODO: maybe move to a new header file? */
#define WL18XX_FW_NAME "ti-connectivity/wl18xx-fw.bin"
597
598static int wl18xx_identify_chip(struct wl1271 *wl)
599{
600 int ret = 0;
601
602 switch (wl->chip.id) {
603 case CHIP_ID_185x_PG20:
604 wl1271_debug(DEBUG_BOOT, "chip id 0x%x (185x PG20)",
605 wl->chip.id);
606 wl->sr_fw_name = WL18XX_FW_NAME;
607 /* wl18xx uses the same firmware for PLT */
608 wl->plt_fw_name = WL18XX_FW_NAME;
609 wl->quirks |= WLCORE_QUIRK_NO_ELP |
610 WLCORE_QUIRK_RX_BLOCKSIZE_ALIGN |
611 WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN |
612 WLCORE_QUIRK_TX_PAD_LAST_FRAME;
613 break;
614 case CHIP_ID_185x_PG10:
615 wl1271_warning("chip id 0x%x (185x PG10) is deprecated",
616 wl->chip.id);
617 ret = -ENODEV;
618 goto out;
619
620 default:
621 wl1271_warning("unsupported chip id: 0x%x", wl->chip.id);
622 ret = -ENODEV;
623 goto out;
624 }
625
626out:
627 return ret;
628}
629
630static int wl18xx_set_clk(struct wl1271 *wl)
631{
632 u16 clk_freq;
633 int ret;
634
635 ret = wlcore_set_partition(wl, &wl->ptable[PART_TOP_PRCM_ELP_SOC]);
636 if (ret < 0)
637 goto out;
638
639 /* TODO: PG2: apparently we need to read the clk type */
640
641 ret = wl18xx_top_reg_read(wl, PRIMARY_CLK_DETECT, &clk_freq);
642 if (ret < 0)
643 goto out;
644
645 wl1271_debug(DEBUG_BOOT, "clock freq %d (%d, %d, %d, %d, %s)", clk_freq,
646 wl18xx_clk_table[clk_freq].n, wl18xx_clk_table[clk_freq].m,
647 wl18xx_clk_table[clk_freq].p, wl18xx_clk_table[clk_freq].q,
648 wl18xx_clk_table[clk_freq].swallow ? "swallow" : "spit");
649
650 ret = wl18xx_top_reg_write(wl, PLLSH_WCS_PLL_N,
651 wl18xx_clk_table[clk_freq].n);
652 if (ret < 0)
653 goto out;
654
655 ret = wl18xx_top_reg_write(wl, PLLSH_WCS_PLL_M,
656 wl18xx_clk_table[clk_freq].m);
657 if (ret < 0)
658 goto out;
659
660 if (wl18xx_clk_table[clk_freq].swallow) {
661 /* first the 16 lower bits */
662 ret = wl18xx_top_reg_write(wl, PLLSH_WCS_PLL_Q_FACTOR_CFG_1,
663 wl18xx_clk_table[clk_freq].q &
664 PLLSH_WCS_PLL_Q_FACTOR_CFG_1_MASK);
665 if (ret < 0)
666 goto out;
667
668 /* then the 16 higher bits, masked out */
669 ret = wl18xx_top_reg_write(wl, PLLSH_WCS_PLL_Q_FACTOR_CFG_2,
670 (wl18xx_clk_table[clk_freq].q >> 16) &
671 PLLSH_WCS_PLL_Q_FACTOR_CFG_2_MASK);
672 if (ret < 0)
673 goto out;
674
675 /* first the 16 lower bits */
676 ret = wl18xx_top_reg_write(wl, PLLSH_WCS_PLL_P_FACTOR_CFG_1,
677 wl18xx_clk_table[clk_freq].p &
678 PLLSH_WCS_PLL_P_FACTOR_CFG_1_MASK);
679 if (ret < 0)
680 goto out;
681
682 /* then the 16 higher bits, masked out */
683 ret = wl18xx_top_reg_write(wl, PLLSH_WCS_PLL_P_FACTOR_CFG_2,
684 (wl18xx_clk_table[clk_freq].p >> 16) &
685 PLLSH_WCS_PLL_P_FACTOR_CFG_2_MASK);
686 } else {
687 ret = wl18xx_top_reg_write(wl, PLLSH_WCS_PLL_SWALLOW_EN,
688 PLLSH_WCS_PLL_SWALLOW_EN_VAL2);
689 }
690
691out:
692 return ret;
693}
694
695static int wl18xx_boot_soft_reset(struct wl1271 *wl)
696{
697 int ret;
698
699 /* disable Rx/Tx */
700 ret = wlcore_write32(wl, WL18XX_ENABLE, 0x0);
701 if (ret < 0)
702 goto out;
703
704 /* disable auto calibration on start*/
705 ret = wlcore_write32(wl, WL18XX_SPARE_A2, 0xffff);
706
707out:
708 return ret;
709}
710
711static int wl18xx_pre_boot(struct wl1271 *wl)
712{
713 int ret;
714
715 ret = wl18xx_set_clk(wl);
716 if (ret < 0)
717 goto out;
718
719 /* Continue the ELP wake up sequence */
720 ret = wlcore_write32(wl, WL18XX_WELP_ARM_COMMAND, WELP_ARM_COMMAND_VAL);
721 if (ret < 0)
722 goto out;
723
724 udelay(500);
725
726 ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
727 if (ret < 0)
728 goto out;
729
730 /* Disable interrupts */
731 ret = wlcore_write_reg(wl, REG_INTERRUPT_MASK, WL1271_ACX_INTR_ALL);
732 if (ret < 0)
733 goto out;
734
735 ret = wl18xx_boot_soft_reset(wl);
736
737out:
738 return ret;
739}
740
741static int wl18xx_pre_upload(struct wl1271 *wl)
742{
743 u32 tmp;
744 int ret;
745
746 ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
747 if (ret < 0)
748 goto out;
749
750 /* TODO: check if this is all needed */
751 ret = wlcore_write32(wl, WL18XX_EEPROMLESS_IND, WL18XX_EEPROMLESS_IND);
752 if (ret < 0)
753 goto out;
754
755 ret = wlcore_read_reg(wl, REG_CHIP_ID_B, &tmp);
756 if (ret < 0)
757 goto out;
758
759 wl1271_debug(DEBUG_BOOT, "chip id 0x%x", tmp);
760
761 ret = wlcore_read32(wl, WL18XX_SCR_PAD2, &tmp);
762
763out:
764 return ret;
765}
766
767static int wl18xx_set_mac_and_phy(struct wl1271 *wl)
768{
769 struct wl18xx_priv *priv = wl->priv;
770 int ret;
771
772 ret = wlcore_set_partition(wl, &wl->ptable[PART_PHY_INIT]);
773 if (ret < 0)
774 goto out;
775
776 ret = wlcore_write(wl, WL18XX_PHY_INIT_MEM_ADDR, (u8 *)&priv->conf.phy,
777 sizeof(struct wl18xx_mac_and_phy_params), false);
778
779out:
780 return ret;
781}
782
783static int wl18xx_enable_interrupts(struct wl1271 *wl)
784{
785 u32 event_mask, intr_mask;
786 int ret;
787
788 event_mask = WL18XX_ACX_EVENTS_VECTOR;
789 intr_mask = WL18XX_INTR_MASK;
790
791 ret = wlcore_write_reg(wl, REG_INTERRUPT_MASK, event_mask);
792 if (ret < 0)
793 goto out;
794
795 wlcore_enable_interrupts(wl);
796
797 ret = wlcore_write_reg(wl, REG_INTERRUPT_MASK,
798 WL1271_ACX_INTR_ALL & ~intr_mask);
799
800out:
801 return ret;
802}
803
/*
 * Full boot flow: pre-boot (clocks/wake-up), pre-upload setup, firmware
 * upload, MAC/PHY parameter upload, firmware start, and finally enabling
 * interrupts. Stops at the first failing step.
 */
static int wl18xx_boot(struct wl1271 *wl)
{
	int ret;

	ret = wl18xx_pre_boot(wl);
	if (ret < 0)
		return ret;

	ret = wl18xx_pre_upload(wl);
	if (ret < 0)
		return ret;

	ret = wlcore_boot_upload_firmware(wl);
	if (ret < 0)
		return ret;

	ret = wl18xx_set_mac_and_phy(wl);
	if (ret < 0)
		return ret;

	ret = wlcore_boot_run_firmware(wl);
	if (ret < 0)
		return ret;

	return wl18xx_enable_interrupts(wl);
}
833
834static int wl18xx_trigger_cmd(struct wl1271 *wl, int cmd_box_addr,
835 void *buf, size_t len)
836{
837 struct wl18xx_priv *priv = wl->priv;
838
839 memcpy(priv->cmd_buf, buf, len);
840 memset(priv->cmd_buf + len, 0, WL18XX_CMD_MAX_SIZE - len);
841
842 return wlcore_write(wl, cmd_box_addr, priv->cmd_buf,
843 WL18XX_CMD_MAX_SIZE, false);
844}
845
/* Acknowledge a firmware event by pulsing the event-ack trigger bit. */
static int wl18xx_ack_event(struct wl1271 *wl)
{
	return wlcore_write_reg(wl, REG_INTERRUPT_TRIG,
				WL18XX_INTR_TRIG_EVENT_ACK);
}
851
852static u32 wl18xx_calc_tx_blocks(struct wl1271 *wl, u32 len, u32 spare_blks)
853{
854 u32 blk_size = WL18XX_TX_HW_BLOCK_SIZE;
855 return (len + blk_size - 1) / blk_size + spare_blks;
856}
857
/*
 * Record the total block count in the Tx descriptor. On wl18xx the spare
 * blocks are already folded into @blks, so @spare_blks is unused here.
 */
static void
wl18xx_set_tx_desc_blocks(struct wl1271 *wl, struct wl1271_tx_hw_descr *desc,
			  u32 blks, u32 spare_blks)
{
	desc->wl18xx_mem.total_mem_blocks = blks;
}
864
865static void
866wl18xx_set_tx_desc_data_len(struct wl1271 *wl, struct wl1271_tx_hw_descr *desc,
867 struct sk_buff *skb)
868{
869 desc->length = cpu_to_le16(skb->len);
870
871 /* if only the last frame is to be padded, we unset this bit on Tx */
872 if (wl->quirks & WLCORE_QUIRK_TX_PAD_LAST_FRAME)
873 desc->wl18xx_mem.ctrl = WL18XX_TX_CTRL_NOT_PADDED;
874 else
875 desc->wl18xx_mem.ctrl = 0;
876
877 wl1271_debug(DEBUG_TX, "tx_fill_hdr: hlid: %d "
878 "len: %d life: %d mem: %d", desc->hlid,
879 le16_to_cpu(desc->length),
880 le16_to_cpu(desc->life_time),
881 desc->wl18xx_mem.total_mem_blocks);
882}
883
884static enum wl_rx_buf_align
885wl18xx_get_rx_buf_align(struct wl1271 *wl, u32 rx_desc)
886{
887 if (rx_desc & RX_BUF_PADDED_PAYLOAD)
888 return WLCORE_RX_BUF_PADDED;
889
890 return WLCORE_RX_BUF_ALIGNED;
891}
892
893static u32 wl18xx_get_rx_packet_len(struct wl1271 *wl, void *rx_data,
894 u32 data_len)
895{
896 struct wl1271_rx_descriptor *desc = rx_data;
897
898 /* invalid packet */
899 if (data_len < sizeof(*desc))
900 return 0;
901
902 return data_len - sizeof(*desc);
903}
904
/* wlcore ops adapter: forward immediate Tx completion to the wl18xx Tx path. */
static void wl18xx_tx_immediate_completion(struct wl1271 *wl)
{
	wl18xx_tx_immediate_complete(wl);
}
909
910static int wl18xx_set_host_cfg_bitmap(struct wl1271 *wl, u32 extra_mem_blk)
911{
912 int ret;
913 u32 sdio_align_size = 0;
914 u32 host_cfg_bitmap = HOST_IF_CFG_RX_FIFO_ENABLE |
915 HOST_IF_CFG_ADD_RX_ALIGNMENT;
916
917 /* Enable Tx SDIO padding */
918 if (wl->quirks & WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN) {
919 host_cfg_bitmap |= HOST_IF_CFG_TX_PAD_TO_SDIO_BLK;
920 sdio_align_size = WL12XX_BUS_BLOCK_SIZE;
921 }
922
923 /* Enable Rx SDIO padding */
924 if (wl->quirks & WLCORE_QUIRK_RX_BLOCKSIZE_ALIGN) {
925 host_cfg_bitmap |= HOST_IF_CFG_RX_PAD_TO_SDIO_BLK;
926 sdio_align_size = WL12XX_BUS_BLOCK_SIZE;
927 }
928
929 ret = wl18xx_acx_host_if_cfg_bitmap(wl, host_cfg_bitmap,
930 sdio_align_size, extra_mem_blk,
931 WL18XX_HOST_IF_LEN_SIZE_FIELD);
932 if (ret < 0)
933 return ret;
934
935 return 0;
936}
937
938static int wl18xx_hw_init(struct wl1271 *wl)
939{
940 int ret;
941 struct wl18xx_priv *priv = wl->priv;
942
943 /* (re)init private structures. Relevant on recovery as well. */
944 priv->last_fw_rls_idx = 0;
945 priv->extra_spare_vif_count = 0;
946
947 /* set the default amount of spare blocks in the bitmap */
948 ret = wl18xx_set_host_cfg_bitmap(wl, WL18XX_TX_HW_BLOCK_SPARE);
949 if (ret < 0)
950 return ret;
951
952 if (checksum_param) {
953 ret = wl18xx_acx_set_checksum_state(wl);
954 if (ret != 0)
955 return ret;
956 }
957
958 return ret;
959}
960
961static void wl18xx_set_tx_desc_csum(struct wl1271 *wl,
962 struct wl1271_tx_hw_descr *desc,
963 struct sk_buff *skb)
964{
965 u32 ip_hdr_offset;
966 struct iphdr *ip_hdr;
967
968 if (!checksum_param) {
969 desc->wl18xx_checksum_data = 0;
970 return;
971 }
972
973 if (skb->ip_summed != CHECKSUM_PARTIAL) {
974 desc->wl18xx_checksum_data = 0;
975 return;
976 }
977
978 ip_hdr_offset = skb_network_header(skb) - skb_mac_header(skb);
979 if (WARN_ON(ip_hdr_offset >= (1<<7))) {
980 desc->wl18xx_checksum_data = 0;
981 return;
982 }
983
984 desc->wl18xx_checksum_data = ip_hdr_offset << 1;
985
986 /* FW is interested only in the LSB of the protocol TCP=0 UDP=1 */
987 ip_hdr = (void *)skb_network_header(skb);
988 desc->wl18xx_checksum_data |= (ip_hdr->protocol & 0x01);
989}
990
/*
 * Propagate hardware Rx checksum validation to the skb: if the firmware
 * flagged the checksum as verified, tell the stack not to re-check it.
 */
static void wl18xx_set_rx_csum(struct wl1271 *wl,
			       struct wl1271_rx_descriptor *desc,
			       struct sk_buff *skb)
{
	if (desc->status & WL18XX_RX_CHECKSUM_MASK)
		skb->ip_summed = CHECKSUM_UNNECESSARY;
}
998
999/*
1000 * TODO: instead of having these two functions to get the rate mask,
1001 * we should modify the wlvif->rate_set instead
1002 */
1003static u32 wl18xx_sta_get_ap_rate_mask(struct wl1271 *wl,
1004 struct wl12xx_vif *wlvif)
1005{
1006 u32 hw_rate_set = wlvif->rate_set;
1007
1008 if (wlvif->channel_type == NL80211_CHAN_HT40MINUS ||
1009 wlvif->channel_type == NL80211_CHAN_HT40PLUS) {
1010 wl1271_debug(DEBUG_ACX, "using wide channel rate mask");
1011 hw_rate_set |= CONF_TX_RATE_USE_WIDE_CHAN;
1012
1013 /* we don't support MIMO in wide-channel mode */
1014 hw_rate_set &= ~CONF_TX_MIMO_RATES;
1015 }
1016
1017 return hw_rate_set;
1018}
1019
1020static u32 wl18xx_ap_get_mimo_wide_rate_mask(struct wl1271 *wl,
1021 struct wl12xx_vif *wlvif)
1022{
1023 if ((wlvif->channel_type == NL80211_CHAN_HT40MINUS ||
1024 wlvif->channel_type == NL80211_CHAN_HT40PLUS) &&
1025 !strcmp(ht_mode_param, "wide")) {
1026 wl1271_debug(DEBUG_ACX, "using wide channel rate mask");
1027 return CONF_TX_RATE_USE_WIDE_CHAN;
1028 } else if (!strcmp(ht_mode_param, "mimo")) {
1029 wl1271_debug(DEBUG_ACX, "using MIMO rate mask");
1030
1031 return CONF_TX_MIMO_RATES;
1032 } else {
1033 return 0;
1034 }
1035}
1036
1037static int wl18xx_get_pg_ver(struct wl1271 *wl, s8 *ver)
1038{
1039 u32 fuse;
1040 int ret;
1041
1042 ret = wlcore_set_partition(wl, &wl->ptable[PART_TOP_PRCM_ELP_SOC]);
1043 if (ret < 0)
1044 goto out;
1045
1046 ret = wlcore_read32(wl, WL18XX_REG_FUSE_DATA_1_3, &fuse);
1047 if (ret < 0)
1048 goto out;
1049
1050 if (ver)
1051 *ver = (fuse & WL18XX_PG_VER_MASK) >> WL18XX_PG_VER_OFFSET;
1052
1053 ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
1054
1055out:
1056 return ret;
1057}
1058
#define WL18XX_CONF_FILE_NAME "ti-connectivity/wl18xx-conf.bin"
/*
 * Load the core and private configuration from the configuration binary.
 * The blob must match the expected size, magic and version; if the file
 * is missing we fall back to the built-in defaults (and still return 0),
 * but a present-but-invalid file is a hard error.
 */
static int wl18xx_conf_init(struct wl1271 *wl, struct device *dev)
{
	struct wl18xx_priv *priv = wl->priv;
	struct wlcore_conf_file *conf_file;
	const struct firmware *fw;
	int ret;

	ret = request_firmware(&fw, WL18XX_CONF_FILE_NAME, dev);
	if (ret < 0) {
		wl1271_error("could not get configuration binary %s: %d",
			     WL18XX_CONF_FILE_NAME, ret);
		/* fw was never loaded here, so nothing to release */
		goto out_fallback;
	}

	if (fw->size != WL18XX_CONF_SIZE) {
		wl1271_error("configuration binary file size is wrong, expected %zu got %zu",
			     WL18XX_CONF_SIZE, fw->size);
		ret = -EINVAL;
		goto out;
	}

	conf_file = (struct wlcore_conf_file *) fw->data;

	/* header fields are little-endian on-disk */
	if (conf_file->header.magic != cpu_to_le32(WL18XX_CONF_MAGIC)) {
		wl1271_error("configuration binary file magic number mismatch, "
			     "expected 0x%0x got 0x%0x", WL18XX_CONF_MAGIC,
			     conf_file->header.magic);
		ret = -EINVAL;
		goto out;
	}

	if (conf_file->header.version != cpu_to_le32(WL18XX_CONF_VERSION)) {
		wl1271_error("configuration binary file version not supported, "
			     "expected 0x%08x got 0x%08x",
			     WL18XX_CONF_VERSION, conf_file->header.version);
		ret = -EINVAL;
		goto out;
	}

	/* copy the validated blob into the live core and private confs */
	memcpy(&wl->conf, &conf_file->core, sizeof(wl18xx_conf));
	memcpy(&priv->conf, &conf_file->priv, sizeof(priv->conf));

	goto out;

out_fallback:
	wl1271_warning("falling back to default config");

	/* apply driver default configuration */
	memcpy(&wl->conf, &wl18xx_conf, sizeof(wl18xx_conf));
	/* apply default private configuration */
	memcpy(&priv->conf, &wl18xx_default_priv_conf, sizeof(priv->conf));

	/* For now we just fallback */
	return 0;

out:
	release_firmware(fw);
	return ret;
}
1119
1120static int wl18xx_plt_init(struct wl1271 *wl)
1121{
1122 int ret;
1123
1124 ret = wlcore_write32(wl, WL18XX_SCR_PAD8, WL18XX_SCR_PAD8_PLT);
1125 if (ret < 0)
1126 return ret;
1127
1128 return wl->ops->boot(wl);
1129}
1130
1131static int wl18xx_get_mac(struct wl1271 *wl)
1132{
1133 u32 mac1, mac2;
1134 int ret;
1135
1136 ret = wlcore_set_partition(wl, &wl->ptable[PART_TOP_PRCM_ELP_SOC]);
1137 if (ret < 0)
1138 goto out;
1139
1140 ret = wlcore_read32(wl, WL18XX_REG_FUSE_BD_ADDR_1, &mac1);
1141 if (ret < 0)
1142 goto out;
1143
1144 ret = wlcore_read32(wl, WL18XX_REG_FUSE_BD_ADDR_2, &mac2);
1145 if (ret < 0)
1146 goto out;
1147
1148 /* these are the two parts of the BD_ADDR */
1149 wl->fuse_oui_addr = ((mac2 & 0xffff) << 8) +
1150 ((mac1 & 0xff000000) >> 24);
1151 wl->fuse_nic_addr = (mac1 & 0xffffff);
1152
1153 ret = wlcore_set_partition(wl, &wl->ptable[PART_DOWN]);
1154
1155out:
1156 return ret;
1157}
1158
/*
 * Consume the chip-specific tail of the firmware static data block:
 * currently only logs the PHY firmware version string.
 */
static int wl18xx_handle_static_data(struct wl1271 *wl,
				     struct wl1271_static_data *static_data)
{
	struct wl18xx_static_data_priv *static_data_priv =
		(struct wl18xx_static_data_priv *) static_data->priv;

	wl1271_info("PHY firmware version: %s", static_data_priv->phy_version);

	return 0;
}
1169
1170static int wl18xx_get_spare_blocks(struct wl1271 *wl, bool is_gem)
1171{
1172 struct wl18xx_priv *priv = wl->priv;
1173
1174 /* If we have VIFs requiring extra spare, indulge them */
1175 if (priv->extra_spare_vif_count)
1176 return WL18XX_TX_HW_EXTRA_BLOCK_SPARE;
1177
1178 return WL18XX_TX_HW_BLOCK_SPARE;
1179}
1180
/*
 * Set or delete a key, adjusting the firmware spare-block budget when the
 * first GEM/TKIP interface appears or the last one goes away. The queues
 * are stopped and flushed around the change so the host's and firmware's
 * block accounting stay in sync.
 */
static int wl18xx_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
			  struct ieee80211_vif *vif,
			  struct ieee80211_sta *sta,
			  struct ieee80211_key_conf *key_conf)
{
	struct wl18xx_priv *priv = wl->priv;
	bool change_spare = false;
	int ret;

	/*
	 * when adding the first or removing the last GEM/TKIP interface,
	 * we have to adjust the number of spare blocks.
	 */
	change_spare = (key_conf->cipher == WL1271_CIPHER_SUITE_GEM ||
		key_conf->cipher == WLAN_CIPHER_SUITE_TKIP) &&
		((priv->extra_spare_vif_count == 0 && cmd == SET_KEY) ||
		 (priv->extra_spare_vif_count == 1 && cmd == DISABLE_KEY));

	/* no need to change spare - just regular set_key */
	if (!change_spare)
		return wlcore_set_key(wl, cmd, vif, sta, key_conf);

	/*
	 * stop the queues and flush to ensure the next packets are
	 * in sync with FW spare block accounting
	 */
	wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
	wl1271_tx_flush(wl);

	ret = wlcore_set_key(wl, cmd, vif, sta, key_conf);
	if (ret < 0)
		goto out;

	/* key is now set, change the spare blocks */
	if (cmd == SET_KEY) {
		/* first GEM/TKIP vif: grow the spare budget */
		ret = wl18xx_set_host_cfg_bitmap(wl,
					WL18XX_TX_HW_EXTRA_BLOCK_SPARE);
		if (ret < 0)
			goto out;

		priv->extra_spare_vif_count++;
	} else {
		/* last GEM/TKIP vif gone: shrink back to the default */
		ret = wl18xx_set_host_cfg_bitmap(wl,
					WL18XX_TX_HW_BLOCK_SPARE);
		if (ret < 0)
			goto out;

		priv->extra_spare_vif_count--;
	}

out:
	/* always restart the queues, even if the key operation failed */
	wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
	return ret;
}
1235
1236static u32 wl18xx_pre_pkt_send(struct wl1271 *wl,
1237 u32 buf_offset, u32 last_len)
1238{
1239 if (wl->quirks & WLCORE_QUIRK_TX_PAD_LAST_FRAME) {
1240 struct wl1271_tx_hw_descr *last_desc;
1241
1242 /* get the last TX HW descriptor written to the aggr buf */
1243 last_desc = (struct wl1271_tx_hw_descr *)(wl->aggr_buf +
1244 buf_offset - last_len);
1245
1246 /* the last frame is padded up to an SDIO block */
1247 last_desc->wl18xx_mem.ctrl &= ~WL18XX_TX_CTRL_NOT_PADDED;
1248 return ALIGN(buf_offset, WL12XX_BUS_BLOCK_SIZE);
1249 }
1250
1251 /* no modifications */
1252 return buf_offset;
1253}
1254
/*
 * wlcore ops vtable for the wl18xx family. Non-const because probe may
 * NULL out set_rx_csum/init_vif when checksum offload is disabled.
 */
static struct wlcore_ops wl18xx_ops = {
	.identify_chip	= wl18xx_identify_chip,
	.boot		= wl18xx_boot,
	.plt_init	= wl18xx_plt_init,
	.trigger_cmd	= wl18xx_trigger_cmd,
	.ack_event	= wl18xx_ack_event,
	.calc_tx_blocks = wl18xx_calc_tx_blocks,
	.set_tx_desc_blocks = wl18xx_set_tx_desc_blocks,
	.set_tx_desc_data_len = wl18xx_set_tx_desc_data_len,
	.get_rx_buf_align = wl18xx_get_rx_buf_align,
	.get_rx_packet_len = wl18xx_get_rx_packet_len,
	.tx_immediate_compl = wl18xx_tx_immediate_completion,
	/* delayed Tx completion is not used on this chip */
	.tx_delayed_compl = NULL,
	.hw_init	= wl18xx_hw_init,
	.set_tx_desc_csum = wl18xx_set_tx_desc_csum,
	.get_pg_ver	= wl18xx_get_pg_ver,
	.set_rx_csum = wl18xx_set_rx_csum,
	.sta_get_ap_rate_mask = wl18xx_sta_get_ap_rate_mask,
	.ap_get_mimo_wide_rate_mask = wl18xx_ap_get_mimo_wide_rate_mask,
	.get_mac	= wl18xx_get_mac,
	.debugfs_init	= wl18xx_debugfs_add_files,
	.handle_static_data	= wl18xx_handle_static_data,
	.get_spare_blocks	= wl18xx_get_spare_blocks,
	.set_key	= wl18xx_set_key,
	.pre_pkt_send	= wl18xx_pre_pkt_send,
};
1281
/* HT cap appropriate for wide channels in 2Ghz */
static struct ieee80211_sta_ht_cap wl18xx_siso40_ht_cap_2ghz = {
	.cap = IEEE80211_HT_CAP_SGI_20 | IEEE80211_HT_CAP_SGI_40 |
	       IEEE80211_HT_CAP_SUP_WIDTH_20_40 | IEEE80211_HT_CAP_DSSSCCK40,
	.ht_supported = true,
	.ampdu_factor = IEEE80211_HT_MAX_AMPDU_16K,
	.ampdu_density = IEEE80211_HT_MPDU_DENSITY_16,
	.mcs = {
		/* single spatial stream: MCS 0-7 only */
		.rx_mask = { 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
		.rx_highest = cpu_to_le16(150),
		.tx_params = IEEE80211_HT_MCS_TX_DEFINED,
	},
};
1295
/* HT cap appropriate for wide channels in 5Ghz */
static struct ieee80211_sta_ht_cap wl18xx_siso40_ht_cap_5ghz = {
	/* like the 2.4GHz wide cap, but without DSSS/CCK in 40MHz */
	.cap = IEEE80211_HT_CAP_SGI_20 | IEEE80211_HT_CAP_SGI_40 |
	       IEEE80211_HT_CAP_SUP_WIDTH_20_40,
	.ht_supported = true,
	.ampdu_factor = IEEE80211_HT_MAX_AMPDU_16K,
	.ampdu_density = IEEE80211_HT_MPDU_DENSITY_16,
	.mcs = {
		/* single spatial stream: MCS 0-7 only */
		.rx_mask = { 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
		.rx_highest = cpu_to_le16(150),
		.tx_params = IEEE80211_HT_MCS_TX_DEFINED,
	},
};
1309
/* HT cap appropriate for SISO 20 */
static struct ieee80211_sta_ht_cap wl18xx_siso20_ht_cap = {
	.cap = IEEE80211_HT_CAP_SGI_20,
	.ht_supported = true,
	.ampdu_factor = IEEE80211_HT_MAX_AMPDU_16K,
	.ampdu_density = IEEE80211_HT_MPDU_DENSITY_16,
	.mcs = {
		/* single spatial stream: MCS 0-7 only */
		.rx_mask = { 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
		.rx_highest = cpu_to_le16(72),
		.tx_params = IEEE80211_HT_MCS_TX_DEFINED,
	},
};
1322
/* HT cap appropriate for MIMO rates in 20mhz channel */
static struct ieee80211_sta_ht_cap wl18xx_mimo_ht_cap_2ghz = {
	.cap = IEEE80211_HT_CAP_SGI_20,
	.ht_supported = true,
	.ampdu_factor = IEEE80211_HT_MAX_AMPDU_16K,
	.ampdu_density = IEEE80211_HT_MPDU_DENSITY_16,
	.mcs = {
		/* two spatial streams: MCS 0-15 */
		.rx_mask = { 0xff, 0xff, 0, 0, 0, 0, 0, 0, 0, 0, },
		.rx_highest = cpu_to_le16(144),
		.tx_params = IEEE80211_HT_MCS_TX_DEFINED,
	},
};
1335
/*
 * Platform probe: allocate the wlcore hw, wire up the wl18xx ops/tables,
 * load the configuration (binary or defaults), apply module-parameter
 * overrides, select the HT capabilities and hand off to wlcore_probe().
 */
static int __devinit wl18xx_probe(struct platform_device *pdev)
{
	struct wl1271 *wl;
	struct ieee80211_hw *hw;
	struct wl18xx_priv *priv;
	int ret;

	hw = wlcore_alloc_hw(sizeof(*priv));
	if (IS_ERR(hw)) {
		wl1271_error("can't allocate hw");
		ret = PTR_ERR(hw);
		goto out;
	}

	/* hook the chip-specific ops and translation tables into wlcore */
	wl = hw->priv;
	priv = wl->priv;
	wl->ops = &wl18xx_ops;
	wl->ptable = wl18xx_ptable;
	wl->rtable = wl18xx_rtable;
	wl->num_tx_desc = 32;
	wl->num_rx_desc = 32;
	wl->band_rate_to_idx = wl18xx_band_rate_to_idx;
	wl->hw_tx_rate_tbl_size = WL18XX_CONF_HW_RXTX_RATE_MAX;
	wl->hw_min_ht_rate = WL18XX_CONF_HW_RXTX_RATE_MCS0;
	wl->fw_status_priv_len = sizeof(struct wl18xx_fw_status_priv);
	wl->stats.fw_stats_len = sizeof(struct wl18xx_acx_statistics);
	wl->static_data_priv_len = sizeof(struct wl18xx_static_data_priv);

	/* module parameter override for the Rx descriptor count */
	if (num_rx_desc_param != -1)
		wl->num_rx_desc = num_rx_desc_param;

	ret = wl18xx_conf_init(wl, &pdev->dev);
	if (ret < 0)
		goto out_free;

	/* translate the board_type module parameter into the PHY conf */
	if (!strcmp(board_type_param, "fpga")) {
		priv->conf.phy.board_type = BOARD_TYPE_FPGA_18XX;
	} else if (!strcmp(board_type_param, "hdk")) {
		priv->conf.phy.board_type = BOARD_TYPE_HDK_18XX;
		/* HACK! Just for now we hardcode HDK to 0x06 */
		priv->conf.phy.low_band_component_type = 0x06;
	} else if (!strcmp(board_type_param, "dvp")) {
		priv->conf.phy.board_type = BOARD_TYPE_DVP_18XX;
	} else if (!strcmp(board_type_param, "evb")) {
		priv->conf.phy.board_type = BOARD_TYPE_EVB_18XX;
	} else if (!strcmp(board_type_param, "com8")) {
		priv->conf.phy.board_type = BOARD_TYPE_COM8_18XX;
		/* HACK! Just for now we hardcode COM8 to 0x06 */
		priv->conf.phy.low_band_component_type = 0x06;
	} else {
		wl1271_error("invalid board type '%s'", board_type_param);
		ret = -EINVAL;
		goto out_free;
	}

	/* If the module param is set, update it in conf */
	if (low_band_component_param != -1)
		priv->conf.phy.low_band_component = low_band_component_param;
	if (low_band_component_type_param != -1)
		priv->conf.phy.low_band_component_type =
			low_band_component_type_param;
	if (high_band_component_param != -1)
		priv->conf.phy.high_band_component = high_band_component_param;
	if (high_band_component_type_param != -1)
		priv->conf.phy.high_band_component_type =
			high_band_component_type_param;
	if (pwr_limit_reference_11_abg_param != -1)
		priv->conf.phy.pwr_limit_reference_11_abg =
			pwr_limit_reference_11_abg_param;
	if (n_antennas_2_param != -1)
		priv->conf.phy.number_of_assembled_ant2_4 = n_antennas_2_param;
	if (n_antennas_5_param != -1)
		priv->conf.phy.number_of_assembled_ant5 = n_antennas_5_param;
	if (dc2dc_param != -1)
		priv->conf.phy.external_pa_dc2dc = dc2dc_param;

	/* pick the HT capabilities based on ht_mode and antenna count */
	if (!strcmp(ht_mode_param, "default")) {
		/*
		 * Only support mimo with multiple antennas. Fall back to
		 * siso20.
		 */
		if (priv->conf.phy.number_of_assembled_ant2_4 >= 2)
			wlcore_set_ht_cap(wl, IEEE80211_BAND_2GHZ,
					  &wl18xx_mimo_ht_cap_2ghz);
		else
			wlcore_set_ht_cap(wl, IEEE80211_BAND_2GHZ,
					  &wl18xx_siso20_ht_cap);

		/* 5Ghz is always wide */
		wlcore_set_ht_cap(wl, IEEE80211_BAND_5GHZ,
				  &wl18xx_siso40_ht_cap_5ghz);
	} else if (!strcmp(ht_mode_param, "wide")) {
		wlcore_set_ht_cap(wl, IEEE80211_BAND_2GHZ,
				  &wl18xx_siso40_ht_cap_2ghz);
		wlcore_set_ht_cap(wl, IEEE80211_BAND_5GHZ,
				  &wl18xx_siso40_ht_cap_5ghz);
	} else if (!strcmp(ht_mode_param, "siso20")) {
		wlcore_set_ht_cap(wl, IEEE80211_BAND_2GHZ,
				  &wl18xx_siso20_ht_cap);
		wlcore_set_ht_cap(wl, IEEE80211_BAND_5GHZ,
				  &wl18xx_siso20_ht_cap);
	} else {
		wl1271_error("invalid ht_mode '%s'", ht_mode_param);
		ret = -EINVAL;
		goto out_free;
	}

	/* checksum offload disabled: drop the related ops entirely */
	if (!checksum_param) {
		wl18xx_ops.set_rx_csum = NULL;
		wl18xx_ops.init_vif = NULL;
	}

	wl->enable_11a = enable_11a_param;

	return wlcore_probe(wl, pdev);

out_free:
	wlcore_free_hw(wl);
out:
	return ret;
}
1457
/*
 * Platform-bus match table for this driver; a platform device named
 * "wl18xx" (registered by board/bus glue code) binds to wl18xx_probe.
 */
static const struct platform_device_id wl18xx_id_table[] __devinitconst = {
	{ "wl18xx", 0 },
	{  } /* Terminating Entry */
};
MODULE_DEVICE_TABLE(platform, wl18xx_id_table);
1463
/*
 * Platform driver glue: probe is chip-specific (wl18xx_probe, above),
 * while remove is the generic wlcore teardown shared with other chips.
 */
static struct platform_driver wl18xx_driver = {
	.probe = wl18xx_probe,
	.remove = __devexit_p(wlcore_remove),
	.id_table = wl18xx_id_table,
	.driver = {
		.name = "wl18xx_driver",
		.owner = THIS_MODULE,
	}
};
1473
/* Module entry point: register the wl18xx platform driver. */
static int __init wl18xx_init(void)
{
	return platform_driver_register(&wl18xx_driver);
}
module_init(wl18xx_init);
1479
/* Module exit point: unregister the driver registered in wl18xx_init(). */
static void __exit wl18xx_exit(void)
{
	platform_driver_unregister(&wl18xx_driver);
}
module_exit(wl18xx_exit);
1485
1486module_param_named(ht_mode, ht_mode_param, charp, S_IRUSR);
1487MODULE_PARM_DESC(ht_mode, "Force HT mode: wide or siso20");
1488
1489module_param_named(board_type, board_type_param, charp, S_IRUSR);
1490MODULE_PARM_DESC(board_type, "Board type: fpga, hdk (default), evb, com8 or "
1491 "dvp");
1492
1493module_param_named(checksum, checksum_param, bool, S_IRUSR);
1494MODULE_PARM_DESC(checksum, "Enable TCP checksum: boolean (defaults to false)");
1495
1496module_param_named(enable_11a, enable_11a_param, bool, S_IRUSR);
1497MODULE_PARM_DESC(enable_11a, "Enable 11a (5GHz): boolean (defaults to true)");
1498
1499module_param_named(dc2dc, dc2dc_param, int, S_IRUSR);
1500MODULE_PARM_DESC(dc2dc, "External DC2DC: u8 (defaults to 0)");
1501
1502module_param_named(n_antennas_2, n_antennas_2_param, int, S_IRUSR);
1503MODULE_PARM_DESC(n_antennas_2,
1504 "Number of installed 2.4GHz antennas: 1 (default) or 2");
1505
1506module_param_named(n_antennas_5, n_antennas_5_param, int, S_IRUSR);
1507MODULE_PARM_DESC(n_antennas_5,
1508 "Number of installed 5GHz antennas: 1 (default) or 2");
1509
1510module_param_named(low_band_component, low_band_component_param, int,
1511 S_IRUSR);
1512MODULE_PARM_DESC(low_band_component, "Low band component: u8 "
1513 "(default is 0x01)");
1514
1515module_param_named(low_band_component_type, low_band_component_type_param,
1516 int, S_IRUSR);
1517MODULE_PARM_DESC(low_band_component_type, "Low band component type: u8 "
1518 "(default is 0x05 or 0x06 depending on the board_type)");
1519
1520module_param_named(high_band_component, high_band_component_param, int,
1521 S_IRUSR);
1522MODULE_PARM_DESC(high_band_component, "High band component: u8, "
1523 "(default is 0x01)");
1524
1525module_param_named(high_band_component_type, high_band_component_type_param,
1526 int, S_IRUSR);
1527MODULE_PARM_DESC(high_band_component_type, "High band component type: u8 "
1528 "(default is 0x09)");
1529
1530module_param_named(pwr_limit_reference_11_abg,
1531 pwr_limit_reference_11_abg_param, int, S_IRUSR);
1532MODULE_PARM_DESC(pwr_limit_reference_11_abg, "Power limit reference: u8 "
1533 "(default is 0xc8)");
1534
1535module_param_named(num_rx_desc,
1536 num_rx_desc_param, int, S_IRUSR);
1537MODULE_PARM_DESC(num_rx_desc_param,
1538 "Number of Rx descriptors: u8 (default is 32)");
1539
1540MODULE_LICENSE("GPL v2");
1541MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
1542MODULE_FIRMWARE(WL18XX_FW_NAME);
diff --git a/drivers/net/wireless/ti/wl18xx/reg.h b/drivers/net/wireless/ti/wl18xx/reg.h
new file mode 100644
index 00000000000..937b71d8783
--- /dev/null
+++ b/drivers/net/wireless/ti/wl18xx/reg.h
@@ -0,0 +1,191 @@
1/*
2 * This file is part of wlcore
3 *
4 * Copyright (C) 2011 Texas Instruments Inc.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
18 * 02110-1301 USA
19 *
20 */
21
22#ifndef __REG_H__
23#define __REG_H__
24
25#define WL18XX_REGISTERS_BASE 0x00800000
26#define WL18XX_CODE_BASE 0x00000000
27#define WL18XX_DATA_BASE 0x00400000
28#define WL18XX_DOUBLE_BUFFER_BASE 0x00600000
29#define WL18XX_MCU_KEY_SEARCH_BASE 0x00700000
30#define WL18XX_PHY_BASE 0x00900000
31#define WL18XX_TOP_OCP_BASE 0x00A00000
32#define WL18XX_PACKET_RAM_BASE 0x00B00000
33#define WL18XX_HOST_BASE 0x00C00000
34
35#define WL18XX_REGISTERS_DOWN_SIZE 0x0000B000
36
37#define WL18XX_REG_BOOT_PART_START 0x00802000
38#define WL18XX_REG_BOOT_PART_SIZE 0x00014578
39
40#define WL18XX_PHY_INIT_MEM_ADDR 0x80926000
41
42#define WL18XX_SDIO_WSPI_BASE (WL18XX_REGISTERS_BASE)
43#define WL18XX_REG_CONFIG_BASE (WL18XX_REGISTERS_BASE + 0x02000)
44#define WL18XX_WGCM_REGS_BASE (WL18XX_REGISTERS_BASE + 0x03000)
45#define WL18XX_ENC_BASE (WL18XX_REGISTERS_BASE + 0x04000)
46#define WL18XX_INTERRUPT_BASE (WL18XX_REGISTERS_BASE + 0x05000)
47#define WL18XX_UART_BASE (WL18XX_REGISTERS_BASE + 0x06000)
48#define WL18XX_WELP_BASE (WL18XX_REGISTERS_BASE + 0x07000)
49#define WL18XX_TCP_CKSM_BASE (WL18XX_REGISTERS_BASE + 0x08000)
50#define WL18XX_FIFO_BASE (WL18XX_REGISTERS_BASE + 0x09000)
51#define WL18XX_OCP_BRIDGE_BASE (WL18XX_REGISTERS_BASE + 0x0A000)
52#define WL18XX_PMAC_RX_BASE (WL18XX_REGISTERS_BASE + 0x14800)
53#define WL18XX_PMAC_ACM_BASE (WL18XX_REGISTERS_BASE + 0x14C00)
54#define WL18XX_PMAC_TX_BASE (WL18XX_REGISTERS_BASE + 0x15000)
55#define WL18XX_PMAC_CSR_BASE (WL18XX_REGISTERS_BASE + 0x15400)
56
57#define WL18XX_REG_ECPU_CONTROL (WL18XX_REGISTERS_BASE + 0x02004)
58#define WL18XX_REG_INTERRUPT_NO_CLEAR (WL18XX_REGISTERS_BASE + 0x050E8)
59#define WL18XX_REG_INTERRUPT_ACK (WL18XX_REGISTERS_BASE + 0x050F0)
60#define WL18XX_REG_INTERRUPT_TRIG (WL18XX_REGISTERS_BASE + 0x5074)
61#define WL18XX_REG_INTERRUPT_TRIG_H (WL18XX_REGISTERS_BASE + 0x5078)
62#define WL18XX_REG_INTERRUPT_MASK (WL18XX_REGISTERS_BASE + 0x0050DC)
63
64#define WL18XX_REG_CHIP_ID_B (WL18XX_REGISTERS_BASE + 0x01542C)
65
66#define WL18XX_SLV_MEM_DATA (WL18XX_HOST_BASE + 0x0018)
67#define WL18XX_SLV_REG_DATA (WL18XX_HOST_BASE + 0x0008)
68
69/* Scratch Pad registers*/
70#define WL18XX_SCR_PAD0 (WL18XX_REGISTERS_BASE + 0x0154EC)
71#define WL18XX_SCR_PAD1 (WL18XX_REGISTERS_BASE + 0x0154F0)
72#define WL18XX_SCR_PAD2 (WL18XX_REGISTERS_BASE + 0x0154F4)
73#define WL18XX_SCR_PAD3 (WL18XX_REGISTERS_BASE + 0x0154F8)
74#define WL18XX_SCR_PAD4 (WL18XX_REGISTERS_BASE + 0x0154FC)
75#define WL18XX_SCR_PAD4_SET (WL18XX_REGISTERS_BASE + 0x015504)
76#define WL18XX_SCR_PAD4_CLR (WL18XX_REGISTERS_BASE + 0x015500)
77#define WL18XX_SCR_PAD5 (WL18XX_REGISTERS_BASE + 0x015508)
78#define WL18XX_SCR_PAD5_SET (WL18XX_REGISTERS_BASE + 0x015510)
79#define WL18XX_SCR_PAD5_CLR (WL18XX_REGISTERS_BASE + 0x01550C)
80#define WL18XX_SCR_PAD6 (WL18XX_REGISTERS_BASE + 0x015514)
81#define WL18XX_SCR_PAD7 (WL18XX_REGISTERS_BASE + 0x015518)
82#define WL18XX_SCR_PAD8 (WL18XX_REGISTERS_BASE + 0x01551C)
83#define WL18XX_SCR_PAD9 (WL18XX_REGISTERS_BASE + 0x015520)
84
85/* Spare registers*/
86#define WL18XX_SPARE_A1 (WL18XX_REGISTERS_BASE + 0x002194)
87#define WL18XX_SPARE_A2 (WL18XX_REGISTERS_BASE + 0x002198)
88#define WL18XX_SPARE_A3 (WL18XX_REGISTERS_BASE + 0x00219C)
89#define WL18XX_SPARE_A4 (WL18XX_REGISTERS_BASE + 0x0021A0)
90#define WL18XX_SPARE_A5 (WL18XX_REGISTERS_BASE + 0x0021A4)
91#define WL18XX_SPARE_A6 (WL18XX_REGISTERS_BASE + 0x0021A8)
92#define WL18XX_SPARE_A7 (WL18XX_REGISTERS_BASE + 0x0021AC)
93#define WL18XX_SPARE_A8 (WL18XX_REGISTERS_BASE + 0x0021B0)
94#define WL18XX_SPARE_B1 (WL18XX_REGISTERS_BASE + 0x015524)
95#define WL18XX_SPARE_B2 (WL18XX_REGISTERS_BASE + 0x015528)
96#define WL18XX_SPARE_B3 (WL18XX_REGISTERS_BASE + 0x01552C)
97#define WL18XX_SPARE_B4 (WL18XX_REGISTERS_BASE + 0x015530)
98#define WL18XX_SPARE_B5 (WL18XX_REGISTERS_BASE + 0x015534)
99#define WL18XX_SPARE_B6 (WL18XX_REGISTERS_BASE + 0x015538)
100#define WL18XX_SPARE_B7 (WL18XX_REGISTERS_BASE + 0x01553C)
101#define WL18XX_SPARE_B8 (WL18XX_REGISTERS_BASE + 0x015540)
102
103#define WL18XX_REG_COMMAND_MAILBOX_PTR (WL18XX_SCR_PAD0)
104#define WL18XX_REG_EVENT_MAILBOX_PTR (WL18XX_SCR_PAD1)
105#define WL18XX_EEPROMLESS_IND (WL18XX_SCR_PAD4)
106
107#define WL18XX_WELP_ARM_COMMAND (WL18XX_REGISTERS_BASE + 0x7100)
108#define WL18XX_ENABLE (WL18XX_REGISTERS_BASE + 0x01543C)
109
110/* PRCM registers */
111#define PLATFORM_DETECTION 0xA0E3E0
112#define OCS_EN 0xA02080
113#define PRIMARY_CLK_DETECT 0xA020A6
114#define PLLSH_WCS_PLL_N 0xA02362
115#define PLLSH_WCS_PLL_M 0xA02360
116#define PLLSH_WCS_PLL_Q_FACTOR_CFG_1 0xA02364
117#define PLLSH_WCS_PLL_Q_FACTOR_CFG_2 0xA02366
118#define PLLSH_WCS_PLL_P_FACTOR_CFG_1 0xA02368
119#define PLLSH_WCS_PLL_P_FACTOR_CFG_2 0xA0236A
120#define PLLSH_WCS_PLL_SWALLOW_EN 0xA0236C
121#define PLLSH_WL_PLL_EN 0xA02392
122
123#define PLLSH_WCS_PLL_Q_FACTOR_CFG_1_MASK 0xFFFF
124#define PLLSH_WCS_PLL_Q_FACTOR_CFG_2_MASK 0x007F
125#define PLLSH_WCS_PLL_P_FACTOR_CFG_1_MASK 0xFFFF
126#define PLLSH_WCS_PLL_P_FACTOR_CFG_2_MASK 0x000F
127
128#define PLLSH_WCS_PLL_SWALLOW_EN_VAL1 0x1
129#define PLLSH_WCS_PLL_SWALLOW_EN_VAL2 0x12
130
131#define WL18XX_REG_FUSE_DATA_1_3 0xA0260C
132#define WL18XX_PG_VER_MASK 0x70
133#define WL18XX_PG_VER_OFFSET 4
134
135#define WL18XX_REG_FUSE_BD_ADDR_1 0xA02602
136#define WL18XX_REG_FUSE_BD_ADDR_2 0xA02606
137
138#define WL18XX_CMD_MBOX_ADDRESS 0xB007B4
139
140#define WL18XX_FW_STATUS_ADDR 0x50F8
141
142#define CHIP_ID_185x_PG10 (0x06030101)
143#define CHIP_ID_185x_PG20 (0x06030111)
144
145/*
146 * Host Command Interrupt. Setting this bit masks
147 * the interrupt that the host issues to inform
148 * the FW that it has sent a command
149 * to the Wlan hardware Command Mailbox.
150 */
151#define WL18XX_INTR_TRIG_CMD BIT(28)
152
153/*
154 * Host Event Acknowlegde Interrupt. The host
155 * sets this bit to acknowledge that it received
156 * the unsolicited information from the event
157 * mailbox.
158 */
159#define WL18XX_INTR_TRIG_EVENT_ACK BIT(29)
160
161/*
162 * To boot the firmware in PLT mode we need to write this value in
163 * SCR_PAD8 before starting.
164 */
165#define WL18XX_SCR_PAD8_PLT 0xBABABEBE
166
/*
 * Band component switch topology, written into the PHY configuration
 * (low/high_band_component fields set from module parameters in main.c).
 * NOTE(review): exact RF semantics of each topology inferred from the
 * names -- confirm against the firmware/PHY documentation.
 */
enum {
	COMPONENT_NO_SWITCH	= 0x0,
	COMPONENT_2_WAY_SWITCH	= 0x1,
	COMPONENT_3_WAY_SWITCH	= 0x2,
	COMPONENT_MATCHING	= 0x3,
};
173
/*
 * Front-End Module (FEM) identifiers used in the PHY configuration.
 * NOTE(review): vendor mapping is opaque here -- confirm which vendor
 * corresponds to each value against the board documentation.
 */
enum {
	FEM_NONE	= 0x0,
	FEM_VENDOR_1	= 0x1,
	FEM_VENDOR_2	= 0x2,
	FEM_VENDOR_3	= 0x3,
};
180
/*
 * Board types selectable via the "board_type" module parameter
 * ("evb", "dvp", "hdk", "fpga", "com8"); stored in
 * priv->conf.phy.board_type by wl18xx_probe().
 */
enum {
	BOARD_TYPE_EVB_18XX	= 0,
	BOARD_TYPE_DVP_18XX	= 1,
	BOARD_TYPE_HDK_18XX	= 2,
	BOARD_TYPE_FPGA_18XX	= 3,
	BOARD_TYPE_COM8_18XX	= 4,

	NUM_BOARD_TYPES,
};
190
191#endif /* __REG_H__ */
diff --git a/drivers/net/wireless/ti/wl18xx/tx.c b/drivers/net/wireless/ti/wl18xx/tx.c
new file mode 100644
index 00000000000..5b1fb10d9fd
--- /dev/null
+++ b/drivers/net/wireless/ti/wl18xx/tx.c
@@ -0,0 +1,127 @@
1/*
2 * This file is part of wl18xx
3 *
4 * Copyright (C) 2011 Texas Instruments Inc.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
18 * 02110-1301 USA
19 *
20 */
21
22#include "../wlcore/wlcore.h"
23#include "../wlcore/cmd.h"
24#include "../wlcore/debug.h"
25#include "../wlcore/acx.h"
26#include "../wlcore/tx.h"
27
28#include "wl18xx.h"
29#include "tx.h"
30
/*
 * Complete one transmitted frame from a FW status byte.
 *
 * tx_stat_byte encodes the Tx descriptor id in the low bits
 * (WL18XX_TX_STATUS_DESC_ID_MASK) and a status flag in bit
 * WL18XX_TX_STATUS_STAT_BIT_IDX, where a ZERO bit means success.
 * Fills in the mac80211 tx_info for the skb, strips the private HW
 * descriptor (and TKIP header space, if the quirk applies), queues the
 * skb for return to the stack and frees the Tx descriptor id.
 */
static void wl18xx_tx_complete_packet(struct wl1271 *wl, u8 tx_stat_byte)
{
	struct ieee80211_tx_info *info;
	struct sk_buff *skb;
	int id = tx_stat_byte & WL18XX_TX_STATUS_DESC_ID_MASK;
	bool tx_success;

	/* check for id legality */
	if (unlikely(id >= wl->num_tx_desc || wl->tx_frames[id] == NULL)) {
		wl1271_warning("illegal id in tx completion: %d", id);
		return;
	}

	/* a zero bit indicates Tx success */
	tx_success = !(tx_stat_byte & BIT(WL18XX_TX_STATUS_STAT_BIT_IDX));


	skb = wl->tx_frames[id];
	info = IEEE80211_SKB_CB(skb);

	/* dummy packets are internal; free the id but don't touch the stack */
	if (wl12xx_is_dummy_packet(wl, skb)) {
		wl1271_free_tx_id(wl, id);
		return;
	}

	/* update the TX status info */
	if (tx_success && !(info->flags & IEEE80211_TX_CTL_NO_ACK))
		info->flags |= IEEE80211_TX_STAT_ACK;

	/* no real data about Tx completion */
	info->status.rates[0].idx = -1;
	info->status.rates[0].count = 0;
	info->status.rates[0].flags = 0;
	info->status.ack_signal = -1;

	if (!tx_success)
		wl->stats.retry_count++;

	/*
	 * TODO: update sequence number for encryption? seems to be
	 * unsupported for now. needed for recovery with encryption.
	 */

	/* remove private header from packet */
	skb_pull(skb, sizeof(struct wl1271_tx_hw_descr));

	/* remove TKIP header space if present */
	if ((wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE) &&
	    info->control.hw_key &&
	    info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP) {
		int hdrlen = ieee80211_get_hdrlen_from_skb(skb);
		memmove(skb->data + WL1271_EXTRA_SPACE_TKIP, skb->data, hdrlen);
		skb_pull(skb, WL1271_EXTRA_SPACE_TKIP);
	}

	wl1271_debug(DEBUG_TX, "tx status id %u skb 0x%p success %d",
		     id, skb, tx_success);

	/* return the packet to the stack */
	skb_queue_tail(&wl->deferred_tx_queue, skb);
	queue_work(wl->freezable_wq, &wl->netstack_work);
	wl1271_free_tx_id(wl, id);
}
94
/*
 * Process Tx completions reported directly in the FW status memory.
 *
 * The FW maintains a circular buffer (released_tx_desc, of size
 * WL18XX_FW_MAX_TX_STATUS_DESC) of status bytes and a write index
 * (fw_release_idx). Walk from the driver's last-seen index
 * (priv->last_fw_rls_idx) up to the FW index, completing each packet,
 * then record the new index. Bails out with a WARN if the FW-reported
 * index is out of range.
 */
void wl18xx_tx_immediate_complete(struct wl1271 *wl)
{
	struct wl18xx_fw_status_priv *status_priv =
		(struct wl18xx_fw_status_priv *)wl->fw_status_2->priv;
	struct wl18xx_priv *priv = wl->priv;
	u8 i;

	/* nothing to do here */
	if (priv->last_fw_rls_idx == status_priv->fw_release_idx)
		return;

	/* freed Tx descriptors */
	wl1271_debug(DEBUG_TX, "last released desc = %d, current idx = %d",
		     priv->last_fw_rls_idx, status_priv->fw_release_idx);

	/* defend against a corrupt index from the FW before using it */
	if (status_priv->fw_release_idx >= WL18XX_FW_MAX_TX_STATUS_DESC) {
		wl1271_error("invalid desc release index %d",
			     status_priv->fw_release_idx);
		WARN_ON(1);
		return;
	}

	for (i = priv->last_fw_rls_idx;
	     i != status_priv->fw_release_idx;
	     i = (i + 1) % WL18XX_FW_MAX_TX_STATUS_DESC) {
		wl18xx_tx_complete_packet(wl,
			status_priv->released_tx_desc[i]);

		wl->tx_results_count++;
	}

	priv->last_fw_rls_idx = status_priv->fw_release_idx;
}
diff --git a/drivers/net/wireless/ti/wl18xx/tx.h b/drivers/net/wireless/ti/wl18xx/tx.h
new file mode 100644
index 00000000000..ccddc548e44
--- /dev/null
+++ b/drivers/net/wireless/ti/wl18xx/tx.h
@@ -0,0 +1,46 @@
1/*
2 * This file is part of wl18xx
3 *
4 * Copyright (C) 2011 Texas Instruments. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
18 * 02110-1301 USA
19 *
20 */
21
22#ifndef __WL18XX_TX_H__
23#define __WL18XX_TX_H__
24
25#include "../wlcore/wlcore.h"
26
27#define WL18XX_TX_HW_BLOCK_SPARE 1
28/* for special cases - namely, TKIP and GEM */
29#define WL18XX_TX_HW_EXTRA_BLOCK_SPARE 2
30#define WL18XX_TX_HW_BLOCK_SIZE 268
31
32#define WL18XX_TX_STATUS_DESC_ID_MASK 0x7F
33#define WL18XX_TX_STATUS_STAT_BIT_IDX 7
34
35/* Indicates this TX HW frame is not padded to SDIO block size */
36#define WL18XX_TX_CTRL_NOT_PADDED BIT(7)
37
38/*
39 * The FW uses a special bit to indicate a wide channel should be used in
40 * the rate policy.
41 */
42#define CONF_TX_RATE_USE_WIDE_CHAN BIT(31)
43
44void wl18xx_tx_immediate_complete(struct wl1271 *wl);
45
#endif /* __WL18XX_TX_H__ */
diff --git a/drivers/net/wireless/ti/wl18xx/wl18xx.h b/drivers/net/wireless/ti/wl18xx/wl18xx.h
new file mode 100644
index 00000000000..bc67a475061
--- /dev/null
+++ b/drivers/net/wireless/ti/wl18xx/wl18xx.h
@@ -0,0 +1,88 @@
1/*
2 * This file is part of wl18xx
3 *
4 * Copyright (C) 2011 Texas Instruments Inc.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
18 * 02110-1301 USA
19 *
20 */
21
22#ifndef __WL18XX_PRIV_H__
23#define __WL18XX_PRIV_H__
24
25#include "conf.h"
26
27#define WL18XX_CMD_MAX_SIZE 740
28
/* Chip-specific private state, hung off the generic wl1271 (wl->priv). */
struct wl18xx_priv {
	/* buffer for sending commands to FW */
	u8 cmd_buf[WL18XX_CMD_MAX_SIZE];

	/* chip-family private configuration (PHY, board type, etc.) */
	struct wl18xx_priv_conf conf;

	/* Index of last released Tx desc in FW */
	u8 last_fw_rls_idx;

	/* number of VIFs requiring extra spare mem-blocks */
	int extra_spare_vif_count;
};
41
/* Size of the FW's circular buffer of released Tx descriptor status bytes */
#define WL18XX_FW_MAX_TX_STATUS_DESC 33

/*
 * Chip-specific tail of the FW status block; consumed by
 * wl18xx_tx_immediate_complete(). Layout must match the firmware.
 */
struct wl18xx_fw_status_priv {
	/*
	 * Index in released_tx_desc for first byte that holds
	 * released tx host desc
	 */
	u8 fw_release_idx;

	/*
	 * Array of host Tx descriptors, where fw_release_idx
	 * indicates the first released idx.
	 */
	u8 released_tx_desc[WL18XX_FW_MAX_TX_STATUS_DESC];

	u8 padding[2];
};
59
#define WL18XX_PHY_VERSION_MAX_LEN 20

/* Chip-specific static data reported by the FW: PHY version string. */
struct wl18xx_static_data_priv {
	char phy_version[WL18XX_PHY_VERSION_MAX_LEN];
};
65
/*
 * PLL clock configuration parameters.
 * NOTE(review): n/m/p/q appear to be PLL divider/factor values written to
 * the PLLSH_WCS_PLL_* registers (see reg.h); confirm against PRCM docs.
 */
struct wl18xx_clk_cfg {
	u32 n;
	u32 m;
	u32 p;
	u32 q;
	bool swallow;	/* whether to enable the PLL swallow mechanism */
};
73
/*
 * Supported reference clock frequencies (names encode MHz, e.g.
 * CLOCK_CONFIG_16_368_M == 16.368 MHz). Values start at 1; 0 is unused.
 */
enum {
	CLOCK_CONFIG_16_2_M	= 1,
	CLOCK_CONFIG_16_368_M,
	CLOCK_CONFIG_16_8_M,
	CLOCK_CONFIG_19_2_M,
	CLOCK_CONFIG_26_M,
	CLOCK_CONFIG_32_736_M,
	CLOCK_CONFIG_33_6_M,
	CLOCK_CONFIG_38_468_M,
	CLOCK_CONFIG_52_M,

	NUM_CLOCK_CONFIGS,
};
87
88#endif /* __WL18XX_PRIV_H__ */
diff --git a/drivers/net/wireless/ti/wlcore/acx.c b/drivers/net/wireless/ti/wlcore/acx.c
index f3d6fa50826..ce108a736bd 100644
--- a/drivers/net/wireless/ti/wlcore/acx.c
+++ b/drivers/net/wireless/ti/wlcore/acx.c
@@ -70,7 +70,7 @@ int wl1271_acx_sleep_auth(struct wl1271 *wl, u8 sleep_auth)
70 struct acx_sleep_auth *auth; 70 struct acx_sleep_auth *auth;
71 int ret; 71 int ret;
72 72
73 wl1271_debug(DEBUG_ACX, "acx sleep auth"); 73 wl1271_debug(DEBUG_ACX, "acx sleep auth %d", sleep_auth);
74 74
75 auth = kzalloc(sizeof(*auth), GFP_KERNEL); 75 auth = kzalloc(sizeof(*auth), GFP_KERNEL);
76 if (!auth) { 76 if (!auth) {
@@ -81,11 +81,18 @@ int wl1271_acx_sleep_auth(struct wl1271 *wl, u8 sleep_auth)
81 auth->sleep_auth = sleep_auth; 81 auth->sleep_auth = sleep_auth;
82 82
83 ret = wl1271_cmd_configure(wl, ACX_SLEEP_AUTH, auth, sizeof(*auth)); 83 ret = wl1271_cmd_configure(wl, ACX_SLEEP_AUTH, auth, sizeof(*auth));
84 if (ret < 0) {
85 wl1271_error("could not configure sleep_auth to %d: %d",
86 sleep_auth, ret);
87 goto out;
88 }
84 89
90 wl->sleep_auth = sleep_auth;
85out: 91out:
86 kfree(auth); 92 kfree(auth);
87 return ret; 93 return ret;
88} 94}
95EXPORT_SYMBOL_GPL(wl1271_acx_sleep_auth);
89 96
90int wl1271_acx_tx_power(struct wl1271 *wl, struct wl12xx_vif *wlvif, 97int wl1271_acx_tx_power(struct wl1271 *wl, struct wl12xx_vif *wlvif,
91 int power) 98 int power)
@@ -708,14 +715,14 @@ out:
708 return ret; 715 return ret;
709} 716}
710 717
711int wl1271_acx_statistics(struct wl1271 *wl, struct acx_statistics *stats) 718int wl1271_acx_statistics(struct wl1271 *wl, void *stats)
712{ 719{
713 int ret; 720 int ret;
714 721
715 wl1271_debug(DEBUG_ACX, "acx statistics"); 722 wl1271_debug(DEBUG_ACX, "acx statistics");
716 723
717 ret = wl1271_cmd_interrogate(wl, ACX_STATISTICS, stats, 724 ret = wl1271_cmd_interrogate(wl, ACX_STATISTICS, stats,
718 sizeof(*stats)); 725 wl->stats.fw_stats_len);
719 if (ret < 0) { 726 if (ret < 0) {
720 wl1271_warning("acx statistics failed: %d", ret); 727 wl1271_warning("acx statistics failed: %d", ret);
721 return -ENOMEM; 728 return -ENOMEM;
@@ -997,6 +1004,7 @@ out:
997 kfree(mem_conf); 1004 kfree(mem_conf);
998 return ret; 1005 return ret;
999} 1006}
1007EXPORT_SYMBOL_GPL(wl12xx_acx_mem_cfg);
1000 1008
1001int wl1271_acx_init_mem_config(struct wl1271 *wl) 1009int wl1271_acx_init_mem_config(struct wl1271 *wl)
1002{ 1010{
@@ -1027,6 +1035,7 @@ int wl1271_acx_init_mem_config(struct wl1271 *wl)
1027 1035
1028 return 0; 1036 return 0;
1029} 1037}
1038EXPORT_SYMBOL_GPL(wl1271_acx_init_mem_config);
1030 1039
1031int wl1271_acx_init_rx_interrupt(struct wl1271 *wl) 1040int wl1271_acx_init_rx_interrupt(struct wl1271 *wl)
1032{ 1041{
@@ -1150,6 +1159,7 @@ out:
1150 kfree(acx); 1159 kfree(acx);
1151 return ret; 1160 return ret;
1152} 1161}
1162EXPORT_SYMBOL_GPL(wl1271_acx_pm_config);
1153 1163
1154int wl1271_acx_keep_alive_mode(struct wl1271 *wl, struct wl12xx_vif *wlvif, 1164int wl1271_acx_keep_alive_mode(struct wl1271 *wl, struct wl12xx_vif *wlvif,
1155 bool enable) 1165 bool enable)
diff --git a/drivers/net/wireless/ti/wlcore/acx.h b/drivers/net/wireless/ti/wlcore/acx.h
index e6a74869a5f..d03215d6b3b 100644
--- a/drivers/net/wireless/ti/wlcore/acx.h
+++ b/drivers/net/wireless/ti/wlcore/acx.h
@@ -51,21 +51,18 @@
51#define WL1271_ACX_INTR_TRACE_A BIT(7) 51#define WL1271_ACX_INTR_TRACE_A BIT(7)
52/* Trace message on MBOX #B */ 52/* Trace message on MBOX #B */
53#define WL1271_ACX_INTR_TRACE_B BIT(8) 53#define WL1271_ACX_INTR_TRACE_B BIT(8)
54/* SW FW Initiated interrupt Watchdog timer expiration */
55#define WL1271_ACX_SW_INTR_WATCHDOG BIT(9)
54 56
55#define WL1271_ACX_INTR_ALL 0xFFFFFFFF 57#define WL1271_ACX_INTR_ALL 0xFFFFFFFF
56#define WL1271_ACX_ALL_EVENTS_VECTOR (WL1271_ACX_INTR_WATCHDOG | \ 58
57 WL1271_ACX_INTR_INIT_COMPLETE | \ 59/* all possible interrupts - only appropriate ones will be masked in */
58 WL1271_ACX_INTR_EVENT_A | \ 60#define WLCORE_ALL_INTR_MASK (WL1271_ACX_INTR_WATCHDOG | \
59 WL1271_ACX_INTR_EVENT_B | \ 61 WL1271_ACX_INTR_EVENT_A | \
60 WL1271_ACX_INTR_CMD_COMPLETE | \ 62 WL1271_ACX_INTR_EVENT_B | \
61 WL1271_ACX_INTR_HW_AVAILABLE | \ 63 WL1271_ACX_INTR_HW_AVAILABLE | \
62 WL1271_ACX_INTR_DATA) 64 WL1271_ACX_INTR_DATA | \
63 65 WL1271_ACX_SW_INTR_WATCHDOG)
64#define WL1271_INTR_MASK (WL1271_ACX_INTR_WATCHDOG | \
65 WL1271_ACX_INTR_EVENT_A | \
66 WL1271_ACX_INTR_EVENT_B | \
67 WL1271_ACX_INTR_HW_AVAILABLE | \
68 WL1271_ACX_INTR_DATA)
69 66
70/* Target's information element */ 67/* Target's information element */
71struct acx_header { 68struct acx_header {
@@ -121,6 +118,11 @@ enum wl1271_psm_mode {
121 118
122 /* Extreme low power */ 119 /* Extreme low power */
123 WL1271_PSM_ELP = 2, 120 WL1271_PSM_ELP = 2,
121
122 WL1271_PSM_MAX = WL1271_PSM_ELP,
123
124 /* illegal out of band value of PSM mode */
125 WL1271_PSM_ILLEGAL = 0xff
124}; 126};
125 127
126struct acx_sleep_auth { 128struct acx_sleep_auth {
@@ -417,228 +419,6 @@ struct acx_ctsprotect {
417 u8 padding[2]; 419 u8 padding[2];
418} __packed; 420} __packed;
419 421
420struct acx_tx_statistics {
421 __le32 internal_desc_overflow;
422} __packed;
423
424struct acx_rx_statistics {
425 __le32 out_of_mem;
426 __le32 hdr_overflow;
427 __le32 hw_stuck;
428 __le32 dropped;
429 __le32 fcs_err;
430 __le32 xfr_hint_trig;
431 __le32 path_reset;
432 __le32 reset_counter;
433} __packed;
434
435struct acx_dma_statistics {
436 __le32 rx_requested;
437 __le32 rx_errors;
438 __le32 tx_requested;
439 __le32 tx_errors;
440} __packed;
441
442struct acx_isr_statistics {
443 /* host command complete */
444 __le32 cmd_cmplt;
445
446 /* fiqisr() */
447 __le32 fiqs;
448
449 /* (INT_STS_ND & INT_TRIG_RX_HEADER) */
450 __le32 rx_headers;
451
452 /* (INT_STS_ND & INT_TRIG_RX_CMPLT) */
453 __le32 rx_completes;
454
455 /* (INT_STS_ND & INT_TRIG_NO_RX_BUF) */
456 __le32 rx_mem_overflow;
457
458 /* (INT_STS_ND & INT_TRIG_S_RX_RDY) */
459 __le32 rx_rdys;
460
461 /* irqisr() */
462 __le32 irqs;
463
464 /* (INT_STS_ND & INT_TRIG_TX_PROC) */
465 __le32 tx_procs;
466
467 /* (INT_STS_ND & INT_TRIG_DECRYPT_DONE) */
468 __le32 decrypt_done;
469
470 /* (INT_STS_ND & INT_TRIG_DMA0) */
471 __le32 dma0_done;
472
473 /* (INT_STS_ND & INT_TRIG_DMA1) */
474 __le32 dma1_done;
475
476 /* (INT_STS_ND & INT_TRIG_TX_EXC_CMPLT) */
477 __le32 tx_exch_complete;
478
479 /* (INT_STS_ND & INT_TRIG_COMMAND) */
480 __le32 commands;
481
482 /* (INT_STS_ND & INT_TRIG_RX_PROC) */
483 __le32 rx_procs;
484
485 /* (INT_STS_ND & INT_TRIG_PM_802) */
486 __le32 hw_pm_mode_changes;
487
488 /* (INT_STS_ND & INT_TRIG_ACKNOWLEDGE) */
489 __le32 host_acknowledges;
490
491 /* (INT_STS_ND & INT_TRIG_PM_PCI) */
492 __le32 pci_pm;
493
494 /* (INT_STS_ND & INT_TRIG_ACM_WAKEUP) */
495 __le32 wakeups;
496
497 /* (INT_STS_ND & INT_TRIG_LOW_RSSI) */
498 __le32 low_rssi;
499} __packed;
500
501struct acx_wep_statistics {
502 /* WEP address keys configured */
503 __le32 addr_key_count;
504
505 /* default keys configured */
506 __le32 default_key_count;
507
508 __le32 reserved;
509
510 /* number of times that WEP key not found on lookup */
511 __le32 key_not_found;
512
513 /* number of times that WEP key decryption failed */
514 __le32 decrypt_fail;
515
516 /* WEP packets decrypted */
517 __le32 packets;
518
519 /* WEP decrypt interrupts */
520 __le32 interrupt;
521} __packed;
522
523#define ACX_MISSED_BEACONS_SPREAD 10
524
525struct acx_pwr_statistics {
526 /* the amount of enters into power save mode (both PD & ELP) */
527 __le32 ps_enter;
528
529 /* the amount of enters into ELP mode */
530 __le32 elp_enter;
531
532 /* the amount of missing beacon interrupts to the host */
533 __le32 missing_bcns;
534
535 /* the amount of wake on host-access times */
536 __le32 wake_on_host;
537
538 /* the amount of wake on timer-expire */
539 __le32 wake_on_timer_exp;
540
541 /* the number of packets that were transmitted with PS bit set */
542 __le32 tx_with_ps;
543
544 /* the number of packets that were transmitted with PS bit clear */
545 __le32 tx_without_ps;
546
547 /* the number of received beacons */
548 __le32 rcvd_beacons;
549
550 /* the number of entering into PowerOn (power save off) */
551 __le32 power_save_off;
552
553 /* the number of entries into power save mode */
554 __le16 enable_ps;
555
556 /*
557 * the number of exits from power save, not including failed PS
558 * transitions
559 */
560 __le16 disable_ps;
561
562 /*
563 * the number of times the TSF counter was adjusted because
564 * of drift
565 */
566 __le32 fix_tsf_ps;
567
568 /* Gives statistics about the spread continuous missed beacons.
569 * The 16 LSB are dedicated for the PS mode.
570 * The 16 MSB are dedicated for the PS mode.
571 * cont_miss_bcns_spread[0] - single missed beacon.
572 * cont_miss_bcns_spread[1] - two continuous missed beacons.
573 * cont_miss_bcns_spread[2] - three continuous missed beacons.
574 * ...
575 * cont_miss_bcns_spread[9] - ten and more continuous missed beacons.
576 */
577 __le32 cont_miss_bcns_spread[ACX_MISSED_BEACONS_SPREAD];
578
579 /* the number of beacons in awake mode */
580 __le32 rcvd_awake_beacons;
581} __packed;
582
583struct acx_mic_statistics {
584 __le32 rx_pkts;
585 __le32 calc_failure;
586} __packed;
587
588struct acx_aes_statistics {
589 __le32 encrypt_fail;
590 __le32 decrypt_fail;
591 __le32 encrypt_packets;
592 __le32 decrypt_packets;
593 __le32 encrypt_interrupt;
594 __le32 decrypt_interrupt;
595} __packed;
596
597struct acx_event_statistics {
598 __le32 heart_beat;
599 __le32 calibration;
600 __le32 rx_mismatch;
601 __le32 rx_mem_empty;
602 __le32 rx_pool;
603 __le32 oom_late;
604 __le32 phy_transmit_error;
605 __le32 tx_stuck;
606} __packed;
607
608struct acx_ps_statistics {
609 __le32 pspoll_timeouts;
610 __le32 upsd_timeouts;
611 __le32 upsd_max_sptime;
612 __le32 upsd_max_apturn;
613 __le32 pspoll_max_apturn;
614 __le32 pspoll_utilization;
615 __le32 upsd_utilization;
616} __packed;
617
618struct acx_rxpipe_statistics {
619 __le32 rx_prep_beacon_drop;
620 __le32 descr_host_int_trig_rx_data;
621 __le32 beacon_buffer_thres_host_int_trig_rx_data;
622 __le32 missed_beacon_host_int_trig_rx_data;
623 __le32 tx_xfr_host_int_trig_rx_data;
624} __packed;
625
626struct acx_statistics {
627 struct acx_header header;
628
629 struct acx_tx_statistics tx;
630 struct acx_rx_statistics rx;
631 struct acx_dma_statistics dma;
632 struct acx_isr_statistics isr;
633 struct acx_wep_statistics wep;
634 struct acx_pwr_statistics pwr;
635 struct acx_aes_statistics aes;
636 struct acx_mic_statistics mic;
637 struct acx_event_statistics event;
638 struct acx_ps_statistics ps;
639 struct acx_rxpipe_statistics rxpipe;
640} __packed;
641
642struct acx_rate_class { 422struct acx_rate_class {
643 __le32 enabled_rates; 423 __le32 enabled_rates;
644 u8 short_retry_limit; 424 u8 short_retry_limit;
@@ -828,6 +608,8 @@ struct wl1271_acx_keep_alive_config {
828#define HOST_IF_CFG_RX_FIFO_ENABLE BIT(0) 608#define HOST_IF_CFG_RX_FIFO_ENABLE BIT(0)
829#define HOST_IF_CFG_TX_EXTRA_BLKS_SWAP BIT(1) 609#define HOST_IF_CFG_TX_EXTRA_BLKS_SWAP BIT(1)
830#define HOST_IF_CFG_TX_PAD_TO_SDIO_BLK BIT(3) 610#define HOST_IF_CFG_TX_PAD_TO_SDIO_BLK BIT(3)
611#define HOST_IF_CFG_RX_PAD_TO_SDIO_BLK BIT(4)
612#define HOST_IF_CFG_ADD_RX_ALIGNMENT BIT(6)
831 613
832enum { 614enum {
833 WL1271_ACX_TRIG_TYPE_LEVEL = 0, 615 WL1271_ACX_TRIG_TYPE_LEVEL = 0,
@@ -946,7 +728,7 @@ struct wl1271_acx_ht_information {
946 u8 padding[2]; 728 u8 padding[2];
947} __packed; 729} __packed;
948 730
949#define RX_BA_MAX_SESSIONS 2 731#define RX_BA_MAX_SESSIONS 3
950 732
951struct wl1271_acx_ba_initiator_policy { 733struct wl1271_acx_ba_initiator_policy {
952 struct acx_header header; 734 struct acx_header header;
@@ -1243,6 +1025,7 @@ enum {
1243 ACX_CONFIG_HANGOVER = 0x0042, 1025 ACX_CONFIG_HANGOVER = 0x0042,
1244 ACX_FEATURE_CFG = 0x0043, 1026 ACX_FEATURE_CFG = 0x0043,
1245 ACX_PROTECTION_CFG = 0x0044, 1027 ACX_PROTECTION_CFG = 0x0044,
1028 ACX_CHECKSUM_CONFIG = 0x0045,
1246}; 1029};
1247 1030
1248 1031
@@ -1281,7 +1064,7 @@ int wl1271_acx_set_preamble(struct wl1271 *wl, struct wl12xx_vif *wlvif,
1281 enum acx_preamble_type preamble); 1064 enum acx_preamble_type preamble);
1282int wl1271_acx_cts_protect(struct wl1271 *wl, struct wl12xx_vif *wlvif, 1065int wl1271_acx_cts_protect(struct wl1271 *wl, struct wl12xx_vif *wlvif,
1283 enum acx_ctsprotect_type ctsprotect); 1066 enum acx_ctsprotect_type ctsprotect);
1284int wl1271_acx_statistics(struct wl1271 *wl, struct acx_statistics *stats); 1067int wl1271_acx_statistics(struct wl1271 *wl, void *stats);
1285int wl1271_acx_sta_rate_policies(struct wl1271 *wl, struct wl12xx_vif *wlvif); 1068int wl1271_acx_sta_rate_policies(struct wl1271 *wl, struct wl12xx_vif *wlvif);
1286int wl1271_acx_ap_rate_policy(struct wl1271 *wl, struct conf_tx_rate_class *c, 1069int wl1271_acx_ap_rate_policy(struct wl1271 *wl, struct conf_tx_rate_class *c,
1287 u8 idx); 1070 u8 idx);
diff --git a/drivers/net/wireless/ti/wlcore/boot.c b/drivers/net/wireless/ti/wlcore/boot.c
index 9b98230f84c..8965960b841 100644
--- a/drivers/net/wireless/ti/wlcore/boot.c
+++ b/drivers/net/wireless/ti/wlcore/boot.c
@@ -33,22 +33,35 @@
33#include "rx.h" 33#include "rx.h"
34#include "hw_ops.h" 34#include "hw_ops.h"
35 35
36static void wl1271_boot_set_ecpu_ctrl(struct wl1271 *wl, u32 flag) 36static int wl1271_boot_set_ecpu_ctrl(struct wl1271 *wl, u32 flag)
37{ 37{
38 u32 cpu_ctrl; 38 u32 cpu_ctrl;
39 int ret;
39 40
40 /* 10.5.0 run the firmware (I) */ 41 /* 10.5.0 run the firmware (I) */
41 cpu_ctrl = wlcore_read_reg(wl, REG_ECPU_CONTROL); 42 ret = wlcore_read_reg(wl, REG_ECPU_CONTROL, &cpu_ctrl);
43 if (ret < 0)
44 goto out;
42 45
43 /* 10.5.1 run the firmware (II) */ 46 /* 10.5.1 run the firmware (II) */
44 cpu_ctrl |= flag; 47 cpu_ctrl |= flag;
45 wlcore_write_reg(wl, REG_ECPU_CONTROL, cpu_ctrl); 48 ret = wlcore_write_reg(wl, REG_ECPU_CONTROL, cpu_ctrl);
49
50out:
51 return ret;
46} 52}
47 53
48static int wlcore_parse_fw_ver(struct wl1271 *wl) 54static int wlcore_boot_parse_fw_ver(struct wl1271 *wl,
55 struct wl1271_static_data *static_data)
49{ 56{
50 int ret; 57 int ret;
51 58
59 strncpy(wl->chip.fw_ver_str, static_data->fw_version,
60 sizeof(wl->chip.fw_ver_str));
61
62 /* make sure the string is NULL-terminated */
63 wl->chip.fw_ver_str[sizeof(wl->chip.fw_ver_str) - 1] = '\0';
64
52 ret = sscanf(wl->chip.fw_ver_str + 4, "%u.%u.%u.%u.%u", 65 ret = sscanf(wl->chip.fw_ver_str + 4, "%u.%u.%u.%u.%u",
53 &wl->chip.fw_ver[0], &wl->chip.fw_ver[1], 66 &wl->chip.fw_ver[0], &wl->chip.fw_ver[1],
54 &wl->chip.fw_ver[2], &wl->chip.fw_ver[3], 67 &wl->chip.fw_ver[2], &wl->chip.fw_ver[3],
@@ -57,43 +70,45 @@ static int wlcore_parse_fw_ver(struct wl1271 *wl)
57 if (ret != 5) { 70 if (ret != 5) {
58 wl1271_warning("fw version incorrect value"); 71 wl1271_warning("fw version incorrect value");
59 memset(wl->chip.fw_ver, 0, sizeof(wl->chip.fw_ver)); 72 memset(wl->chip.fw_ver, 0, sizeof(wl->chip.fw_ver));
60 return -EINVAL; 73 ret = -EINVAL;
74 goto out;
61 } 75 }
62 76
63 ret = wlcore_identify_fw(wl); 77 ret = wlcore_identify_fw(wl);
64 if (ret < 0) 78 if (ret < 0)
65 return ret; 79 goto out;
66 80out:
67 return 0; 81 return ret;
68} 82}
69 83
70static int wlcore_boot_fw_version(struct wl1271 *wl) 84static int wlcore_boot_static_data(struct wl1271 *wl)
71{ 85{
72 struct wl1271_static_data *static_data; 86 struct wl1271_static_data *static_data;
87 size_t len = sizeof(*static_data) + wl->static_data_priv_len;
73 int ret; 88 int ret;
74 89
75 static_data = kmalloc(sizeof(*static_data), GFP_KERNEL | GFP_DMA); 90 static_data = kmalloc(len, GFP_KERNEL);
76 if (!static_data) { 91 if (!static_data) {
77 wl1271_error("Couldn't allocate memory for static data!"); 92 ret = -ENOMEM;
78 return -ENOMEM; 93 goto out;
79 } 94 }
80 95
81 wl1271_read(wl, wl->cmd_box_addr, static_data, sizeof(*static_data), 96 ret = wlcore_read(wl, wl->cmd_box_addr, static_data, len, false);
82 false); 97 if (ret < 0)
83 98 goto out_free;
84 strncpy(wl->chip.fw_ver_str, static_data->fw_version,
85 sizeof(wl->chip.fw_ver_str));
86
87 kfree(static_data);
88 99
89 /* make sure the string is NULL-terminated */ 100 ret = wlcore_boot_parse_fw_ver(wl, static_data);
90 wl->chip.fw_ver_str[sizeof(wl->chip.fw_ver_str) - 1] = '\0'; 101 if (ret < 0)
102 goto out_free;
91 103
92 ret = wlcore_parse_fw_ver(wl); 104 ret = wlcore_handle_static_data(wl, static_data);
93 if (ret < 0) 105 if (ret < 0)
94 return ret; 106 goto out_free;
95 107
96 return 0; 108out_free:
109 kfree(static_data);
110out:
111 return ret;
97} 112}
98 113
99static int wl1271_boot_upload_firmware_chunk(struct wl1271 *wl, void *buf, 114static int wl1271_boot_upload_firmware_chunk(struct wl1271 *wl, void *buf,
@@ -102,6 +117,7 @@ static int wl1271_boot_upload_firmware_chunk(struct wl1271 *wl, void *buf,
102 struct wlcore_partition_set partition; 117 struct wlcore_partition_set partition;
103 int addr, chunk_num, partition_limit; 118 int addr, chunk_num, partition_limit;
104 u8 *p, *chunk; 119 u8 *p, *chunk;
120 int ret;
105 121
106 /* whal_FwCtrl_LoadFwImageSm() */ 122 /* whal_FwCtrl_LoadFwImageSm() */
107 123
@@ -123,7 +139,9 @@ static int wl1271_boot_upload_firmware_chunk(struct wl1271 *wl, void *buf,
123 139
124 memcpy(&partition, &wl->ptable[PART_DOWN], sizeof(partition)); 140 memcpy(&partition, &wl->ptable[PART_DOWN], sizeof(partition));
125 partition.mem.start = dest; 141 partition.mem.start = dest;
126 wlcore_set_partition(wl, &partition); 142 ret = wlcore_set_partition(wl, &partition);
143 if (ret < 0)
144 return ret;
127 145
128 /* 10.1 set partition limit and chunk num */ 146 /* 10.1 set partition limit and chunk num */
129 chunk_num = 0; 147 chunk_num = 0;
@@ -137,7 +155,9 @@ static int wl1271_boot_upload_firmware_chunk(struct wl1271 *wl, void *buf,
137 partition_limit = chunk_num * CHUNK_SIZE + 155 partition_limit = chunk_num * CHUNK_SIZE +
138 wl->ptable[PART_DOWN].mem.size; 156 wl->ptable[PART_DOWN].mem.size;
139 partition.mem.start = addr; 157 partition.mem.start = addr;
140 wlcore_set_partition(wl, &partition); 158 ret = wlcore_set_partition(wl, &partition);
159 if (ret < 0)
160 return ret;
141 } 161 }
142 162
143 /* 10.3 upload the chunk */ 163 /* 10.3 upload the chunk */
@@ -146,7 +166,9 @@ static int wl1271_boot_upload_firmware_chunk(struct wl1271 *wl, void *buf,
146 memcpy(chunk, p, CHUNK_SIZE); 166 memcpy(chunk, p, CHUNK_SIZE);
147 wl1271_debug(DEBUG_BOOT, "uploading fw chunk 0x%p to 0x%x", 167 wl1271_debug(DEBUG_BOOT, "uploading fw chunk 0x%p to 0x%x",
148 p, addr); 168 p, addr);
149 wl1271_write(wl, addr, chunk, CHUNK_SIZE, false); 169 ret = wlcore_write(wl, addr, chunk, CHUNK_SIZE, false);
170 if (ret < 0)
171 goto out;
150 172
151 chunk_num++; 173 chunk_num++;
152 } 174 }
@@ -157,10 +179,11 @@ static int wl1271_boot_upload_firmware_chunk(struct wl1271 *wl, void *buf,
157 memcpy(chunk, p, fw_data_len % CHUNK_SIZE); 179 memcpy(chunk, p, fw_data_len % CHUNK_SIZE);
158 wl1271_debug(DEBUG_BOOT, "uploading fw last chunk (%zd B) 0x%p to 0x%x", 180 wl1271_debug(DEBUG_BOOT, "uploading fw last chunk (%zd B) 0x%p to 0x%x",
159 fw_data_len % CHUNK_SIZE, p, addr); 181 fw_data_len % CHUNK_SIZE, p, addr);
160 wl1271_write(wl, addr, chunk, fw_data_len % CHUNK_SIZE, false); 182 ret = wlcore_write(wl, addr, chunk, fw_data_len % CHUNK_SIZE, false);
161 183
184out:
162 kfree(chunk); 185 kfree(chunk);
163 return 0; 186 return ret;
164} 187}
165 188
166int wlcore_boot_upload_firmware(struct wl1271 *wl) 189int wlcore_boot_upload_firmware(struct wl1271 *wl)
@@ -203,9 +226,12 @@ int wlcore_boot_upload_nvs(struct wl1271 *wl)
203 int i; 226 int i;
204 u32 dest_addr, val; 227 u32 dest_addr, val;
205 u8 *nvs_ptr, *nvs_aligned; 228 u8 *nvs_ptr, *nvs_aligned;
229 int ret;
206 230
207 if (wl->nvs == NULL) 231 if (wl->nvs == NULL) {
232 wl1271_error("NVS file is needed during boot");
208 return -ENODEV; 233 return -ENODEV;
234 }
209 235
210 if (wl->quirks & WLCORE_QUIRK_LEGACY_NVS) { 236 if (wl->quirks & WLCORE_QUIRK_LEGACY_NVS) {
211 struct wl1271_nvs_file *nvs = 237 struct wl1271_nvs_file *nvs =
@@ -298,7 +324,9 @@ int wlcore_boot_upload_nvs(struct wl1271 *wl)
298 wl1271_debug(DEBUG_BOOT, 324 wl1271_debug(DEBUG_BOOT,
299 "nvs burst write 0x%x: 0x%x", 325 "nvs burst write 0x%x: 0x%x",
300 dest_addr, val); 326 dest_addr, val);
301 wl1271_write32(wl, dest_addr, val); 327 ret = wlcore_write32(wl, dest_addr, val);
328 if (ret < 0)
329 return ret;
302 330
303 nvs_ptr += 4; 331 nvs_ptr += 4;
304 dest_addr += 4; 332 dest_addr += 4;
@@ -324,7 +352,9 @@ int wlcore_boot_upload_nvs(struct wl1271 *wl)
324 nvs_len -= nvs_ptr - (u8 *)wl->nvs; 352 nvs_len -= nvs_ptr - (u8 *)wl->nvs;
325 353
326 /* Now we must set the partition correctly */ 354 /* Now we must set the partition correctly */
327 wlcore_set_partition(wl, &wl->ptable[PART_WORK]); 355 ret = wlcore_set_partition(wl, &wl->ptable[PART_WORK]);
356 if (ret < 0)
357 return ret;
328 358
329 /* Copy the NVS tables to a new block to ensure alignment */ 359 /* Copy the NVS tables to a new block to ensure alignment */
330 nvs_aligned = kmemdup(nvs_ptr, nvs_len, GFP_KERNEL); 360 nvs_aligned = kmemdup(nvs_ptr, nvs_len, GFP_KERNEL);
@@ -332,11 +362,11 @@ int wlcore_boot_upload_nvs(struct wl1271 *wl)
332 return -ENOMEM; 362 return -ENOMEM;
333 363
334 /* And finally we upload the NVS tables */ 364 /* And finally we upload the NVS tables */
335 wlcore_write_data(wl, REG_CMD_MBOX_ADDRESS, 365 ret = wlcore_write_data(wl, REG_CMD_MBOX_ADDRESS, nvs_aligned, nvs_len,
336 nvs_aligned, nvs_len, false); 366 false);
337 367
338 kfree(nvs_aligned); 368 kfree(nvs_aligned);
339 return 0; 369 return ret;
340 370
341out_badnvs: 371out_badnvs:
342 wl1271_error("nvs data is malformed"); 372 wl1271_error("nvs data is malformed");
@@ -350,11 +380,17 @@ int wlcore_boot_run_firmware(struct wl1271 *wl)
350 u32 chip_id, intr; 380 u32 chip_id, intr;
351 381
352 /* Make sure we have the boot partition */ 382 /* Make sure we have the boot partition */
353 wlcore_set_partition(wl, &wl->ptable[PART_BOOT]); 383 ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
384 if (ret < 0)
385 return ret;
354 386
355 wl1271_boot_set_ecpu_ctrl(wl, ECPU_CONTROL_HALT); 387 ret = wl1271_boot_set_ecpu_ctrl(wl, ECPU_CONTROL_HALT);
388 if (ret < 0)
389 return ret;
356 390
357 chip_id = wlcore_read_reg(wl, REG_CHIP_ID_B); 391 ret = wlcore_read_reg(wl, REG_CHIP_ID_B, &chip_id);
392 if (ret < 0)
393 return ret;
358 394
359 wl1271_debug(DEBUG_BOOT, "chip id after firmware boot: 0x%x", chip_id); 395 wl1271_debug(DEBUG_BOOT, "chip id after firmware boot: 0x%x", chip_id);
360 396
@@ -367,7 +403,9 @@ int wlcore_boot_run_firmware(struct wl1271 *wl)
367 loop = 0; 403 loop = 0;
368 while (loop++ < INIT_LOOP) { 404 while (loop++ < INIT_LOOP) {
369 udelay(INIT_LOOP_DELAY); 405 udelay(INIT_LOOP_DELAY);
370 intr = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR); 406 ret = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR, &intr);
407 if (ret < 0)
408 return ret;
371 409
372 if (intr == 0xffffffff) { 410 if (intr == 0xffffffff) {
373 wl1271_error("error reading hardware complete " 411 wl1271_error("error reading hardware complete "
@@ -376,8 +414,10 @@ int wlcore_boot_run_firmware(struct wl1271 *wl)
376 } 414 }
377 /* check that ACX_INTR_INIT_COMPLETE is enabled */ 415 /* check that ACX_INTR_INIT_COMPLETE is enabled */
378 else if (intr & WL1271_ACX_INTR_INIT_COMPLETE) { 416 else if (intr & WL1271_ACX_INTR_INIT_COMPLETE) {
379 wlcore_write_reg(wl, REG_INTERRUPT_ACK, 417 ret = wlcore_write_reg(wl, REG_INTERRUPT_ACK,
380 WL1271_ACX_INTR_INIT_COMPLETE); 418 WL1271_ACX_INTR_INIT_COMPLETE);
419 if (ret < 0)
420 return ret;
381 break; 421 break;
382 } 422 }
383 } 423 }
@@ -389,20 +429,25 @@ int wlcore_boot_run_firmware(struct wl1271 *wl)
389 } 429 }
390 430
391 /* get hardware config command mail box */ 431 /* get hardware config command mail box */
392 wl->cmd_box_addr = wlcore_read_reg(wl, REG_COMMAND_MAILBOX_PTR); 432 ret = wlcore_read_reg(wl, REG_COMMAND_MAILBOX_PTR, &wl->cmd_box_addr);
433 if (ret < 0)
434 return ret;
393 435
394 wl1271_debug(DEBUG_MAILBOX, "cmd_box_addr 0x%x", wl->cmd_box_addr); 436 wl1271_debug(DEBUG_MAILBOX, "cmd_box_addr 0x%x", wl->cmd_box_addr);
395 437
396 /* get hardware config event mail box */ 438 /* get hardware config event mail box */
397 wl->mbox_ptr[0] = wlcore_read_reg(wl, REG_EVENT_MAILBOX_PTR); 439 ret = wlcore_read_reg(wl, REG_EVENT_MAILBOX_PTR, &wl->mbox_ptr[0]);
440 if (ret < 0)
441 return ret;
442
398 wl->mbox_ptr[1] = wl->mbox_ptr[0] + sizeof(struct event_mailbox); 443 wl->mbox_ptr[1] = wl->mbox_ptr[0] + sizeof(struct event_mailbox);
399 444
400 wl1271_debug(DEBUG_MAILBOX, "MBOX ptrs: 0x%x 0x%x", 445 wl1271_debug(DEBUG_MAILBOX, "MBOX ptrs: 0x%x 0x%x",
401 wl->mbox_ptr[0], wl->mbox_ptr[1]); 446 wl->mbox_ptr[0], wl->mbox_ptr[1]);
402 447
403 ret = wlcore_boot_fw_version(wl); 448 ret = wlcore_boot_static_data(wl);
404 if (ret < 0) { 449 if (ret < 0) {
405 wl1271_error("couldn't boot firmware"); 450 wl1271_error("error getting static data");
406 return ret; 451 return ret;
407 } 452 }
408 453
@@ -436,9 +481,9 @@ int wlcore_boot_run_firmware(struct wl1271 *wl)
436 } 481 }
437 482
438 /* set the working partition to its "running" mode offset */ 483 /* set the working partition to its "running" mode offset */
439 wlcore_set_partition(wl, &wl->ptable[PART_WORK]); 484 ret = wlcore_set_partition(wl, &wl->ptable[PART_WORK]);
440 485
441 /* firmware startup completed */ 486 /* firmware startup completed */
442 return 0; 487 return ret;
443} 488}
444EXPORT_SYMBOL_GPL(wlcore_boot_run_firmware); 489EXPORT_SYMBOL_GPL(wlcore_boot_run_firmware);
diff --git a/drivers/net/wireless/ti/wlcore/boot.h b/drivers/net/wireless/ti/wlcore/boot.h
index 094981dd222..a525225f990 100644
--- a/drivers/net/wireless/ti/wlcore/boot.h
+++ b/drivers/net/wireless/ti/wlcore/boot.h
@@ -40,6 +40,7 @@ struct wl1271_static_data {
40 u8 fw_version[WL1271_FW_VERSION_MAX_LEN]; 40 u8 fw_version[WL1271_FW_VERSION_MAX_LEN];
41 u32 hw_version; 41 u32 hw_version;
42 u8 tx_power_table[WL1271_NO_SUBBANDS][WL1271_NO_POWER_LEVELS]; 42 u8 tx_power_table[WL1271_NO_SUBBANDS][WL1271_NO_POWER_LEVELS];
43 u8 priv[0];
43}; 44};
44 45
45/* number of times we try to read the INIT interrupt */ 46/* number of times we try to read the INIT interrupt */
diff --git a/drivers/net/wireless/ti/wlcore/cmd.c b/drivers/net/wireless/ti/wlcore/cmd.c
index 5b128a97144..56c7a2342fd 100644
--- a/drivers/net/wireless/ti/wlcore/cmd.c
+++ b/drivers/net/wireless/ti/wlcore/cmd.c
@@ -36,6 +36,7 @@
36#include "cmd.h" 36#include "cmd.h"
37#include "event.h" 37#include "event.h"
38#include "tx.h" 38#include "tx.h"
39#include "hw_ops.h"
39 40
40#define WL1271_CMD_FAST_POLL_COUNT 50 41#define WL1271_CMD_FAST_POLL_COUNT 50
41 42
@@ -64,17 +65,24 @@ int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len,
64 WARN_ON(len % 4 != 0); 65 WARN_ON(len % 4 != 0);
65 WARN_ON(test_bit(WL1271_FLAG_IN_ELP, &wl->flags)); 66 WARN_ON(test_bit(WL1271_FLAG_IN_ELP, &wl->flags));
66 67
67 wl1271_write(wl, wl->cmd_box_addr, buf, len, false); 68 ret = wlcore_write(wl, wl->cmd_box_addr, buf, len, false);
69 if (ret < 0)
70 goto fail;
68 71
69 /* 72 /*
70 * TODO: we just need this because one bit is in a different 73 * TODO: we just need this because one bit is in a different
71 * place. Is there any better way? 74 * place. Is there any better way?
72 */ 75 */
73 wl->ops->trigger_cmd(wl, wl->cmd_box_addr, buf, len); 76 ret = wl->ops->trigger_cmd(wl, wl->cmd_box_addr, buf, len);
77 if (ret < 0)
78 goto fail;
74 79
75 timeout = jiffies + msecs_to_jiffies(WL1271_COMMAND_TIMEOUT); 80 timeout = jiffies + msecs_to_jiffies(WL1271_COMMAND_TIMEOUT);
76 81
77 intr = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR); 82 ret = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR, &intr);
83 if (ret < 0)
84 goto fail;
85
78 while (!(intr & WL1271_ACX_INTR_CMD_COMPLETE)) { 86 while (!(intr & WL1271_ACX_INTR_CMD_COMPLETE)) {
79 if (time_after(jiffies, timeout)) { 87 if (time_after(jiffies, timeout)) {
80 wl1271_error("command complete timeout"); 88 wl1271_error("command complete timeout");
@@ -88,13 +96,18 @@ int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len,
88 else 96 else
89 msleep(1); 97 msleep(1);
90 98
91 intr = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR); 99 ret = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR, &intr);
100 if (ret < 0)
101 goto fail;
92 } 102 }
93 103
94 /* read back the status code of the command */ 104 /* read back the status code of the command */
95 if (res_len == 0) 105 if (res_len == 0)
96 res_len = sizeof(struct wl1271_cmd_header); 106 res_len = sizeof(struct wl1271_cmd_header);
97 wl1271_read(wl, wl->cmd_box_addr, cmd, res_len, false); 107
108 ret = wlcore_read(wl, wl->cmd_box_addr, cmd, res_len, false);
109 if (ret < 0)
110 goto fail;
98 111
99 status = le16_to_cpu(cmd->status); 112 status = le16_to_cpu(cmd->status);
100 if (status != CMD_STATUS_SUCCESS) { 113 if (status != CMD_STATUS_SUCCESS) {
@@ -103,11 +116,14 @@ int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len,
103 goto fail; 116 goto fail;
104 } 117 }
105 118
106 wlcore_write_reg(wl, REG_INTERRUPT_ACK, WL1271_ACX_INTR_CMD_COMPLETE); 119 ret = wlcore_write_reg(wl, REG_INTERRUPT_ACK,
120 WL1271_ACX_INTR_CMD_COMPLETE);
121 if (ret < 0)
122 goto fail;
123
107 return 0; 124 return 0;
108 125
109fail: 126fail:
110 WARN_ON(1);
111 wl12xx_queue_recovery_work(wl); 127 wl12xx_queue_recovery_work(wl);
112 return ret; 128 return ret;
113} 129}
@@ -116,35 +132,45 @@ fail:
116 * Poll the mailbox event field until any of the bits in the mask is set or a 132 * Poll the mailbox event field until any of the bits in the mask is set or a
117 * timeout occurs (WL1271_EVENT_TIMEOUT in msecs) 133 * timeout occurs (WL1271_EVENT_TIMEOUT in msecs)
118 */ 134 */
119static int wl1271_cmd_wait_for_event_or_timeout(struct wl1271 *wl, u32 mask) 135static int wl1271_cmd_wait_for_event_or_timeout(struct wl1271 *wl,
136 u32 mask, bool *timeout)
120{ 137{
121 u32 *events_vector; 138 u32 *events_vector;
122 u32 event; 139 u32 event;
123 unsigned long timeout; 140 unsigned long timeout_time;
124 int ret = 0; 141 int ret = 0;
125 142
143 *timeout = false;
144
126 events_vector = kmalloc(sizeof(*events_vector), GFP_KERNEL | GFP_DMA); 145 events_vector = kmalloc(sizeof(*events_vector), GFP_KERNEL | GFP_DMA);
127 if (!events_vector) 146 if (!events_vector)
128 return -ENOMEM; 147 return -ENOMEM;
129 148
130 timeout = jiffies + msecs_to_jiffies(WL1271_EVENT_TIMEOUT); 149 timeout_time = jiffies + msecs_to_jiffies(WL1271_EVENT_TIMEOUT);
131 150
132 do { 151 do {
133 if (time_after(jiffies, timeout)) { 152 if (time_after(jiffies, timeout_time)) {
134 wl1271_debug(DEBUG_CMD, "timeout waiting for event %d", 153 wl1271_debug(DEBUG_CMD, "timeout waiting for event %d",
135 (int)mask); 154 (int)mask);
136 ret = -ETIMEDOUT; 155 *timeout = true;
137 goto out; 156 goto out;
138 } 157 }
139 158
140 msleep(1); 159 msleep(1);
141 160
142 /* read from both event fields */ 161 /* read from both event fields */
143 wl1271_read(wl, wl->mbox_ptr[0], events_vector, 162 ret = wlcore_read(wl, wl->mbox_ptr[0], events_vector,
144 sizeof(*events_vector), false); 163 sizeof(*events_vector), false);
164 if (ret < 0)
165 goto out;
166
145 event = *events_vector & mask; 167 event = *events_vector & mask;
146 wl1271_read(wl, wl->mbox_ptr[1], events_vector, 168
147 sizeof(*events_vector), false); 169 ret = wlcore_read(wl, wl->mbox_ptr[1], events_vector,
170 sizeof(*events_vector), false);
171 if (ret < 0)
172 goto out;
173
148 event |= *events_vector & mask; 174 event |= *events_vector & mask;
149 } while (!event); 175 } while (!event);
150 176
@@ -156,9 +182,10 @@ out:
156static int wl1271_cmd_wait_for_event(struct wl1271 *wl, u32 mask) 182static int wl1271_cmd_wait_for_event(struct wl1271 *wl, u32 mask)
157{ 183{
158 int ret; 184 int ret;
185 bool timeout = false;
159 186
160 ret = wl1271_cmd_wait_for_event_or_timeout(wl, mask); 187 ret = wl1271_cmd_wait_for_event_or_timeout(wl, mask, &timeout);
161 if (ret != 0) { 188 if (ret != 0 || timeout) {
162 wl12xx_queue_recovery_work(wl); 189 wl12xx_queue_recovery_work(wl);
163 return ret; 190 return ret;
164 } 191 }
@@ -291,6 +318,23 @@ static int wl12xx_get_new_session_id(struct wl1271 *wl,
291 return wlvif->session_counter; 318 return wlvif->session_counter;
292} 319}
293 320
321static u8 wlcore_get_native_channel_type(u8 nl_channel_type)
322{
323 switch (nl_channel_type) {
324 case NL80211_CHAN_NO_HT:
325 return WLCORE_CHAN_NO_HT;
326 case NL80211_CHAN_HT20:
327 return WLCORE_CHAN_HT20;
328 case NL80211_CHAN_HT40MINUS:
329 return WLCORE_CHAN_HT40MINUS;
330 case NL80211_CHAN_HT40PLUS:
331 return WLCORE_CHAN_HT40PLUS;
332 default:
333 WARN_ON(1);
334 return WLCORE_CHAN_NO_HT;
335 }
336}
337
294static int wl12xx_cmd_role_start_dev(struct wl1271 *wl, 338static int wl12xx_cmd_role_start_dev(struct wl1271 *wl,
295 struct wl12xx_vif *wlvif) 339 struct wl12xx_vif *wlvif)
296{ 340{
@@ -407,6 +451,7 @@ int wl12xx_cmd_role_start_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif)
407 memcpy(cmd->sta.ssid, wlvif->ssid, wlvif->ssid_len); 451 memcpy(cmd->sta.ssid, wlvif->ssid, wlvif->ssid_len);
408 memcpy(cmd->sta.bssid, vif->bss_conf.bssid, ETH_ALEN); 452 memcpy(cmd->sta.bssid, vif->bss_conf.bssid, ETH_ALEN);
409 cmd->sta.local_rates = cpu_to_le32(wlvif->rate_set); 453 cmd->sta.local_rates = cpu_to_le32(wlvif->rate_set);
454 cmd->channel_type = wlcore_get_native_channel_type(wlvif->channel_type);
410 455
411 if (wlvif->sta.hlid == WL12XX_INVALID_LINK_ID) { 456 if (wlvif->sta.hlid == WL12XX_INVALID_LINK_ID) {
412 ret = wl12xx_allocate_link(wl, wlvif, &wlvif->sta.hlid); 457 ret = wl12xx_allocate_link(wl, wlvif, &wlvif->sta.hlid);
@@ -482,6 +527,7 @@ int wl12xx_cmd_role_start_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif)
482 struct wl12xx_cmd_role_start *cmd; 527 struct wl12xx_cmd_role_start *cmd;
483 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif); 528 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
484 struct ieee80211_bss_conf *bss_conf = &vif->bss_conf; 529 struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
530 u32 supported_rates;
485 int ret; 531 int ret;
486 532
487 wl1271_debug(DEBUG_CMD, "cmd role start ap %d", wlvif->role_id); 533 wl1271_debug(DEBUG_CMD, "cmd role start ap %d", wlvif->role_id);
@@ -519,6 +565,7 @@ int wl12xx_cmd_role_start_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif)
519 /* FIXME: Change when adding DFS */ 565 /* FIXME: Change when adding DFS */
520 cmd->ap.reset_tsf = 1; /* By default reset AP TSF */ 566 cmd->ap.reset_tsf = 1; /* By default reset AP TSF */
521 cmd->channel = wlvif->channel; 567 cmd->channel = wlvif->channel;
568 cmd->channel_type = wlcore_get_native_channel_type(wlvif->channel_type);
522 569
523 if (!bss_conf->hidden_ssid) { 570 if (!bss_conf->hidden_ssid) {
524 /* take the SSID from the beacon for backward compatibility */ 571 /* take the SSID from the beacon for backward compatibility */
@@ -531,7 +578,13 @@ int wl12xx_cmd_role_start_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif)
531 memcpy(cmd->ap.ssid, bss_conf->ssid, bss_conf->ssid_len); 578 memcpy(cmd->ap.ssid, bss_conf->ssid, bss_conf->ssid_len);
532 } 579 }
533 580
534 cmd->ap.local_rates = cpu_to_le32(0xffffffff); 581 supported_rates = CONF_TX_AP_ENABLED_RATES | CONF_TX_MCS_RATES |
582 wlcore_hw_ap_get_mimo_wide_rate_mask(wl, wlvif);
583
584 wl1271_debug(DEBUG_CMD, "cmd role start ap with supported_rates 0x%08x",
585 supported_rates);
586
587 cmd->ap.local_rates = cpu_to_le32(supported_rates);
535 588
536 switch (wlvif->band) { 589 switch (wlvif->band) {
537 case IEEE80211_BAND_2GHZ: 590 case IEEE80211_BAND_2GHZ:
@@ -797,6 +850,7 @@ out:
797 kfree(cmd); 850 kfree(cmd);
798 return ret; 851 return ret;
799} 852}
853EXPORT_SYMBOL_GPL(wl1271_cmd_data_path);
800 854
801int wl1271_cmd_ps_mode(struct wl1271 *wl, struct wl12xx_vif *wlvif, 855int wl1271_cmd_ps_mode(struct wl1271 *wl, struct wl12xx_vif *wlvif,
802 u8 ps_mode, u16 auto_ps_timeout) 856 u8 ps_mode, u16 auto_ps_timeout)
@@ -1018,7 +1072,7 @@ out:
1018 1072
1019int wl1271_cmd_build_arp_rsp(struct wl1271 *wl, struct wl12xx_vif *wlvif) 1073int wl1271_cmd_build_arp_rsp(struct wl1271 *wl, struct wl12xx_vif *wlvif)
1020{ 1074{
1021 int ret, extra; 1075 int ret, extra = 0;
1022 u16 fc; 1076 u16 fc;
1023 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif); 1077 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
1024 struct sk_buff *skb; 1078 struct sk_buff *skb;
@@ -1057,7 +1111,8 @@ int wl1271_cmd_build_arp_rsp(struct wl1271 *wl, struct wl12xx_vif *wlvif)
1057 /* encryption space */ 1111 /* encryption space */
1058 switch (wlvif->encryption_type) { 1112 switch (wlvif->encryption_type) {
1059 case KEY_TKIP: 1113 case KEY_TKIP:
1060 extra = WL1271_EXTRA_SPACE_TKIP; 1114 if (wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE)
1115 extra = WL1271_EXTRA_SPACE_TKIP;
1061 break; 1116 break;
1062 case KEY_AES: 1117 case KEY_AES:
1063 extra = WL1271_EXTRA_SPACE_AES; 1118 extra = WL1271_EXTRA_SPACE_AES;
@@ -1346,13 +1401,18 @@ int wl12xx_cmd_add_peer(struct wl1271 *wl, struct wl12xx_vif *wlvif,
1346 1401
1347 for (i = 0; i < NUM_ACCESS_CATEGORIES_COPY; i++) 1402 for (i = 0; i < NUM_ACCESS_CATEGORIES_COPY; i++)
1348 if (sta->wme && (sta->uapsd_queues & BIT(i))) 1403 if (sta->wme && (sta->uapsd_queues & BIT(i)))
1349 cmd->psd_type[i] = WL1271_PSD_UPSD_TRIGGER; 1404 cmd->psd_type[NUM_ACCESS_CATEGORIES_COPY-1-i] =
1405 WL1271_PSD_UPSD_TRIGGER;
1350 else 1406 else
1351 cmd->psd_type[i] = WL1271_PSD_LEGACY; 1407 cmd->psd_type[NUM_ACCESS_CATEGORIES_COPY-1-i] =
1408 WL1271_PSD_LEGACY;
1409
1352 1410
1353 sta_rates = sta->supp_rates[wlvif->band]; 1411 sta_rates = sta->supp_rates[wlvif->band];
1354 if (sta->ht_cap.ht_supported) 1412 if (sta->ht_cap.ht_supported)
1355 sta_rates |= sta->ht_cap.mcs.rx_mask[0] << HW_HT_RATES_OFFSET; 1413 sta_rates |=
1414 (sta->ht_cap.mcs.rx_mask[0] << HW_HT_RATES_OFFSET) |
1415 (sta->ht_cap.mcs.rx_mask[1] << HW_MIMO_RATES_OFFSET);
1356 1416
1357 cmd->supported_rates = 1417 cmd->supported_rates =
1358 cpu_to_le32(wl1271_tx_enabled_rates_get(wl, sta_rates, 1418 cpu_to_le32(wl1271_tx_enabled_rates_get(wl, sta_rates,
@@ -1378,6 +1438,7 @@ int wl12xx_cmd_remove_peer(struct wl1271 *wl, u8 hlid)
1378{ 1438{
1379 struct wl12xx_cmd_remove_peer *cmd; 1439 struct wl12xx_cmd_remove_peer *cmd;
1380 int ret; 1440 int ret;
1441 bool timeout = false;
1381 1442
1382 wl1271_debug(DEBUG_CMD, "cmd remove peer %d", (int)hlid); 1443 wl1271_debug(DEBUG_CMD, "cmd remove peer %d", (int)hlid);
1383 1444
@@ -1398,12 +1459,16 @@ int wl12xx_cmd_remove_peer(struct wl1271 *wl, u8 hlid)
1398 goto out_free; 1459 goto out_free;
1399 } 1460 }
1400 1461
1462 ret = wl1271_cmd_wait_for_event_or_timeout(wl,
1463 PEER_REMOVE_COMPLETE_EVENT_ID,
1464 &timeout);
1401 /* 1465 /*
1402 * We are ok with a timeout here. The event is sometimes not sent 1466 * We are ok with a timeout here. The event is sometimes not sent
1403 * due to a firmware bug. 1467 * due to a firmware bug. In case of another error (like SDIO timeout)
1468 * queue a recovery.
1404 */ 1469 */
1405 wl1271_cmd_wait_for_event_or_timeout(wl, 1470 if (ret)
1406 PEER_REMOVE_COMPLETE_EVENT_ID); 1471 wl12xx_queue_recovery_work(wl);
1407 1472
1408out_free: 1473out_free:
1409 kfree(cmd); 1474 kfree(cmd);
@@ -1573,19 +1638,25 @@ out:
1573int wl12xx_roc(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 role_id) 1638int wl12xx_roc(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 role_id)
1574{ 1639{
1575 int ret = 0; 1640 int ret = 0;
1641 bool is_first_roc;
1576 1642
1577 if (WARN_ON(test_bit(role_id, wl->roc_map))) 1643 if (WARN_ON(test_bit(role_id, wl->roc_map)))
1578 return 0; 1644 return 0;
1579 1645
1646 is_first_roc = (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) >=
1647 WL12XX_MAX_ROLES);
1648
1580 ret = wl12xx_cmd_roc(wl, wlvif, role_id); 1649 ret = wl12xx_cmd_roc(wl, wlvif, role_id);
1581 if (ret < 0) 1650 if (ret < 0)
1582 goto out; 1651 goto out;
1583 1652
1584 ret = wl1271_cmd_wait_for_event(wl, 1653 if (is_first_roc) {
1585 REMAIN_ON_CHANNEL_COMPLETE_EVENT_ID); 1654 ret = wl1271_cmd_wait_for_event(wl,
1586 if (ret < 0) { 1655 REMAIN_ON_CHANNEL_COMPLETE_EVENT_ID);
1587 wl1271_error("cmd roc event completion error"); 1656 if (ret < 0) {
1588 goto out; 1657 wl1271_error("cmd roc event completion error");
1658 goto out;
1659 }
1589 } 1660 }
1590 1661
1591 __set_bit(role_id, wl->roc_map); 1662 __set_bit(role_id, wl->roc_map);
@@ -1714,7 +1785,9 @@ int wl12xx_stop_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif)
1714 return -EINVAL; 1785 return -EINVAL;
1715 1786
1716 /* flush all pending packets */ 1787 /* flush all pending packets */
1717 wl1271_tx_work_locked(wl); 1788 ret = wlcore_tx_work_locked(wl);
1789 if (ret < 0)
1790 goto out;
1718 1791
1719 if (test_bit(wlvif->dev_role_id, wl->roc_map)) { 1792 if (test_bit(wlvif->dev_role_id, wl->roc_map)) {
1720 ret = wl12xx_croc(wl, wlvif->dev_role_id); 1793 ret = wl12xx_croc(wl, wlvif->dev_role_id);
diff --git a/drivers/net/wireless/ti/wlcore/cmd.h b/drivers/net/wireless/ti/wlcore/cmd.h
index a46ae07cb77..c8a6510c72c 100644
--- a/drivers/net/wireless/ti/wlcore/cmd.h
+++ b/drivers/net/wireless/ti/wlcore/cmd.h
@@ -192,7 +192,7 @@ enum cmd_templ {
192#define WL1271_COMMAND_TIMEOUT 2000 192#define WL1271_COMMAND_TIMEOUT 2000
193#define WL1271_CMD_TEMPL_DFLT_SIZE 252 193#define WL1271_CMD_TEMPL_DFLT_SIZE 252
194#define WL1271_CMD_TEMPL_MAX_SIZE 512 194#define WL1271_CMD_TEMPL_MAX_SIZE 512
195#define WL1271_EVENT_TIMEOUT 750 195#define WL1271_EVENT_TIMEOUT 1000
196 196
197struct wl1271_cmd_header { 197struct wl1271_cmd_header {
198 __le16 id; 198 __le16 id;
@@ -266,13 +266,22 @@ enum wlcore_band {
266 WLCORE_BAND_MAX_RADIO = 0x7F, 266 WLCORE_BAND_MAX_RADIO = 0x7F,
267}; 267};
268 268
269enum wlcore_channel_type {
270 WLCORE_CHAN_NO_HT,
271 WLCORE_CHAN_HT20,
272 WLCORE_CHAN_HT40MINUS,
273 WLCORE_CHAN_HT40PLUS
274};
275
269struct wl12xx_cmd_role_start { 276struct wl12xx_cmd_role_start {
270 struct wl1271_cmd_header header; 277 struct wl1271_cmd_header header;
271 278
272 u8 role_id; 279 u8 role_id;
273 u8 band; 280 u8 band;
274 u8 channel; 281 u8 channel;
275 u8 padding; 282
283 /* enum wlcore_channel_type */
284 u8 channel_type;
276 285
277 union { 286 union {
278 struct { 287 struct {
@@ -643,4 +652,25 @@ struct wl12xx_cmd_stop_channel_switch {
643 struct wl1271_cmd_header header; 652 struct wl1271_cmd_header header;
644} __packed; 653} __packed;
645 654
655/* Used to check radio status after calibration */
656#define MAX_TLV_LENGTH 500
657#define TEST_CMD_P2G_CAL 2 /* TX BiP */
658
659struct wl1271_cmd_cal_p2g {
660 struct wl1271_cmd_header header;
661
662 struct wl1271_cmd_test_header test;
663
664 __le32 ver;
665 __le16 len;
666 u8 buf[MAX_TLV_LENGTH];
667 u8 type;
668 u8 padding;
669
670 __le16 radio_status;
671
672 u8 sub_band_mask;
673 u8 padding2;
674} __packed;
675
646#endif /* __WL1271_CMD_H__ */ 676#endif /* __WL1271_CMD_H__ */
diff --git a/drivers/net/wireless/ti/wlcore/conf.h b/drivers/net/wireless/ti/wlcore/conf.h
index fef0db4213b..d77224f2ac6 100644
--- a/drivers/net/wireless/ti/wlcore/conf.h
+++ b/drivers/net/wireless/ti/wlcore/conf.h
@@ -45,7 +45,15 @@ enum {
45 CONF_HW_BIT_RATE_MCS_4 = BIT(17), 45 CONF_HW_BIT_RATE_MCS_4 = BIT(17),
46 CONF_HW_BIT_RATE_MCS_5 = BIT(18), 46 CONF_HW_BIT_RATE_MCS_5 = BIT(18),
47 CONF_HW_BIT_RATE_MCS_6 = BIT(19), 47 CONF_HW_BIT_RATE_MCS_6 = BIT(19),
48 CONF_HW_BIT_RATE_MCS_7 = BIT(20) 48 CONF_HW_BIT_RATE_MCS_7 = BIT(20),
49 CONF_HW_BIT_RATE_MCS_8 = BIT(21),
50 CONF_HW_BIT_RATE_MCS_9 = BIT(22),
51 CONF_HW_BIT_RATE_MCS_10 = BIT(23),
52 CONF_HW_BIT_RATE_MCS_11 = BIT(24),
53 CONF_HW_BIT_RATE_MCS_12 = BIT(25),
54 CONF_HW_BIT_RATE_MCS_13 = BIT(26),
55 CONF_HW_BIT_RATE_MCS_14 = BIT(27),
56 CONF_HW_BIT_RATE_MCS_15 = BIT(28),
49}; 57};
50 58
51enum { 59enum {
@@ -310,7 +318,7 @@ enum {
310struct conf_sg_settings { 318struct conf_sg_settings {
311 u32 params[CONF_SG_PARAMS_MAX]; 319 u32 params[CONF_SG_PARAMS_MAX];
312 u8 state; 320 u8 state;
313}; 321} __packed;
314 322
315enum conf_rx_queue_type { 323enum conf_rx_queue_type {
316 CONF_RX_QUEUE_TYPE_LOW_PRIORITY, /* All except the high priority */ 324 CONF_RX_QUEUE_TYPE_LOW_PRIORITY, /* All except the high priority */
@@ -394,7 +402,7 @@ struct conf_rx_settings {
394 * Range: RX_QUEUE_TYPE_RX_LOW_PRIORITY, RX_QUEUE_TYPE_RX_HIGH_PRIORITY, 402 * Range: RX_QUEUE_TYPE_RX_LOW_PRIORITY, RX_QUEUE_TYPE_RX_HIGH_PRIORITY,
395 */ 403 */
396 u8 queue_type; 404 u8 queue_type;
397}; 405} __packed;
398 406
399#define CONF_TX_MAX_RATE_CLASSES 10 407#define CONF_TX_MAX_RATE_CLASSES 10
400 408
@@ -435,6 +443,12 @@ struct conf_rx_settings {
435 CONF_HW_BIT_RATE_MCS_5 | CONF_HW_BIT_RATE_MCS_6 | \ 443 CONF_HW_BIT_RATE_MCS_5 | CONF_HW_BIT_RATE_MCS_6 | \
436 CONF_HW_BIT_RATE_MCS_7) 444 CONF_HW_BIT_RATE_MCS_7)
437 445
446#define CONF_TX_MIMO_RATES (CONF_HW_BIT_RATE_MCS_8 | \
447 CONF_HW_BIT_RATE_MCS_9 | CONF_HW_BIT_RATE_MCS_10 | \
448 CONF_HW_BIT_RATE_MCS_11 | CONF_HW_BIT_RATE_MCS_12 | \
449 CONF_HW_BIT_RATE_MCS_13 | CONF_HW_BIT_RATE_MCS_14 | \
450 CONF_HW_BIT_RATE_MCS_15)
451
438/* 452/*
439 * Default rates for management traffic when operating in AP mode. This 453 * Default rates for management traffic when operating in AP mode. This
440 * should be configured according to the basic rate set of the AP 454 * should be configured according to the basic rate set of the AP
@@ -487,7 +501,7 @@ struct conf_tx_rate_class {
487 * the policy (0 - long preamble, 1 - short preamble. 501 * the policy (0 - long preamble, 1 - short preamble.
488 */ 502 */
489 u8 aflags; 503 u8 aflags;
490}; 504} __packed;
491 505
492#define CONF_TX_MAX_AC_COUNT 4 506#define CONF_TX_MAX_AC_COUNT 4
493 507
@@ -504,7 +518,7 @@ enum conf_tx_ac {
504 CONF_TX_AC_VI = 2, /* video */ 518 CONF_TX_AC_VI = 2, /* video */
505 CONF_TX_AC_VO = 3, /* voice */ 519 CONF_TX_AC_VO = 3, /* voice */
506 CONF_TX_AC_CTS2SELF = 4, /* fictitious AC, follows AC_VO */ 520 CONF_TX_AC_CTS2SELF = 4, /* fictitious AC, follows AC_VO */
507 CONF_TX_AC_ANY_TID = 0x1f 521 CONF_TX_AC_ANY_TID = 0xff
508}; 522};
509 523
510struct conf_tx_ac_category { 524struct conf_tx_ac_category {
@@ -544,7 +558,7 @@ struct conf_tx_ac_category {
544 * Range: u16 558 * Range: u16
545 */ 559 */
546 u16 tx_op_limit; 560 u16 tx_op_limit;
547}; 561} __packed;
548 562
549#define CONF_TX_MAX_TID_COUNT 8 563#define CONF_TX_MAX_TID_COUNT 8
550 564
@@ -578,7 +592,7 @@ struct conf_tx_tid {
578 u8 ps_scheme; 592 u8 ps_scheme;
579 u8 ack_policy; 593 u8 ack_policy;
580 u32 apsd_conf[2]; 594 u32 apsd_conf[2];
581}; 595} __packed;
582 596
583struct conf_tx_settings { 597struct conf_tx_settings {
584 /* 598 /*
@@ -664,7 +678,7 @@ struct conf_tx_settings {
664 678
665 /* Time in ms for Tx watchdog timer to expire */ 679 /* Time in ms for Tx watchdog timer to expire */
666 u32 tx_watchdog_timeout; 680 u32 tx_watchdog_timeout;
667}; 681} __packed;
668 682
669enum { 683enum {
670 CONF_WAKE_UP_EVENT_BEACON = 0x01, /* Wake on every Beacon*/ 684 CONF_WAKE_UP_EVENT_BEACON = 0x01, /* Wake on every Beacon*/
@@ -711,7 +725,7 @@ struct conf_bcn_filt_rule {
711 * Version for the vendor specifie IE (221) 725 * Version for the vendor specifie IE (221)
712 */ 726 */
713 u8 version[CONF_BCN_IE_VER_LEN]; 727 u8 version[CONF_BCN_IE_VER_LEN];
714}; 728} __packed;
715 729
716#define CONF_MAX_RSSI_SNR_TRIGGERS 8 730#define CONF_MAX_RSSI_SNR_TRIGGERS 8
717 731
@@ -762,7 +776,7 @@ struct conf_sig_weights {
762 * Range: u8 776 * Range: u8
763 */ 777 */
764 u8 snr_pkt_avg_weight; 778 u8 snr_pkt_avg_weight;
765}; 779} __packed;
766 780
767enum conf_bcn_filt_mode { 781enum conf_bcn_filt_mode {
768 CONF_BCN_FILT_MODE_DISABLED = 0, 782 CONF_BCN_FILT_MODE_DISABLED = 0,
@@ -810,7 +824,7 @@ struct conf_conn_settings {
810 * 824 *
811 * Range: CONF_BCN_FILT_MODE_* 825 * Range: CONF_BCN_FILT_MODE_*
812 */ 826 */
813 enum conf_bcn_filt_mode bcn_filt_mode; 827 u8 bcn_filt_mode;
814 828
815 /* 829 /*
816 * Configure Beacon filter pass-thru rules. 830 * Configure Beacon filter pass-thru rules.
@@ -937,7 +951,13 @@ struct conf_conn_settings {
937 * Range: u16 951 * Range: u16
938 */ 952 */
939 u8 max_listen_interval; 953 u8 max_listen_interval;
940}; 954
955 /*
956 * Default sleep authorization for a new STA interface. This determines
957 * whether we can go to ELP.
958 */
959 u8 sta_sleep_auth;
960} __packed;
941 961
942enum { 962enum {
943 CONF_REF_CLK_19_2_E, 963 CONF_REF_CLK_19_2_E,
@@ -965,6 +985,11 @@ struct conf_itrim_settings {
965 985
966 /* moderation timeout in microsecs from the last TX */ 986 /* moderation timeout in microsecs from the last TX */
967 u32 timeout; 987 u32 timeout;
988} __packed;
989
990enum conf_fast_wakeup {
991 CONF_FAST_WAKEUP_ENABLE,
992 CONF_FAST_WAKEUP_DISABLE,
968}; 993};
969 994
970struct conf_pm_config_settings { 995struct conf_pm_config_settings {
@@ -978,10 +1003,10 @@ struct conf_pm_config_settings {
978 /* 1003 /*
979 * Host fast wakeup support 1004 * Host fast wakeup support
980 * 1005 *
981 * Range: true, false 1006 * Range: enum conf_fast_wakeup
982 */ 1007 */
983 bool host_fast_wakeup_support; 1008 u8 host_fast_wakeup_support;
984}; 1009} __packed;
985 1010
986struct conf_roam_trigger_settings { 1011struct conf_roam_trigger_settings {
987 /* 1012 /*
@@ -1018,7 +1043,7 @@ struct conf_roam_trigger_settings {
1018 * Range: 0 - 255 1043 * Range: 0 - 255
1019 */ 1044 */
1020 u8 avg_weight_snr_data; 1045 u8 avg_weight_snr_data;
1021}; 1046} __packed;
1022 1047
1023struct conf_scan_settings { 1048struct conf_scan_settings {
1024 /* 1049 /*
@@ -1064,7 +1089,7 @@ struct conf_scan_settings {
1064 * Range: u32 Microsecs 1089 * Range: u32 Microsecs
1065 */ 1090 */
1066 u32 split_scan_timeout; 1091 u32 split_scan_timeout;
1067}; 1092} __packed;
1068 1093
1069struct conf_sched_scan_settings { 1094struct conf_sched_scan_settings {
1070 /* 1095 /*
@@ -1102,7 +1127,7 @@ struct conf_sched_scan_settings {
1102 1127
1103 /* SNR threshold to be used for filtering */ 1128 /* SNR threshold to be used for filtering */
1104 s8 snr_threshold; 1129 s8 snr_threshold;
1105}; 1130} __packed;
1106 1131
1107struct conf_ht_setting { 1132struct conf_ht_setting {
1108 u8 rx_ba_win_size; 1133 u8 rx_ba_win_size;
@@ -1111,7 +1136,7 @@ struct conf_ht_setting {
1111 1136
1112 /* bitmap of enabled TIDs for TX BA sessions */ 1137 /* bitmap of enabled TIDs for TX BA sessions */
1113 u8 tx_ba_tid_bitmap; 1138 u8 tx_ba_tid_bitmap;
1114}; 1139} __packed;
1115 1140
1116struct conf_memory_settings { 1141struct conf_memory_settings {
1117 /* Number of stations supported in IBSS mode */ 1142 /* Number of stations supported in IBSS mode */
@@ -1151,7 +1176,7 @@ struct conf_memory_settings {
1151 * Range: 0-120 1176 * Range: 0-120
1152 */ 1177 */
1153 u8 tx_min; 1178 u8 tx_min;
1154}; 1179} __packed;
1155 1180
1156struct conf_fm_coex { 1181struct conf_fm_coex {
1157 u8 enable; 1182 u8 enable;
@@ -1164,7 +1189,7 @@ struct conf_fm_coex {
1164 u16 ldo_stabilization_time; 1189 u16 ldo_stabilization_time;
1165 u8 fm_disturbed_band_margin; 1190 u8 fm_disturbed_band_margin;
1166 u8 swallow_clk_diff; 1191 u8 swallow_clk_diff;
1167}; 1192} __packed;
1168 1193
1169struct conf_rx_streaming_settings { 1194struct conf_rx_streaming_settings {
1170 /* 1195 /*
@@ -1193,7 +1218,7 @@ struct conf_rx_streaming_settings {
1193 * enable rx streaming also when there is no coex activity 1218 * enable rx streaming also when there is no coex activity
1194 */ 1219 */
1195 u8 always; 1220 u8 always;
1196}; 1221} __packed;
1197 1222
1198struct conf_fwlog { 1223struct conf_fwlog {
1199 /* Continuous or on-demand */ 1224 /* Continuous or on-demand */
@@ -1217,7 +1242,7 @@ struct conf_fwlog {
1217 1242
1218 /* Regulates the frequency of log messages */ 1243 /* Regulates the frequency of log messages */
1219 u8 threshold; 1244 u8 threshold;
1220}; 1245} __packed;
1221 1246
1222#define ACX_RATE_MGMT_NUM_OF_RATES 13 1247#define ACX_RATE_MGMT_NUM_OF_RATES 13
1223struct conf_rate_policy_settings { 1248struct conf_rate_policy_settings {
@@ -1236,7 +1261,7 @@ struct conf_rate_policy_settings {
1236 u8 rate_check_up; 1261 u8 rate_check_up;
1237 u8 rate_check_down; 1262 u8 rate_check_down;
1238 u8 rate_retry_policy[ACX_RATE_MGMT_NUM_OF_RATES]; 1263 u8 rate_retry_policy[ACX_RATE_MGMT_NUM_OF_RATES];
1239}; 1264} __packed;
1240 1265
1241struct conf_hangover_settings { 1266struct conf_hangover_settings {
1242 u32 recover_time; 1267 u32 recover_time;
@@ -1250,7 +1275,23 @@ struct conf_hangover_settings {
1250 u8 quiet_time; 1275 u8 quiet_time;
1251 u8 increase_time; 1276 u8 increase_time;
1252 u8 window_size; 1277 u8 window_size;
1253}; 1278} __packed;
1279
1280/*
1281 * The conf version consists of 4 bytes. The two MSB are the wlcore
1282 * version, the two LSB are the lower driver's private conf
1283 * version.
1284 */
1285#define WLCORE_CONF_VERSION (0x0002 << 16)
1286#define WLCORE_CONF_MASK 0xffff0000
1287#define WLCORE_CONF_SIZE (sizeof(struct wlcore_conf_header) + \
1288 sizeof(struct wlcore_conf))
1289
1290struct wlcore_conf_header {
1291 __le32 magic;
1292 __le32 version;
1293 __le32 checksum;
1294} __packed;
1254 1295
1255struct wlcore_conf { 1296struct wlcore_conf {
1256 struct conf_sg_settings sg; 1297 struct conf_sg_settings sg;
@@ -1269,6 +1310,12 @@ struct wlcore_conf {
1269 struct conf_fwlog fwlog; 1310 struct conf_fwlog fwlog;
1270 struct conf_rate_policy_settings rate; 1311 struct conf_rate_policy_settings rate;
1271 struct conf_hangover_settings hangover; 1312 struct conf_hangover_settings hangover;
1272}; 1313} __packed;
1314
1315struct wlcore_conf_file {
1316 struct wlcore_conf_header header;
1317 struct wlcore_conf core;
1318 u8 priv[0];
1319} __packed;
1273 1320
1274#endif 1321#endif
diff --git a/drivers/net/wireless/ti/wlcore/debugfs.c b/drivers/net/wireless/ti/wlcore/debugfs.c
index d5aea1ff5ad..80dbc5304fa 100644
--- a/drivers/net/wireless/ti/wlcore/debugfs.c
+++ b/drivers/net/wireless/ti/wlcore/debugfs.c
@@ -25,6 +25,7 @@
25 25
26#include <linux/skbuff.h> 26#include <linux/skbuff.h>
27#include <linux/slab.h> 27#include <linux/slab.h>
28#include <linux/module.h>
28 29
29#include "wlcore.h" 30#include "wlcore.h"
30#include "debug.h" 31#include "debug.h"
@@ -32,14 +33,16 @@
32#include "ps.h" 33#include "ps.h"
33#include "io.h" 34#include "io.h"
34#include "tx.h" 35#include "tx.h"
36#include "hw_ops.h"
35 37
36/* ms */ 38/* ms */
37#define WL1271_DEBUGFS_STATS_LIFETIME 1000 39#define WL1271_DEBUGFS_STATS_LIFETIME 1000
38 40
41#define WLCORE_MAX_BLOCK_SIZE ((size_t)(4*PAGE_SIZE))
42
39/* debugfs macros idea from mac80211 */ 43/* debugfs macros idea from mac80211 */
40#define DEBUGFS_FORMAT_BUFFER_SIZE 100 44int wl1271_format_buffer(char __user *userbuf, size_t count,
41static int wl1271_format_buffer(char __user *userbuf, size_t count, 45 loff_t *ppos, char *fmt, ...)
42 loff_t *ppos, char *fmt, ...)
43{ 46{
44 va_list args; 47 va_list args;
45 char buf[DEBUGFS_FORMAT_BUFFER_SIZE]; 48 char buf[DEBUGFS_FORMAT_BUFFER_SIZE];
@@ -51,59 +54,9 @@ static int wl1271_format_buffer(char __user *userbuf, size_t count,
51 54
52 return simple_read_from_buffer(userbuf, count, ppos, buf, res); 55 return simple_read_from_buffer(userbuf, count, ppos, buf, res);
53} 56}
57EXPORT_SYMBOL_GPL(wl1271_format_buffer);
54 58
55#define DEBUGFS_READONLY_FILE(name, fmt, value...) \ 59void wl1271_debugfs_update_stats(struct wl1271 *wl)
56static ssize_t name## _read(struct file *file, char __user *userbuf, \
57 size_t count, loff_t *ppos) \
58{ \
59 struct wl1271 *wl = file->private_data; \
60 return wl1271_format_buffer(userbuf, count, ppos, \
61 fmt "\n", ##value); \
62} \
63 \
64static const struct file_operations name## _ops = { \
65 .read = name## _read, \
66 .open = simple_open, \
67 .llseek = generic_file_llseek, \
68};
69
70#define DEBUGFS_ADD(name, parent) \
71 entry = debugfs_create_file(#name, 0400, parent, \
72 wl, &name## _ops); \
73 if (!entry || IS_ERR(entry)) \
74 goto err; \
75
76#define DEBUGFS_ADD_PREFIX(prefix, name, parent) \
77 do { \
78 entry = debugfs_create_file(#name, 0400, parent, \
79 wl, &prefix## _## name## _ops); \
80 if (!entry || IS_ERR(entry)) \
81 goto err; \
82 } while (0);
83
84#define DEBUGFS_FWSTATS_FILE(sub, name, fmt) \
85static ssize_t sub## _ ##name## _read(struct file *file, \
86 char __user *userbuf, \
87 size_t count, loff_t *ppos) \
88{ \
89 struct wl1271 *wl = file->private_data; \
90 \
91 wl1271_debugfs_update_stats(wl); \
92 \
93 return wl1271_format_buffer(userbuf, count, ppos, fmt "\n", \
94 wl->stats.fw_stats->sub.name); \
95} \
96 \
97static const struct file_operations sub## _ ##name## _ops = { \
98 .read = sub## _ ##name## _read, \
99 .open = simple_open, \
100 .llseek = generic_file_llseek, \
101};
102
103#define DEBUGFS_FWSTATS_ADD(sub, name) \
104 DEBUGFS_ADD(sub## _ ##name, stats)
105
106static void wl1271_debugfs_update_stats(struct wl1271 *wl)
107{ 60{
108 int ret; 61 int ret;
109 62
@@ -125,97 +78,7 @@ static void wl1271_debugfs_update_stats(struct wl1271 *wl)
125out: 78out:
126 mutex_unlock(&wl->mutex); 79 mutex_unlock(&wl->mutex);
127} 80}
128 81EXPORT_SYMBOL_GPL(wl1271_debugfs_update_stats);
129DEBUGFS_FWSTATS_FILE(tx, internal_desc_overflow, "%u");
130
131DEBUGFS_FWSTATS_FILE(rx, out_of_mem, "%u");
132DEBUGFS_FWSTATS_FILE(rx, hdr_overflow, "%u");
133DEBUGFS_FWSTATS_FILE(rx, hw_stuck, "%u");
134DEBUGFS_FWSTATS_FILE(rx, dropped, "%u");
135DEBUGFS_FWSTATS_FILE(rx, fcs_err, "%u");
136DEBUGFS_FWSTATS_FILE(rx, xfr_hint_trig, "%u");
137DEBUGFS_FWSTATS_FILE(rx, path_reset, "%u");
138DEBUGFS_FWSTATS_FILE(rx, reset_counter, "%u");
139
140DEBUGFS_FWSTATS_FILE(dma, rx_requested, "%u");
141DEBUGFS_FWSTATS_FILE(dma, rx_errors, "%u");
142DEBUGFS_FWSTATS_FILE(dma, tx_requested, "%u");
143DEBUGFS_FWSTATS_FILE(dma, tx_errors, "%u");
144
145DEBUGFS_FWSTATS_FILE(isr, cmd_cmplt, "%u");
146DEBUGFS_FWSTATS_FILE(isr, fiqs, "%u");
147DEBUGFS_FWSTATS_FILE(isr, rx_headers, "%u");
148DEBUGFS_FWSTATS_FILE(isr, rx_mem_overflow, "%u");
149DEBUGFS_FWSTATS_FILE(isr, rx_rdys, "%u");
150DEBUGFS_FWSTATS_FILE(isr, irqs, "%u");
151DEBUGFS_FWSTATS_FILE(isr, tx_procs, "%u");
152DEBUGFS_FWSTATS_FILE(isr, decrypt_done, "%u");
153DEBUGFS_FWSTATS_FILE(isr, dma0_done, "%u");
154DEBUGFS_FWSTATS_FILE(isr, dma1_done, "%u");
155DEBUGFS_FWSTATS_FILE(isr, tx_exch_complete, "%u");
156DEBUGFS_FWSTATS_FILE(isr, commands, "%u");
157DEBUGFS_FWSTATS_FILE(isr, rx_procs, "%u");
158DEBUGFS_FWSTATS_FILE(isr, hw_pm_mode_changes, "%u");
159DEBUGFS_FWSTATS_FILE(isr, host_acknowledges, "%u");
160DEBUGFS_FWSTATS_FILE(isr, pci_pm, "%u");
161DEBUGFS_FWSTATS_FILE(isr, wakeups, "%u");
162DEBUGFS_FWSTATS_FILE(isr, low_rssi, "%u");
163
164DEBUGFS_FWSTATS_FILE(wep, addr_key_count, "%u");
165DEBUGFS_FWSTATS_FILE(wep, default_key_count, "%u");
166/* skipping wep.reserved */
167DEBUGFS_FWSTATS_FILE(wep, key_not_found, "%u");
168DEBUGFS_FWSTATS_FILE(wep, decrypt_fail, "%u");
169DEBUGFS_FWSTATS_FILE(wep, packets, "%u");
170DEBUGFS_FWSTATS_FILE(wep, interrupt, "%u");
171
172DEBUGFS_FWSTATS_FILE(pwr, ps_enter, "%u");
173DEBUGFS_FWSTATS_FILE(pwr, elp_enter, "%u");
174DEBUGFS_FWSTATS_FILE(pwr, missing_bcns, "%u");
175DEBUGFS_FWSTATS_FILE(pwr, wake_on_host, "%u");
176DEBUGFS_FWSTATS_FILE(pwr, wake_on_timer_exp, "%u");
177DEBUGFS_FWSTATS_FILE(pwr, tx_with_ps, "%u");
178DEBUGFS_FWSTATS_FILE(pwr, tx_without_ps, "%u");
179DEBUGFS_FWSTATS_FILE(pwr, rcvd_beacons, "%u");
180DEBUGFS_FWSTATS_FILE(pwr, power_save_off, "%u");
181DEBUGFS_FWSTATS_FILE(pwr, enable_ps, "%u");
182DEBUGFS_FWSTATS_FILE(pwr, disable_ps, "%u");
183DEBUGFS_FWSTATS_FILE(pwr, fix_tsf_ps, "%u");
184/* skipping cont_miss_bcns_spread for now */
185DEBUGFS_FWSTATS_FILE(pwr, rcvd_awake_beacons, "%u");
186
187DEBUGFS_FWSTATS_FILE(mic, rx_pkts, "%u");
188DEBUGFS_FWSTATS_FILE(mic, calc_failure, "%u");
189
190DEBUGFS_FWSTATS_FILE(aes, encrypt_fail, "%u");
191DEBUGFS_FWSTATS_FILE(aes, decrypt_fail, "%u");
192DEBUGFS_FWSTATS_FILE(aes, encrypt_packets, "%u");
193DEBUGFS_FWSTATS_FILE(aes, decrypt_packets, "%u");
194DEBUGFS_FWSTATS_FILE(aes, encrypt_interrupt, "%u");
195DEBUGFS_FWSTATS_FILE(aes, decrypt_interrupt, "%u");
196
197DEBUGFS_FWSTATS_FILE(event, heart_beat, "%u");
198DEBUGFS_FWSTATS_FILE(event, calibration, "%u");
199DEBUGFS_FWSTATS_FILE(event, rx_mismatch, "%u");
200DEBUGFS_FWSTATS_FILE(event, rx_mem_empty, "%u");
201DEBUGFS_FWSTATS_FILE(event, rx_pool, "%u");
202DEBUGFS_FWSTATS_FILE(event, oom_late, "%u");
203DEBUGFS_FWSTATS_FILE(event, phy_transmit_error, "%u");
204DEBUGFS_FWSTATS_FILE(event, tx_stuck, "%u");
205
206DEBUGFS_FWSTATS_FILE(ps, pspoll_timeouts, "%u");
207DEBUGFS_FWSTATS_FILE(ps, upsd_timeouts, "%u");
208DEBUGFS_FWSTATS_FILE(ps, upsd_max_sptime, "%u");
209DEBUGFS_FWSTATS_FILE(ps, upsd_max_apturn, "%u");
210DEBUGFS_FWSTATS_FILE(ps, pspoll_max_apturn, "%u");
211DEBUGFS_FWSTATS_FILE(ps, pspoll_utilization, "%u");
212DEBUGFS_FWSTATS_FILE(ps, upsd_utilization, "%u");
213
214DEBUGFS_FWSTATS_FILE(rxpipe, rx_prep_beacon_drop, "%u");
215DEBUGFS_FWSTATS_FILE(rxpipe, descr_host_int_trig_rx_data, "%u");
216DEBUGFS_FWSTATS_FILE(rxpipe, beacon_buffer_thres_host_int_trig_rx_data, "%u");
217DEBUGFS_FWSTATS_FILE(rxpipe, missed_beacon_host_int_trig_rx_data, "%u");
218DEBUGFS_FWSTATS_FILE(rxpipe, tx_xfr_host_int_trig_rx_data, "%u");
219 82
220DEBUGFS_READONLY_FILE(retry_count, "%u", wl->stats.retry_count); 83DEBUGFS_READONLY_FILE(retry_count, "%u", wl->stats.retry_count);
221DEBUGFS_READONLY_FILE(excessive_retries, "%u", 84DEBUGFS_READONLY_FILE(excessive_retries, "%u",
@@ -241,6 +104,89 @@ static const struct file_operations tx_queue_len_ops = {
241 .llseek = default_llseek, 104 .llseek = default_llseek,
242}; 105};
243 106
107static void chip_op_handler(struct wl1271 *wl, unsigned long value,
108 void *arg)
109{
110 int ret;
111 int (*chip_op) (struct wl1271 *wl);
112
113 if (!arg) {
114 wl1271_warning("debugfs chip_op_handler with no callback");
115 return;
116 }
117
118 ret = wl1271_ps_elp_wakeup(wl);
119 if (ret < 0)
120 return;
121
122 chip_op = arg;
123 chip_op(wl);
124
125 wl1271_ps_elp_sleep(wl);
126}
127
128
129static inline void no_write_handler(struct wl1271 *wl,
130 unsigned long value,
131 unsigned long param)
132{
133}
134
135#define WL12XX_CONF_DEBUGFS(param, conf_sub_struct, \
136 min_val, max_val, write_handler_locked, \
137 write_handler_arg) \
138 static ssize_t param##_read(struct file *file, \
139 char __user *user_buf, \
140 size_t count, loff_t *ppos) \
141 { \
142 struct wl1271 *wl = file->private_data; \
143 return wl1271_format_buffer(user_buf, count, \
144 ppos, "%d\n", \
145 wl->conf.conf_sub_struct.param); \
146 } \
147 \
148 static ssize_t param##_write(struct file *file, \
149 const char __user *user_buf, \
150 size_t count, loff_t *ppos) \
151 { \
152 struct wl1271 *wl = file->private_data; \
153 unsigned long value; \
154 int ret; \
155 \
156 ret = kstrtoul_from_user(user_buf, count, 10, &value); \
157 if (ret < 0) { \
158 wl1271_warning("illegal value for " #param); \
159 return -EINVAL; \
160 } \
161 \
162 if (value < min_val || value > max_val) { \
163 wl1271_warning(#param " is not in valid range"); \
164 return -ERANGE; \
165 } \
166 \
167 mutex_lock(&wl->mutex); \
168 wl->conf.conf_sub_struct.param = value; \
169 \
170 write_handler_locked(wl, value, write_handler_arg); \
171 \
172 mutex_unlock(&wl->mutex); \
173 return count; \
174 } \
175 \
176 static const struct file_operations param##_ops = { \
177 .read = param##_read, \
178 .write = param##_write, \
179 .open = simple_open, \
180 .llseek = default_llseek, \
181 };
182
183WL12XX_CONF_DEBUGFS(irq_pkt_threshold, rx, 0, 65535,
184 chip_op_handler, wl1271_acx_init_rx_interrupt)
185WL12XX_CONF_DEBUGFS(irq_blk_threshold, rx, 0, 65535,
186 chip_op_handler, wl1271_acx_init_rx_interrupt)
187WL12XX_CONF_DEBUGFS(irq_timeout, rx, 0, 100,
188 chip_op_handler, wl1271_acx_init_rx_interrupt)
189
244static ssize_t gpio_power_read(struct file *file, char __user *user_buf, 190static ssize_t gpio_power_read(struct file *file, char __user *user_buf,
245 size_t count, loff_t *ppos) 191 size_t count, loff_t *ppos)
246{ 192{
@@ -535,8 +481,7 @@ static ssize_t driver_state_read(struct file *file, char __user *user_buf,
535 DRIVER_STATE_PRINT_LHEX(ap_ps_map); 481 DRIVER_STATE_PRINT_LHEX(ap_ps_map);
536 DRIVER_STATE_PRINT_HEX(quirks); 482 DRIVER_STATE_PRINT_HEX(quirks);
537 DRIVER_STATE_PRINT_HEX(irq); 483 DRIVER_STATE_PRINT_HEX(irq);
538 DRIVER_STATE_PRINT_HEX(ref_clock); 484 /* TODO: ref_clock and tcxo_clock were moved to wl12xx priv */
539 DRIVER_STATE_PRINT_HEX(tcxo_clock);
540 DRIVER_STATE_PRINT_HEX(hw_pg_ver); 485 DRIVER_STATE_PRINT_HEX(hw_pg_ver);
541 DRIVER_STATE_PRINT_HEX(platform_quirks); 486 DRIVER_STATE_PRINT_HEX(platform_quirks);
542 DRIVER_STATE_PRINT_HEX(chip.id); 487 DRIVER_STATE_PRINT_HEX(chip.id);
@@ -647,7 +592,6 @@ static ssize_t vifs_state_read(struct file *file, char __user *user_buf,
647 VIF_STATE_PRINT_INT(last_rssi_event); 592 VIF_STATE_PRINT_INT(last_rssi_event);
648 VIF_STATE_PRINT_INT(ba_support); 593 VIF_STATE_PRINT_INT(ba_support);
649 VIF_STATE_PRINT_INT(ba_allowed); 594 VIF_STATE_PRINT_INT(ba_allowed);
650 VIF_STATE_PRINT_INT(is_gem);
651 VIF_STATE_PRINT_LLHEX(tx_security_seq); 595 VIF_STATE_PRINT_LLHEX(tx_security_seq);
652 VIF_STATE_PRINT_INT(tx_security_last_seq_lsb); 596 VIF_STATE_PRINT_INT(tx_security_last_seq_lsb);
653 } 597 }
@@ -1002,108 +946,281 @@ static const struct file_operations beacon_filtering_ops = {
1002 .llseek = default_llseek, 946 .llseek = default_llseek,
1003}; 947};
1004 948
1005static int wl1271_debugfs_add_files(struct wl1271 *wl, 949static ssize_t fw_stats_raw_read(struct file *file,
1006 struct dentry *rootdir) 950 char __user *userbuf,
951 size_t count, loff_t *ppos)
1007{ 952{
1008 int ret = 0; 953 struct wl1271 *wl = file->private_data;
1009 struct dentry *entry, *stats, *streaming;
1010 954
1011 stats = debugfs_create_dir("fw-statistics", rootdir); 955 wl1271_debugfs_update_stats(wl);
1012 if (!stats || IS_ERR(stats)) { 956
1013 entry = stats; 957 return simple_read_from_buffer(userbuf, count, ppos,
1014 goto err; 958 wl->stats.fw_stats,
959 wl->stats.fw_stats_len);
960}
961
962static const struct file_operations fw_stats_raw_ops = {
963 .read = fw_stats_raw_read,
964 .open = simple_open,
965 .llseek = default_llseek,
966};
967
968static ssize_t sleep_auth_read(struct file *file, char __user *user_buf,
969 size_t count, loff_t *ppos)
970{
971 struct wl1271 *wl = file->private_data;
972
973 return wl1271_format_buffer(user_buf, count,
974 ppos, "%d\n",
975 wl->sleep_auth);
976}
977
978static ssize_t sleep_auth_write(struct file *file,
979 const char __user *user_buf,
980 size_t count, loff_t *ppos)
981{
982 struct wl1271 *wl = file->private_data;
983 unsigned long value;
984 int ret;
985
986 ret = kstrtoul_from_user(user_buf, count, 0, &value);
987 if (ret < 0) {
988 wl1271_warning("illegal value in sleep_auth");
989 return -EINVAL;
990 }
991
992 if (value < 0 || value > WL1271_PSM_MAX) {
993 wl1271_warning("sleep_auth must be between 0 and %d",
994 WL1271_PSM_MAX);
995 return -ERANGE;
996 }
997
998 mutex_lock(&wl->mutex);
999
1000 wl->conf.conn.sta_sleep_auth = value;
1001
1002 if (wl->state == WL1271_STATE_OFF) {
1003 /* this will show up on "read" in case we are off */
1004 wl->sleep_auth = value;
1005 goto out;
1015 } 1006 }
1016 1007
1017 DEBUGFS_FWSTATS_ADD(tx, internal_desc_overflow); 1008 ret = wl1271_ps_elp_wakeup(wl);
1018 1009 if (ret < 0)
1019 DEBUGFS_FWSTATS_ADD(rx, out_of_mem); 1010 goto out;
1020 DEBUGFS_FWSTATS_ADD(rx, hdr_overflow); 1011
1021 DEBUGFS_FWSTATS_ADD(rx, hw_stuck); 1012 ret = wl1271_acx_sleep_auth(wl, value);
1022 DEBUGFS_FWSTATS_ADD(rx, dropped); 1013 if (ret < 0)
1023 DEBUGFS_FWSTATS_ADD(rx, fcs_err); 1014 goto out_sleep;
1024 DEBUGFS_FWSTATS_ADD(rx, xfr_hint_trig); 1015
1025 DEBUGFS_FWSTATS_ADD(rx, path_reset); 1016out_sleep:
1026 DEBUGFS_FWSTATS_ADD(rx, reset_counter); 1017 wl1271_ps_elp_sleep(wl);
1027 1018out:
1028 DEBUGFS_FWSTATS_ADD(dma, rx_requested); 1019 mutex_unlock(&wl->mutex);
1029 DEBUGFS_FWSTATS_ADD(dma, rx_errors); 1020 return count;
1030 DEBUGFS_FWSTATS_ADD(dma, tx_requested); 1021}
1031 DEBUGFS_FWSTATS_ADD(dma, tx_errors); 1022
1032 1023static const struct file_operations sleep_auth_ops = {
1033 DEBUGFS_FWSTATS_ADD(isr, cmd_cmplt); 1024 .read = sleep_auth_read,
1034 DEBUGFS_FWSTATS_ADD(isr, fiqs); 1025 .write = sleep_auth_write,
1035 DEBUGFS_FWSTATS_ADD(isr, rx_headers); 1026 .open = simple_open,
1036 DEBUGFS_FWSTATS_ADD(isr, rx_mem_overflow); 1027 .llseek = default_llseek,
1037 DEBUGFS_FWSTATS_ADD(isr, rx_rdys); 1028};
1038 DEBUGFS_FWSTATS_ADD(isr, irqs); 1029
1039 DEBUGFS_FWSTATS_ADD(isr, tx_procs); 1030static ssize_t dev_mem_read(struct file *file,
1040 DEBUGFS_FWSTATS_ADD(isr, decrypt_done); 1031 char __user *user_buf, size_t count,
1041 DEBUGFS_FWSTATS_ADD(isr, dma0_done); 1032 loff_t *ppos)
1042 DEBUGFS_FWSTATS_ADD(isr, dma1_done); 1033{
1043 DEBUGFS_FWSTATS_ADD(isr, tx_exch_complete); 1034 struct wl1271 *wl = file->private_data;
1044 DEBUGFS_FWSTATS_ADD(isr, commands); 1035 struct wlcore_partition_set part, old_part;
1045 DEBUGFS_FWSTATS_ADD(isr, rx_procs); 1036 size_t bytes = count;
1046 DEBUGFS_FWSTATS_ADD(isr, hw_pm_mode_changes); 1037 int ret;
1047 DEBUGFS_FWSTATS_ADD(isr, host_acknowledges); 1038 char *buf;
1048 DEBUGFS_FWSTATS_ADD(isr, pci_pm); 1039
1049 DEBUGFS_FWSTATS_ADD(isr, wakeups); 1040 /* only requests of dword-aligned size and offset are supported */
1050 DEBUGFS_FWSTATS_ADD(isr, low_rssi); 1041 if (bytes % 4)
1051 1042 return -EINVAL;
1052 DEBUGFS_FWSTATS_ADD(wep, addr_key_count); 1043
1053 DEBUGFS_FWSTATS_ADD(wep, default_key_count); 1044 if (*ppos % 4)
1054 /* skipping wep.reserved */ 1045 return -EINVAL;
1055 DEBUGFS_FWSTATS_ADD(wep, key_not_found); 1046
1056 DEBUGFS_FWSTATS_ADD(wep, decrypt_fail); 1047 /* function should return in reasonable time */
1057 DEBUGFS_FWSTATS_ADD(wep, packets); 1048 bytes = min(bytes, WLCORE_MAX_BLOCK_SIZE);
1058 DEBUGFS_FWSTATS_ADD(wep, interrupt); 1049
1059 1050 if (bytes == 0)
1060 DEBUGFS_FWSTATS_ADD(pwr, ps_enter); 1051 return -EINVAL;
1061 DEBUGFS_FWSTATS_ADD(pwr, elp_enter); 1052
1062 DEBUGFS_FWSTATS_ADD(pwr, missing_bcns); 1053 memset(&part, 0, sizeof(part));
1063 DEBUGFS_FWSTATS_ADD(pwr, wake_on_host); 1054 part.mem.start = file->f_pos;
1064 DEBUGFS_FWSTATS_ADD(pwr, wake_on_timer_exp); 1055 part.mem.size = bytes;
1065 DEBUGFS_FWSTATS_ADD(pwr, tx_with_ps); 1056
1066 DEBUGFS_FWSTATS_ADD(pwr, tx_without_ps); 1057 buf = kmalloc(bytes, GFP_KERNEL);
1067 DEBUGFS_FWSTATS_ADD(pwr, rcvd_beacons); 1058 if (!buf)
1068 DEBUGFS_FWSTATS_ADD(pwr, power_save_off); 1059 return -ENOMEM;
1069 DEBUGFS_FWSTATS_ADD(pwr, enable_ps); 1060
1070 DEBUGFS_FWSTATS_ADD(pwr, disable_ps); 1061 mutex_lock(&wl->mutex);
1071 DEBUGFS_FWSTATS_ADD(pwr, fix_tsf_ps); 1062
1072 /* skipping cont_miss_bcns_spread for now */ 1063 if (wl->state == WL1271_STATE_OFF) {
1073 DEBUGFS_FWSTATS_ADD(pwr, rcvd_awake_beacons); 1064 ret = -EFAULT;
1074 1065 goto skip_read;
1075 DEBUGFS_FWSTATS_ADD(mic, rx_pkts); 1066 }
1076 DEBUGFS_FWSTATS_ADD(mic, calc_failure); 1067
1077 1068 ret = wl1271_ps_elp_wakeup(wl);
1078 DEBUGFS_FWSTATS_ADD(aes, encrypt_fail); 1069 if (ret < 0)
1079 DEBUGFS_FWSTATS_ADD(aes, decrypt_fail); 1070 goto skip_read;
1080 DEBUGFS_FWSTATS_ADD(aes, encrypt_packets); 1071
1081 DEBUGFS_FWSTATS_ADD(aes, decrypt_packets); 1072 /* store current partition and switch partition */
1082 DEBUGFS_FWSTATS_ADD(aes, encrypt_interrupt); 1073 memcpy(&old_part, &wl->curr_part, sizeof(old_part));
1083 DEBUGFS_FWSTATS_ADD(aes, decrypt_interrupt); 1074 ret = wlcore_set_partition(wl, &part);
1084 1075 if (ret < 0)
1085 DEBUGFS_FWSTATS_ADD(event, heart_beat); 1076 goto part_err;
1086 DEBUGFS_FWSTATS_ADD(event, calibration); 1077
1087 DEBUGFS_FWSTATS_ADD(event, rx_mismatch); 1078 ret = wlcore_raw_read(wl, 0, buf, bytes, false);
1088 DEBUGFS_FWSTATS_ADD(event, rx_mem_empty); 1079 if (ret < 0)
1089 DEBUGFS_FWSTATS_ADD(event, rx_pool); 1080 goto read_err;
1090 DEBUGFS_FWSTATS_ADD(event, oom_late); 1081
1091 DEBUGFS_FWSTATS_ADD(event, phy_transmit_error); 1082read_err:
1092 DEBUGFS_FWSTATS_ADD(event, tx_stuck); 1083 /* recover partition */
1093 1084 ret = wlcore_set_partition(wl, &old_part);
1094 DEBUGFS_FWSTATS_ADD(ps, pspoll_timeouts); 1085 if (ret < 0)
1095 DEBUGFS_FWSTATS_ADD(ps, upsd_timeouts); 1086 goto part_err;
1096 DEBUGFS_FWSTATS_ADD(ps, upsd_max_sptime); 1087
1097 DEBUGFS_FWSTATS_ADD(ps, upsd_max_apturn); 1088part_err:
1098 DEBUGFS_FWSTATS_ADD(ps, pspoll_max_apturn); 1089 wl1271_ps_elp_sleep(wl);
1099 DEBUGFS_FWSTATS_ADD(ps, pspoll_utilization); 1090
1100 DEBUGFS_FWSTATS_ADD(ps, upsd_utilization); 1091skip_read:
1101 1092 mutex_unlock(&wl->mutex);
1102 DEBUGFS_FWSTATS_ADD(rxpipe, rx_prep_beacon_drop); 1093
1103 DEBUGFS_FWSTATS_ADD(rxpipe, descr_host_int_trig_rx_data); 1094 if (ret == 0) {
1104 DEBUGFS_FWSTATS_ADD(rxpipe, beacon_buffer_thres_host_int_trig_rx_data); 1095 ret = copy_to_user(user_buf, buf, bytes);
1105 DEBUGFS_FWSTATS_ADD(rxpipe, missed_beacon_host_int_trig_rx_data); 1096 if (ret < bytes) {
1106 DEBUGFS_FWSTATS_ADD(rxpipe, tx_xfr_host_int_trig_rx_data); 1097 bytes -= ret;
1098 *ppos += bytes;
1099 ret = 0;
1100 } else {
1101 ret = -EFAULT;
1102 }
1103 }
1104
1105 kfree(buf);
1106
1107 return ((ret == 0) ? bytes : ret);
1108}
1109
1110static ssize_t dev_mem_write(struct file *file, const char __user *user_buf,
1111 size_t count, loff_t *ppos)
1112{
1113 struct wl1271 *wl = file->private_data;
1114 struct wlcore_partition_set part, old_part;
1115 size_t bytes = count;
1116 int ret;
1117 char *buf;
1118
1119 /* only requests of dword-aligned size and offset are supported */
1120 if (bytes % 4)
1121 return -EINVAL;
1122
1123 if (*ppos % 4)
1124 return -EINVAL;
1125
1126 /* function should return in reasonable time */
1127 bytes = min(bytes, WLCORE_MAX_BLOCK_SIZE);
1128
1129 if (bytes == 0)
1130 return -EINVAL;
1131
1132 memset(&part, 0, sizeof(part));
1133 part.mem.start = file->f_pos;
1134 part.mem.size = bytes;
1135
1136 buf = kmalloc(bytes, GFP_KERNEL);
1137 if (!buf)
1138 return -ENOMEM;
1139
1140 ret = copy_from_user(buf, user_buf, bytes);
1141 if (ret) {
1142 ret = -EFAULT;
1143 goto err_out;
1144 }
1145
1146 mutex_lock(&wl->mutex);
1147
1148 if (wl->state == WL1271_STATE_OFF) {
1149 ret = -EFAULT;
1150 goto skip_write;
1151 }
1152
1153 ret = wl1271_ps_elp_wakeup(wl);
1154 if (ret < 0)
1155 goto skip_write;
1156
1157 /* store current partition and switch partition */
1158 memcpy(&old_part, &wl->curr_part, sizeof(old_part));
1159 ret = wlcore_set_partition(wl, &part);
1160 if (ret < 0)
1161 goto part_err;
1162
1163 ret = wlcore_raw_write(wl, 0, buf, bytes, false);
1164 if (ret < 0)
1165 goto write_err;
1166
1167write_err:
1168 /* recover partition */
1169 ret = wlcore_set_partition(wl, &old_part);
1170 if (ret < 0)
1171 goto part_err;
1172
1173part_err:
1174 wl1271_ps_elp_sleep(wl);
1175
1176skip_write:
1177 mutex_unlock(&wl->mutex);
1178
1179 if (ret == 0)
1180 *ppos += bytes;
1181
1182err_out:
1183 kfree(buf);
1184
1185 return ((ret == 0) ? bytes : ret);
1186}
1187
1188static loff_t dev_mem_seek(struct file *file, loff_t offset, int orig)
1189{
1190 loff_t ret;
1191
1192 /* only requests of dword-aligned size and offset are supported */
1193 if (offset % 4)
1194 return -EINVAL;
1195
1196 switch (orig) {
1197 case SEEK_SET:
1198 file->f_pos = offset;
1199 ret = file->f_pos;
1200 break;
1201 case SEEK_CUR:
1202 file->f_pos += offset;
1203 ret = file->f_pos;
1204 break;
1205 default:
1206 ret = -EINVAL;
1207 }
1208
1209 return ret;
1210}
1211
1212static const struct file_operations dev_mem_ops = {
1213 .open = simple_open,
1214 .read = dev_mem_read,
1215 .write = dev_mem_write,
1216 .llseek = dev_mem_seek,
1217};
1218
1219static int wl1271_debugfs_add_files(struct wl1271 *wl,
1220 struct dentry *rootdir)
1221{
1222 int ret = 0;
1223 struct dentry *entry, *streaming;
1107 1224
1108 DEBUGFS_ADD(tx_queue_len, rootdir); 1225 DEBUGFS_ADD(tx_queue_len, rootdir);
1109 DEBUGFS_ADD(retry_count, rootdir); 1226 DEBUGFS_ADD(retry_count, rootdir);
@@ -1120,6 +1237,11 @@ static int wl1271_debugfs_add_files(struct wl1271 *wl,
1120 DEBUGFS_ADD(dynamic_ps_timeout, rootdir); 1237 DEBUGFS_ADD(dynamic_ps_timeout, rootdir);
1121 DEBUGFS_ADD(forced_ps, rootdir); 1238 DEBUGFS_ADD(forced_ps, rootdir);
1122 DEBUGFS_ADD(split_scan_timeout, rootdir); 1239 DEBUGFS_ADD(split_scan_timeout, rootdir);
1240 DEBUGFS_ADD(irq_pkt_threshold, rootdir);
1241 DEBUGFS_ADD(irq_blk_threshold, rootdir);
1242 DEBUGFS_ADD(irq_timeout, rootdir);
1243 DEBUGFS_ADD(fw_stats_raw, rootdir);
1244 DEBUGFS_ADD(sleep_auth, rootdir);
1123 1245
1124 streaming = debugfs_create_dir("rx_streaming", rootdir); 1246 streaming = debugfs_create_dir("rx_streaming", rootdir);
1125 if (!streaming || IS_ERR(streaming)) 1247 if (!streaming || IS_ERR(streaming))
@@ -1128,6 +1250,7 @@ static int wl1271_debugfs_add_files(struct wl1271 *wl,
1128 DEBUGFS_ADD_PREFIX(rx_streaming, interval, streaming); 1250 DEBUGFS_ADD_PREFIX(rx_streaming, interval, streaming);
1129 DEBUGFS_ADD_PREFIX(rx_streaming, always, streaming); 1251 DEBUGFS_ADD_PREFIX(rx_streaming, always, streaming);
1130 1252
1253 DEBUGFS_ADD_PREFIX(dev, mem, rootdir);
1131 1254
1132 return 0; 1255 return 0;
1133 1256
@@ -1145,7 +1268,7 @@ void wl1271_debugfs_reset(struct wl1271 *wl)
1145 if (!wl->stats.fw_stats) 1268 if (!wl->stats.fw_stats)
1146 return; 1269 return;
1147 1270
1148 memset(wl->stats.fw_stats, 0, sizeof(*wl->stats.fw_stats)); 1271 memset(wl->stats.fw_stats, 0, wl->stats.fw_stats_len);
1149 wl->stats.retry_count = 0; 1272 wl->stats.retry_count = 0;
1150 wl->stats.excessive_retries = 0; 1273 wl->stats.excessive_retries = 0;
1151} 1274}
@@ -1160,34 +1283,34 @@ int wl1271_debugfs_init(struct wl1271 *wl)
1160 1283
1161 if (IS_ERR(rootdir)) { 1284 if (IS_ERR(rootdir)) {
1162 ret = PTR_ERR(rootdir); 1285 ret = PTR_ERR(rootdir);
1163 goto err; 1286 goto out;
1164 } 1287 }
1165 1288
1166 wl->stats.fw_stats = kzalloc(sizeof(*wl->stats.fw_stats), 1289 wl->stats.fw_stats = kzalloc(wl->stats.fw_stats_len, GFP_KERNEL);
1167 GFP_KERNEL);
1168
1169 if (!wl->stats.fw_stats) { 1290 if (!wl->stats.fw_stats) {
1170 ret = -ENOMEM; 1291 ret = -ENOMEM;
1171 goto err_fw; 1292 goto out_remove;
1172 } 1293 }
1173 1294
1174 wl->stats.fw_stats_update = jiffies; 1295 wl->stats.fw_stats_update = jiffies;
1175 1296
1176 ret = wl1271_debugfs_add_files(wl, rootdir); 1297 ret = wl1271_debugfs_add_files(wl, rootdir);
1298 if (ret < 0)
1299 goto out_exit;
1177 1300
1301 ret = wlcore_debugfs_init(wl, rootdir);
1178 if (ret < 0) 1302 if (ret < 0)
1179 goto err_file; 1303 goto out_exit;
1180 1304
1181 return 0; 1305 goto out;
1182 1306
1183err_file: 1307out_exit:
1184 kfree(wl->stats.fw_stats); 1308 wl1271_debugfs_exit(wl);
1185 wl->stats.fw_stats = NULL;
1186 1309
1187err_fw: 1310out_remove:
1188 debugfs_remove_recursive(rootdir); 1311 debugfs_remove_recursive(rootdir);
1189 1312
1190err: 1313out:
1191 return ret; 1314 return ret;
1192} 1315}
1193 1316
diff --git a/drivers/net/wireless/ti/wlcore/debugfs.h b/drivers/net/wireless/ti/wlcore/debugfs.h
index a8d3aef011f..f7381dd6900 100644
--- a/drivers/net/wireless/ti/wlcore/debugfs.h
+++ b/drivers/net/wireless/ti/wlcore/debugfs.h
@@ -26,8 +26,95 @@
26 26
27#include "wlcore.h" 27#include "wlcore.h"
28 28
29int wl1271_format_buffer(char __user *userbuf, size_t count,
30 loff_t *ppos, char *fmt, ...);
31
29int wl1271_debugfs_init(struct wl1271 *wl); 32int wl1271_debugfs_init(struct wl1271 *wl);
30void wl1271_debugfs_exit(struct wl1271 *wl); 33void wl1271_debugfs_exit(struct wl1271 *wl);
31void wl1271_debugfs_reset(struct wl1271 *wl); 34void wl1271_debugfs_reset(struct wl1271 *wl);
35void wl1271_debugfs_update_stats(struct wl1271 *wl);
36
37#define DEBUGFS_FORMAT_BUFFER_SIZE 256
38
39#define DEBUGFS_READONLY_FILE(name, fmt, value...) \
40static ssize_t name## _read(struct file *file, char __user *userbuf, \
41 size_t count, loff_t *ppos) \
42{ \
43 struct wl1271 *wl = file->private_data; \
44 return wl1271_format_buffer(userbuf, count, ppos, \
45 fmt "\n", ##value); \
46} \
47 \
48static const struct file_operations name## _ops = { \
49 .read = name## _read, \
50 .open = simple_open, \
51 .llseek = generic_file_llseek, \
52};
53
54#define DEBUGFS_ADD(name, parent) \
55 do { \
56 entry = debugfs_create_file(#name, 0400, parent, \
57 wl, &name## _ops); \
58 if (!entry || IS_ERR(entry)) \
59 goto err; \
60 } while (0);
61
62
63#define DEBUGFS_ADD_PREFIX(prefix, name, parent) \
64 do { \
65 entry = debugfs_create_file(#name, 0400, parent, \
66 wl, &prefix## _## name## _ops); \
67 if (!entry || IS_ERR(entry)) \
68 goto err; \
69 } while (0);
70
71#define DEBUGFS_FWSTATS_FILE(sub, name, fmt, struct_type) \
72static ssize_t sub## _ ##name## _read(struct file *file, \
73 char __user *userbuf, \
74 size_t count, loff_t *ppos) \
75{ \
76 struct wl1271 *wl = file->private_data; \
77 struct struct_type *stats = wl->stats.fw_stats; \
78 \
79 wl1271_debugfs_update_stats(wl); \
80 \
81 return wl1271_format_buffer(userbuf, count, ppos, fmt "\n", \
82 stats->sub.name); \
83} \
84 \
85static const struct file_operations sub## _ ##name## _ops = { \
86 .read = sub## _ ##name## _read, \
87 .open = simple_open, \
88 .llseek = generic_file_llseek, \
89};
90
91#define DEBUGFS_FWSTATS_FILE_ARRAY(sub, name, len, struct_type) \
92static ssize_t sub## _ ##name## _read(struct file *file, \
93 char __user *userbuf, \
94 size_t count, loff_t *ppos) \
95{ \
96 struct wl1271 *wl = file->private_data; \
97 struct struct_type *stats = wl->stats.fw_stats; \
98 char buf[DEBUGFS_FORMAT_BUFFER_SIZE] = ""; \
99 int res, i; \
100 \
101 wl1271_debugfs_update_stats(wl); \
102 \
103 for (i = 0; i < len; i++) \
104 res = snprintf(buf, sizeof(buf), "%s[%d] = %d\n", \
105 buf, i, stats->sub.name[i]); \
106 \
107 return wl1271_format_buffer(userbuf, count, ppos, "%s", buf); \
108} \
109 \
110static const struct file_operations sub## _ ##name## _ops = { \
111 .read = sub## _ ##name## _read, \
112 .open = simple_open, \
113 .llseek = generic_file_llseek, \
114};
115
116#define DEBUGFS_FWSTATS_ADD(sub, name) \
117 DEBUGFS_ADD(sub## _ ##name, stats)
118
32 119
33#endif /* WL1271_DEBUGFS_H */ 120#endif /* WL1271_DEBUGFS_H */
diff --git a/drivers/net/wireless/ti/wlcore/event.c b/drivers/net/wireless/ti/wlcore/event.c
index 28e2a633c3b..48907054d49 100644
--- a/drivers/net/wireless/ti/wlcore/event.c
+++ b/drivers/net/wireless/ti/wlcore/event.c
@@ -105,6 +105,7 @@ static int wl1271_event_process(struct wl1271 *wl)
105 u32 vector; 105 u32 vector;
106 bool disconnect_sta = false; 106 bool disconnect_sta = false;
107 unsigned long sta_bitmap = 0; 107 unsigned long sta_bitmap = 0;
108 int ret;
108 109
109 wl1271_event_mbox_dump(mbox); 110 wl1271_event_mbox_dump(mbox);
110 111
@@ -148,15 +149,33 @@ static int wl1271_event_process(struct wl1271 *wl)
148 int delay = wl->conf.conn.synch_fail_thold * 149 int delay = wl->conf.conn.synch_fail_thold *
149 wl->conf.conn.bss_lose_timeout; 150 wl->conf.conn.bss_lose_timeout;
150 wl1271_info("Beacon loss detected."); 151 wl1271_info("Beacon loss detected.");
151 cancel_delayed_work_sync(&wl->connection_loss_work); 152
153 /*
154 * if the work is already queued, it should take place. We
155 * don't want to delay the connection loss indication
156 * any more.
157 */
152 ieee80211_queue_delayed_work(wl->hw, &wl->connection_loss_work, 158 ieee80211_queue_delayed_work(wl->hw, &wl->connection_loss_work,
153 msecs_to_jiffies(delay)); 159 msecs_to_jiffies(delay));
160
161 wl12xx_for_each_wlvif_sta(wl, wlvif) {
162 vif = wl12xx_wlvif_to_vif(wlvif);
163
164 ieee80211_cqm_rssi_notify(
165 vif,
166 NL80211_CQM_RSSI_BEACON_LOSS_EVENT,
167 GFP_KERNEL);
168 }
154 } 169 }
155 170
156 if (vector & REGAINED_BSS_EVENT_ID) { 171 if (vector & REGAINED_BSS_EVENT_ID) {
157 /* TODO: check for multi-role */ 172 /* TODO: check for multi-role */
158 wl1271_info("Beacon regained."); 173 wl1271_info("Beacon regained.");
159 cancel_delayed_work_sync(&wl->connection_loss_work); 174 cancel_delayed_work(&wl->connection_loss_work);
175
176 /* sanity check - we can't lose and gain the beacon together */
177 WARN(vector & BSS_LOSE_EVENT_ID,
178 "Concurrent beacon loss and gain from FW");
160 } 179 }
161 180
162 if (vector & RSSI_SNR_TRIGGER_0_EVENT_ID) { 181 if (vector & RSSI_SNR_TRIGGER_0_EVENT_ID) {
@@ -210,7 +229,9 @@ static int wl1271_event_process(struct wl1271 *wl)
210 229
211 if ((vector & DUMMY_PACKET_EVENT_ID)) { 230 if ((vector & DUMMY_PACKET_EVENT_ID)) {
212 wl1271_debug(DEBUG_EVENT, "DUMMY_PACKET_ID_EVENT_ID"); 231 wl1271_debug(DEBUG_EVENT, "DUMMY_PACKET_ID_EVENT_ID");
213 wl1271_tx_dummy_packet(wl); 232 ret = wl1271_tx_dummy_packet(wl);
233 if (ret < 0)
234 return ret;
214 } 235 }
215 236
216 /* 237 /*
@@ -283,8 +304,10 @@ int wl1271_event_handle(struct wl1271 *wl, u8 mbox_num)
283 return -EINVAL; 304 return -EINVAL;
284 305
285 /* first we read the mbox descriptor */ 306 /* first we read the mbox descriptor */
286 wl1271_read(wl, wl->mbox_ptr[mbox_num], wl->mbox, 307 ret = wlcore_read(wl, wl->mbox_ptr[mbox_num], wl->mbox,
287 sizeof(*wl->mbox), false); 308 sizeof(*wl->mbox), false);
309 if (ret < 0)
310 return ret;
288 311
289 /* process the descriptor */ 312 /* process the descriptor */
290 ret = wl1271_event_process(wl); 313 ret = wl1271_event_process(wl);
@@ -295,7 +318,7 @@ int wl1271_event_handle(struct wl1271 *wl, u8 mbox_num)
295 * TODO: we just need this because one bit is in a different 318 * TODO: we just need this because one bit is in a different
296 * place. Is there any better way? 319 * place. Is there any better way?
297 */ 320 */
298 wl->ops->ack_event(wl); 321 ret = wl->ops->ack_event(wl);
299 322
300 return 0; 323 return ret;
301} 324}
diff --git a/drivers/net/wireless/ti/wlcore/hw_ops.h b/drivers/net/wireless/ti/wlcore/hw_ops.h
index 9384b4d56c2..2673d783ec1 100644
--- a/drivers/net/wireless/ti/wlcore/hw_ops.h
+++ b/drivers/net/wireless/ti/wlcore/hw_ops.h
@@ -65,11 +65,13 @@ wlcore_hw_get_rx_buf_align(struct wl1271 *wl, u32 rx_desc)
65 return wl->ops->get_rx_buf_align(wl, rx_desc); 65 return wl->ops->get_rx_buf_align(wl, rx_desc);
66} 66}
67 67
68static inline void 68static inline int
69wlcore_hw_prepare_read(struct wl1271 *wl, u32 rx_desc, u32 len) 69wlcore_hw_prepare_read(struct wl1271 *wl, u32 rx_desc, u32 len)
70{ 70{
71 if (wl->ops->prepare_read) 71 if (wl->ops->prepare_read)
72 wl->ops->prepare_read(wl, rx_desc, len); 72 return wl->ops->prepare_read(wl, rx_desc, len);
73
74 return 0;
73} 75}
74 76
75static inline u32 77static inline u32
@@ -81,10 +83,12 @@ wlcore_hw_get_rx_packet_len(struct wl1271 *wl, void *rx_data, u32 data_len)
81 return wl->ops->get_rx_packet_len(wl, rx_data, data_len); 83 return wl->ops->get_rx_packet_len(wl, rx_data, data_len);
82} 84}
83 85
84static inline void wlcore_hw_tx_delayed_compl(struct wl1271 *wl) 86static inline int wlcore_hw_tx_delayed_compl(struct wl1271 *wl)
85{ 87{
86 if (wl->ops->tx_delayed_compl) 88 if (wl->ops->tx_delayed_compl)
87 wl->ops->tx_delayed_compl(wl); 89 return wl->ops->tx_delayed_compl(wl);
90
91 return 0;
88} 92}
89 93
90static inline void wlcore_hw_tx_immediate_compl(struct wl1271 *wl) 94static inline void wlcore_hw_tx_immediate_compl(struct wl1271 *wl)
@@ -119,4 +123,82 @@ static inline int wlcore_identify_fw(struct wl1271 *wl)
119 return 0; 123 return 0;
120} 124}
121 125
126static inline void
127wlcore_hw_set_tx_desc_csum(struct wl1271 *wl,
128 struct wl1271_tx_hw_descr *desc,
129 struct sk_buff *skb)
130{
131 if (!wl->ops->set_tx_desc_csum)
132 BUG_ON(1);
133
134 wl->ops->set_tx_desc_csum(wl, desc, skb);
135}
136
137static inline void
138wlcore_hw_set_rx_csum(struct wl1271 *wl,
139 struct wl1271_rx_descriptor *desc,
140 struct sk_buff *skb)
141{
142 if (wl->ops->set_rx_csum)
143 wl->ops->set_rx_csum(wl, desc, skb);
144}
145
146static inline u32
147wlcore_hw_ap_get_mimo_wide_rate_mask(struct wl1271 *wl,
148 struct wl12xx_vif *wlvif)
149{
150 if (wl->ops->ap_get_mimo_wide_rate_mask)
151 return wl->ops->ap_get_mimo_wide_rate_mask(wl, wlvif);
152
153 return 0;
154}
155
156static inline int
157wlcore_debugfs_init(struct wl1271 *wl, struct dentry *rootdir)
158{
159 if (wl->ops->debugfs_init)
160 return wl->ops->debugfs_init(wl, rootdir);
161
162 return 0;
163}
164
165static inline int
166wlcore_handle_static_data(struct wl1271 *wl, void *static_data)
167{
168 if (wl->ops->handle_static_data)
169 return wl->ops->handle_static_data(wl, static_data);
170
171 return 0;
172}
173
174static inline int
175wlcore_hw_get_spare_blocks(struct wl1271 *wl, bool is_gem)
176{
177 if (!wl->ops->get_spare_blocks)
178 BUG_ON(1);
179
180 return wl->ops->get_spare_blocks(wl, is_gem);
181}
182
183static inline int
184wlcore_hw_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
185 struct ieee80211_vif *vif,
186 struct ieee80211_sta *sta,
187 struct ieee80211_key_conf *key_conf)
188{
189 if (!wl->ops->set_key)
190 BUG_ON(1);
191
192 return wl->ops->set_key(wl, cmd, vif, sta, key_conf);
193}
194
195static inline u32
196wlcore_hw_pre_pkt_send(struct wl1271 *wl, u32 buf_offset, u32 last_len)
197{
198 if (wl->ops->pre_pkt_send)
199 return wl->ops->pre_pkt_send(wl, buf_offset, last_len);
200
201 return buf_offset;
202}
203
122#endif 204#endif
diff --git a/drivers/net/wireless/ti/wlcore/ini.h b/drivers/net/wireless/ti/wlcore/ini.h
index 4cf9ecc5621..d24fe3bbc67 100644
--- a/drivers/net/wireless/ti/wlcore/ini.h
+++ b/drivers/net/wireless/ti/wlcore/ini.h
@@ -172,7 +172,19 @@ struct wl128x_ini_fem_params_5 {
172 172
173/* NVS data structure */ 173/* NVS data structure */
174#define WL1271_INI_NVS_SECTION_SIZE 468 174#define WL1271_INI_NVS_SECTION_SIZE 468
175#define WL1271_INI_FEM_MODULE_COUNT 2 175
176/* We have four FEM module types: 0-RFMD, 1-TQS, 2-SKW, 3-TQS_HP */
177#define WL1271_INI_FEM_MODULE_COUNT 4
178
179/*
180 * In NVS we only store two FEM module entries -
181 * FEM modules 0,2,3 are stored in entry 0
182 * FEM module 1 is stored in entry 1
183 */
184#define WL12XX_NVS_FEM_MODULE_COUNT 2
185
186#define WL12XX_FEM_TO_NVS_ENTRY(ini_fem_module) \
187 ((ini_fem_module) == 1 ? 1 : 0)
176 188
177#define WL1271_INI_LEGACY_NVS_FILE_SIZE 800 189#define WL1271_INI_LEGACY_NVS_FILE_SIZE 800
178 190
@@ -188,13 +200,13 @@ struct wl1271_nvs_file {
188 struct { 200 struct {
189 struct wl1271_ini_fem_params_2 params; 201 struct wl1271_ini_fem_params_2 params;
190 u8 padding; 202 u8 padding;
191 } dyn_radio_params_2[WL1271_INI_FEM_MODULE_COUNT]; 203 } dyn_radio_params_2[WL12XX_NVS_FEM_MODULE_COUNT];
192 struct wl1271_ini_band_params_5 stat_radio_params_5; 204 struct wl1271_ini_band_params_5 stat_radio_params_5;
193 u8 padding3; 205 u8 padding3;
194 struct { 206 struct {
195 struct wl1271_ini_fem_params_5 params; 207 struct wl1271_ini_fem_params_5 params;
196 u8 padding; 208 u8 padding;
197 } dyn_radio_params_5[WL1271_INI_FEM_MODULE_COUNT]; 209 } dyn_radio_params_5[WL12XX_NVS_FEM_MODULE_COUNT];
198} __packed; 210} __packed;
199 211
200struct wl128x_nvs_file { 212struct wl128x_nvs_file {
@@ -209,12 +221,12 @@ struct wl128x_nvs_file {
209 struct { 221 struct {
210 struct wl128x_ini_fem_params_2 params; 222 struct wl128x_ini_fem_params_2 params;
211 u8 padding; 223 u8 padding;
212 } dyn_radio_params_2[WL1271_INI_FEM_MODULE_COUNT]; 224 } dyn_radio_params_2[WL12XX_NVS_FEM_MODULE_COUNT];
213 struct wl128x_ini_band_params_5 stat_radio_params_5; 225 struct wl128x_ini_band_params_5 stat_radio_params_5;
214 u8 padding3; 226 u8 padding3;
215 struct { 227 struct {
216 struct wl128x_ini_fem_params_5 params; 228 struct wl128x_ini_fem_params_5 params;
217 u8 padding; 229 u8 padding;
218 } dyn_radio_params_5[WL1271_INI_FEM_MODULE_COUNT]; 230 } dyn_radio_params_5[WL12XX_NVS_FEM_MODULE_COUNT];
219} __packed; 231} __packed;
220#endif 232#endif
diff --git a/drivers/net/wireless/ti/wlcore/init.c b/drivers/net/wireless/ti/wlcore/init.c
index 9f89255eb6e..8a8a8971bef 100644
--- a/drivers/net/wireless/ti/wlcore/init.c
+++ b/drivers/net/wireless/ti/wlcore/init.c
@@ -460,6 +460,9 @@ int wl1271_init_ap_rates(struct wl1271 *wl, struct wl12xx_vif *wlvif)
460 /* unconditionally enable HT rates */ 460 /* unconditionally enable HT rates */
461 supported_rates |= CONF_TX_MCS_RATES; 461 supported_rates |= CONF_TX_MCS_RATES;
462 462
463 /* get extra MIMO or wide-chan rates where the HW supports it */
464 supported_rates |= wlcore_hw_ap_get_mimo_wide_rate_mask(wl, wlvif);
465
463 /* configure unicast TX rate classes */ 466 /* configure unicast TX rate classes */
464 for (i = 0; i < wl->conf.tx.ac_conf_count; i++) { 467 for (i = 0; i < wl->conf.tx.ac_conf_count; i++) {
465 rc.enabled_rates = supported_rates; 468 rc.enabled_rates = supported_rates;
@@ -551,29 +554,28 @@ int wl1271_init_vif_specific(struct wl1271 *wl, struct ieee80211_vif *vif)
551 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS); 554 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
552 int ret, i; 555 int ret, i;
553 556
554 /* 557 /* consider all existing roles before configuring psm. */
555 * consider all existing roles before configuring psm. 558
556 * TODO: reconfigure on interface removal. 559 if (wl->ap_count == 0 && is_ap) { /* first AP */
557 */ 560 /* Configure for power always on */
558 if (!wl->ap_count) { 561 ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_CAM);
559 if (is_ap) { 562 if (ret < 0)
560 /* Configure for power always on */ 563 return ret;
564 /* first STA, no APs */
565 } else if (wl->sta_count == 0 && wl->ap_count == 0 && !is_ap) {
566 u8 sta_auth = wl->conf.conn.sta_sleep_auth;
567 /* Configure for power according to debugfs */
568 if (sta_auth != WL1271_PSM_ILLEGAL)
569 ret = wl1271_acx_sleep_auth(wl, sta_auth);
570 /* Configure for power always on */
571 else if (wl->quirks & WLCORE_QUIRK_NO_ELP)
561 ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_CAM); 572 ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_CAM);
562 if (ret < 0) 573 /* Configure for ELP power saving */
563 return ret; 574 else
564 } else if (!wl->sta_count) { 575 ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
565 if (wl->quirks & WLCORE_QUIRK_NO_ELP) { 576
566 /* Configure for power always on */ 577 if (ret < 0)
567 ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_CAM); 578 return ret;
568 if (ret < 0)
569 return ret;
570 } else {
571 /* Configure for ELP power saving */
572 ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
573 if (ret < 0)
574 return ret;
575 }
576 }
577 } 579 }
578 580
579 /* Mode specific init */ 581 /* Mode specific init */
diff --git a/drivers/net/wireless/ti/wlcore/io.c b/drivers/net/wireless/ti/wlcore/io.c
index 7cd0081aede..9976219c4e4 100644
--- a/drivers/net/wireless/ti/wlcore/io.c
+++ b/drivers/net/wireless/ti/wlcore/io.c
@@ -48,6 +48,12 @@ void wlcore_disable_interrupts(struct wl1271 *wl)
48} 48}
49EXPORT_SYMBOL_GPL(wlcore_disable_interrupts); 49EXPORT_SYMBOL_GPL(wlcore_disable_interrupts);
50 50
51void wlcore_disable_interrupts_nosync(struct wl1271 *wl)
52{
53 disable_irq_nosync(wl->irq);
54}
55EXPORT_SYMBOL_GPL(wlcore_disable_interrupts_nosync);
56
51void wlcore_enable_interrupts(struct wl1271 *wl) 57void wlcore_enable_interrupts(struct wl1271 *wl)
52{ 58{
53 enable_irq(wl->irq); 59 enable_irq(wl->irq);
@@ -122,9 +128,11 @@ EXPORT_SYMBOL_GPL(wlcore_translate_addr);
122 * | | 128 * | |
123 * 129 *
124 */ 130 */
125void wlcore_set_partition(struct wl1271 *wl, 131int wlcore_set_partition(struct wl1271 *wl,
126 const struct wlcore_partition_set *p) 132 const struct wlcore_partition_set *p)
127{ 133{
134 int ret;
135
128 /* copy partition info */ 136 /* copy partition info */
129 memcpy(&wl->curr_part, p, sizeof(*p)); 137 memcpy(&wl->curr_part, p, sizeof(*p));
130 138
@@ -137,28 +145,41 @@ void wlcore_set_partition(struct wl1271 *wl,
137 wl1271_debug(DEBUG_IO, "mem3_start %08X mem3_size %08X", 145 wl1271_debug(DEBUG_IO, "mem3_start %08X mem3_size %08X",
138 p->mem3.start, p->mem3.size); 146 p->mem3.start, p->mem3.size);
139 147
140 wl1271_raw_write32(wl, HW_PART0_START_ADDR, p->mem.start); 148 ret = wlcore_raw_write32(wl, HW_PART0_START_ADDR, p->mem.start);
141 wl1271_raw_write32(wl, HW_PART0_SIZE_ADDR, p->mem.size); 149 if (ret < 0)
142 wl1271_raw_write32(wl, HW_PART1_START_ADDR, p->reg.start); 150 goto out;
143 wl1271_raw_write32(wl, HW_PART1_SIZE_ADDR, p->reg.size); 151
144 wl1271_raw_write32(wl, HW_PART2_START_ADDR, p->mem2.start); 152 ret = wlcore_raw_write32(wl, HW_PART0_SIZE_ADDR, p->mem.size);
145 wl1271_raw_write32(wl, HW_PART2_SIZE_ADDR, p->mem2.size); 153 if (ret < 0)
154 goto out;
155
156 ret = wlcore_raw_write32(wl, HW_PART1_START_ADDR, p->reg.start);
157 if (ret < 0)
158 goto out;
159
160 ret = wlcore_raw_write32(wl, HW_PART1_SIZE_ADDR, p->reg.size);
161 if (ret < 0)
162 goto out;
163
164 ret = wlcore_raw_write32(wl, HW_PART2_START_ADDR, p->mem2.start);
165 if (ret < 0)
166 goto out;
167
168 ret = wlcore_raw_write32(wl, HW_PART2_SIZE_ADDR, p->mem2.size);
169 if (ret < 0)
170 goto out;
171
146 /* 172 /*
147 * We don't need the size of the last partition, as it is 173 * We don't need the size of the last partition, as it is
148 * automatically calculated based on the total memory size and 174 * automatically calculated based on the total memory size and
149 * the sizes of the previous partitions. 175 * the sizes of the previous partitions.
150 */ 176 */
151 wl1271_raw_write32(wl, HW_PART3_START_ADDR, p->mem3.start); 177 ret = wlcore_raw_write32(wl, HW_PART3_START_ADDR, p->mem3.start);
152}
153EXPORT_SYMBOL_GPL(wlcore_set_partition);
154 178
155void wlcore_select_partition(struct wl1271 *wl, u8 part) 179out:
156{ 180 return ret;
157 wl1271_debug(DEBUG_IO, "setting partition %d", part);
158
159 wlcore_set_partition(wl, &wl->ptable[part]);
160} 181}
161EXPORT_SYMBOL_GPL(wlcore_select_partition); 182EXPORT_SYMBOL_GPL(wlcore_set_partition);
162 183
163void wl1271_io_reset(struct wl1271 *wl) 184void wl1271_io_reset(struct wl1271 *wl)
164{ 185{
diff --git a/drivers/net/wireless/ti/wlcore/io.h b/drivers/net/wireless/ti/wlcore/io.h
index 8942954b56a..fef80adc8bf 100644
--- a/drivers/net/wireless/ti/wlcore/io.h
+++ b/drivers/net/wireless/ti/wlcore/io.h
@@ -45,6 +45,7 @@
45struct wl1271; 45struct wl1271;
46 46
47void wlcore_disable_interrupts(struct wl1271 *wl); 47void wlcore_disable_interrupts(struct wl1271 *wl);
48void wlcore_disable_interrupts_nosync(struct wl1271 *wl);
48void wlcore_enable_interrupts(struct wl1271 *wl); 49void wlcore_enable_interrupts(struct wl1271 *wl);
49 50
50void wl1271_io_reset(struct wl1271 *wl); 51void wl1271_io_reset(struct wl1271 *wl);
@@ -52,79 +53,113 @@ void wl1271_io_init(struct wl1271 *wl);
52int wlcore_translate_addr(struct wl1271 *wl, int addr); 53int wlcore_translate_addr(struct wl1271 *wl, int addr);
53 54
54/* Raw target IO, address is not translated */ 55/* Raw target IO, address is not translated */
55static inline void wl1271_raw_write(struct wl1271 *wl, int addr, void *buf, 56static inline int __must_check wlcore_raw_write(struct wl1271 *wl, int addr,
56 size_t len, bool fixed) 57 void *buf, size_t len,
58 bool fixed)
57{ 59{
58 wl->if_ops->write(wl->dev, addr, buf, len, fixed); 60 int ret;
61
62 if (test_bit(WL1271_FLAG_SDIO_FAILED, &wl->flags))
63 return -EIO;
64
65 ret = wl->if_ops->write(wl->dev, addr, buf, len, fixed);
66 if (ret)
67 set_bit(WL1271_FLAG_SDIO_FAILED, &wl->flags);
68
69 return ret;
59} 70}
60 71
61static inline void wl1271_raw_read(struct wl1271 *wl, int addr, void *buf, 72static inline int __must_check wlcore_raw_read(struct wl1271 *wl, int addr,
62 size_t len, bool fixed) 73 void *buf, size_t len,
74 bool fixed)
63{ 75{
64 wl->if_ops->read(wl->dev, addr, buf, len, fixed); 76 int ret;
77
78 if (test_bit(WL1271_FLAG_SDIO_FAILED, &wl->flags))
79 return -EIO;
80
81 ret = wl->if_ops->read(wl->dev, addr, buf, len, fixed);
82 if (ret)
83 set_bit(WL1271_FLAG_SDIO_FAILED, &wl->flags);
84
85 return ret;
65} 86}
66 87
67static inline void wlcore_raw_read_data(struct wl1271 *wl, int reg, void *buf, 88static inline int __must_check wlcore_raw_read_data(struct wl1271 *wl, int reg,
68 size_t len, bool fixed) 89 void *buf, size_t len,
90 bool fixed)
69{ 91{
70 wl1271_raw_read(wl, wl->rtable[reg], buf, len, fixed); 92 return wlcore_raw_read(wl, wl->rtable[reg], buf, len, fixed);
71} 93}
72 94
73static inline void wlcore_raw_write_data(struct wl1271 *wl, int reg, void *buf, 95static inline int __must_check wlcore_raw_write_data(struct wl1271 *wl, int reg,
74 size_t len, bool fixed) 96 void *buf, size_t len,
97 bool fixed)
75{ 98{
76 wl1271_raw_write(wl, wl->rtable[reg], buf, len, fixed); 99 return wlcore_raw_write(wl, wl->rtable[reg], buf, len, fixed);
77} 100}
78 101
79static inline u32 wl1271_raw_read32(struct wl1271 *wl, int addr) 102static inline int __must_check wlcore_raw_read32(struct wl1271 *wl, int addr,
103 u32 *val)
80{ 104{
81 wl1271_raw_read(wl, addr, &wl->buffer_32, 105 int ret;
82 sizeof(wl->buffer_32), false); 106
107 ret = wlcore_raw_read(wl, addr, &wl->buffer_32,
108 sizeof(wl->buffer_32), false);
109 if (ret < 0)
110 return ret;
111
112 if (val)
113 *val = le32_to_cpu(wl->buffer_32);
83 114
84 return le32_to_cpu(wl->buffer_32); 115 return 0;
85} 116}
86 117
87static inline void wl1271_raw_write32(struct wl1271 *wl, int addr, u32 val) 118static inline int __must_check wlcore_raw_write32(struct wl1271 *wl, int addr,
119 u32 val)
88{ 120{
89 wl->buffer_32 = cpu_to_le32(val); 121 wl->buffer_32 = cpu_to_le32(val);
90 wl1271_raw_write(wl, addr, &wl->buffer_32, 122 return wlcore_raw_write(wl, addr, &wl->buffer_32,
91 sizeof(wl->buffer_32), false); 123 sizeof(wl->buffer_32), false);
92} 124}
93 125
94static inline void wl1271_read(struct wl1271 *wl, int addr, void *buf, 126static inline int __must_check wlcore_read(struct wl1271 *wl, int addr,
95 size_t len, bool fixed) 127 void *buf, size_t len, bool fixed)
96{ 128{
97 int physical; 129 int physical;
98 130
99 physical = wlcore_translate_addr(wl, addr); 131 physical = wlcore_translate_addr(wl, addr);
100 132
101 wl1271_raw_read(wl, physical, buf, len, fixed); 133 return wlcore_raw_read(wl, physical, buf, len, fixed);
102} 134}
103 135
104static inline void wl1271_write(struct wl1271 *wl, int addr, void *buf, 136static inline int __must_check wlcore_write(struct wl1271 *wl, int addr,
105 size_t len, bool fixed) 137 void *buf, size_t len, bool fixed)
106{ 138{
107 int physical; 139 int physical;
108 140
109 physical = wlcore_translate_addr(wl, addr); 141 physical = wlcore_translate_addr(wl, addr);
110 142
111 wl1271_raw_write(wl, physical, buf, len, fixed); 143 return wlcore_raw_write(wl, physical, buf, len, fixed);
112} 144}
113 145
114static inline void wlcore_write_data(struct wl1271 *wl, int reg, void *buf, 146static inline int __must_check wlcore_write_data(struct wl1271 *wl, int reg,
115 size_t len, bool fixed) 147 void *buf, size_t len,
148 bool fixed)
116{ 149{
117 wl1271_write(wl, wl->rtable[reg], buf, len, fixed); 150 return wlcore_write(wl, wl->rtable[reg], buf, len, fixed);
118} 151}
119 152
120static inline void wlcore_read_data(struct wl1271 *wl, int reg, void *buf, 153static inline int __must_check wlcore_read_data(struct wl1271 *wl, int reg,
121 size_t len, bool fixed) 154 void *buf, size_t len,
155 bool fixed)
122{ 156{
123 wl1271_read(wl, wl->rtable[reg], buf, len, fixed); 157 return wlcore_read(wl, wl->rtable[reg], buf, len, fixed);
124} 158}
125 159
126static inline void wl1271_read_hwaddr(struct wl1271 *wl, int hwaddr, 160static inline int __must_check wlcore_read_hwaddr(struct wl1271 *wl, int hwaddr,
127 void *buf, size_t len, bool fixed) 161 void *buf, size_t len,
162 bool fixed)
128{ 163{
129 int physical; 164 int physical;
130 int addr; 165 int addr;
@@ -134,34 +169,47 @@ static inline void wl1271_read_hwaddr(struct wl1271 *wl, int hwaddr,
134 169
135 physical = wlcore_translate_addr(wl, addr); 170 physical = wlcore_translate_addr(wl, addr);
136 171
137 wl1271_raw_read(wl, physical, buf, len, fixed); 172 return wlcore_raw_read(wl, physical, buf, len, fixed);
138} 173}
139 174
140static inline u32 wl1271_read32(struct wl1271 *wl, int addr) 175static inline int __must_check wlcore_read32(struct wl1271 *wl, int addr,
176 u32 *val)
141{ 177{
142 return wl1271_raw_read32(wl, wlcore_translate_addr(wl, addr)); 178 return wlcore_raw_read32(wl, wlcore_translate_addr(wl, addr), val);
143} 179}
144 180
145static inline void wl1271_write32(struct wl1271 *wl, int addr, u32 val) 181static inline int __must_check wlcore_write32(struct wl1271 *wl, int addr,
182 u32 val)
146{ 183{
147 wl1271_raw_write32(wl, wlcore_translate_addr(wl, addr), val); 184 return wlcore_raw_write32(wl, wlcore_translate_addr(wl, addr), val);
148} 185}
149 186
150static inline u32 wlcore_read_reg(struct wl1271 *wl, int reg) 187static inline int __must_check wlcore_read_reg(struct wl1271 *wl, int reg,
188 u32 *val)
151{ 189{
152 return wl1271_raw_read32(wl, 190 return wlcore_raw_read32(wl,
153 wlcore_translate_addr(wl, wl->rtable[reg])); 191 wlcore_translate_addr(wl, wl->rtable[reg]),
192 val);
154} 193}
155 194
156static inline void wlcore_write_reg(struct wl1271 *wl, int reg, u32 val) 195static inline int __must_check wlcore_write_reg(struct wl1271 *wl, int reg,
196 u32 val)
157{ 197{
158 wl1271_raw_write32(wl, wlcore_translate_addr(wl, wl->rtable[reg]), val); 198 return wlcore_raw_write32(wl,
199 wlcore_translate_addr(wl, wl->rtable[reg]),
200 val);
159} 201}
160 202
161static inline void wl1271_power_off(struct wl1271 *wl) 203static inline void wl1271_power_off(struct wl1271 *wl)
162{ 204{
163 wl->if_ops->power(wl->dev, false); 205 int ret;
164 clear_bit(WL1271_FLAG_GPIO_POWER, &wl->flags); 206
207 if (!test_bit(WL1271_FLAG_GPIO_POWER, &wl->flags))
208 return;
209
210 ret = wl->if_ops->power(wl->dev, false);
211 if (!ret)
212 clear_bit(WL1271_FLAG_GPIO_POWER, &wl->flags);
165} 213}
166 214
167static inline int wl1271_power_on(struct wl1271 *wl) 215static inline int wl1271_power_on(struct wl1271 *wl)
@@ -173,8 +221,8 @@ static inline int wl1271_power_on(struct wl1271 *wl)
173 return ret; 221 return ret;
174} 222}
175 223
176void wlcore_set_partition(struct wl1271 *wl, 224int wlcore_set_partition(struct wl1271 *wl,
177 const struct wlcore_partition_set *p); 225 const struct wlcore_partition_set *p);
178 226
179bool wl1271_set_block_size(struct wl1271 *wl); 227bool wl1271_set_block_size(struct wl1271 *wl);
180 228
@@ -182,6 +230,4 @@ bool wl1271_set_block_size(struct wl1271 *wl);
182 230
183int wl1271_tx_dummy_packet(struct wl1271 *wl); 231int wl1271_tx_dummy_packet(struct wl1271 *wl);
184 232
185void wlcore_select_partition(struct wl1271 *wl, u8 part);
186
187#endif 233#endif
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index acef93390d3..2240cca597a 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -320,46 +320,6 @@ static void wlcore_adjust_conf(struct wl1271 *wl)
320 } 320 }
321} 321}
322 322
323static int wl1271_plt_init(struct wl1271 *wl)
324{
325 int ret;
326
327 ret = wl->ops->hw_init(wl);
328 if (ret < 0)
329 return ret;
330
331 ret = wl1271_acx_init_mem_config(wl);
332 if (ret < 0)
333 return ret;
334
335 ret = wl12xx_acx_mem_cfg(wl);
336 if (ret < 0)
337 goto out_free_memmap;
338
339 /* Enable data path */
340 ret = wl1271_cmd_data_path(wl, 1);
341 if (ret < 0)
342 goto out_free_memmap;
343
344 /* Configure for CAM power saving (ie. always active) */
345 ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_CAM);
346 if (ret < 0)
347 goto out_free_memmap;
348
349 /* configure PM */
350 ret = wl1271_acx_pm_config(wl);
351 if (ret < 0)
352 goto out_free_memmap;
353
354 return 0;
355
356 out_free_memmap:
357 kfree(wl->target_mem_map);
358 wl->target_mem_map = NULL;
359
360 return ret;
361}
362
363static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl, 323static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl,
364 struct wl12xx_vif *wlvif, 324 struct wl12xx_vif *wlvif,
365 u8 hlid, u8 tx_pkts) 325 u8 hlid, u8 tx_pkts)
@@ -387,7 +347,7 @@ static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl,
387 347
388static void wl12xx_irq_update_links_status(struct wl1271 *wl, 348static void wl12xx_irq_update_links_status(struct wl1271 *wl,
389 struct wl12xx_vif *wlvif, 349 struct wl12xx_vif *wlvif,
390 struct wl_fw_status *status) 350 struct wl_fw_status_2 *status)
391{ 351{
392 struct wl1271_link *lnk; 352 struct wl1271_link *lnk;
393 u32 cur_fw_ps_map; 353 u32 cur_fw_ps_map;
@@ -418,8 +378,9 @@ static void wl12xx_irq_update_links_status(struct wl1271 *wl,
418 } 378 }
419} 379}
420 380
421static void wl12xx_fw_status(struct wl1271 *wl, 381static int wlcore_fw_status(struct wl1271 *wl,
422 struct wl_fw_status *status) 382 struct wl_fw_status_1 *status_1,
383 struct wl_fw_status_2 *status_2)
423{ 384{
424 struct wl12xx_vif *wlvif; 385 struct wl12xx_vif *wlvif;
425 struct timespec ts; 386 struct timespec ts;
@@ -427,38 +388,42 @@ static void wl12xx_fw_status(struct wl1271 *wl,
427 int avail, freed_blocks; 388 int avail, freed_blocks;
428 int i; 389 int i;
429 size_t status_len; 390 size_t status_len;
391 int ret;
430 392
431 status_len = sizeof(*status) + wl->fw_status_priv_len; 393 status_len = WLCORE_FW_STATUS_1_LEN(wl->num_rx_desc) +
394 sizeof(*status_2) + wl->fw_status_priv_len;
432 395
433 wlcore_raw_read_data(wl, REG_RAW_FW_STATUS_ADDR, status, 396 ret = wlcore_raw_read_data(wl, REG_RAW_FW_STATUS_ADDR, status_1,
434 status_len, false); 397 status_len, false);
398 if (ret < 0)
399 return ret;
435 400
436 wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, " 401 wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, "
437 "drv_rx_counter = %d, tx_results_counter = %d)", 402 "drv_rx_counter = %d, tx_results_counter = %d)",
438 status->intr, 403 status_1->intr,
439 status->fw_rx_counter, 404 status_1->fw_rx_counter,
440 status->drv_rx_counter, 405 status_1->drv_rx_counter,
441 status->tx_results_counter); 406 status_1->tx_results_counter);
442 407
443 for (i = 0; i < NUM_TX_QUEUES; i++) { 408 for (i = 0; i < NUM_TX_QUEUES; i++) {
444 /* prevent wrap-around in freed-packets counter */ 409 /* prevent wrap-around in freed-packets counter */
445 wl->tx_allocated_pkts[i] -= 410 wl->tx_allocated_pkts[i] -=
446 (status->counters.tx_released_pkts[i] - 411 (status_2->counters.tx_released_pkts[i] -
447 wl->tx_pkts_freed[i]) & 0xff; 412 wl->tx_pkts_freed[i]) & 0xff;
448 413
449 wl->tx_pkts_freed[i] = status->counters.tx_released_pkts[i]; 414 wl->tx_pkts_freed[i] = status_2->counters.tx_released_pkts[i];
450 } 415 }
451 416
452 /* prevent wrap-around in total blocks counter */ 417 /* prevent wrap-around in total blocks counter */
453 if (likely(wl->tx_blocks_freed <= 418 if (likely(wl->tx_blocks_freed <=
454 le32_to_cpu(status->total_released_blks))) 419 le32_to_cpu(status_2->total_released_blks)))
455 freed_blocks = le32_to_cpu(status->total_released_blks) - 420 freed_blocks = le32_to_cpu(status_2->total_released_blks) -
456 wl->tx_blocks_freed; 421 wl->tx_blocks_freed;
457 else 422 else
458 freed_blocks = 0x100000000LL - wl->tx_blocks_freed + 423 freed_blocks = 0x100000000LL - wl->tx_blocks_freed +
459 le32_to_cpu(status->total_released_blks); 424 le32_to_cpu(status_2->total_released_blks);
460 425
461 wl->tx_blocks_freed = le32_to_cpu(status->total_released_blks); 426 wl->tx_blocks_freed = le32_to_cpu(status_2->total_released_blks);
462 427
463 wl->tx_allocated_blocks -= freed_blocks; 428 wl->tx_allocated_blocks -= freed_blocks;
464 429
@@ -474,7 +439,7 @@ static void wl12xx_fw_status(struct wl1271 *wl,
474 cancel_delayed_work(&wl->tx_watchdog_work); 439 cancel_delayed_work(&wl->tx_watchdog_work);
475 } 440 }
476 441
477 avail = le32_to_cpu(status->tx_total) - wl->tx_allocated_blocks; 442 avail = le32_to_cpu(status_2->tx_total) - wl->tx_allocated_blocks;
478 443
479 /* 444 /*
480 * The FW might change the total number of TX memblocks before 445 * The FW might change the total number of TX memblocks before
@@ -493,13 +458,15 @@ static void wl12xx_fw_status(struct wl1271 *wl,
493 458
494 /* for AP update num of allocated TX blocks per link and ps status */ 459 /* for AP update num of allocated TX blocks per link and ps status */
495 wl12xx_for_each_wlvif_ap(wl, wlvif) { 460 wl12xx_for_each_wlvif_ap(wl, wlvif) {
496 wl12xx_irq_update_links_status(wl, wlvif, status); 461 wl12xx_irq_update_links_status(wl, wlvif, status_2);
497 } 462 }
498 463
499 /* update the host-chipset time offset */ 464 /* update the host-chipset time offset */
500 getnstimeofday(&ts); 465 getnstimeofday(&ts);
501 wl->time_offset = (timespec_to_ns(&ts) >> 10) - 466 wl->time_offset = (timespec_to_ns(&ts) >> 10) -
502 (s64)le32_to_cpu(status->fw_localtime); 467 (s64)le32_to_cpu(status_2->fw_localtime);
468
469 return 0;
503} 470}
504 471
505static void wl1271_flush_deferred_work(struct wl1271 *wl) 472static void wl1271_flush_deferred_work(struct wl1271 *wl)
@@ -527,20 +494,15 @@ static void wl1271_netstack_work(struct work_struct *work)
527 494
528#define WL1271_IRQ_MAX_LOOPS 256 495#define WL1271_IRQ_MAX_LOOPS 256
529 496
530static irqreturn_t wl1271_irq(int irq, void *cookie) 497static int wlcore_irq_locked(struct wl1271 *wl)
531{ 498{
532 int ret; 499 int ret = 0;
533 u32 intr; 500 u32 intr;
534 int loopcount = WL1271_IRQ_MAX_LOOPS; 501 int loopcount = WL1271_IRQ_MAX_LOOPS;
535 struct wl1271 *wl = (struct wl1271 *)cookie;
536 bool done = false; 502 bool done = false;
537 unsigned int defer_count; 503 unsigned int defer_count;
538 unsigned long flags; 504 unsigned long flags;
539 505
540 /* TX might be handled here, avoid redundant work */
541 set_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
542 cancel_work_sync(&wl->tx_work);
543
544 /* 506 /*
545 * In case edge triggered interrupt must be used, we cannot iterate 507 * In case edge triggered interrupt must be used, we cannot iterate
546 * more than once without introducing race conditions with the hardirq. 508 * more than once without introducing race conditions with the hardirq.
@@ -548,8 +510,6 @@ static irqreturn_t wl1271_irq(int irq, void *cookie)
548 if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ) 510 if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ)
549 loopcount = 1; 511 loopcount = 1;
550 512
551 mutex_lock(&wl->mutex);
552
553 wl1271_debug(DEBUG_IRQ, "IRQ work"); 513 wl1271_debug(DEBUG_IRQ, "IRQ work");
554 514
555 if (unlikely(wl->state == WL1271_STATE_OFF)) 515 if (unlikely(wl->state == WL1271_STATE_OFF))
@@ -568,21 +528,33 @@ static irqreturn_t wl1271_irq(int irq, void *cookie)
568 clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags); 528 clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
569 smp_mb__after_clear_bit(); 529 smp_mb__after_clear_bit();
570 530
571 wl12xx_fw_status(wl, wl->fw_status); 531 ret = wlcore_fw_status(wl, wl->fw_status_1, wl->fw_status_2);
532 if (ret < 0)
533 goto out;
572 534
573 wlcore_hw_tx_immediate_compl(wl); 535 wlcore_hw_tx_immediate_compl(wl);
574 536
575 intr = le32_to_cpu(wl->fw_status->intr); 537 intr = le32_to_cpu(wl->fw_status_1->intr);
576 intr &= WL1271_INTR_MASK; 538 intr &= WLCORE_ALL_INTR_MASK;
577 if (!intr) { 539 if (!intr) {
578 done = true; 540 done = true;
579 continue; 541 continue;
580 } 542 }
581 543
582 if (unlikely(intr & WL1271_ACX_INTR_WATCHDOG)) { 544 if (unlikely(intr & WL1271_ACX_INTR_WATCHDOG)) {
583 wl1271_error("watchdog interrupt received! " 545 wl1271_error("HW watchdog interrupt received! starting recovery.");
546 wl->watchdog_recovery = true;
547 ret = -EIO;
548
549 /* restarting the chip. ignore any other interrupt. */
550 goto out;
551 }
552
553 if (unlikely(intr & WL1271_ACX_SW_INTR_WATCHDOG)) {
554 wl1271_error("SW watchdog interrupt received! "
584 "starting recovery."); 555 "starting recovery.");
585 wl12xx_queue_recovery_work(wl); 556 wl->watchdog_recovery = true;
557 ret = -EIO;
586 558
587 /* restarting the chip. ignore any other interrupt. */ 559 /* restarting the chip. ignore any other interrupt. */
588 goto out; 560 goto out;
@@ -591,7 +563,9 @@ static irqreturn_t wl1271_irq(int irq, void *cookie)
591 if (likely(intr & WL1271_ACX_INTR_DATA)) { 563 if (likely(intr & WL1271_ACX_INTR_DATA)) {
592 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA"); 564 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");
593 565
594 wl12xx_rx(wl, wl->fw_status); 566 ret = wlcore_rx(wl, wl->fw_status_1);
567 if (ret < 0)
568 goto out;
595 569
596 /* Check if any tx blocks were freed */ 570 /* Check if any tx blocks were freed */
597 spin_lock_irqsave(&wl->wl_lock, flags); 571 spin_lock_irqsave(&wl->wl_lock, flags);
@@ -602,13 +576,17 @@ static irqreturn_t wl1271_irq(int irq, void *cookie)
602 * In order to avoid starvation of the TX path, 576 * In order to avoid starvation of the TX path,
603 * call the work function directly. 577 * call the work function directly.
604 */ 578 */
605 wl1271_tx_work_locked(wl); 579 ret = wlcore_tx_work_locked(wl);
580 if (ret < 0)
581 goto out;
606 } else { 582 } else {
607 spin_unlock_irqrestore(&wl->wl_lock, flags); 583 spin_unlock_irqrestore(&wl->wl_lock, flags);
608 } 584 }
609 585
610 /* check for tx results */ 586 /* check for tx results */
611 wlcore_hw_tx_delayed_compl(wl); 587 ret = wlcore_hw_tx_delayed_compl(wl);
588 if (ret < 0)
589 goto out;
612 590
613 /* Make sure the deferred queues don't get too long */ 591 /* Make sure the deferred queues don't get too long */
614 defer_count = skb_queue_len(&wl->deferred_tx_queue) + 592 defer_count = skb_queue_len(&wl->deferred_tx_queue) +
@@ -619,12 +597,16 @@ static irqreturn_t wl1271_irq(int irq, void *cookie)
619 597
620 if (intr & WL1271_ACX_INTR_EVENT_A) { 598 if (intr & WL1271_ACX_INTR_EVENT_A) {
621 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_A"); 599 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_A");
622 wl1271_event_handle(wl, 0); 600 ret = wl1271_event_handle(wl, 0);
601 if (ret < 0)
602 goto out;
623 } 603 }
624 604
625 if (intr & WL1271_ACX_INTR_EVENT_B) { 605 if (intr & WL1271_ACX_INTR_EVENT_B) {
626 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_B"); 606 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_B");
627 wl1271_event_handle(wl, 1); 607 ret = wl1271_event_handle(wl, 1);
608 if (ret < 0)
609 goto out;
628 } 610 }
629 611
630 if (intr & WL1271_ACX_INTR_INIT_COMPLETE) 612 if (intr & WL1271_ACX_INTR_INIT_COMPLETE)
@@ -638,6 +620,25 @@ static irqreturn_t wl1271_irq(int irq, void *cookie)
638 wl1271_ps_elp_sleep(wl); 620 wl1271_ps_elp_sleep(wl);
639 621
640out: 622out:
623 return ret;
624}
625
626static irqreturn_t wlcore_irq(int irq, void *cookie)
627{
628 int ret;
629 unsigned long flags;
630 struct wl1271 *wl = cookie;
631
632 /* TX might be handled here, avoid redundant work */
633 set_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
634 cancel_work_sync(&wl->tx_work);
635
636 mutex_lock(&wl->mutex);
637
638 ret = wlcore_irq_locked(wl);
639 if (ret)
640 wl12xx_queue_recovery_work(wl);
641
641 spin_lock_irqsave(&wl->wl_lock, flags); 642 spin_lock_irqsave(&wl->wl_lock, flags);
642 /* In case TX was not handled here, queue TX work */ 643 /* In case TX was not handled here, queue TX work */
643 clear_bit(WL1271_FLAG_TX_PENDING, &wl->flags); 644 clear_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
@@ -743,7 +744,7 @@ out:
743 return ret; 744 return ret;
744} 745}
745 746
746static int wl1271_fetch_nvs(struct wl1271 *wl) 747static void wl1271_fetch_nvs(struct wl1271 *wl)
747{ 748{
748 const struct firmware *fw; 749 const struct firmware *fw;
749 int ret; 750 int ret;
@@ -751,16 +752,15 @@ static int wl1271_fetch_nvs(struct wl1271 *wl)
751 ret = request_firmware(&fw, WL12XX_NVS_NAME, wl->dev); 752 ret = request_firmware(&fw, WL12XX_NVS_NAME, wl->dev);
752 753
753 if (ret < 0) { 754 if (ret < 0) {
754 wl1271_error("could not get nvs file %s: %d", WL12XX_NVS_NAME, 755 wl1271_debug(DEBUG_BOOT, "could not get nvs file %s: %d",
755 ret); 756 WL12XX_NVS_NAME, ret);
756 return ret; 757 return;
757 } 758 }
758 759
759 wl->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL); 760 wl->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL);
760 761
761 if (!wl->nvs) { 762 if (!wl->nvs) {
762 wl1271_error("could not allocate memory for the nvs file"); 763 wl1271_error("could not allocate memory for the nvs file");
763 ret = -ENOMEM;
764 goto out; 764 goto out;
765 } 765 }
766 766
@@ -768,14 +768,17 @@ static int wl1271_fetch_nvs(struct wl1271 *wl)
768 768
769out: 769out:
770 release_firmware(fw); 770 release_firmware(fw);
771
772 return ret;
773} 771}
774 772
775void wl12xx_queue_recovery_work(struct wl1271 *wl) 773void wl12xx_queue_recovery_work(struct wl1271 *wl)
776{ 774{
777 if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) 775 WARN_ON(!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));
776
777 /* Avoid a recursive recovery */
778 if (!test_and_set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
779 wlcore_disable_interrupts_nosync(wl);
778 ieee80211_queue_work(wl->hw, &wl->recovery_work); 780 ieee80211_queue_work(wl->hw, &wl->recovery_work);
781 }
779} 782}
780 783
781size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen) 784size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen)
@@ -801,14 +804,17 @@ size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen)
801 return len; 804 return len;
802} 805}
803 806
807#define WLCORE_FW_LOG_END 0x2000000
808
804static void wl12xx_read_fwlog_panic(struct wl1271 *wl) 809static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
805{ 810{
806 u32 addr; 811 u32 addr;
807 u32 first_addr; 812 u32 offset;
813 u32 end_of_log;
808 u8 *block; 814 u8 *block;
815 int ret;
809 816
810 if ((wl->quirks & WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED) || 817 if ((wl->quirks & WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED) ||
811 (wl->conf.fwlog.mode != WL12XX_FWLOG_ON_DEMAND) ||
812 (wl->conf.fwlog.mem_blocks == 0)) 818 (wl->conf.fwlog.mem_blocks == 0))
813 return; 819 return;
814 820
@@ -820,34 +826,49 @@ static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
820 826
821 /* 827 /*
822 * Make sure the chip is awake and the logger isn't active. 828 * Make sure the chip is awake and the logger isn't active.
823 * This might fail if the firmware hanged. 829 * Do not send a stop fwlog command if the fw is hanged.
824 */ 830 */
825 if (!wl1271_ps_elp_wakeup(wl)) 831 if (wl1271_ps_elp_wakeup(wl))
832 goto out;
833 if (!wl->watchdog_recovery)
826 wl12xx_cmd_stop_fwlog(wl); 834 wl12xx_cmd_stop_fwlog(wl);
827 835
828 /* Read the first memory block address */ 836 /* Read the first memory block address */
829 wl12xx_fw_status(wl, wl->fw_status); 837 ret = wlcore_fw_status(wl, wl->fw_status_1, wl->fw_status_2);
830 first_addr = le32_to_cpu(wl->fw_status->log_start_addr); 838 if (ret < 0)
831 if (!first_addr) 839 goto out;
840
841 addr = le32_to_cpu(wl->fw_status_2->log_start_addr);
842 if (!addr)
832 goto out; 843 goto out;
833 844
845 if (wl->conf.fwlog.mode == WL12XX_FWLOG_CONTINUOUS) {
846 offset = sizeof(addr) + sizeof(struct wl1271_rx_descriptor);
847 end_of_log = WLCORE_FW_LOG_END;
848 } else {
849 offset = sizeof(addr);
850 end_of_log = addr;
851 }
852
834 /* Traverse the memory blocks linked list */ 853 /* Traverse the memory blocks linked list */
835 addr = first_addr;
836 do { 854 do {
837 memset(block, 0, WL12XX_HW_BLOCK_SIZE); 855 memset(block, 0, WL12XX_HW_BLOCK_SIZE);
838 wl1271_read_hwaddr(wl, addr, block, WL12XX_HW_BLOCK_SIZE, 856 ret = wlcore_read_hwaddr(wl, addr, block, WL12XX_HW_BLOCK_SIZE,
839 false); 857 false);
858 if (ret < 0)
859 goto out;
840 860
841 /* 861 /*
842 * Memory blocks are linked to one another. The first 4 bytes 862 * Memory blocks are linked to one another. The first 4 bytes
843 * of each memory block hold the hardware address of the next 863 * of each memory block hold the hardware address of the next
844 * one. The last memory block points to the first one. 864 * one. The last memory block points to the first one in
865 * on demand mode and is equal to 0x2000000 in continuous mode.
845 */ 866 */
846 addr = le32_to_cpup((__le32 *)block); 867 addr = le32_to_cpup((__le32 *)block);
847 if (!wl12xx_copy_fwlog(wl, block + sizeof(addr), 868 if (!wl12xx_copy_fwlog(wl, block + offset,
848 WL12XX_HW_BLOCK_SIZE - sizeof(addr))) 869 WL12XX_HW_BLOCK_SIZE - offset))
849 break; 870 break;
850 } while (addr && (addr != first_addr)); 871 } while (addr && (addr != end_of_log));
851 872
852 wake_up_interruptible(&wl->fwlog_waitq); 873 wake_up_interruptible(&wl->fwlog_waitq);
853 874
@@ -855,6 +876,34 @@ out:
855 kfree(block); 876 kfree(block);
856} 877}
857 878
879static void wlcore_print_recovery(struct wl1271 *wl)
880{
881 u32 pc = 0;
882 u32 hint_sts = 0;
883 int ret;
884
885 wl1271_info("Hardware recovery in progress. FW ver: %s",
886 wl->chip.fw_ver_str);
887
888 /* change partitions momentarily so we can read the FW pc */
889 ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
890 if (ret < 0)
891 return;
892
893 ret = wlcore_read_reg(wl, REG_PC_ON_RECOVERY, &pc);
894 if (ret < 0)
895 return;
896
897 ret = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR, &hint_sts);
898 if (ret < 0)
899 return;
900
901 wl1271_info("pc: 0x%x, hint_sts: 0x%08x", pc, hint_sts);
902
903 wlcore_set_partition(wl, &wl->ptable[PART_WORK]);
904}
905
906
858static void wl1271_recovery_work(struct work_struct *work) 907static void wl1271_recovery_work(struct work_struct *work)
859{ 908{
860 struct wl1271 *wl = 909 struct wl1271 *wl =
@@ -867,14 +916,9 @@ static void wl1271_recovery_work(struct work_struct *work)
867 if (wl->state != WL1271_STATE_ON || wl->plt) 916 if (wl->state != WL1271_STATE_ON || wl->plt)
868 goto out_unlock; 917 goto out_unlock;
869 918
870 /* Avoid a recursive recovery */
871 set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
872
873 wl12xx_read_fwlog_panic(wl); 919 wl12xx_read_fwlog_panic(wl);
874 920
875 wl1271_info("Hardware recovery in progress. FW ver: %s pc: 0x%x", 921 wlcore_print_recovery(wl);
876 wl->chip.fw_ver_str,
877 wlcore_read_reg(wl, REG_PC_ON_RECOVERY));
878 922
879 BUG_ON(bug_on_recovery && 923 BUG_ON(bug_on_recovery &&
880 !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags)); 924 !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));
@@ -885,8 +929,6 @@ static void wl1271_recovery_work(struct work_struct *work)
885 goto out_unlock; 929 goto out_unlock;
886 } 930 }
887 931
888 BUG_ON(bug_on_recovery);
889
890 /* 932 /*
891 * Advance security sequence number to overcome potential progress 933 * Advance security sequence number to overcome potential progress
892 * in the firmware during recovery. This doens't hurt if the network is 934 * in the firmware during recovery. This doens't hurt if the network is
@@ -900,7 +942,7 @@ static void wl1271_recovery_work(struct work_struct *work)
900 } 942 }
901 943
902 /* Prevent spurious TX during FW restart */ 944 /* Prevent spurious TX during FW restart */
903 ieee80211_stop_queues(wl->hw); 945 wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
904 946
905 if (wl->sched_scanning) { 947 if (wl->sched_scanning) {
906 ieee80211_sched_scan_stopped(wl->hw); 948 ieee80211_sched_scan_stopped(wl->hw);
@@ -914,37 +956,43 @@ static void wl1271_recovery_work(struct work_struct *work)
914 vif = wl12xx_wlvif_to_vif(wlvif); 956 vif = wl12xx_wlvif_to_vif(wlvif);
915 __wl1271_op_remove_interface(wl, vif, false); 957 __wl1271_op_remove_interface(wl, vif, false);
916 } 958 }
959 wl->watchdog_recovery = false;
917 mutex_unlock(&wl->mutex); 960 mutex_unlock(&wl->mutex);
918 wl1271_op_stop(wl->hw); 961 wl1271_op_stop(wl->hw);
919 962
920 clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
921
922 ieee80211_restart_hw(wl->hw); 963 ieee80211_restart_hw(wl->hw);
923 964
924 /* 965 /*
925 * Its safe to enable TX now - the queues are stopped after a request 966 * Its safe to enable TX now - the queues are stopped after a request
926 * to restart the HW. 967 * to restart the HW.
927 */ 968 */
928 ieee80211_wake_queues(wl->hw); 969 wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
929 return; 970 return;
930out_unlock: 971out_unlock:
972 wl->watchdog_recovery = false;
931 mutex_unlock(&wl->mutex); 973 mutex_unlock(&wl->mutex);
932} 974}
933 975
934static void wl1271_fw_wakeup(struct wl1271 *wl) 976static int wlcore_fw_wakeup(struct wl1271 *wl)
935{ 977{
936 wl1271_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP); 978 return wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
937} 979}
938 980
939static int wl1271_setup(struct wl1271 *wl) 981static int wl1271_setup(struct wl1271 *wl)
940{ 982{
941 wl->fw_status = kmalloc(sizeof(*wl->fw_status), GFP_KERNEL); 983 wl->fw_status_1 = kmalloc(WLCORE_FW_STATUS_1_LEN(wl->num_rx_desc) +
942 if (!wl->fw_status) 984 sizeof(*wl->fw_status_2) +
985 wl->fw_status_priv_len, GFP_KERNEL);
986 if (!wl->fw_status_1)
943 return -ENOMEM; 987 return -ENOMEM;
944 988
989 wl->fw_status_2 = (struct wl_fw_status_2 *)
990 (((u8 *) wl->fw_status_1) +
991 WLCORE_FW_STATUS_1_LEN(wl->num_rx_desc));
992
945 wl->tx_res_if = kmalloc(sizeof(*wl->tx_res_if), GFP_KERNEL); 993 wl->tx_res_if = kmalloc(sizeof(*wl->tx_res_if), GFP_KERNEL);
946 if (!wl->tx_res_if) { 994 if (!wl->tx_res_if) {
947 kfree(wl->fw_status); 995 kfree(wl->fw_status_1);
948 return -ENOMEM; 996 return -ENOMEM;
949 } 997 }
950 998
@@ -963,13 +1011,21 @@ static int wl12xx_set_power_on(struct wl1271 *wl)
963 wl1271_io_reset(wl); 1011 wl1271_io_reset(wl);
964 wl1271_io_init(wl); 1012 wl1271_io_init(wl);
965 1013
966 wlcore_set_partition(wl, &wl->ptable[PART_BOOT]); 1014 ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
1015 if (ret < 0)
1016 goto fail;
967 1017
968 /* ELP module wake up */ 1018 /* ELP module wake up */
969 wl1271_fw_wakeup(wl); 1019 ret = wlcore_fw_wakeup(wl);
1020 if (ret < 0)
1021 goto fail;
970 1022
971out: 1023out:
972 return ret; 1024 return ret;
1025
1026fail:
1027 wl1271_power_off(wl);
1028 return ret;
973} 1029}
974 1030
975static int wl12xx_chip_wakeup(struct wl1271 *wl, bool plt) 1031static int wl12xx_chip_wakeup(struct wl1271 *wl, bool plt)
@@ -987,13 +1043,12 @@ static int wl12xx_chip_wakeup(struct wl1271 *wl, bool plt)
987 * simplify the code and since the performance impact is 1043 * simplify the code and since the performance impact is
988 * negligible, we use the same block size for all different 1044 * negligible, we use the same block size for all different
989 * chip types. 1045 * chip types.
1046 *
1047 * Check if the bus supports blocksize alignment and, if it
1048 * doesn't, make sure we don't have the quirk.
990 */ 1049 */
991 if (wl1271_set_block_size(wl)) 1050 if (!wl1271_set_block_size(wl))
992 wl->quirks |= WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN; 1051 wl->quirks &= ~WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN;
993
994 ret = wl->ops->identify_chip(wl);
995 if (ret < 0)
996 goto out;
997 1052
998 /* TODO: make sure the lower driver has set things up correctly */ 1053 /* TODO: make sure the lower driver has set things up correctly */
999 1054
@@ -1005,13 +1060,6 @@ static int wl12xx_chip_wakeup(struct wl1271 *wl, bool plt)
1005 if (ret < 0) 1060 if (ret < 0)
1006 goto out; 1061 goto out;
1007 1062
1008 /* No NVS from netlink, try to get it from the filesystem */
1009 if (wl->nvs == NULL) {
1010 ret = wl1271_fetch_nvs(wl);
1011 if (ret < 0)
1012 goto out;
1013 }
1014
1015out: 1063out:
1016 return ret; 1064 return ret;
1017} 1065}
@@ -1039,14 +1087,10 @@ int wl1271_plt_start(struct wl1271 *wl)
1039 if (ret < 0) 1087 if (ret < 0)
1040 goto power_off; 1088 goto power_off;
1041 1089
1042 ret = wl->ops->boot(wl); 1090 ret = wl->ops->plt_init(wl);
1043 if (ret < 0) 1091 if (ret < 0)
1044 goto power_off; 1092 goto power_off;
1045 1093
1046 ret = wl1271_plt_init(wl);
1047 if (ret < 0)
1048 goto irq_disable;
1049
1050 wl->plt = true; 1094 wl->plt = true;
1051 wl->state = WL1271_STATE_ON; 1095 wl->state = WL1271_STATE_ON;
1052 wl1271_notice("firmware booted in PLT mode (%s)", 1096 wl1271_notice("firmware booted in PLT mode (%s)",
@@ -1059,19 +1103,6 @@ int wl1271_plt_start(struct wl1271 *wl)
1059 1103
1060 goto out; 1104 goto out;
1061 1105
1062irq_disable:
1063 mutex_unlock(&wl->mutex);
1064 /* Unlocking the mutex in the middle of handling is
1065 inherently unsafe. In this case we deem it safe to do,
1066 because we need to let any possibly pending IRQ out of
1067 the system (and while we are WL1271_STATE_OFF the IRQ
1068 work function will not do anything.) Also, any other
1069 possible concurrent operations will fail due to the
1070 current state, hence the wl1271 struct should be safe. */
1071 wlcore_disable_interrupts(wl);
1072 wl1271_flush_deferred_work(wl);
1073 cancel_work_sync(&wl->netstack_work);
1074 mutex_lock(&wl->mutex);
1075power_off: 1106power_off:
1076 wl1271_power_off(wl); 1107 wl1271_power_off(wl);
1077 } 1108 }
@@ -1125,6 +1156,7 @@ int wl1271_plt_stop(struct wl1271 *wl)
1125 mutex_lock(&wl->mutex); 1156 mutex_lock(&wl->mutex);
1126 wl1271_power_off(wl); 1157 wl1271_power_off(wl);
1127 wl->flags = 0; 1158 wl->flags = 0;
1159 wl->sleep_auth = WL1271_PSM_ILLEGAL;
1128 wl->state = WL1271_STATE_OFF; 1160 wl->state = WL1271_STATE_OFF;
1129 wl->plt = false; 1161 wl->plt = false;
1130 wl->rx_counter = 0; 1162 wl->rx_counter = 0;
@@ -1154,9 +1186,16 @@ static void wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
1154 1186
1155 spin_lock_irqsave(&wl->wl_lock, flags); 1187 spin_lock_irqsave(&wl->wl_lock, flags);
1156 1188
1157 /* queue the packet */ 1189 /*
1190 * drop the packet if the link is invalid or the queue is stopped
1191 * for any reason but watermark. Watermark is a "soft"-stop so we
1192 * allow these packets through.
1193 */
1158 if (hlid == WL12XX_INVALID_LINK_ID || 1194 if (hlid == WL12XX_INVALID_LINK_ID ||
1159 (wlvif && !test_bit(hlid, wlvif->links_map))) { 1195 (wlvif && !test_bit(hlid, wlvif->links_map)) ||
1196 (wlcore_is_queue_stopped(wl, q) &&
1197 !wlcore_is_queue_stopped_by_reason(wl, q,
1198 WLCORE_QUEUE_STOP_REASON_WATERMARK))) {
1160 wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d", hlid, q); 1199 wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d", hlid, q);
1161 ieee80211_free_txskb(hw, skb); 1200 ieee80211_free_txskb(hw, skb);
1162 goto out; 1201 goto out;
@@ -1174,8 +1213,8 @@ static void wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
1174 */ 1213 */
1175 if (wl->tx_queue_count[q] >= WL1271_TX_QUEUE_HIGH_WATERMARK) { 1214 if (wl->tx_queue_count[q] >= WL1271_TX_QUEUE_HIGH_WATERMARK) {
1176 wl1271_debug(DEBUG_TX, "op_tx: stopping queues for q %d", q); 1215 wl1271_debug(DEBUG_TX, "op_tx: stopping queues for q %d", q);
1177 ieee80211_stop_queue(wl->hw, mapping); 1216 wlcore_stop_queue_locked(wl, q,
1178 set_bit(q, &wl->stopped_queues_map); 1217 WLCORE_QUEUE_STOP_REASON_WATERMARK);
1179 } 1218 }
1180 1219
1181 /* 1220 /*
@@ -1209,7 +1248,7 @@ int wl1271_tx_dummy_packet(struct wl1271 *wl)
1209 1248
1210 /* The FW is low on RX memory blocks, so send the dummy packet asap */ 1249 /* The FW is low on RX memory blocks, so send the dummy packet asap */
1211 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags)) 1250 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags))
1212 wl1271_tx_work_locked(wl); 1251 return wlcore_tx_work_locked(wl);
1213 1252
1214 /* 1253 /*
1215 * If the FW TX is busy, TX work will be scheduled by the threaded 1254 * If the FW TX is busy, TX work will be scheduled by the threaded
@@ -1476,8 +1515,15 @@ static int wl1271_configure_wowlan(struct wl1271 *wl,
1476 int i, ret; 1515 int i, ret;
1477 1516
1478 if (!wow || wow->any || !wow->n_patterns) { 1517 if (!wow || wow->any || !wow->n_patterns) {
1479 wl1271_acx_default_rx_filter_enable(wl, 0, FILTER_SIGNAL); 1518 ret = wl1271_acx_default_rx_filter_enable(wl, 0,
1480 wl1271_rx_filter_clear_all(wl); 1519 FILTER_SIGNAL);
1520 if (ret)
1521 goto out;
1522
1523 ret = wl1271_rx_filter_clear_all(wl);
1524 if (ret)
1525 goto out;
1526
1481 return 0; 1527 return 0;
1482 } 1528 }
1483 1529
@@ -1493,8 +1539,13 @@ static int wl1271_configure_wowlan(struct wl1271 *wl,
1493 } 1539 }
1494 } 1540 }
1495 1541
1496 wl1271_acx_default_rx_filter_enable(wl, 0, FILTER_SIGNAL); 1542 ret = wl1271_acx_default_rx_filter_enable(wl, 0, FILTER_SIGNAL);
1497 wl1271_rx_filter_clear_all(wl); 1543 if (ret)
1544 goto out;
1545
1546 ret = wl1271_rx_filter_clear_all(wl);
1547 if (ret)
1548 goto out;
1498 1549
1499 /* Translate WoWLAN patterns into filters */ 1550 /* Translate WoWLAN patterns into filters */
1500 for (i = 0; i < wow->n_patterns; i++) { 1551 for (i = 0; i < wow->n_patterns; i++) {
@@ -1536,7 +1587,10 @@ static int wl1271_configure_suspend_sta(struct wl1271 *wl,
1536 if (ret < 0) 1587 if (ret < 0)
1537 goto out; 1588 goto out;
1538 1589
1539 wl1271_configure_wowlan(wl, wow); 1590 ret = wl1271_configure_wowlan(wl, wow);
1591 if (ret < 0)
1592 goto out_sleep;
1593
1540 ret = wl1271_acx_wake_up_conditions(wl, wlvif, 1594 ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1541 wl->conf.conn.suspend_wake_up_event, 1595 wl->conf.conn.suspend_wake_up_event,
1542 wl->conf.conn.suspend_listen_interval); 1596 wl->conf.conn.suspend_listen_interval);
@@ -1544,8 +1598,8 @@ static int wl1271_configure_suspend_sta(struct wl1271 *wl,
1544 if (ret < 0) 1598 if (ret < 0)
1545 wl1271_error("suspend: set wake up conditions failed: %d", ret); 1599 wl1271_error("suspend: set wake up conditions failed: %d", ret);
1546 1600
1601out_sleep:
1547 wl1271_ps_elp_sleep(wl); 1602 wl1271_ps_elp_sleep(wl);
1548
1549out: 1603out:
1550 return ret; 1604 return ret;
1551 1605
@@ -1624,6 +1678,12 @@ static int wl1271_op_suspend(struct ieee80211_hw *hw,
1624 wl1271_debug(DEBUG_MAC80211, "mac80211 suspend wow=%d", !!wow); 1678 wl1271_debug(DEBUG_MAC80211, "mac80211 suspend wow=%d", !!wow);
1625 WARN_ON(!wow); 1679 WARN_ON(!wow);
1626 1680
1681 /* we want to perform the recovery before suspending */
1682 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
1683 wl1271_warning("postponing suspend to perform recovery");
1684 return -EBUSY;
1685 }
1686
1627 wl1271_tx_flush(wl); 1687 wl1271_tx_flush(wl);
1628 1688
1629 mutex_lock(&wl->mutex); 1689 mutex_lock(&wl->mutex);
@@ -1664,7 +1724,8 @@ static int wl1271_op_resume(struct ieee80211_hw *hw)
1664 struct wl1271 *wl = hw->priv; 1724 struct wl1271 *wl = hw->priv;
1665 struct wl12xx_vif *wlvif; 1725 struct wl12xx_vif *wlvif;
1666 unsigned long flags; 1726 unsigned long flags;
1667 bool run_irq_work = false; 1727 bool run_irq_work = false, pending_recovery;
1728 int ret;
1668 1729
1669 wl1271_debug(DEBUG_MAC80211, "mac80211 resume wow=%d", 1730 wl1271_debug(DEBUG_MAC80211, "mac80211 resume wow=%d",
1670 wl->wow_enabled); 1731 wl->wow_enabled);
@@ -1680,17 +1741,37 @@ static int wl1271_op_resume(struct ieee80211_hw *hw)
1680 run_irq_work = true; 1741 run_irq_work = true;
1681 spin_unlock_irqrestore(&wl->wl_lock, flags); 1742 spin_unlock_irqrestore(&wl->wl_lock, flags);
1682 1743
1744 mutex_lock(&wl->mutex);
1745
1746 /* test the recovery flag before calling any SDIO functions */
1747 pending_recovery = test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1748 &wl->flags);
1749
1683 if (run_irq_work) { 1750 if (run_irq_work) {
1684 wl1271_debug(DEBUG_MAC80211, 1751 wl1271_debug(DEBUG_MAC80211,
1685 "run postponed irq_work directly"); 1752 "run postponed irq_work directly");
1686 wl1271_irq(0, wl); 1753
1754 /* don't talk to the HW if recovery is pending */
1755 if (!pending_recovery) {
1756 ret = wlcore_irq_locked(wl);
1757 if (ret)
1758 wl12xx_queue_recovery_work(wl);
1759 }
1760
1687 wlcore_enable_interrupts(wl); 1761 wlcore_enable_interrupts(wl);
1688 } 1762 }
1689 1763
1690 mutex_lock(&wl->mutex); 1764 if (pending_recovery) {
1765 wl1271_warning("queuing forgotten recovery on resume");
1766 ieee80211_queue_work(wl->hw, &wl->recovery_work);
1767 goto out;
1768 }
1769
1691 wl12xx_for_each_wlvif(wl, wlvif) { 1770 wl12xx_for_each_wlvif(wl, wlvif) {
1692 wl1271_configure_resume(wl, wlvif); 1771 wl1271_configure_resume(wl, wlvif);
1693 } 1772 }
1773
1774out:
1694 wl->wow_enabled = false; 1775 wl->wow_enabled = false;
1695 mutex_unlock(&wl->mutex); 1776 mutex_unlock(&wl->mutex);
1696 1777
@@ -1731,6 +1812,10 @@ static void wl1271_op_stop(struct ieee80211_hw *hw)
1731 wlcore_disable_interrupts(wl); 1812 wlcore_disable_interrupts(wl);
1732 mutex_lock(&wl->mutex); 1813 mutex_lock(&wl->mutex);
1733 if (wl->state == WL1271_STATE_OFF) { 1814 if (wl->state == WL1271_STATE_OFF) {
1815 if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1816 &wl->flags))
1817 wlcore_enable_interrupts(wl);
1818
1734 mutex_unlock(&wl->mutex); 1819 mutex_unlock(&wl->mutex);
1735 1820
1736 /* 1821 /*
@@ -1758,15 +1843,23 @@ static void wl1271_op_stop(struct ieee80211_hw *hw)
1758 cancel_delayed_work_sync(&wl->connection_loss_work); 1843 cancel_delayed_work_sync(&wl->connection_loss_work);
1759 1844
1760 /* let's notify MAC80211 about the remaining pending TX frames */ 1845 /* let's notify MAC80211 about the remaining pending TX frames */
1761 wl12xx_tx_reset(wl, true); 1846 wl12xx_tx_reset(wl);
1762 mutex_lock(&wl->mutex); 1847 mutex_lock(&wl->mutex);
1763 1848
1764 wl1271_power_off(wl); 1849 wl1271_power_off(wl);
1850 /*
1851 * In case a recovery was scheduled, interrupts were disabled to avoid
1852 * an interrupt storm. Now that the power is down, it is safe to
1853 * re-enable interrupts to balance the disable depth
1854 */
1855 if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
1856 wlcore_enable_interrupts(wl);
1765 1857
1766 wl->band = IEEE80211_BAND_2GHZ; 1858 wl->band = IEEE80211_BAND_2GHZ;
1767 1859
1768 wl->rx_counter = 0; 1860 wl->rx_counter = 0;
1769 wl->power_level = WL1271_DEFAULT_POWER_LEVEL; 1861 wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
1862 wl->channel_type = NL80211_CHAN_NO_HT;
1770 wl->tx_blocks_available = 0; 1863 wl->tx_blocks_available = 0;
1771 wl->tx_allocated_blocks = 0; 1864 wl->tx_allocated_blocks = 0;
1772 wl->tx_results_count = 0; 1865 wl->tx_results_count = 0;
@@ -1775,6 +1868,7 @@ static void wl1271_op_stop(struct ieee80211_hw *hw)
1775 wl->ap_fw_ps_map = 0; 1868 wl->ap_fw_ps_map = 0;
1776 wl->ap_ps_map = 0; 1869 wl->ap_ps_map = 0;
1777 wl->sched_scanning = false; 1870 wl->sched_scanning = false;
1871 wl->sleep_auth = WL1271_PSM_ILLEGAL;
1778 memset(wl->roles_map, 0, sizeof(wl->roles_map)); 1872 memset(wl->roles_map, 0, sizeof(wl->roles_map));
1779 memset(wl->links_map, 0, sizeof(wl->links_map)); 1873 memset(wl->links_map, 0, sizeof(wl->links_map));
1780 memset(wl->roc_map, 0, sizeof(wl->roc_map)); 1874 memset(wl->roc_map, 0, sizeof(wl->roc_map));
@@ -1799,8 +1893,9 @@ static void wl1271_op_stop(struct ieee80211_hw *hw)
1799 1893
1800 wl1271_debugfs_reset(wl); 1894 wl1271_debugfs_reset(wl);
1801 1895
1802 kfree(wl->fw_status); 1896 kfree(wl->fw_status_1);
1803 wl->fw_status = NULL; 1897 wl->fw_status_1 = NULL;
1898 wl->fw_status_2 = NULL;
1804 kfree(wl->tx_res_if); 1899 kfree(wl->tx_res_if);
1805 wl->tx_res_if = NULL; 1900 wl->tx_res_if = NULL;
1806 kfree(wl->target_mem_map); 1901 kfree(wl->target_mem_map);
@@ -1894,6 +1989,9 @@ static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
1894 wl12xx_allocate_rate_policy(wl, &wlvif->sta.basic_rate_idx); 1989 wl12xx_allocate_rate_policy(wl, &wlvif->sta.basic_rate_idx);
1895 wl12xx_allocate_rate_policy(wl, &wlvif->sta.ap_rate_idx); 1990 wl12xx_allocate_rate_policy(wl, &wlvif->sta.ap_rate_idx);
1896 wl12xx_allocate_rate_policy(wl, &wlvif->sta.p2p_rate_idx); 1991 wl12xx_allocate_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
1992 wlvif->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
1993 wlvif->basic_rate = CONF_TX_RATE_MASK_BASIC;
1994 wlvif->rate_set = CONF_TX_RATE_MASK_BASIC;
1897 } else { 1995 } else {
1898 /* init ap data */ 1996 /* init ap data */
1899 wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID; 1997 wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
@@ -1903,13 +2001,19 @@ static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
1903 for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++) 2001 for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
1904 wl12xx_allocate_rate_policy(wl, 2002 wl12xx_allocate_rate_policy(wl,
1905 &wlvif->ap.ucast_rate_idx[i]); 2003 &wlvif->ap.ucast_rate_idx[i]);
2004 wlvif->basic_rate_set = CONF_TX_AP_ENABLED_RATES;
2005 /*
2006 * TODO: check if basic_rate shouldn't be
2007 * wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
2008 * instead (the same thing for STA above).
2009 */
2010 wlvif->basic_rate = CONF_TX_AP_ENABLED_RATES;
2011 /* TODO: this seems to be used only for STA, check it */
2012 wlvif->rate_set = CONF_TX_AP_ENABLED_RATES;
1906 } 2013 }
1907 2014
1908 wlvif->bitrate_masks[IEEE80211_BAND_2GHZ] = wl->conf.tx.basic_rate; 2015 wlvif->bitrate_masks[IEEE80211_BAND_2GHZ] = wl->conf.tx.basic_rate;
1909 wlvif->bitrate_masks[IEEE80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5; 2016 wlvif->bitrate_masks[IEEE80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5;
1910 wlvif->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
1911 wlvif->basic_rate = CONF_TX_RATE_MASK_BASIC;
1912 wlvif->rate_set = CONF_TX_RATE_MASK_BASIC;
1913 wlvif->beacon_int = WL1271_DEFAULT_BEACON_INT; 2017 wlvif->beacon_int = WL1271_DEFAULT_BEACON_INT;
1914 2018
1915 /* 2019 /*
@@ -1919,6 +2023,7 @@ static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
1919 wlvif->band = wl->band; 2023 wlvif->band = wl->band;
1920 wlvif->channel = wl->channel; 2024 wlvif->channel = wl->channel;
1921 wlvif->power_level = wl->power_level; 2025 wlvif->power_level = wl->power_level;
2026 wlvif->channel_type = wl->channel_type;
1922 2027
1923 INIT_WORK(&wlvif->rx_streaming_enable_work, 2028 INIT_WORK(&wlvif->rx_streaming_enable_work,
1924 wl1271_rx_streaming_enable_work); 2029 wl1271_rx_streaming_enable_work);
@@ -2170,6 +2275,7 @@ static void __wl1271_op_remove_interface(struct wl1271 *wl,
2170{ 2275{
2171 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); 2276 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2172 int i, ret; 2277 int i, ret;
2278 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
2173 2279
2174 wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface"); 2280 wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface");
2175 2281
@@ -2250,11 +2356,25 @@ deinit:
2250 wlvif->role_id = WL12XX_INVALID_ROLE_ID; 2356 wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2251 wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID; 2357 wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2252 2358
2253 if (wlvif->bss_type == BSS_TYPE_AP_BSS) 2359 if (is_ap)
2254 wl->ap_count--; 2360 wl->ap_count--;
2255 else 2361 else
2256 wl->sta_count--; 2362 wl->sta_count--;
2257 2363
2364 /* Last AP, have more stations. Configure according to STA. */
2365 if (wl->ap_count == 0 && is_ap && wl->sta_count) {
2366 u8 sta_auth = wl->conf.conn.sta_sleep_auth;
2367 /* Configure for power according to debugfs */
2368 if (sta_auth != WL1271_PSM_ILLEGAL)
2369 wl1271_acx_sleep_auth(wl, sta_auth);
2370 /* Configure for power always on */
2371 else if (wl->quirks & WLCORE_QUIRK_NO_ELP)
2372 wl1271_acx_sleep_auth(wl, WL1271_PSM_CAM);
2373 /* Configure for ELP power saving */
2374 else
2375 wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
2376 }
2377
2258 mutex_unlock(&wl->mutex); 2378 mutex_unlock(&wl->mutex);
2259 2379
2260 del_timer_sync(&wlvif->rx_streaming_timer); 2380 del_timer_sync(&wlvif->rx_streaming_timer);
@@ -2444,7 +2564,7 @@ static int wl1271_sta_handle_idle(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2444 } else { 2564 } else {
2445 /* The current firmware only supports sched_scan in idle */ 2565 /* The current firmware only supports sched_scan in idle */
2446 if (wl->sched_scanning) { 2566 if (wl->sched_scanning) {
2447 wl1271_scan_sched_scan_stop(wl); 2567 wl1271_scan_sched_scan_stop(wl, wlvif);
2448 ieee80211_sched_scan_stopped(wl->hw); 2568 ieee80211_sched_scan_stopped(wl->hw);
2449 } 2569 }
2450 2570
@@ -2469,13 +2589,24 @@ static int wl12xx_config_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2469 /* if the channel changes while joined, join again */ 2589 /* if the channel changes while joined, join again */
2470 if (changed & IEEE80211_CONF_CHANGE_CHANNEL && 2590 if (changed & IEEE80211_CONF_CHANGE_CHANNEL &&
2471 ((wlvif->band != conf->channel->band) || 2591 ((wlvif->band != conf->channel->band) ||
2472 (wlvif->channel != channel))) { 2592 (wlvif->channel != channel) ||
2593 (wlvif->channel_type != conf->channel_type))) {
2473 /* send all pending packets */ 2594 /* send all pending packets */
2474 wl1271_tx_work_locked(wl); 2595 ret = wlcore_tx_work_locked(wl);
2596 if (ret < 0)
2597 return ret;
2598
2475 wlvif->band = conf->channel->band; 2599 wlvif->band = conf->channel->band;
2476 wlvif->channel = channel; 2600 wlvif->channel = channel;
2601 wlvif->channel_type = conf->channel_type;
2477 2602
2478 if (!is_ap) { 2603 if (is_ap) {
2604 wl1271_set_band_rate(wl, wlvif);
2605 ret = wl1271_init_ap_rates(wl, wlvif);
2606 if (ret < 0)
2607 wl1271_error("AP rate policy change failed %d",
2608 ret);
2609 } else {
2479 /* 2610 /*
2480 * FIXME: the mac80211 should really provide a fixed 2611 * FIXME: the mac80211 should really provide a fixed
2481 * rate to use here. for now, just use the smallest 2612 * rate to use here. for now, just use the smallest
@@ -2583,8 +2714,9 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
2583 * frames, such as the deauth. To make sure those frames reach the air, 2714 * frames, such as the deauth. To make sure those frames reach the air,
2584 * wait here until the TX queue is fully flushed. 2715 * wait here until the TX queue is fully flushed.
2585 */ 2716 */
2586 if ((changed & IEEE80211_CONF_CHANGE_IDLE) && 2717 if ((changed & IEEE80211_CONF_CHANGE_CHANNEL) ||
2587 (conf->flags & IEEE80211_CONF_IDLE)) 2718 ((changed & IEEE80211_CONF_CHANGE_IDLE) &&
2719 (conf->flags & IEEE80211_CONF_IDLE)))
2588 wl1271_tx_flush(wl); 2720 wl1271_tx_flush(wl);
2589 2721
2590 mutex_lock(&wl->mutex); 2722 mutex_lock(&wl->mutex);
@@ -2593,6 +2725,7 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
2593 if (changed & IEEE80211_CONF_CHANGE_CHANNEL) { 2725 if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
2594 wl->band = conf->channel->band; 2726 wl->band = conf->channel->band;
2595 wl->channel = channel; 2727 wl->channel = channel;
2728 wl->channel_type = conf->channel_type;
2596 } 2729 }
2597 2730
2598 if (changed & IEEE80211_CONF_CHANGE_POWER) 2731 if (changed & IEEE80211_CONF_CHANGE_POWER)
@@ -2825,17 +2958,6 @@ static int wl1271_set_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2825 int ret; 2958 int ret;
2826 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS); 2959 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
2827 2960
2828 /*
2829 * A role set to GEM cipher requires different Tx settings (namely
2830 * spare blocks). Note when we are in this mode so the HW can adjust.
2831 */
2832 if (key_type == KEY_GEM) {
2833 if (action == KEY_ADD_OR_REPLACE)
2834 wlvif->is_gem = true;
2835 else if (action == KEY_REMOVE)
2836 wlvif->is_gem = false;
2837 }
2838
2839 if (is_ap) { 2961 if (is_ap) {
2840 struct wl1271_station *wl_sta; 2962 struct wl1271_station *wl_sta;
2841 u8 hlid; 2963 u8 hlid;
@@ -2913,12 +3035,21 @@ static int wl1271_set_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2913 return 0; 3035 return 0;
2914} 3036}
2915 3037
2916static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, 3038static int wlcore_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
2917 struct ieee80211_vif *vif, 3039 struct ieee80211_vif *vif,
2918 struct ieee80211_sta *sta, 3040 struct ieee80211_sta *sta,
2919 struct ieee80211_key_conf *key_conf) 3041 struct ieee80211_key_conf *key_conf)
2920{ 3042{
2921 struct wl1271 *wl = hw->priv; 3043 struct wl1271 *wl = hw->priv;
3044
3045 return wlcore_hw_set_key(wl, cmd, vif, sta, key_conf);
3046}
3047
3048int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
3049 struct ieee80211_vif *vif,
3050 struct ieee80211_sta *sta,
3051 struct ieee80211_key_conf *key_conf)
3052{
2922 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); 3053 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2923 int ret; 3054 int ret;
2924 u32 tx_seq_32 = 0; 3055 u32 tx_seq_32 = 0;
@@ -3029,6 +3160,7 @@ out_unlock:
3029 3160
3030 return ret; 3161 return ret;
3031} 3162}
3163EXPORT_SYMBOL_GPL(wlcore_set_key);
3032 3164
3033static int wl1271_op_hw_scan(struct ieee80211_hw *hw, 3165static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
3034 struct ieee80211_vif *vif, 3166 struct ieee80211_vif *vif,
@@ -3167,6 +3299,7 @@ static void wl1271_op_sched_scan_stop(struct ieee80211_hw *hw,
3167 struct ieee80211_vif *vif) 3299 struct ieee80211_vif *vif)
3168{ 3300{
3169 struct wl1271 *wl = hw->priv; 3301 struct wl1271 *wl = hw->priv;
3302 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3170 int ret; 3303 int ret;
3171 3304
3172 wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_stop"); 3305 wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_stop");
@@ -3180,7 +3313,7 @@ static void wl1271_op_sched_scan_stop(struct ieee80211_hw *hw,
3180 if (ret < 0) 3313 if (ret < 0)
3181 goto out; 3314 goto out;
3182 3315
3183 wl1271_scan_sched_scan_stop(wl); 3316 wl1271_scan_sched_scan_stop(wl, wlvif);
3184 3317
3185 wl1271_ps_elp_sleep(wl); 3318 wl1271_ps_elp_sleep(wl);
3186out: 3319out:
@@ -3316,8 +3449,15 @@ static int wl1271_ap_set_probe_resp_tmpl(struct wl1271 *wl, u32 rates,
3316 skb->data, 3449 skb->data,
3317 skb->len, 0, 3450 skb->len, 0,
3318 rates); 3451 rates);
3319
3320 dev_kfree_skb(skb); 3452 dev_kfree_skb(skb);
3453
3454 if (ret < 0)
3455 goto out;
3456
3457 wl1271_debug(DEBUG_AP, "probe response updated");
3458 set_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags);
3459
3460out:
3321 return ret; 3461 return ret;
3322} 3462}
3323 3463
@@ -3422,6 +3562,87 @@ out:
3422 return ret; 3562 return ret;
3423} 3563}
3424 3564
3565static int wlcore_set_beacon_template(struct wl1271 *wl,
3566 struct ieee80211_vif *vif,
3567 bool is_ap)
3568{
3569 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3570 struct ieee80211_hdr *hdr;
3571 u32 min_rate;
3572 int ret;
3573 int ieoffset = offsetof(struct ieee80211_mgmt,
3574 u.beacon.variable);
3575 struct sk_buff *beacon = ieee80211_beacon_get(wl->hw, vif);
3576 u16 tmpl_id;
3577
3578 if (!beacon) {
3579 ret = -EINVAL;
3580 goto out;
3581 }
3582
3583 wl1271_debug(DEBUG_MASTER, "beacon updated");
3584
3585 ret = wl1271_ssid_set(vif, beacon, ieoffset);
3586 if (ret < 0) {
3587 dev_kfree_skb(beacon);
3588 goto out;
3589 }
3590 min_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
3591 tmpl_id = is_ap ? CMD_TEMPL_AP_BEACON :
3592 CMD_TEMPL_BEACON;
3593 ret = wl1271_cmd_template_set(wl, wlvif->role_id, tmpl_id,
3594 beacon->data,
3595 beacon->len, 0,
3596 min_rate);
3597 if (ret < 0) {
3598 dev_kfree_skb(beacon);
3599 goto out;
3600 }
3601
3602 /*
3603 * In case we already have a probe-resp beacon set explicitly
3604 * by usermode, don't use the beacon data.
3605 */
3606 if (test_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags))
3607 goto end_bcn;
3608
3609 /* remove TIM ie from probe response */
3610 wl12xx_remove_ie(beacon, WLAN_EID_TIM, ieoffset);
3611
3612 /*
3613 * remove p2p ie from probe response.
3614 * the fw reponds to probe requests that don't include
3615 * the p2p ie. probe requests with p2p ie will be passed,
3616 * and will be responded by the supplicant (the spec
3617 * forbids including the p2p ie when responding to probe
3618 * requests that didn't include it).
3619 */
3620 wl12xx_remove_vendor_ie(beacon, WLAN_OUI_WFA,
3621 WLAN_OUI_TYPE_WFA_P2P, ieoffset);
3622
3623 hdr = (struct ieee80211_hdr *) beacon->data;
3624 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
3625 IEEE80211_STYPE_PROBE_RESP);
3626 if (is_ap)
3627 ret = wl1271_ap_set_probe_resp_tmpl_legacy(wl, vif,
3628 beacon->data,
3629 beacon->len,
3630 min_rate);
3631 else
3632 ret = wl1271_cmd_template_set(wl, wlvif->role_id,
3633 CMD_TEMPL_PROBE_RESPONSE,
3634 beacon->data,
3635 beacon->len, 0,
3636 min_rate);
3637end_bcn:
3638 dev_kfree_skb(beacon);
3639 if (ret < 0)
3640 goto out;
3641
3642out:
3643 return ret;
3644}
3645
3425static int wl1271_bss_beacon_info_changed(struct wl1271 *wl, 3646static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
3426 struct ieee80211_vif *vif, 3647 struct ieee80211_vif *vif,
3427 struct ieee80211_bss_conf *bss_conf, 3648 struct ieee80211_bss_conf *bss_conf,
@@ -3440,81 +3661,12 @@ static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
3440 3661
3441 if ((changed & BSS_CHANGED_AP_PROBE_RESP) && is_ap) { 3662 if ((changed & BSS_CHANGED_AP_PROBE_RESP) && is_ap) {
3442 u32 rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set); 3663 u32 rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
3443 if (!wl1271_ap_set_probe_resp_tmpl(wl, rate, vif)) { 3664
3444 wl1271_debug(DEBUG_AP, "probe response updated"); 3665 wl1271_ap_set_probe_resp_tmpl(wl, rate, vif);
3445 set_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags);
3446 }
3447 } 3666 }
3448 3667
3449 if ((changed & BSS_CHANGED_BEACON)) { 3668 if ((changed & BSS_CHANGED_BEACON)) {
3450 struct ieee80211_hdr *hdr; 3669 ret = wlcore_set_beacon_template(wl, vif, is_ap);
3451 u32 min_rate;
3452 int ieoffset = offsetof(struct ieee80211_mgmt,
3453 u.beacon.variable);
3454 struct sk_buff *beacon = ieee80211_beacon_get(wl->hw, vif);
3455 u16 tmpl_id;
3456
3457 if (!beacon) {
3458 ret = -EINVAL;
3459 goto out;
3460 }
3461
3462 wl1271_debug(DEBUG_MASTER, "beacon updated");
3463
3464 ret = wl1271_ssid_set(vif, beacon, ieoffset);
3465 if (ret < 0) {
3466 dev_kfree_skb(beacon);
3467 goto out;
3468 }
3469 min_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
3470 tmpl_id = is_ap ? CMD_TEMPL_AP_BEACON :
3471 CMD_TEMPL_BEACON;
3472 ret = wl1271_cmd_template_set(wl, wlvif->role_id, tmpl_id,
3473 beacon->data,
3474 beacon->len, 0,
3475 min_rate);
3476 if (ret < 0) {
3477 dev_kfree_skb(beacon);
3478 goto out;
3479 }
3480
3481 /*
3482 * In case we already have a probe-resp beacon set explicitly
3483 * by usermode, don't use the beacon data.
3484 */
3485 if (test_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags))
3486 goto end_bcn;
3487
3488 /* remove TIM ie from probe response */
3489 wl12xx_remove_ie(beacon, WLAN_EID_TIM, ieoffset);
3490
3491 /*
3492 * remove p2p ie from probe response.
3493 * the fw reponds to probe requests that don't include
3494 * the p2p ie. probe requests with p2p ie will be passed,
3495 * and will be responded by the supplicant (the spec
3496 * forbids including the p2p ie when responding to probe
3497 * requests that didn't include it).
3498 */
3499 wl12xx_remove_vendor_ie(beacon, WLAN_OUI_WFA,
3500 WLAN_OUI_TYPE_WFA_P2P, ieoffset);
3501
3502 hdr = (struct ieee80211_hdr *) beacon->data;
3503 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
3504 IEEE80211_STYPE_PROBE_RESP);
3505 if (is_ap)
3506 ret = wl1271_ap_set_probe_resp_tmpl_legacy(wl, vif,
3507 beacon->data,
3508 beacon->len,
3509 min_rate);
3510 else
3511 ret = wl1271_cmd_template_set(wl, wlvif->role_id,
3512 CMD_TEMPL_PROBE_RESPONSE,
3513 beacon->data,
3514 beacon->len, 0,
3515 min_rate);
3516end_bcn:
3517 dev_kfree_skb(beacon);
3518 if (ret < 0) 3670 if (ret < 0)
3519 goto out; 3671 goto out;
3520 } 3672 }
@@ -3551,6 +3703,14 @@ static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
3551 ret = wl1271_ap_init_templates(wl, vif); 3703 ret = wl1271_ap_init_templates(wl, vif);
3552 if (ret < 0) 3704 if (ret < 0)
3553 goto out; 3705 goto out;
3706
3707 ret = wl1271_ap_set_probe_resp_tmpl(wl, wlvif->basic_rate, vif);
3708 if (ret < 0)
3709 goto out;
3710
3711 ret = wlcore_set_beacon_template(wl, vif, true);
3712 if (ret < 0)
3713 goto out;
3554 } 3714 }
3555 3715
3556 ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf, changed); 3716 ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf, changed);
@@ -3691,7 +3851,8 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
3691 sta_rate_set = sta->supp_rates[wl->hw->conf.channel->band]; 3851 sta_rate_set = sta->supp_rates[wl->hw->conf.channel->band];
3692 if (sta->ht_cap.ht_supported) 3852 if (sta->ht_cap.ht_supported)
3693 sta_rate_set |= 3853 sta_rate_set |=
3694 (sta->ht_cap.mcs.rx_mask[0] << HW_HT_RATES_OFFSET); 3854 (sta->ht_cap.mcs.rx_mask[0] << HW_HT_RATES_OFFSET) |
3855 (sta->ht_cap.mcs.rx_mask[1] << HW_MIMO_RATES_OFFSET);
3695 sta_ht_cap = sta->ht_cap; 3856 sta_ht_cap = sta->ht_cap;
3696 sta_exists = true; 3857 sta_exists = true;
3697 3858
@@ -3704,13 +3865,11 @@ sta_not_found:
3704 u32 rates; 3865 u32 rates;
3705 int ieoffset; 3866 int ieoffset;
3706 wlvif->aid = bss_conf->aid; 3867 wlvif->aid = bss_conf->aid;
3868 wlvif->channel_type = bss_conf->channel_type;
3707 wlvif->beacon_int = bss_conf->beacon_int; 3869 wlvif->beacon_int = bss_conf->beacon_int;
3708 do_join = true; 3870 do_join = true;
3709 set_assoc = true; 3871 set_assoc = true;
3710 3872
3711 /* Cancel connection_loss_work */
3712 cancel_delayed_work_sync(&wl->connection_loss_work);
3713
3714 /* 3873 /*
3715 * use basic rates from AP, and determine lowest rate 3874 * use basic rates from AP, and determine lowest rate
3716 * to use with control frames. 3875 * to use with control frames.
@@ -3960,6 +4119,17 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
3960 wl1271_debug(DEBUG_MAC80211, "mac80211 bss info changed 0x%x", 4119 wl1271_debug(DEBUG_MAC80211, "mac80211 bss info changed 0x%x",
3961 (int)changed); 4120 (int)changed);
3962 4121
4122 /*
4123 * make sure to cancel pending disconnections if our association
4124 * state changed
4125 */
4126 if (!is_ap && (changed & BSS_CHANGED_ASSOC))
4127 cancel_delayed_work_sync(&wl->connection_loss_work);
4128
4129 if (is_ap && (changed & BSS_CHANGED_BEACON_ENABLED) &&
4130 !bss_conf->enable_beacon)
4131 wl1271_tx_flush(wl);
4132
3963 mutex_lock(&wl->mutex); 4133 mutex_lock(&wl->mutex);
3964 4134
3965 if (unlikely(wl->state == WL1271_STATE_OFF)) 4135 if (unlikely(wl->state == WL1271_STATE_OFF))
@@ -4068,16 +4238,13 @@ out:
4068static int wl1271_op_get_survey(struct ieee80211_hw *hw, int idx, 4238static int wl1271_op_get_survey(struct ieee80211_hw *hw, int idx,
4069 struct survey_info *survey) 4239 struct survey_info *survey)
4070{ 4240{
4071 struct wl1271 *wl = hw->priv;
4072 struct ieee80211_conf *conf = &hw->conf; 4241 struct ieee80211_conf *conf = &hw->conf;
4073 4242
4074 if (idx != 0) 4243 if (idx != 0)
4075 return -ENOENT; 4244 return -ENOENT;
4076 4245
4077 survey->channel = conf->channel; 4246 survey->channel = conf->channel;
4078 survey->filled = SURVEY_INFO_NOISE_DBM; 4247 survey->filled = 0;
4079 survey->noise = wl->noise;
4080
4081 return 0; 4248 return 0;
4082} 4249}
4083 4250
@@ -4343,9 +4510,14 @@ static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
4343 4510
4344 case IEEE80211_AMPDU_RX_STOP: 4511 case IEEE80211_AMPDU_RX_STOP:
4345 if (!(*ba_bitmap & BIT(tid))) { 4512 if (!(*ba_bitmap & BIT(tid))) {
4346 ret = -EINVAL; 4513 /*
4347 wl1271_error("no active RX BA session on tid: %d", 4514 * this happens on reconfig - so only output a debug
4515 * message for now, and don't fail the function.
4516 */
4517 wl1271_debug(DEBUG_MAC80211,
4518 "no active RX BA session on tid: %d",
4348 tid); 4519 tid);
4520 ret = 0;
4349 break; 4521 break;
4350 } 4522 }
4351 4523
@@ -4636,7 +4808,7 @@ static const struct ieee80211_ops wl1271_ops = {
4636 .prepare_multicast = wl1271_op_prepare_multicast, 4808 .prepare_multicast = wl1271_op_prepare_multicast,
4637 .configure_filter = wl1271_op_configure_filter, 4809 .configure_filter = wl1271_op_configure_filter,
4638 .tx = wl1271_op_tx, 4810 .tx = wl1271_op_tx,
4639 .set_key = wl1271_op_set_key, 4811 .set_key = wlcore_op_set_key,
4640 .hw_scan = wl1271_op_hw_scan, 4812 .hw_scan = wl1271_op_hw_scan,
4641 .cancel_hw_scan = wl1271_op_cancel_hw_scan, 4813 .cancel_hw_scan = wl1271_op_cancel_hw_scan,
4642 .sched_scan_start = wl1271_op_sched_scan_start, 4814 .sched_scan_start = wl1271_op_sched_scan_start,
@@ -4882,18 +5054,22 @@ static int wl12xx_get_hw_info(struct wl1271 *wl)
4882 if (ret < 0) 5054 if (ret < 0)
4883 goto out; 5055 goto out;
4884 5056
4885 wl->chip.id = wlcore_read_reg(wl, REG_CHIP_ID_B); 5057 ret = wlcore_read_reg(wl, REG_CHIP_ID_B, &wl->chip.id);
5058 if (ret < 0)
5059 goto out;
4886 5060
4887 wl->fuse_oui_addr = 0; 5061 wl->fuse_oui_addr = 0;
4888 wl->fuse_nic_addr = 0; 5062 wl->fuse_nic_addr = 0;
4889 5063
4890 wl->hw_pg_ver = wl->ops->get_pg_ver(wl); 5064 ret = wl->ops->get_pg_ver(wl, &wl->hw_pg_ver);
5065 if (ret < 0)
5066 goto out;
4891 5067
4892 if (wl->ops->get_mac) 5068 if (wl->ops->get_mac)
4893 wl->ops->get_mac(wl); 5069 ret = wl->ops->get_mac(wl);
4894 5070
4895 wl1271_power_off(wl);
4896out: 5071out:
5072 wl1271_power_off(wl);
4897 return ret; 5073 return ret;
4898} 5074}
4899 5075
@@ -4905,14 +5081,8 @@ static int wl1271_register_hw(struct wl1271 *wl)
4905 if (wl->mac80211_registered) 5081 if (wl->mac80211_registered)
4906 return 0; 5082 return 0;
4907 5083
4908 ret = wl12xx_get_hw_info(wl); 5084 wl1271_fetch_nvs(wl);
4909 if (ret < 0) { 5085 if (wl->nvs != NULL) {
4910 wl1271_error("couldn't get hw info");
4911 goto out;
4912 }
4913
4914 ret = wl1271_fetch_nvs(wl);
4915 if (ret == 0) {
4916 /* NOTE: The wl->nvs->nvs element must be first, in 5086 /* NOTE: The wl->nvs->nvs element must be first, in
4917 * order to simplify the casting, we assume it is at 5087 * order to simplify the casting, we assume it is at
4918 * the beginning of the wl->nvs structure. 5088 * the beginning of the wl->nvs structure.
@@ -4960,6 +5130,29 @@ static void wl1271_unregister_hw(struct wl1271 *wl)
4960 5130
4961} 5131}
4962 5132
5133static const struct ieee80211_iface_limit wlcore_iface_limits[] = {
5134 {
5135 .max = 2,
5136 .types = BIT(NL80211_IFTYPE_STATION),
5137 },
5138 {
5139 .max = 1,
5140 .types = BIT(NL80211_IFTYPE_AP) |
5141 BIT(NL80211_IFTYPE_P2P_GO) |
5142 BIT(NL80211_IFTYPE_P2P_CLIENT),
5143 },
5144};
5145
5146static const struct ieee80211_iface_combination
5147wlcore_iface_combinations[] = {
5148 {
5149 .num_different_channels = 1,
5150 .max_interfaces = 2,
5151 .limits = wlcore_iface_limits,
5152 .n_limits = ARRAY_SIZE(wlcore_iface_limits),
5153 },
5154};
5155
4963static int wl1271_init_ieee80211(struct wl1271 *wl) 5156static int wl1271_init_ieee80211(struct wl1271 *wl)
4964{ 5157{
4965 static const u32 cipher_suites[] = { 5158 static const u32 cipher_suites[] = {
@@ -4970,9 +5163,11 @@ static int wl1271_init_ieee80211(struct wl1271 *wl)
4970 WL1271_CIPHER_SUITE_GEM, 5163 WL1271_CIPHER_SUITE_GEM,
4971 }; 5164 };
4972 5165
4973 /* The tx descriptor buffer and the TKIP space. */ 5166 /* The tx descriptor buffer */
4974 wl->hw->extra_tx_headroom = WL1271_EXTRA_SPACE_TKIP + 5167 wl->hw->extra_tx_headroom = sizeof(struct wl1271_tx_hw_descr);
4975 sizeof(struct wl1271_tx_hw_descr); 5168
5169 if (wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE)
5170 wl->hw->extra_tx_headroom += WL1271_EXTRA_SPACE_TKIP;
4976 5171
4977 /* unit us */ 5172 /* unit us */
4978 /* FIXME: find a proper value */ 5173 /* FIXME: find a proper value */
@@ -5025,12 +5220,14 @@ static int wl1271_init_ieee80211(struct wl1271 *wl)
5025 */ 5220 */
5026 memcpy(&wl->bands[IEEE80211_BAND_2GHZ], &wl1271_band_2ghz, 5221 memcpy(&wl->bands[IEEE80211_BAND_2GHZ], &wl1271_band_2ghz,
5027 sizeof(wl1271_band_2ghz)); 5222 sizeof(wl1271_band_2ghz));
5028 memcpy(&wl->bands[IEEE80211_BAND_2GHZ].ht_cap, &wl->ht_cap, 5223 memcpy(&wl->bands[IEEE80211_BAND_2GHZ].ht_cap,
5029 sizeof(wl->ht_cap)); 5224 &wl->ht_cap[IEEE80211_BAND_2GHZ],
5225 sizeof(*wl->ht_cap));
5030 memcpy(&wl->bands[IEEE80211_BAND_5GHZ], &wl1271_band_5ghz, 5226 memcpy(&wl->bands[IEEE80211_BAND_5GHZ], &wl1271_band_5ghz,
5031 sizeof(wl1271_band_5ghz)); 5227 sizeof(wl1271_band_5ghz));
5032 memcpy(&wl->bands[IEEE80211_BAND_5GHZ].ht_cap, &wl->ht_cap, 5228 memcpy(&wl->bands[IEEE80211_BAND_5GHZ].ht_cap,
5033 sizeof(wl->ht_cap)); 5229 &wl->ht_cap[IEEE80211_BAND_5GHZ],
5230 sizeof(*wl->ht_cap));
5034 5231
5035 wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = 5232 wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
5036 &wl->bands[IEEE80211_BAND_2GHZ]; 5233 &wl->bands[IEEE80211_BAND_2GHZ];
@@ -5049,6 +5246,11 @@ static int wl1271_init_ieee80211(struct wl1271 *wl)
5049 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 | 5246 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
5050 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P; 5247 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
5051 5248
5249 /* allowed interface combinations */
5250 wl->hw->wiphy->iface_combinations = wlcore_iface_combinations;
5251 wl->hw->wiphy->n_iface_combinations =
5252 ARRAY_SIZE(wlcore_iface_combinations);
5253
5052 SET_IEEE80211_DEV(wl->hw, wl->dev); 5254 SET_IEEE80211_DEV(wl->hw, wl->dev);
5053 5255
5054 wl->hw->sta_data_size = sizeof(struct wl1271_station); 5256 wl->hw->sta_data_size = sizeof(struct wl1271_station);
@@ -5117,8 +5319,10 @@ struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size)
5117 wl->rx_counter = 0; 5319 wl->rx_counter = 0;
5118 wl->power_level = WL1271_DEFAULT_POWER_LEVEL; 5320 wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
5119 wl->band = IEEE80211_BAND_2GHZ; 5321 wl->band = IEEE80211_BAND_2GHZ;
5322 wl->channel_type = NL80211_CHAN_NO_HT;
5120 wl->flags = 0; 5323 wl->flags = 0;
5121 wl->sg_enabled = true; 5324 wl->sg_enabled = true;
5325 wl->sleep_auth = WL1271_PSM_ILLEGAL;
5122 wl->hw_pg_ver = -1; 5326 wl->hw_pg_ver = -1;
5123 wl->ap_ps_map = 0; 5327 wl->ap_ps_map = 0;
5124 wl->ap_fw_ps_map = 0; 5328 wl->ap_fw_ps_map = 0;
@@ -5142,6 +5346,7 @@ struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size)
5142 wl->state = WL1271_STATE_OFF; 5346 wl->state = WL1271_STATE_OFF;
5143 wl->fw_type = WL12XX_FW_TYPE_NONE; 5347 wl->fw_type = WL12XX_FW_TYPE_NONE;
5144 mutex_init(&wl->mutex); 5348 mutex_init(&wl->mutex);
5349 mutex_init(&wl->flush_mutex);
5145 5350
5146 order = get_order(WL1271_AGGR_BUFFER_SIZE); 5351 order = get_order(WL1271_AGGR_BUFFER_SIZE);
5147 wl->aggr_buf = (u8 *)__get_free_pages(GFP_KERNEL, order); 5352 wl->aggr_buf = (u8 *)__get_free_pages(GFP_KERNEL, order);
@@ -5222,7 +5427,7 @@ int wlcore_free_hw(struct wl1271 *wl)
5222 kfree(wl->nvs); 5427 kfree(wl->nvs);
5223 wl->nvs = NULL; 5428 wl->nvs = NULL;
5224 5429
5225 kfree(wl->fw_status); 5430 kfree(wl->fw_status_1);
5226 kfree(wl->tx_res_if); 5431 kfree(wl->tx_res_if);
5227 destroy_workqueue(wl->freezable_wq); 5432 destroy_workqueue(wl->freezable_wq);
5228 5433
@@ -5279,8 +5484,6 @@ int __devinit wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
5279 wlcore_adjust_conf(wl); 5484 wlcore_adjust_conf(wl);
5280 5485
5281 wl->irq = platform_get_irq(pdev, 0); 5486 wl->irq = platform_get_irq(pdev, 0);
5282 wl->ref_clock = pdata->board_ref_clock;
5283 wl->tcxo_clock = pdata->board_tcxo_clock;
5284 wl->platform_quirks = pdata->platform_quirks; 5487 wl->platform_quirks = pdata->platform_quirks;
5285 wl->set_power = pdata->set_power; 5488 wl->set_power = pdata->set_power;
5286 wl->dev = &pdev->dev; 5489 wl->dev = &pdev->dev;
@@ -5293,7 +5496,7 @@ int __devinit wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
5293 else 5496 else
5294 irqflags = IRQF_TRIGGER_HIGH | IRQF_ONESHOT; 5497 irqflags = IRQF_TRIGGER_HIGH | IRQF_ONESHOT;
5295 5498
5296 ret = request_threaded_irq(wl->irq, wl12xx_hardirq, wl1271_irq, 5499 ret = request_threaded_irq(wl->irq, wl12xx_hardirq, wlcore_irq,
5297 irqflags, 5500 irqflags,
5298 pdev->name, wl); 5501 pdev->name, wl);
5299 if (ret < 0) { 5502 if (ret < 0) {
@@ -5316,6 +5519,16 @@ int __devinit wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
5316 } 5519 }
5317 disable_irq(wl->irq); 5520 disable_irq(wl->irq);
5318 5521
5522 ret = wl12xx_get_hw_info(wl);
5523 if (ret < 0) {
5524 wl1271_error("couldn't get hw info");
5525 goto out_irq;
5526 }
5527
5528 ret = wl->ops->identify_chip(wl);
5529 if (ret < 0)
5530 goto out_irq;
5531
5319 ret = wl1271_init_ieee80211(wl); 5532 ret = wl1271_init_ieee80211(wl);
5320 if (ret) 5533 if (ret)
5321 goto out_irq; 5534 goto out_irq;
@@ -5328,7 +5541,7 @@ int __devinit wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
5328 ret = device_create_file(wl->dev, &dev_attr_bt_coex_state); 5541 ret = device_create_file(wl->dev, &dev_attr_bt_coex_state);
5329 if (ret < 0) { 5542 if (ret < 0) {
5330 wl1271_error("failed to create sysfs file bt_coex_state"); 5543 wl1271_error("failed to create sysfs file bt_coex_state");
5331 goto out_irq; 5544 goto out_unreg;
5332 } 5545 }
5333 5546
5334 /* Create sysfs file to get HW PG version */ 5547 /* Create sysfs file to get HW PG version */
@@ -5353,6 +5566,9 @@ out_hw_pg_ver:
5353out_bt_coex_state: 5566out_bt_coex_state:
5354 device_remove_file(wl->dev, &dev_attr_bt_coex_state); 5567 device_remove_file(wl->dev, &dev_attr_bt_coex_state);
5355 5568
5569out_unreg:
5570 wl1271_unregister_hw(wl);
5571
5356out_irq: 5572out_irq:
5357 free_irq(wl->irq, wl); 5573 free_irq(wl->irq, wl);
5358 5574
diff --git a/drivers/net/wireless/ti/wlcore/ps.c b/drivers/net/wireless/ti/wlcore/ps.c
index 756eee2257b..46d36fd30eb 100644
--- a/drivers/net/wireless/ti/wlcore/ps.c
+++ b/drivers/net/wireless/ti/wlcore/ps.c
@@ -28,11 +28,14 @@
28 28
29#define WL1271_WAKEUP_TIMEOUT 500 29#define WL1271_WAKEUP_TIMEOUT 500
30 30
31#define ELP_ENTRY_DELAY 5
32
31void wl1271_elp_work(struct work_struct *work) 33void wl1271_elp_work(struct work_struct *work)
32{ 34{
33 struct delayed_work *dwork; 35 struct delayed_work *dwork;
34 struct wl1271 *wl; 36 struct wl1271 *wl;
35 struct wl12xx_vif *wlvif; 37 struct wl12xx_vif *wlvif;
38 int ret;
36 39
37 dwork = container_of(work, struct delayed_work, work); 40 dwork = container_of(work, struct delayed_work, work);
38 wl = container_of(dwork, struct wl1271, elp_work); 41 wl = container_of(dwork, struct wl1271, elp_work);
@@ -61,7 +64,12 @@ void wl1271_elp_work(struct work_struct *work)
61 } 64 }
62 65
63 wl1271_debug(DEBUG_PSM, "chip to elp"); 66 wl1271_debug(DEBUG_PSM, "chip to elp");
64 wl1271_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_SLEEP); 67 ret = wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_SLEEP);
68 if (ret < 0) {
69 wl12xx_queue_recovery_work(wl);
70 goto out;
71 }
72
65 set_bit(WL1271_FLAG_IN_ELP, &wl->flags); 73 set_bit(WL1271_FLAG_IN_ELP, &wl->flags);
66 74
67out: 75out:
@@ -72,8 +80,9 @@ out:
72void wl1271_ps_elp_sleep(struct wl1271 *wl) 80void wl1271_ps_elp_sleep(struct wl1271 *wl)
73{ 81{
74 struct wl12xx_vif *wlvif; 82 struct wl12xx_vif *wlvif;
83 u32 timeout;
75 84
76 if (wl->quirks & WLCORE_QUIRK_NO_ELP) 85 if (wl->sleep_auth != WL1271_PSM_ELP)
77 return; 86 return;
78 87
79 /* we shouldn't get consecutive sleep requests */ 88 /* we shouldn't get consecutive sleep requests */
@@ -89,8 +98,13 @@ void wl1271_ps_elp_sleep(struct wl1271 *wl)
89 return; 98 return;
90 } 99 }
91 100
101 if (wl->conf.conn.forced_ps)
102 timeout = ELP_ENTRY_DELAY;
103 else
104 timeout = wl->conf.conn.dynamic_ps_timeout;
105
92 ieee80211_queue_delayed_work(wl->hw, &wl->elp_work, 106 ieee80211_queue_delayed_work(wl->hw, &wl->elp_work,
93 msecs_to_jiffies(wl->conf.conn.dynamic_ps_timeout)); 107 msecs_to_jiffies(timeout));
94} 108}
95 109
96int wl1271_ps_elp_wakeup(struct wl1271 *wl) 110int wl1271_ps_elp_wakeup(struct wl1271 *wl)
@@ -127,7 +141,11 @@ int wl1271_ps_elp_wakeup(struct wl1271 *wl)
127 wl->elp_compl = &compl; 141 wl->elp_compl = &compl;
128 spin_unlock_irqrestore(&wl->wl_lock, flags); 142 spin_unlock_irqrestore(&wl->wl_lock, flags);
129 143
130 wl1271_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP); 144 ret = wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
145 if (ret < 0) {
146 wl12xx_queue_recovery_work(wl);
147 goto err;
148 }
131 149
132 if (!pending) { 150 if (!pending) {
133 ret = wait_for_completion_timeout( 151 ret = wait_for_completion_timeout(
@@ -185,8 +203,12 @@ int wl1271_ps_set_mode(struct wl1271 *wl, struct wl12xx_vif *wlvif,
185 203
186 set_bit(WLVIF_FLAG_IN_PS, &wlvif->flags); 204 set_bit(WLVIF_FLAG_IN_PS, &wlvif->flags);
187 205
188 /* enable beacon early termination. Not relevant for 5GHz */ 206 /*
189 if (wlvif->band == IEEE80211_BAND_2GHZ) { 207 * enable beacon early termination.
208 * Not relevant for 5GHz and for high rates.
209 */
210 if ((wlvif->band == IEEE80211_BAND_2GHZ) &&
211 (wlvif->basic_rate < CONF_HW_BIT_RATE_9MBPS)) {
190 ret = wl1271_acx_bet_enable(wl, wlvif, true); 212 ret = wl1271_acx_bet_enable(wl, wlvif, true);
191 if (ret < 0) 213 if (ret < 0)
192 return ret; 214 return ret;
@@ -196,7 +218,8 @@ int wl1271_ps_set_mode(struct wl1271 *wl, struct wl12xx_vif *wlvif,
196 wl1271_debug(DEBUG_PSM, "leaving psm"); 218 wl1271_debug(DEBUG_PSM, "leaving psm");
197 219
198 /* disable beacon early termination */ 220 /* disable beacon early termination */
199 if (wlvif->band == IEEE80211_BAND_2GHZ) { 221 if ((wlvif->band == IEEE80211_BAND_2GHZ) &&
222 (wlvif->basic_rate < CONF_HW_BIT_RATE_9MBPS)) {
200 ret = wl1271_acx_bet_enable(wl, wlvif, false); 223 ret = wl1271_acx_bet_enable(wl, wlvif, false);
201 if (ret < 0) 224 if (ret < 0)
202 return ret; 225 return ret;
diff --git a/drivers/net/wireless/ti/wlcore/rx.c b/drivers/net/wireless/ti/wlcore/rx.c
index d6a3c6b0782..f55e2f9e7ac 100644
--- a/drivers/net/wireless/ti/wlcore/rx.c
+++ b/drivers/net/wireless/ti/wlcore/rx.c
@@ -127,7 +127,7 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length,
127 } 127 }
128 128
129 if (rx_align == WLCORE_RX_BUF_UNALIGNED) 129 if (rx_align == WLCORE_RX_BUF_UNALIGNED)
130 reserved = NET_IP_ALIGN; 130 reserved = RX_BUF_ALIGN;
131 131
132 /* the data read starts with the descriptor */ 132 /* the data read starts with the descriptor */
133 desc = (struct wl1271_rx_descriptor *) data; 133 desc = (struct wl1271_rx_descriptor *) data;
@@ -175,7 +175,7 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length,
175 */ 175 */
176 memcpy(buf, data + sizeof(*desc), pkt_data_len); 176 memcpy(buf, data + sizeof(*desc), pkt_data_len);
177 if (rx_align == WLCORE_RX_BUF_PADDED) 177 if (rx_align == WLCORE_RX_BUF_PADDED)
178 skb_pull(skb, NET_IP_ALIGN); 178 skb_pull(skb, RX_BUF_ALIGN);
179 179
180 *hlid = desc->hlid; 180 *hlid = desc->hlid;
181 181
@@ -186,6 +186,7 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length,
186 is_data = 1; 186 is_data = 1;
187 187
188 wl1271_rx_status(wl, desc, IEEE80211_SKB_RXCB(skb), beacon); 188 wl1271_rx_status(wl, desc, IEEE80211_SKB_RXCB(skb), beacon);
189 wlcore_hw_set_rx_csum(wl, desc, skb);
189 190
190 seq_num = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4; 191 seq_num = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4;
191 wl1271_debug(DEBUG_RX, "rx skb 0x%p: %d B %s seq %d hlid %d", skb, 192 wl1271_debug(DEBUG_RX, "rx skb 0x%p: %d B %s seq %d hlid %d", skb,
@@ -199,17 +200,18 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length,
199 return is_data; 200 return is_data;
200} 201}
201 202
202void wl12xx_rx(struct wl1271 *wl, struct wl_fw_status *status) 203int wlcore_rx(struct wl1271 *wl, struct wl_fw_status_1 *status)
203{ 204{
204 unsigned long active_hlids[BITS_TO_LONGS(WL12XX_MAX_LINKS)] = {0}; 205 unsigned long active_hlids[BITS_TO_LONGS(WL12XX_MAX_LINKS)] = {0};
205 u32 buf_size; 206 u32 buf_size;
206 u32 fw_rx_counter = status->fw_rx_counter & NUM_RX_PKT_DESC_MOD_MASK; 207 u32 fw_rx_counter = status->fw_rx_counter % wl->num_rx_desc;
207 u32 drv_rx_counter = wl->rx_counter & NUM_RX_PKT_DESC_MOD_MASK; 208 u32 drv_rx_counter = wl->rx_counter % wl->num_rx_desc;
208 u32 rx_counter; 209 u32 rx_counter;
209 u32 pkt_len, align_pkt_len; 210 u32 pkt_len, align_pkt_len;
210 u32 pkt_offset, des; 211 u32 pkt_offset, des;
211 u8 hlid; 212 u8 hlid;
212 enum wl_rx_buf_align rx_align; 213 enum wl_rx_buf_align rx_align;
214 int ret = 0;
213 215
214 while (drv_rx_counter != fw_rx_counter) { 216 while (drv_rx_counter != fw_rx_counter) {
215 buf_size = 0; 217 buf_size = 0;
@@ -223,7 +225,7 @@ void wl12xx_rx(struct wl1271 *wl, struct wl_fw_status *status)
223 break; 225 break;
224 buf_size += align_pkt_len; 226 buf_size += align_pkt_len;
225 rx_counter++; 227 rx_counter++;
226 rx_counter &= NUM_RX_PKT_DESC_MOD_MASK; 228 rx_counter %= wl->num_rx_desc;
227 } 229 }
228 230
229 if (buf_size == 0) { 231 if (buf_size == 0) {
@@ -233,9 +235,14 @@ void wl12xx_rx(struct wl1271 *wl, struct wl_fw_status *status)
233 235
234 /* Read all available packets at once */ 236 /* Read all available packets at once */
235 des = le32_to_cpu(status->rx_pkt_descs[drv_rx_counter]); 237 des = le32_to_cpu(status->rx_pkt_descs[drv_rx_counter]);
236 wlcore_hw_prepare_read(wl, des, buf_size); 238 ret = wlcore_hw_prepare_read(wl, des, buf_size);
237 wlcore_read_data(wl, REG_SLV_MEM_DATA, wl->aggr_buf, 239 if (ret < 0)
238 buf_size, true); 240 goto out;
241
242 ret = wlcore_read_data(wl, REG_SLV_MEM_DATA, wl->aggr_buf,
243 buf_size, true);
244 if (ret < 0)
245 goto out;
239 246
240 /* Split data into separate packets */ 247 /* Split data into separate packets */
241 pkt_offset = 0; 248 pkt_offset = 0;
@@ -263,7 +270,7 @@ void wl12xx_rx(struct wl1271 *wl, struct wl_fw_status *status)
263 270
264 wl->rx_counter++; 271 wl->rx_counter++;
265 drv_rx_counter++; 272 drv_rx_counter++;
266 drv_rx_counter &= NUM_RX_PKT_DESC_MOD_MASK; 273 drv_rx_counter %= wl->num_rx_desc;
267 pkt_offset += wlcore_rx_get_align_buf_size(wl, pkt_len); 274 pkt_offset += wlcore_rx_get_align_buf_size(wl, pkt_len);
268 } 275 }
269 } 276 }
@@ -272,11 +279,17 @@ void wl12xx_rx(struct wl1271 *wl, struct wl_fw_status *status)
272 * Write the driver's packet counter to the FW. This is only required 279 * Write the driver's packet counter to the FW. This is only required
273 * for older hardware revisions 280 * for older hardware revisions
274 */ 281 */
275 if (wl->quirks & WLCORE_QUIRK_END_OF_TRANSACTION) 282 if (wl->quirks & WLCORE_QUIRK_END_OF_TRANSACTION) {
276 wl1271_write32(wl, WL12XX_REG_RX_DRIVER_COUNTER, 283 ret = wlcore_write32(wl, WL12XX_REG_RX_DRIVER_COUNTER,
277 wl->rx_counter); 284 wl->rx_counter);
285 if (ret < 0)
286 goto out;
287 }
278 288
279 wl12xx_rearm_rx_streaming(wl, active_hlids); 289 wl12xx_rearm_rx_streaming(wl, active_hlids);
290
291out:
292 return ret;
280} 293}
281 294
282#ifdef CONFIG_PM 295#ifdef CONFIG_PM
@@ -305,14 +318,19 @@ int wl1271_rx_filter_enable(struct wl1271 *wl,
305 return 0; 318 return 0;
306} 319}
307 320
308void wl1271_rx_filter_clear_all(struct wl1271 *wl) 321int wl1271_rx_filter_clear_all(struct wl1271 *wl)
309{ 322{
310 int i; 323 int i, ret = 0;
311 324
312 for (i = 0; i < WL1271_MAX_RX_FILTERS; i++) { 325 for (i = 0; i < WL1271_MAX_RX_FILTERS; i++) {
313 if (!wl->rx_filter_enabled[i]) 326 if (!wl->rx_filter_enabled[i])
314 continue; 327 continue;
315 wl1271_rx_filter_enable(wl, i, 0, NULL); 328 ret = wl1271_rx_filter_enable(wl, i, 0, NULL);
329 if (ret)
330 goto out;
316 } 331 }
332
333out:
334 return ret;
317} 335}
318#endif /* CONFIG_PM */ 336#endif /* CONFIG_PM */
diff --git a/drivers/net/wireless/ti/wlcore/rx.h b/drivers/net/wireless/ti/wlcore/rx.h
index e9a162a864c..71eba189991 100644
--- a/drivers/net/wireless/ti/wlcore/rx.h
+++ b/drivers/net/wireless/ti/wlcore/rx.h
@@ -38,8 +38,6 @@
38#define RX_DESC_PACKETID_SHIFT 11 38#define RX_DESC_PACKETID_SHIFT 11
39#define RX_MAX_PACKET_ID 3 39#define RX_MAX_PACKET_ID 3
40 40
41#define NUM_RX_PKT_DESC_MOD_MASK 7
42
43#define RX_DESC_VALID_FCS 0x0001 41#define RX_DESC_VALID_FCS 0x0001
44#define RX_DESC_MATCH_RXADDR1 0x0002 42#define RX_DESC_MATCH_RXADDR1 0x0002
45#define RX_DESC_MCAST 0x0004 43#define RX_DESC_MCAST 0x0004
@@ -102,6 +100,15 @@
102/* If set, the start of IP payload is not 4 bytes aligned */ 100/* If set, the start of IP payload is not 4 bytes aligned */
103#define RX_BUF_UNALIGNED_PAYLOAD BIT(20) 101#define RX_BUF_UNALIGNED_PAYLOAD BIT(20)
104 102
103/* If set, the buffer was padded by the FW to be 4 bytes aligned */
104#define RX_BUF_PADDED_PAYLOAD BIT(30)
105
106/*
107 * Account for the padding inserted by the FW in case of RX_ALIGNMENT
108 * or for fixing alignment in case the packet wasn't aligned.
109 */
110#define RX_BUF_ALIGN 2
111
105/* Describes the alignment state of a Rx buffer */ 112/* Describes the alignment state of a Rx buffer */
106enum wl_rx_buf_align { 113enum wl_rx_buf_align {
107 WLCORE_RX_BUF_ALIGNED, 114 WLCORE_RX_BUF_ALIGNED,
@@ -136,11 +143,11 @@ struct wl1271_rx_descriptor {
136 u8 reserved; 143 u8 reserved;
137} __packed; 144} __packed;
138 145
139void wl12xx_rx(struct wl1271 *wl, struct wl_fw_status *status); 146int wlcore_rx(struct wl1271 *wl, struct wl_fw_status_1 *status);
140u8 wl1271_rate_to_idx(int rate, enum ieee80211_band band); 147u8 wl1271_rate_to_idx(int rate, enum ieee80211_band band);
141int wl1271_rx_filter_enable(struct wl1271 *wl, 148int wl1271_rx_filter_enable(struct wl1271 *wl,
142 int index, bool enable, 149 int index, bool enable,
143 struct wl12xx_rx_filter *filter); 150 struct wl12xx_rx_filter *filter);
144void wl1271_rx_filter_clear_all(struct wl1271 *wl); 151int wl1271_rx_filter_clear_all(struct wl1271 *wl);
145 152
146#endif 153#endif
diff --git a/drivers/net/wireless/ti/wlcore/scan.c b/drivers/net/wireless/ti/wlcore/scan.c
index ade21a011c4..d9daed53ceb 100644
--- a/drivers/net/wireless/ti/wlcore/scan.c
+++ b/drivers/net/wireless/ti/wlcore/scan.c
@@ -411,7 +411,8 @@ wl1271_scan_get_sched_scan_channels(struct wl1271 *wl,
411 struct cfg80211_sched_scan_request *req, 411 struct cfg80211_sched_scan_request *req,
412 struct conn_scan_ch_params *channels, 412 struct conn_scan_ch_params *channels,
413 u32 band, bool radar, bool passive, 413 u32 band, bool radar, bool passive,
414 int start, int max_channels) 414 int start, int max_channels,
415 u8 *n_pactive_ch)
415{ 416{
416 struct conf_sched_scan_settings *c = &wl->conf.sched_scan; 417 struct conf_sched_scan_settings *c = &wl->conf.sched_scan;
417 int i, j; 418 int i, j;
@@ -479,6 +480,23 @@ wl1271_scan_get_sched_scan_channels(struct wl1271 *wl,
479 channels[j].tx_power_att = req->channels[i]->max_power; 480 channels[j].tx_power_att = req->channels[i]->max_power;
480 channels[j].channel = req->channels[i]->hw_value; 481 channels[j].channel = req->channels[i]->hw_value;
481 482
483 if ((band == IEEE80211_BAND_2GHZ) &&
484 (channels[j].channel >= 12) &&
485 (channels[j].channel <= 14) &&
486 (flags & IEEE80211_CHAN_PASSIVE_SCAN) &&
487 !force_passive) {
488 /* pactive channels treated as DFS */
489 channels[j].flags = SCAN_CHANNEL_FLAGS_DFS;
490
491 /*
492 * n_pactive_ch is counted down from the end of
493 * the passive channel list
494 */
495 (*n_pactive_ch)++;
496 wl1271_debug(DEBUG_SCAN, "n_pactive_ch = %d",
497 *n_pactive_ch);
498 }
499
482 j++; 500 j++;
483 } 501 }
484 } 502 }
@@ -491,38 +509,47 @@ wl1271_scan_sched_scan_channels(struct wl1271 *wl,
491 struct cfg80211_sched_scan_request *req, 509 struct cfg80211_sched_scan_request *req,
492 struct wl1271_cmd_sched_scan_config *cfg) 510 struct wl1271_cmd_sched_scan_config *cfg)
493{ 511{
512 u8 n_pactive_ch = 0;
513
494 cfg->passive[0] = 514 cfg->passive[0] =
495 wl1271_scan_get_sched_scan_channels(wl, req, cfg->channels_2, 515 wl1271_scan_get_sched_scan_channels(wl, req, cfg->channels_2,
496 IEEE80211_BAND_2GHZ, 516 IEEE80211_BAND_2GHZ,
497 false, true, 0, 517 false, true, 0,
498 MAX_CHANNELS_2GHZ); 518 MAX_CHANNELS_2GHZ,
519 &n_pactive_ch);
499 cfg->active[0] = 520 cfg->active[0] =
500 wl1271_scan_get_sched_scan_channels(wl, req, cfg->channels_2, 521 wl1271_scan_get_sched_scan_channels(wl, req, cfg->channels_2,
501 IEEE80211_BAND_2GHZ, 522 IEEE80211_BAND_2GHZ,
502 false, false, 523 false, false,
503 cfg->passive[0], 524 cfg->passive[0],
504 MAX_CHANNELS_2GHZ); 525 MAX_CHANNELS_2GHZ,
526 &n_pactive_ch);
505 cfg->passive[1] = 527 cfg->passive[1] =
506 wl1271_scan_get_sched_scan_channels(wl, req, cfg->channels_5, 528 wl1271_scan_get_sched_scan_channels(wl, req, cfg->channels_5,
507 IEEE80211_BAND_5GHZ, 529 IEEE80211_BAND_5GHZ,
508 false, true, 0, 530 false, true, 0,
509 MAX_CHANNELS_5GHZ); 531 MAX_CHANNELS_5GHZ,
532 &n_pactive_ch);
510 cfg->dfs = 533 cfg->dfs =
511 wl1271_scan_get_sched_scan_channels(wl, req, cfg->channels_5, 534 wl1271_scan_get_sched_scan_channels(wl, req, cfg->channels_5,
512 IEEE80211_BAND_5GHZ, 535 IEEE80211_BAND_5GHZ,
513 true, true, 536 true, true,
514 cfg->passive[1], 537 cfg->passive[1],
515 MAX_CHANNELS_5GHZ); 538 MAX_CHANNELS_5GHZ,
539 &n_pactive_ch);
516 cfg->active[1] = 540 cfg->active[1] =
517 wl1271_scan_get_sched_scan_channels(wl, req, cfg->channels_5, 541 wl1271_scan_get_sched_scan_channels(wl, req, cfg->channels_5,
518 IEEE80211_BAND_5GHZ, 542 IEEE80211_BAND_5GHZ,
519 false, false, 543 false, false,
520 cfg->passive[1] + cfg->dfs, 544 cfg->passive[1] + cfg->dfs,
521 MAX_CHANNELS_5GHZ); 545 MAX_CHANNELS_5GHZ,
546 &n_pactive_ch);
522 /* 802.11j channels are not supported yet */ 547 /* 802.11j channels are not supported yet */
523 cfg->passive[2] = 0; 548 cfg->passive[2] = 0;
524 cfg->active[2] = 0; 549 cfg->active[2] = 0;
525 550
551 cfg->n_pactive_ch = n_pactive_ch;
552
526 wl1271_debug(DEBUG_SCAN, " 2.4GHz: active %d passive %d", 553 wl1271_debug(DEBUG_SCAN, " 2.4GHz: active %d passive %d",
527 cfg->active[0], cfg->passive[0]); 554 cfg->active[0], cfg->passive[0]);
528 wl1271_debug(DEBUG_SCAN, " 5GHz: active %d passive %d", 555 wl1271_debug(DEBUG_SCAN, " 5GHz: active %d passive %d",
@@ -537,6 +564,7 @@ wl1271_scan_sched_scan_channels(struct wl1271 *wl,
537/* Returns the scan type to be used or a negative value on error */ 564/* Returns the scan type to be used or a negative value on error */
538static int 565static int
539wl12xx_scan_sched_scan_ssid_list(struct wl1271 *wl, 566wl12xx_scan_sched_scan_ssid_list(struct wl1271 *wl,
567 struct wl12xx_vif *wlvif,
540 struct cfg80211_sched_scan_request *req) 568 struct cfg80211_sched_scan_request *req)
541{ 569{
542 struct wl1271_cmd_sched_scan_ssid_list *cmd = NULL; 570 struct wl1271_cmd_sched_scan_ssid_list *cmd = NULL;
@@ -565,6 +593,7 @@ wl12xx_scan_sched_scan_ssid_list(struct wl1271 *wl,
565 goto out; 593 goto out;
566 } 594 }
567 595
596 cmd->role_id = wlvif->dev_role_id;
568 if (!n_match_ssids) { 597 if (!n_match_ssids) {
569 /* No filter, with ssids */ 598 /* No filter, with ssids */
570 type = SCAN_SSID_FILTER_DISABLED; 599 type = SCAN_SSID_FILTER_DISABLED;
@@ -603,7 +632,9 @@ wl12xx_scan_sched_scan_ssid_list(struct wl1271 *wl,
603 continue; 632 continue;
604 633
605 for (j = 0; j < cmd->n_ssids; j++) 634 for (j = 0; j < cmd->n_ssids; j++)
606 if (!memcmp(req->ssids[i].ssid, 635 if ((req->ssids[i].ssid_len ==
636 req->ssids[j].ssid_len) &&
637 !memcmp(req->ssids[i].ssid,
607 cmd->ssids[j].ssid, 638 cmd->ssids[j].ssid,
608 req->ssids[i].ssid_len)) { 639 req->ssids[i].ssid_len)) {
609 cmd->ssids[j].type = 640 cmd->ssids[j].type =
@@ -652,6 +683,7 @@ int wl1271_scan_sched_scan_config(struct wl1271 *wl,
652 if (!cfg) 683 if (!cfg)
653 return -ENOMEM; 684 return -ENOMEM;
654 685
686 cfg->role_id = wlvif->dev_role_id;
655 cfg->rssi_threshold = c->rssi_threshold; 687 cfg->rssi_threshold = c->rssi_threshold;
656 cfg->snr_threshold = c->snr_threshold; 688 cfg->snr_threshold = c->snr_threshold;
657 cfg->n_probe_reqs = c->num_probe_reqs; 689 cfg->n_probe_reqs = c->num_probe_reqs;
@@ -669,7 +701,7 @@ int wl1271_scan_sched_scan_config(struct wl1271 *wl,
669 cfg->intervals[i] = cpu_to_le32(req->interval); 701 cfg->intervals[i] = cpu_to_le32(req->interval);
670 702
671 cfg->ssid_len = 0; 703 cfg->ssid_len = 0;
672 ret = wl12xx_scan_sched_scan_ssid_list(wl, req); 704 ret = wl12xx_scan_sched_scan_ssid_list(wl, wlvif, req);
673 if (ret < 0) 705 if (ret < 0)
674 goto out; 706 goto out;
675 707
@@ -741,6 +773,7 @@ int wl1271_scan_sched_scan_start(struct wl1271 *wl, struct wl12xx_vif *wlvif)
741 if (!start) 773 if (!start)
742 return -ENOMEM; 774 return -ENOMEM;
743 775
776 start->role_id = wlvif->dev_role_id;
744 start->tag = WL1271_SCAN_DEFAULT_TAG; 777 start->tag = WL1271_SCAN_DEFAULT_TAG;
745 778
746 ret = wl1271_cmd_send(wl, CMD_START_PERIODIC_SCAN, start, 779 ret = wl1271_cmd_send(wl, CMD_START_PERIODIC_SCAN, start,
@@ -762,7 +795,7 @@ void wl1271_scan_sched_scan_results(struct wl1271 *wl)
762 ieee80211_sched_scan_results(wl->hw); 795 ieee80211_sched_scan_results(wl->hw);
763} 796}
764 797
765void wl1271_scan_sched_scan_stop(struct wl1271 *wl) 798void wl1271_scan_sched_scan_stop(struct wl1271 *wl, struct wl12xx_vif *wlvif)
766{ 799{
767 struct wl1271_cmd_sched_scan_stop *stop; 800 struct wl1271_cmd_sched_scan_stop *stop;
768 int ret = 0; 801 int ret = 0;
@@ -776,6 +809,7 @@ void wl1271_scan_sched_scan_stop(struct wl1271 *wl)
776 return; 809 return;
777 } 810 }
778 811
812 stop->role_id = wlvif->dev_role_id;
779 stop->tag = WL1271_SCAN_DEFAULT_TAG; 813 stop->tag = WL1271_SCAN_DEFAULT_TAG;
780 814
781 ret = wl1271_cmd_send(wl, CMD_STOP_PERIODIC_SCAN, stop, 815 ret = wl1271_cmd_send(wl, CMD_STOP_PERIODIC_SCAN, stop,
diff --git a/drivers/net/wireless/ti/wlcore/scan.h b/drivers/net/wireless/ti/wlcore/scan.h
index 81ee36ac207..29f3c8d6b04 100644
--- a/drivers/net/wireless/ti/wlcore/scan.h
+++ b/drivers/net/wireless/ti/wlcore/scan.h
@@ -40,7 +40,7 @@ int wl1271_scan_sched_scan_config(struct wl1271 *wl,
40 struct cfg80211_sched_scan_request *req, 40 struct cfg80211_sched_scan_request *req,
41 struct ieee80211_sched_scan_ies *ies); 41 struct ieee80211_sched_scan_ies *ies);
42int wl1271_scan_sched_scan_start(struct wl1271 *wl, struct wl12xx_vif *wlvif); 42int wl1271_scan_sched_scan_start(struct wl1271 *wl, struct wl12xx_vif *wlvif);
43void wl1271_scan_sched_scan_stop(struct wl1271 *wl); 43void wl1271_scan_sched_scan_stop(struct wl1271 *wl, struct wl12xx_vif *wlvif);
44void wl1271_scan_sched_scan_results(struct wl1271 *wl); 44void wl1271_scan_sched_scan_results(struct wl1271 *wl);
45 45
46#define WL1271_SCAN_MAX_CHANNELS 24 46#define WL1271_SCAN_MAX_CHANNELS 24
@@ -142,7 +142,8 @@ enum {
142 SCAN_BSS_TYPE_ANY, 142 SCAN_BSS_TYPE_ANY,
143}; 143};
144 144
145#define SCAN_CHANNEL_FLAGS_DFS BIT(0) 145#define SCAN_CHANNEL_FLAGS_DFS BIT(0) /* channel is passive until an
146 activity is detected on it */
146#define SCAN_CHANNEL_FLAGS_DFS_ENABLED BIT(1) 147#define SCAN_CHANNEL_FLAGS_DFS_ENABLED BIT(1)
147 148
148struct conn_scan_ch_params { 149struct conn_scan_ch_params {
@@ -185,7 +186,10 @@ struct wl1271_cmd_sched_scan_config {
185 186
186 u8 dfs; 187 u8 dfs;
187 188
188 u8 padding[3]; 189 u8 n_pactive_ch; /* number of pactive (passive until fw detects energy)
190 channels in BG band */
191 u8 role_id;
192 u8 padding[1];
189 193
190 struct conn_scan_ch_params channels_2[MAX_CHANNELS_2GHZ]; 194 struct conn_scan_ch_params channels_2[MAX_CHANNELS_2GHZ];
191 struct conn_scan_ch_params channels_5[MAX_CHANNELS_5GHZ]; 195 struct conn_scan_ch_params channels_5[MAX_CHANNELS_5GHZ];
@@ -212,21 +216,24 @@ struct wl1271_cmd_sched_scan_ssid_list {
212 216
213 u8 n_ssids; 217 u8 n_ssids;
214 struct wl1271_ssid ssids[SCHED_SCAN_MAX_SSIDS]; 218 struct wl1271_ssid ssids[SCHED_SCAN_MAX_SSIDS];
215 u8 padding[3]; 219 u8 role_id;
220 u8 padding[2];
216} __packed; 221} __packed;
217 222
218struct wl1271_cmd_sched_scan_start { 223struct wl1271_cmd_sched_scan_start {
219 struct wl1271_cmd_header header; 224 struct wl1271_cmd_header header;
220 225
221 u8 tag; 226 u8 tag;
222 u8 padding[3]; 227 u8 role_id;
228 u8 padding[2];
223} __packed; 229} __packed;
224 230
225struct wl1271_cmd_sched_scan_stop { 231struct wl1271_cmd_sched_scan_stop {
226 struct wl1271_cmd_header header; 232 struct wl1271_cmd_header header;
227 233
228 u8 tag; 234 u8 tag;
229 u8 padding[3]; 235 u8 role_id;
236 u8 padding[2];
230} __packed; 237} __packed;
231 238
232 239
diff --git a/drivers/net/wireless/ti/wlcore/sdio.c b/drivers/net/wireless/ti/wlcore/sdio.c
index 0a72347cfc4..204e69fa932 100644
--- a/drivers/net/wireless/ti/wlcore/sdio.c
+++ b/drivers/net/wireless/ti/wlcore/sdio.c
@@ -25,6 +25,7 @@
25#include <linux/module.h> 25#include <linux/module.h>
26#include <linux/vmalloc.h> 26#include <linux/vmalloc.h>
27#include <linux/platform_device.h> 27#include <linux/platform_device.h>
28#include <linux/mmc/sdio.h>
28#include <linux/mmc/sdio_func.h> 29#include <linux/mmc/sdio_func.h>
29#include <linux/mmc/sdio_ids.h> 30#include <linux/mmc/sdio_ids.h>
30#include <linux/mmc/card.h> 31#include <linux/mmc/card.h>
@@ -32,6 +33,7 @@
32#include <linux/gpio.h> 33#include <linux/gpio.h>
33#include <linux/wl12xx.h> 34#include <linux/wl12xx.h>
34#include <linux/pm_runtime.h> 35#include <linux/pm_runtime.h>
36#include <linux/printk.h>
35 37
36#include "wlcore.h" 38#include "wlcore.h"
37#include "wl12xx_80211.h" 39#include "wl12xx_80211.h"
@@ -45,6 +47,8 @@
45#define SDIO_DEVICE_ID_TI_WL1271 0x4076 47#define SDIO_DEVICE_ID_TI_WL1271 0x4076
46#endif 48#endif
47 49
50static bool dump = false;
51
48struct wl12xx_sdio_glue { 52struct wl12xx_sdio_glue {
49 struct device *dev; 53 struct device *dev;
50 struct platform_device *core; 54 struct platform_device *core;
@@ -67,8 +71,8 @@ static void wl1271_sdio_set_block_size(struct device *child,
67 sdio_release_host(func); 71 sdio_release_host(func);
68} 72}
69 73
70static void wl12xx_sdio_raw_read(struct device *child, int addr, void *buf, 74static int __must_check wl12xx_sdio_raw_read(struct device *child, int addr,
71 size_t len, bool fixed) 75 void *buf, size_t len, bool fixed)
72{ 76{
73 int ret; 77 int ret;
74 struct wl12xx_sdio_glue *glue = dev_get_drvdata(child->parent); 78 struct wl12xx_sdio_glue *glue = dev_get_drvdata(child->parent);
@@ -76,6 +80,13 @@ static void wl12xx_sdio_raw_read(struct device *child, int addr, void *buf,
76 80
77 sdio_claim_host(func); 81 sdio_claim_host(func);
78 82
83 if (unlikely(dump)) {
84 printk(KERN_DEBUG "wlcore_sdio: READ from 0x%04x\n", addr);
85 print_hex_dump(KERN_DEBUG, "wlcore_sdio: READ ",
86 DUMP_PREFIX_OFFSET, 16, 1,
87 buf, len, false);
88 }
89
79 if (unlikely(addr == HW_ACCESS_ELP_CTRL_REG)) { 90 if (unlikely(addr == HW_ACCESS_ELP_CTRL_REG)) {
80 ((u8 *)buf)[0] = sdio_f0_readb(func, addr, &ret); 91 ((u8 *)buf)[0] = sdio_f0_readb(func, addr, &ret);
81 dev_dbg(child->parent, "sdio read 52 addr 0x%x, byte 0x%02x\n", 92 dev_dbg(child->parent, "sdio read 52 addr 0x%x, byte 0x%02x\n",
@@ -92,12 +103,14 @@ static void wl12xx_sdio_raw_read(struct device *child, int addr, void *buf,
92 103
93 sdio_release_host(func); 104 sdio_release_host(func);
94 105
95 if (ret) 106 if (WARN_ON(ret))
96 dev_err(child->parent, "sdio read failed (%d)\n", ret); 107 dev_err(child->parent, "sdio read failed (%d)\n", ret);
108
109 return ret;
97} 110}
98 111
99static void wl12xx_sdio_raw_write(struct device *child, int addr, void *buf, 112static int __must_check wl12xx_sdio_raw_write(struct device *child, int addr,
100 size_t len, bool fixed) 113 void *buf, size_t len, bool fixed)
101{ 114{
102 int ret; 115 int ret;
103 struct wl12xx_sdio_glue *glue = dev_get_drvdata(child->parent); 116 struct wl12xx_sdio_glue *glue = dev_get_drvdata(child->parent);
@@ -105,6 +118,13 @@ static void wl12xx_sdio_raw_write(struct device *child, int addr, void *buf,
105 118
106 sdio_claim_host(func); 119 sdio_claim_host(func);
107 120
121 if (unlikely(dump)) {
122 printk(KERN_DEBUG "wlcore_sdio: WRITE to 0x%04x\n", addr);
123 print_hex_dump(KERN_DEBUG, "wlcore_sdio: WRITE ",
124 DUMP_PREFIX_OFFSET, 16, 1,
125 buf, len, false);
126 }
127
108 if (unlikely(addr == HW_ACCESS_ELP_CTRL_REG)) { 128 if (unlikely(addr == HW_ACCESS_ELP_CTRL_REG)) {
109 sdio_f0_writeb(func, ((u8 *)buf)[0], addr, &ret); 129 sdio_f0_writeb(func, ((u8 *)buf)[0], addr, &ret);
110 dev_dbg(child->parent, "sdio write 52 addr 0x%x, byte 0x%02x\n", 130 dev_dbg(child->parent, "sdio write 52 addr 0x%x, byte 0x%02x\n",
@@ -121,25 +141,30 @@ static void wl12xx_sdio_raw_write(struct device *child, int addr, void *buf,
121 141
122 sdio_release_host(func); 142 sdio_release_host(func);
123 143
124 if (ret) 144 if (WARN_ON(ret))
125 dev_err(child->parent, "sdio write failed (%d)\n", ret); 145 dev_err(child->parent, "sdio write failed (%d)\n", ret);
146
147 return ret;
126} 148}
127 149
128static int wl12xx_sdio_power_on(struct wl12xx_sdio_glue *glue) 150static int wl12xx_sdio_power_on(struct wl12xx_sdio_glue *glue)
129{ 151{
130 int ret; 152 int ret;
131 struct sdio_func *func = dev_to_sdio_func(glue->dev); 153 struct sdio_func *func = dev_to_sdio_func(glue->dev);
154 struct mmc_card *card = func->card;
132 155
133 /* If enabled, tell runtime PM not to power off the card */ 156 ret = pm_runtime_get_sync(&card->dev);
134 if (pm_runtime_enabled(&func->dev)) { 157 if (ret) {
135 ret = pm_runtime_get_sync(&func->dev); 158 /*
136 if (ret < 0) 159 * Runtime PM might be temporarily disabled, or the device
137 goto out; 160 * might have a positive reference counter. Make sure it is
138 } else { 161 * really powered on.
139 /* Runtime PM is disabled: power up the card manually */ 162 */
140 ret = mmc_power_restore_host(func->card->host); 163 ret = mmc_power_restore_host(card->host);
141 if (ret < 0) 164 if (ret < 0) {
165 pm_runtime_put_sync(&card->dev);
142 goto out; 166 goto out;
167 }
143 } 168 }
144 169
145 sdio_claim_host(func); 170 sdio_claim_host(func);
@@ -154,20 +179,21 @@ static int wl12xx_sdio_power_off(struct wl12xx_sdio_glue *glue)
154{ 179{
155 int ret; 180 int ret;
156 struct sdio_func *func = dev_to_sdio_func(glue->dev); 181 struct sdio_func *func = dev_to_sdio_func(glue->dev);
182 struct mmc_card *card = func->card;
157 183
158 sdio_claim_host(func); 184 sdio_claim_host(func);
159 sdio_disable_func(func); 185 sdio_disable_func(func);
160 sdio_release_host(func); 186 sdio_release_host(func);
161 187
162 /* Power off the card manually, even if runtime PM is enabled. */ 188 /* Power off the card manually in case it wasn't powered off above */
163 ret = mmc_power_save_host(func->card->host); 189 ret = mmc_power_save_host(card->host);
164 if (ret < 0) 190 if (ret < 0)
165 return ret; 191 goto out;
166 192
167 /* If enabled, let runtime PM know the card is powered off */ 193 /* Let runtime PM know the card is powered off */
168 if (pm_runtime_enabled(&func->dev)) 194 pm_runtime_put_sync(&card->dev);
169 ret = pm_runtime_put_sync(&func->dev);
170 195
196out:
171 return ret; 197 return ret;
172} 198}
173 199
@@ -196,6 +222,7 @@ static int __devinit wl1271_probe(struct sdio_func *func,
196 struct resource res[1]; 222 struct resource res[1];
197 mmc_pm_flag_t mmcflags; 223 mmc_pm_flag_t mmcflags;
198 int ret = -ENOMEM; 224 int ret = -ENOMEM;
225 const char *chip_family;
199 226
200 /* We are only able to handle the wlan function */ 227 /* We are only able to handle the wlan function */
201 if (func->num != 0x02) 228 if (func->num != 0x02)
@@ -236,7 +263,18 @@ static int __devinit wl1271_probe(struct sdio_func *func,
236 /* Tell PM core that we don't need the card to be powered now */ 263 /* Tell PM core that we don't need the card to be powered now */
237 pm_runtime_put_noidle(&func->dev); 264 pm_runtime_put_noidle(&func->dev);
238 265
239 glue->core = platform_device_alloc("wl12xx", -1); 266 /*
267 * Due to a hardware bug, we can't differentiate wl18xx from
268 * wl12xx, because both report the same device ID. The only
269 * way to differentiate is by checking the SDIO revision,
270 * which is 3.00 on the wl18xx chips.
271 */
272 if (func->card->cccr.sdio_vsn == SDIO_SDIO_REV_3_00)
273 chip_family = "wl18xx";
274 else
275 chip_family = "wl12xx";
276
277 glue->core = platform_device_alloc(chip_family, -1);
240 if (!glue->core) { 278 if (!glue->core) {
241 dev_err(glue->dev, "can't allocate platform_device"); 279 dev_err(glue->dev, "can't allocate platform_device");
242 ret = -ENOMEM; 280 ret = -ENOMEM;
@@ -367,6 +405,9 @@ static void __exit wl1271_exit(void)
367module_init(wl1271_init); 405module_init(wl1271_init);
368module_exit(wl1271_exit); 406module_exit(wl1271_exit);
369 407
408module_param(dump, bool, S_IRUSR | S_IWUSR);
409MODULE_PARM_DESC(dump, "Enable sdio read/write dumps.");
410
370MODULE_LICENSE("GPL"); 411MODULE_LICENSE("GPL");
371MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>"); 412MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
372MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>"); 413MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
diff --git a/drivers/net/wireless/ti/wlcore/spi.c b/drivers/net/wireless/ti/wlcore/spi.c
index 553cd3cbb98..6420abae40e 100644
--- a/drivers/net/wireless/ti/wlcore/spi.c
+++ b/drivers/net/wireless/ti/wlcore/spi.c
@@ -193,8 +193,8 @@ static int wl12xx_spi_read_busy(struct device *child)
193 return -ETIMEDOUT; 193 return -ETIMEDOUT;
194} 194}
195 195
196static void wl12xx_spi_raw_read(struct device *child, int addr, void *buf, 196static int __must_check wl12xx_spi_raw_read(struct device *child, int addr,
197 size_t len, bool fixed) 197 void *buf, size_t len, bool fixed)
198{ 198{
199 struct wl12xx_spi_glue *glue = dev_get_drvdata(child->parent); 199 struct wl12xx_spi_glue *glue = dev_get_drvdata(child->parent);
200 struct wl1271 *wl = dev_get_drvdata(child); 200 struct wl1271 *wl = dev_get_drvdata(child);
@@ -238,7 +238,7 @@ static void wl12xx_spi_raw_read(struct device *child, int addr, void *buf,
238 if (!(busy_buf[WL1271_BUSY_WORD_CNT - 1] & 0x1) && 238 if (!(busy_buf[WL1271_BUSY_WORD_CNT - 1] & 0x1) &&
239 wl12xx_spi_read_busy(child)) { 239 wl12xx_spi_read_busy(child)) {
240 memset(buf, 0, chunk_len); 240 memset(buf, 0, chunk_len);
241 return; 241 return 0;
242 } 242 }
243 243
244 spi_message_init(&m); 244 spi_message_init(&m);
@@ -256,10 +256,12 @@ static void wl12xx_spi_raw_read(struct device *child, int addr, void *buf,
256 buf += chunk_len; 256 buf += chunk_len;
257 len -= chunk_len; 257 len -= chunk_len;
258 } 258 }
259
260 return 0;
259} 261}
260 262
261static void wl12xx_spi_raw_write(struct device *child, int addr, void *buf, 263static int __must_check wl12xx_spi_raw_write(struct device *child, int addr,
262 size_t len, bool fixed) 264 void *buf, size_t len, bool fixed)
263{ 265{
264 struct wl12xx_spi_glue *glue = dev_get_drvdata(child->parent); 266 struct wl12xx_spi_glue *glue = dev_get_drvdata(child->parent);
265 struct spi_transfer t[2 * WSPI_MAX_NUM_OF_CHUNKS]; 267 struct spi_transfer t[2 * WSPI_MAX_NUM_OF_CHUNKS];
@@ -304,6 +306,8 @@ static void wl12xx_spi_raw_write(struct device *child, int addr, void *buf,
304 } 306 }
305 307
306 spi_sync(to_spi_device(glue->dev), &m); 308 spi_sync(to_spi_device(glue->dev), &m);
309
310 return 0;
307} 311}
308 312
309static struct wl1271_if_operations spi_ops = { 313static struct wl1271_if_operations spi_ops = {
diff --git a/drivers/net/wireless/ti/wlcore/testmode.c b/drivers/net/wireless/ti/wlcore/testmode.c
index 0e59ea2cdd3..eeb339d61d1 100644
--- a/drivers/net/wireless/ti/wlcore/testmode.c
+++ b/drivers/net/wireless/ti/wlcore/testmode.c
@@ -108,6 +108,20 @@ static int wl1271_tm_cmd_test(struct wl1271 *wl, struct nlattr *tb[])
108 } 108 }
109 109
110 if (answer) { 110 if (answer) {
111 /* If we got bip calibration answer print radio status */
112 struct wl1271_cmd_cal_p2g *params =
113 (struct wl1271_cmd_cal_p2g *) buf;
114
115 s16 radio_status = (s16) le16_to_cpu(params->radio_status);
116
117 if (params->test.id == TEST_CMD_P2G_CAL &&
118 radio_status < 0)
119 wl1271_warning("testmode cmd: radio status=%d",
120 radio_status);
121 else
122 wl1271_info("testmode cmd: radio status=%d",
123 radio_status);
124
111 len = nla_total_size(buf_len); 125 len = nla_total_size(buf_len);
112 skb = cfg80211_testmode_alloc_reply_skb(wl->hw->wiphy, len); 126 skb = cfg80211_testmode_alloc_reply_skb(wl->hw->wiphy, len);
113 if (!skb) { 127 if (!skb) {
diff --git a/drivers/net/wireless/ti/wlcore/tx.c b/drivers/net/wireless/ti/wlcore/tx.c
index 6893bc20799..8038a502693 100644
--- a/drivers/net/wireless/ti/wlcore/tx.c
+++ b/drivers/net/wireless/ti/wlcore/tx.c
@@ -72,7 +72,7 @@ static int wl1271_alloc_tx_id(struct wl1271 *wl, struct sk_buff *skb)
72 return id; 72 return id;
73} 73}
74 74
75static void wl1271_free_tx_id(struct wl1271 *wl, int id) 75void wl1271_free_tx_id(struct wl1271 *wl, int id)
76{ 76{
77 if (__test_and_clear_bit(id, wl->tx_frames_map)) { 77 if (__test_and_clear_bit(id, wl->tx_frames_map)) {
78 if (unlikely(wl->tx_frames_cnt == wl->num_tx_desc)) 78 if (unlikely(wl->tx_frames_cnt == wl->num_tx_desc))
@@ -82,6 +82,7 @@ static void wl1271_free_tx_id(struct wl1271 *wl, int id)
82 wl->tx_frames_cnt--; 82 wl->tx_frames_cnt--;
83 } 83 }
84} 84}
85EXPORT_SYMBOL(wl1271_free_tx_id);
85 86
86static void wl1271_tx_ap_update_inconnection_sta(struct wl1271 *wl, 87static void wl1271_tx_ap_update_inconnection_sta(struct wl1271 *wl,
87 struct sk_buff *skb) 88 struct sk_buff *skb)
@@ -127,6 +128,7 @@ bool wl12xx_is_dummy_packet(struct wl1271 *wl, struct sk_buff *skb)
127{ 128{
128 return wl->dummy_packet == skb; 129 return wl->dummy_packet == skb;
129} 130}
131EXPORT_SYMBOL(wl12xx_is_dummy_packet);
130 132
131u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif, 133u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif,
132 struct sk_buff *skb) 134 struct sk_buff *skb)
@@ -146,10 +148,10 @@ u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif,
146 return wl->system_hlid; 148 return wl->system_hlid;
147 149
148 hdr = (struct ieee80211_hdr *)skb->data; 150 hdr = (struct ieee80211_hdr *)skb->data;
149 if (ieee80211_is_mgmt(hdr->frame_control)) 151 if (is_multicast_ether_addr(ieee80211_get_DA(hdr)))
150 return wlvif->ap.global_hlid;
151 else
152 return wlvif->ap.bcast_hlid; 152 return wlvif->ap.bcast_hlid;
153 else
154 return wlvif->ap.global_hlid;
153 } 155 }
154} 156}
155 157
@@ -176,37 +178,34 @@ u8 wl12xx_tx_get_hlid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
176unsigned int wlcore_calc_packet_alignment(struct wl1271 *wl, 178unsigned int wlcore_calc_packet_alignment(struct wl1271 *wl,
177 unsigned int packet_length) 179 unsigned int packet_length)
178{ 180{
179 if (wl->quirks & WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN) 181 if ((wl->quirks & WLCORE_QUIRK_TX_PAD_LAST_FRAME) ||
180 return ALIGN(packet_length, WL12XX_BUS_BLOCK_SIZE); 182 !(wl->quirks & WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN))
181 else
182 return ALIGN(packet_length, WL1271_TX_ALIGN_TO); 183 return ALIGN(packet_length, WL1271_TX_ALIGN_TO);
184 else
185 return ALIGN(packet_length, WL12XX_BUS_BLOCK_SIZE);
183} 186}
184EXPORT_SYMBOL(wlcore_calc_packet_alignment); 187EXPORT_SYMBOL(wlcore_calc_packet_alignment);
185 188
186static int wl1271_tx_allocate(struct wl1271 *wl, struct wl12xx_vif *wlvif, 189static int wl1271_tx_allocate(struct wl1271 *wl, struct wl12xx_vif *wlvif,
187 struct sk_buff *skb, u32 extra, u32 buf_offset, 190 struct sk_buff *skb, u32 extra, u32 buf_offset,
188 u8 hlid) 191 u8 hlid, bool is_gem)
189{ 192{
190 struct wl1271_tx_hw_descr *desc; 193 struct wl1271_tx_hw_descr *desc;
191 u32 total_len = skb->len + sizeof(struct wl1271_tx_hw_descr) + extra; 194 u32 total_len = skb->len + sizeof(struct wl1271_tx_hw_descr) + extra;
192 u32 total_blocks; 195 u32 total_blocks;
193 int id, ret = -EBUSY, ac; 196 int id, ret = -EBUSY, ac;
194 u32 spare_blocks = wl->normal_tx_spare; 197 u32 spare_blocks;
195 bool is_dummy = false;
196 198
197 if (buf_offset + total_len > WL1271_AGGR_BUFFER_SIZE) 199 if (buf_offset + total_len > WL1271_AGGR_BUFFER_SIZE)
198 return -EAGAIN; 200 return -EAGAIN;
199 201
202 spare_blocks = wlcore_hw_get_spare_blocks(wl, is_gem);
203
200 /* allocate free identifier for the packet */ 204 /* allocate free identifier for the packet */
201 id = wl1271_alloc_tx_id(wl, skb); 205 id = wl1271_alloc_tx_id(wl, skb);
202 if (id < 0) 206 if (id < 0)
203 return id; 207 return id;
204 208
205 if (unlikely(wl12xx_is_dummy_packet(wl, skb)))
206 is_dummy = true;
207 else if (wlvif->is_gem)
208 spare_blocks = wl->gem_tx_spare;
209
210 total_blocks = wlcore_hw_calc_tx_blocks(wl, total_len, spare_blocks); 209 total_blocks = wlcore_hw_calc_tx_blocks(wl, total_len, spare_blocks);
211 210
212 if (total_blocks <= wl->tx_blocks_available) { 211 if (total_blocks <= wl->tx_blocks_available) {
@@ -228,7 +227,7 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct wl12xx_vif *wlvif,
228 ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb)); 227 ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
229 wl->tx_allocated_pkts[ac]++; 228 wl->tx_allocated_pkts[ac]++;
230 229
231 if (!is_dummy && wlvif && 230 if (!wl12xx_is_dummy_packet(wl, skb) && wlvif &&
232 wlvif->bss_type == BSS_TYPE_AP_BSS && 231 wlvif->bss_type == BSS_TYPE_AP_BSS &&
233 test_bit(hlid, wlvif->ap.sta_hlid_map)) 232 test_bit(hlid, wlvif->ap.sta_hlid_map))
234 wl->links[hlid].allocated_pkts++; 233 wl->links[hlid].allocated_pkts++;
@@ -268,6 +267,7 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct wl12xx_vif *wlvif,
268 if (extra) { 267 if (extra) {
269 int hdrlen = ieee80211_hdrlen(frame_control); 268 int hdrlen = ieee80211_hdrlen(frame_control);
270 memmove(frame_start, hdr, hdrlen); 269 memmove(frame_start, hdr, hdrlen);
270 skb_set_network_header(skb, skb_network_offset(skb) + extra);
271 } 271 }
272 272
273 /* configure packet life time */ 273 /* configure packet life time */
@@ -305,11 +305,15 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct wl12xx_vif *wlvif,
305 if (is_dummy || !wlvif) 305 if (is_dummy || !wlvif)
306 rate_idx = 0; 306 rate_idx = 0;
307 else if (wlvif->bss_type != BSS_TYPE_AP_BSS) { 307 else if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
308 /* if the packets are destined for AP (have a STA entry) 308 /*
309 send them with AP rate policies, otherwise use default 309 * if the packets are destined for AP (have a STA entry)
310 basic rates */ 310 * send them with AP rate policies (EAPOLs are an exception),
311 * otherwise use default basic rates
312 */
311 if (control->flags & IEEE80211_TX_CTL_NO_CCK_RATE) 313 if (control->flags & IEEE80211_TX_CTL_NO_CCK_RATE)
312 rate_idx = wlvif->sta.p2p_rate_idx; 314 rate_idx = wlvif->sta.p2p_rate_idx;
315 else if (skb->protocol == cpu_to_be16(ETH_P_PAE))
316 rate_idx = wlvif->sta.basic_rate_idx;
313 else if (control->control.sta) 317 else if (control->control.sta)
314 rate_idx = wlvif->sta.ap_rate_idx; 318 rate_idx = wlvif->sta.ap_rate_idx;
315 else 319 else
@@ -330,9 +334,9 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct wl12xx_vif *wlvif,
330 ieee80211_has_protected(frame_control)) 334 ieee80211_has_protected(frame_control))
331 tx_attr |= TX_HW_ATTR_HOST_ENCRYPT; 335 tx_attr |= TX_HW_ATTR_HOST_ENCRYPT;
332 336
333 desc->reserved = 0;
334 desc->tx_attr = cpu_to_le16(tx_attr); 337 desc->tx_attr = cpu_to_le16(tx_attr);
335 338
339 wlcore_hw_set_tx_desc_csum(wl, desc, skb);
336 wlcore_hw_set_tx_desc_data_len(wl, desc, skb); 340 wlcore_hw_set_tx_desc_data_len(wl, desc, skb);
337} 341}
338 342
@@ -346,16 +350,20 @@ static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct wl12xx_vif *wlvif,
346 u32 total_len; 350 u32 total_len;
347 u8 hlid; 351 u8 hlid;
348 bool is_dummy; 352 bool is_dummy;
353 bool is_gem = false;
349 354
350 if (!skb) 355 if (!skb) {
356 wl1271_error("discarding null skb");
351 return -EINVAL; 357 return -EINVAL;
358 }
352 359
353 info = IEEE80211_SKB_CB(skb); 360 info = IEEE80211_SKB_CB(skb);
354 361
355 /* TODO: handle dummy packets on multi-vifs */ 362 /* TODO: handle dummy packets on multi-vifs */
356 is_dummy = wl12xx_is_dummy_packet(wl, skb); 363 is_dummy = wl12xx_is_dummy_packet(wl, skb);
357 364
358 if (info->control.hw_key && 365 if ((wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE) &&
366 info->control.hw_key &&
359 info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP) 367 info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP)
360 extra = WL1271_EXTRA_SPACE_TKIP; 368 extra = WL1271_EXTRA_SPACE_TKIP;
361 369
@@ -373,6 +381,8 @@ static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct wl12xx_vif *wlvif,
373 return ret; 381 return ret;
374 wlvif->default_key = idx; 382 wlvif->default_key = idx;
375 } 383 }
384
385 is_gem = (cipher == WL1271_CIPHER_SUITE_GEM);
376 } 386 }
377 hlid = wl12xx_tx_get_hlid(wl, wlvif, skb); 387 hlid = wl12xx_tx_get_hlid(wl, wlvif, skb);
378 if (hlid == WL12XX_INVALID_LINK_ID) { 388 if (hlid == WL12XX_INVALID_LINK_ID) {
@@ -380,7 +390,8 @@ static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct wl12xx_vif *wlvif,
380 return -EINVAL; 390 return -EINVAL;
381 } 391 }
382 392
383 ret = wl1271_tx_allocate(wl, wlvif, skb, extra, buf_offset, hlid); 393 ret = wl1271_tx_allocate(wl, wlvif, skb, extra, buf_offset, hlid,
394 is_gem);
384 if (ret < 0) 395 if (ret < 0)
385 return ret; 396 return ret;
386 397
@@ -425,10 +436,10 @@ u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set,
425 rate_set >>= 1; 436 rate_set >>= 1;
426 } 437 }
427 438
428 /* MCS rates indication are on bits 16 - 23 */ 439 /* MCS rates indication are on bits 16 - 31 */
429 rate_set >>= HW_HT_RATES_OFFSET - band->n_bitrates; 440 rate_set >>= HW_HT_RATES_OFFSET - band->n_bitrates;
430 441
431 for (bit = 0; bit < 8; bit++) { 442 for (bit = 0; bit < 16; bit++) {
432 if (rate_set & 0x1) 443 if (rate_set & 0x1)
433 enabled_rates |= (CONF_HW_BIT_RATE_MCS_0 << bit); 444 enabled_rates |= (CONF_HW_BIT_RATE_MCS_0 << bit);
434 rate_set >>= 1; 445 rate_set >>= 1;
@@ -439,18 +450,15 @@ u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set,
439 450
440void wl1271_handle_tx_low_watermark(struct wl1271 *wl) 451void wl1271_handle_tx_low_watermark(struct wl1271 *wl)
441{ 452{
442 unsigned long flags;
443 int i; 453 int i;
444 454
445 for (i = 0; i < NUM_TX_QUEUES; i++) { 455 for (i = 0; i < NUM_TX_QUEUES; i++) {
446 if (test_bit(i, &wl->stopped_queues_map) && 456 if (wlcore_is_queue_stopped_by_reason(wl, i,
457 WLCORE_QUEUE_STOP_REASON_WATERMARK) &&
447 wl->tx_queue_count[i] <= WL1271_TX_QUEUE_LOW_WATERMARK) { 458 wl->tx_queue_count[i] <= WL1271_TX_QUEUE_LOW_WATERMARK) {
448 /* firmware buffer has space, restart queues */ 459 /* firmware buffer has space, restart queues */
449 spin_lock_irqsave(&wl->wl_lock, flags); 460 wlcore_wake_queue(wl, i,
450 ieee80211_wake_queue(wl->hw, 461 WLCORE_QUEUE_STOP_REASON_WATERMARK);
451 wl1271_tx_get_mac80211_queue(i));
452 clear_bit(i, &wl->stopped_queues_map);
453 spin_unlock_irqrestore(&wl->wl_lock, flags);
454 } 462 }
455 } 463 }
456} 464}
@@ -656,18 +664,29 @@ void wl12xx_rearm_rx_streaming(struct wl1271 *wl, unsigned long *active_hlids)
656 } 664 }
657} 665}
658 666
659void wl1271_tx_work_locked(struct wl1271 *wl) 667/*
668 * Returns failure values only in case of failed bus ops within this function.
669 * wl1271_prepare_tx_frame retvals won't be returned in order to avoid
670 * triggering recovery by higher layers when not necessary.
671 * In case a FW command fails within wl1271_prepare_tx_frame fails a recovery
672 * will be queued in wl1271_cmd_send. -EAGAIN/-EBUSY from prepare_tx_frame
673 * can occur and are legitimate so don't propagate. -EINVAL will emit a WARNING
674 * within prepare_tx_frame code but there's nothing we should do about those
675 * as well.
676 */
677int wlcore_tx_work_locked(struct wl1271 *wl)
660{ 678{
661 struct wl12xx_vif *wlvif; 679 struct wl12xx_vif *wlvif;
662 struct sk_buff *skb; 680 struct sk_buff *skb;
663 struct wl1271_tx_hw_descr *desc; 681 struct wl1271_tx_hw_descr *desc;
664 u32 buf_offset = 0; 682 u32 buf_offset = 0, last_len = 0;
665 bool sent_packets = false; 683 bool sent_packets = false;
666 unsigned long active_hlids[BITS_TO_LONGS(WL12XX_MAX_LINKS)] = {0}; 684 unsigned long active_hlids[BITS_TO_LONGS(WL12XX_MAX_LINKS)] = {0};
667 int ret; 685 int ret = 0;
686 int bus_ret = 0;
668 687
669 if (unlikely(wl->state == WL1271_STATE_OFF)) 688 if (unlikely(wl->state == WL1271_STATE_OFF))
670 return; 689 return 0;
671 690
672 while ((skb = wl1271_skb_dequeue(wl))) { 691 while ((skb = wl1271_skb_dequeue(wl))) {
673 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 692 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -685,8 +704,14 @@ void wl1271_tx_work_locked(struct wl1271 *wl)
685 * Flush buffer and try again. 704 * Flush buffer and try again.
686 */ 705 */
687 wl1271_skb_queue_head(wl, wlvif, skb); 706 wl1271_skb_queue_head(wl, wlvif, skb);
688 wlcore_write_data(wl, REG_SLV_MEM_DATA, wl->aggr_buf, 707
689 buf_offset, true); 708 buf_offset = wlcore_hw_pre_pkt_send(wl, buf_offset,
709 last_len);
710 bus_ret = wlcore_write_data(wl, REG_SLV_MEM_DATA,
711 wl->aggr_buf, buf_offset, true);
712 if (bus_ret < 0)
713 goto out;
714
690 sent_packets = true; 715 sent_packets = true;
691 buf_offset = 0; 716 buf_offset = 0;
692 continue; 717 continue;
@@ -710,7 +735,8 @@ void wl1271_tx_work_locked(struct wl1271 *wl)
710 ieee80211_free_txskb(wl->hw, skb); 735 ieee80211_free_txskb(wl->hw, skb);
711 goto out_ack; 736 goto out_ack;
712 } 737 }
713 buf_offset += ret; 738 last_len = ret;
739 buf_offset += last_len;
714 wl->tx_packets_count++; 740 wl->tx_packets_count++;
715 if (has_data) { 741 if (has_data) {
716 desc = (struct wl1271_tx_hw_descr *) skb->data; 742 desc = (struct wl1271_tx_hw_descr *) skb->data;
@@ -720,8 +746,12 @@ void wl1271_tx_work_locked(struct wl1271 *wl)
720 746
721out_ack: 747out_ack:
722 if (buf_offset) { 748 if (buf_offset) {
723 wlcore_write_data(wl, REG_SLV_MEM_DATA, wl->aggr_buf, 749 buf_offset = wlcore_hw_pre_pkt_send(wl, buf_offset, last_len);
724 buf_offset, true); 750 bus_ret = wlcore_write_data(wl, REG_SLV_MEM_DATA, wl->aggr_buf,
751 buf_offset, true);
752 if (bus_ret < 0)
753 goto out;
754
725 sent_packets = true; 755 sent_packets = true;
726 } 756 }
727 if (sent_packets) { 757 if (sent_packets) {
@@ -729,13 +759,19 @@ out_ack:
729 * Interrupt the firmware with the new packets. This is only 759 * Interrupt the firmware with the new packets. This is only
730 * required for older hardware revisions 760 * required for older hardware revisions
731 */ 761 */
732 if (wl->quirks & WLCORE_QUIRK_END_OF_TRANSACTION) 762 if (wl->quirks & WLCORE_QUIRK_END_OF_TRANSACTION) {
733 wl1271_write32(wl, WL12XX_HOST_WR_ACCESS, 763 bus_ret = wlcore_write32(wl, WL12XX_HOST_WR_ACCESS,
734 wl->tx_packets_count); 764 wl->tx_packets_count);
765 if (bus_ret < 0)
766 goto out;
767 }
735 768
736 wl1271_handle_tx_low_watermark(wl); 769 wl1271_handle_tx_low_watermark(wl);
737 } 770 }
738 wl12xx_rearm_rx_streaming(wl, active_hlids); 771 wl12xx_rearm_rx_streaming(wl, active_hlids);
772
773out:
774 return bus_ret;
739} 775}
740 776
741void wl1271_tx_work(struct work_struct *work) 777void wl1271_tx_work(struct work_struct *work)
@@ -748,7 +784,11 @@ void wl1271_tx_work(struct work_struct *work)
748 if (ret < 0) 784 if (ret < 0)
749 goto out; 785 goto out;
750 786
751 wl1271_tx_work_locked(wl); 787 ret = wlcore_tx_work_locked(wl);
788 if (ret < 0) {
789 wl12xx_queue_recovery_work(wl);
790 goto out;
791 }
752 792
753 wl1271_ps_elp_sleep(wl); 793 wl1271_ps_elp_sleep(wl);
754out: 794out:
@@ -849,7 +889,8 @@ static void wl1271_tx_complete_packet(struct wl1271 *wl,
849 skb_pull(skb, sizeof(struct wl1271_tx_hw_descr)); 889 skb_pull(skb, sizeof(struct wl1271_tx_hw_descr));
850 890
851 /* remove TKIP header space if present */ 891 /* remove TKIP header space if present */
852 if (info->control.hw_key && 892 if ((wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE) &&
893 info->control.hw_key &&
853 info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP) { 894 info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP) {
854 int hdrlen = ieee80211_get_hdrlen_from_skb(skb); 895 int hdrlen = ieee80211_get_hdrlen_from_skb(skb);
855 memmove(skb->data + WL1271_EXTRA_SPACE_TKIP, skb->data, 896 memmove(skb->data + WL1271_EXTRA_SPACE_TKIP, skb->data,
@@ -869,22 +910,27 @@ static void wl1271_tx_complete_packet(struct wl1271 *wl,
869} 910}
870 911
871/* Called upon reception of a TX complete interrupt */ 912/* Called upon reception of a TX complete interrupt */
872void wl1271_tx_complete(struct wl1271 *wl) 913int wlcore_tx_complete(struct wl1271 *wl)
873{ 914{
874 struct wl1271_acx_mem_map *memmap = 915 struct wl1271_acx_mem_map *memmap = wl->target_mem_map;
875 (struct wl1271_acx_mem_map *)wl->target_mem_map;
876 u32 count, fw_counter; 916 u32 count, fw_counter;
877 u32 i; 917 u32 i;
918 int ret;
878 919
879 /* read the tx results from the chipset */ 920 /* read the tx results from the chipset */
880 wl1271_read(wl, le32_to_cpu(memmap->tx_result), 921 ret = wlcore_read(wl, le32_to_cpu(memmap->tx_result),
881 wl->tx_res_if, sizeof(*wl->tx_res_if), false); 922 wl->tx_res_if, sizeof(*wl->tx_res_if), false);
923 if (ret < 0)
924 goto out;
925
882 fw_counter = le32_to_cpu(wl->tx_res_if->tx_result_fw_counter); 926 fw_counter = le32_to_cpu(wl->tx_res_if->tx_result_fw_counter);
883 927
884 /* write host counter to chipset (to ack) */ 928 /* write host counter to chipset (to ack) */
885 wl1271_write32(wl, le32_to_cpu(memmap->tx_result) + 929 ret = wlcore_write32(wl, le32_to_cpu(memmap->tx_result) +
886 offsetof(struct wl1271_tx_hw_res_if, 930 offsetof(struct wl1271_tx_hw_res_if,
887 tx_result_host_counter), fw_counter); 931 tx_result_host_counter), fw_counter);
932 if (ret < 0)
933 goto out;
888 934
889 count = fw_counter - wl->tx_results_count; 935 count = fw_counter - wl->tx_results_count;
890 wl1271_debug(DEBUG_TX, "tx_complete received, packets: %d", count); 936 wl1271_debug(DEBUG_TX, "tx_complete received, packets: %d", count);
@@ -904,8 +950,11 @@ void wl1271_tx_complete(struct wl1271 *wl)
904 950
905 wl->tx_results_count++; 951 wl->tx_results_count++;
906 } 952 }
953
954out:
955 return ret;
907} 956}
908EXPORT_SYMBOL(wl1271_tx_complete); 957EXPORT_SYMBOL(wlcore_tx_complete);
909 958
910void wl1271_tx_reset_link_queues(struct wl1271 *wl, u8 hlid) 959void wl1271_tx_reset_link_queues(struct wl1271 *wl, u8 hlid)
911{ 960{
@@ -958,7 +1007,7 @@ void wl12xx_tx_reset_wlvif(struct wl1271 *wl, struct wl12xx_vif *wlvif)
958 1007
959} 1008}
960/* caller must hold wl->mutex and TX must be stopped */ 1009/* caller must hold wl->mutex and TX must be stopped */
961void wl12xx_tx_reset(struct wl1271 *wl, bool reset_tx_queues) 1010void wl12xx_tx_reset(struct wl1271 *wl)
962{ 1011{
963 int i; 1012 int i;
964 struct sk_buff *skb; 1013 struct sk_buff *skb;
@@ -973,15 +1022,12 @@ void wl12xx_tx_reset(struct wl1271 *wl, bool reset_tx_queues)
973 wl->tx_queue_count[i] = 0; 1022 wl->tx_queue_count[i] = 0;
974 } 1023 }
975 1024
976 wl->stopped_queues_map = 0;
977
978 /* 1025 /*
979 * Make sure the driver is at a consistent state, in case this 1026 * Make sure the driver is at a consistent state, in case this
980 * function is called from a context other than interface removal. 1027 * function is called from a context other than interface removal.
981 * This call will always wake the TX queues. 1028 * This call will always wake the TX queues.
982 */ 1029 */
983 if (reset_tx_queues) 1030 wl1271_handle_tx_low_watermark(wl);
984 wl1271_handle_tx_low_watermark(wl);
985 1031
986 for (i = 0; i < wl->num_tx_desc; i++) { 1032 for (i = 0; i < wl->num_tx_desc; i++) {
987 if (wl->tx_frames[i] == NULL) 1033 if (wl->tx_frames[i] == NULL)
@@ -998,7 +1044,8 @@ void wl12xx_tx_reset(struct wl1271 *wl, bool reset_tx_queues)
998 */ 1044 */
999 info = IEEE80211_SKB_CB(skb); 1045 info = IEEE80211_SKB_CB(skb);
1000 skb_pull(skb, sizeof(struct wl1271_tx_hw_descr)); 1046 skb_pull(skb, sizeof(struct wl1271_tx_hw_descr));
1001 if (info->control.hw_key && 1047 if ((wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE) &&
1048 info->control.hw_key &&
1002 info->control.hw_key->cipher == 1049 info->control.hw_key->cipher ==
1003 WLAN_CIPHER_SUITE_TKIP) { 1050 WLAN_CIPHER_SUITE_TKIP) {
1004 int hdrlen = ieee80211_get_hdrlen_from_skb(skb); 1051 int hdrlen = ieee80211_get_hdrlen_from_skb(skb);
@@ -1024,6 +1071,11 @@ void wl1271_tx_flush(struct wl1271 *wl)
1024 int i; 1071 int i;
1025 timeout = jiffies + usecs_to_jiffies(WL1271_TX_FLUSH_TIMEOUT); 1072 timeout = jiffies + usecs_to_jiffies(WL1271_TX_FLUSH_TIMEOUT);
1026 1073
1074 /* only one flush should be in progress, for consistent queue state */
1075 mutex_lock(&wl->flush_mutex);
1076
1077 wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_FLUSH);
1078
1027 while (!time_after(jiffies, timeout)) { 1079 while (!time_after(jiffies, timeout)) {
1028 mutex_lock(&wl->mutex); 1080 mutex_lock(&wl->mutex);
1029 wl1271_debug(DEBUG_TX, "flushing tx buffer: %d %d", 1081 wl1271_debug(DEBUG_TX, "flushing tx buffer: %d %d",
@@ -1032,7 +1084,7 @@ void wl1271_tx_flush(struct wl1271 *wl)
1032 if ((wl->tx_frames_cnt == 0) && 1084 if ((wl->tx_frames_cnt == 0) &&
1033 (wl1271_tx_total_queue_count(wl) == 0)) { 1085 (wl1271_tx_total_queue_count(wl) == 0)) {
1034 mutex_unlock(&wl->mutex); 1086 mutex_unlock(&wl->mutex);
1035 return; 1087 goto out;
1036 } 1088 }
1037 mutex_unlock(&wl->mutex); 1089 mutex_unlock(&wl->mutex);
1038 msleep(1); 1090 msleep(1);
@@ -1045,7 +1097,12 @@ void wl1271_tx_flush(struct wl1271 *wl)
1045 for (i = 0; i < WL12XX_MAX_LINKS; i++) 1097 for (i = 0; i < WL12XX_MAX_LINKS; i++)
1046 wl1271_tx_reset_link_queues(wl, i); 1098 wl1271_tx_reset_link_queues(wl, i);
1047 mutex_unlock(&wl->mutex); 1099 mutex_unlock(&wl->mutex);
1100
1101out:
1102 wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_FLUSH);
1103 mutex_unlock(&wl->flush_mutex);
1048} 1104}
1105EXPORT_SYMBOL_GPL(wl1271_tx_flush);
1049 1106
1050u32 wl1271_tx_min_rate_get(struct wl1271 *wl, u32 rate_set) 1107u32 wl1271_tx_min_rate_get(struct wl1271 *wl, u32 rate_set)
1051{ 1108{
@@ -1054,3 +1111,96 @@ u32 wl1271_tx_min_rate_get(struct wl1271 *wl, u32 rate_set)
1054 1111
1055 return BIT(__ffs(rate_set)); 1112 return BIT(__ffs(rate_set));
1056} 1113}
1114
1115void wlcore_stop_queue_locked(struct wl1271 *wl, u8 queue,
1116 enum wlcore_queue_stop_reason reason)
1117{
1118 bool stopped = !!wl->queue_stop_reasons[queue];
1119
1120 /* queue should not be stopped for this reason */
1121 WARN_ON(test_and_set_bit(reason, &wl->queue_stop_reasons[queue]));
1122
1123 if (stopped)
1124 return;
1125
1126 ieee80211_stop_queue(wl->hw, wl1271_tx_get_mac80211_queue(queue));
1127}
1128
1129void wlcore_stop_queue(struct wl1271 *wl, u8 queue,
1130 enum wlcore_queue_stop_reason reason)
1131{
1132 unsigned long flags;
1133
1134 spin_lock_irqsave(&wl->wl_lock, flags);
1135 wlcore_stop_queue_locked(wl, queue, reason);
1136 spin_unlock_irqrestore(&wl->wl_lock, flags);
1137}
1138
1139void wlcore_wake_queue(struct wl1271 *wl, u8 queue,
1140 enum wlcore_queue_stop_reason reason)
1141{
1142 unsigned long flags;
1143
1144 spin_lock_irqsave(&wl->wl_lock, flags);
1145
1146 /* queue should not be clear for this reason */
1147 WARN_ON(!test_and_clear_bit(reason, &wl->queue_stop_reasons[queue]));
1148
1149 if (wl->queue_stop_reasons[queue])
1150 goto out;
1151
1152 ieee80211_wake_queue(wl->hw, wl1271_tx_get_mac80211_queue(queue));
1153
1154out:
1155 spin_unlock_irqrestore(&wl->wl_lock, flags);
1156}
1157
1158void wlcore_stop_queues(struct wl1271 *wl,
1159 enum wlcore_queue_stop_reason reason)
1160{
1161 int i;
1162
1163 for (i = 0; i < NUM_TX_QUEUES; i++)
1164 wlcore_stop_queue(wl, i, reason);
1165}
1166EXPORT_SYMBOL_GPL(wlcore_stop_queues);
1167
1168void wlcore_wake_queues(struct wl1271 *wl,
1169 enum wlcore_queue_stop_reason reason)
1170{
1171 int i;
1172
1173 for (i = 0; i < NUM_TX_QUEUES; i++)
1174 wlcore_wake_queue(wl, i, reason);
1175}
1176EXPORT_SYMBOL_GPL(wlcore_wake_queues);
1177
1178void wlcore_reset_stopped_queues(struct wl1271 *wl)
1179{
1180 int i;
1181 unsigned long flags;
1182
1183 spin_lock_irqsave(&wl->wl_lock, flags);
1184
1185 for (i = 0; i < NUM_TX_QUEUES; i++) {
1186 if (!wl->queue_stop_reasons[i])
1187 continue;
1188
1189 wl->queue_stop_reasons[i] = 0;
1190 ieee80211_wake_queue(wl->hw,
1191 wl1271_tx_get_mac80211_queue(i));
1192 }
1193
1194 spin_unlock_irqrestore(&wl->wl_lock, flags);
1195}
1196
1197bool wlcore_is_queue_stopped_by_reason(struct wl1271 *wl, u8 queue,
1198 enum wlcore_queue_stop_reason reason)
1199{
1200 return test_bit(reason, &wl->queue_stop_reasons[queue]);
1201}
1202
1203bool wlcore_is_queue_stopped(struct wl1271 *wl, u8 queue)
1204{
1205 return !!wl->queue_stop_reasons[queue];
1206}
diff --git a/drivers/net/wireless/ti/wlcore/tx.h b/drivers/net/wireless/ti/wlcore/tx.h
index 2fd6e5dc6f7..1e939b01615 100644
--- a/drivers/net/wireless/ti/wlcore/tx.h
+++ b/drivers/net/wireless/ti/wlcore/tx.h
@@ -85,6 +85,19 @@ struct wl128x_tx_mem {
85 u8 extra_bytes; 85 u8 extra_bytes;
86} __packed; 86} __packed;
87 87
88struct wl18xx_tx_mem {
89 /*
90 * Total number of memory blocks allocated by the host for
91 * this packet.
92 */
93 u8 total_mem_blocks;
94
95 /*
96 * control bits
97 */
98 u8 ctrl;
99} __packed;
100
88/* 101/*
89 * On wl128x based devices, when TX packets are aggregated, each packet 102 * On wl128x based devices, when TX packets are aggregated, each packet
90 * size must be aligned to the SDIO block size. The maximum block size 103 * size must be aligned to the SDIO block size. The maximum block size
@@ -100,6 +113,7 @@ struct wl1271_tx_hw_descr {
100 union { 113 union {
101 struct wl127x_tx_mem wl127x_mem; 114 struct wl127x_tx_mem wl127x_mem;
102 struct wl128x_tx_mem wl128x_mem; 115 struct wl128x_tx_mem wl128x_mem;
116 struct wl18xx_tx_mem wl18xx_mem;
103 } __packed; 117 } __packed;
104 /* Device time (in us) when the packet arrived to the driver */ 118 /* Device time (in us) when the packet arrived to the driver */
105 __le32 start_time; 119 __le32 start_time;
@@ -116,7 +130,16 @@ struct wl1271_tx_hw_descr {
116 u8 tid; 130 u8 tid;
117 /* host link ID (HLID) */ 131 /* host link ID (HLID) */
118 u8 hlid; 132 u8 hlid;
119 u8 reserved; 133
134 union {
135 u8 wl12xx_reserved;
136
137 /*
138 * bit 0 -> 0 = udp, 1 = tcp
139 * bit 1:7 -> IP header offset
140 */
141 u8 wl18xx_checksum_data;
142 } __packed;
120} __packed; 143} __packed;
121 144
122enum wl1271_tx_hw_res_status { 145enum wl1271_tx_hw_res_status {
@@ -161,6 +184,13 @@ struct wl1271_tx_hw_res_if {
161 struct wl1271_tx_hw_res_descr tx_results_queue[TX_HW_RESULT_QUEUE_LEN]; 184 struct wl1271_tx_hw_res_descr tx_results_queue[TX_HW_RESULT_QUEUE_LEN];
162} __packed; 185} __packed;
163 186
187enum wlcore_queue_stop_reason {
188 WLCORE_QUEUE_STOP_REASON_WATERMARK,
189 WLCORE_QUEUE_STOP_REASON_FW_RESTART,
190 WLCORE_QUEUE_STOP_REASON_FLUSH,
191 WLCORE_QUEUE_STOP_REASON_SPARE_BLK, /* 18xx specific */
192};
193
164static inline int wl1271_tx_get_queue(int queue) 194static inline int wl1271_tx_get_queue(int queue)
165{ 195{
166 switch (queue) { 196 switch (queue) {
@@ -204,10 +234,10 @@ static inline int wl1271_tx_total_queue_count(struct wl1271 *wl)
204} 234}
205 235
206void wl1271_tx_work(struct work_struct *work); 236void wl1271_tx_work(struct work_struct *work);
207void wl1271_tx_work_locked(struct wl1271 *wl); 237int wlcore_tx_work_locked(struct wl1271 *wl);
208void wl1271_tx_complete(struct wl1271 *wl); 238int wlcore_tx_complete(struct wl1271 *wl);
209void wl12xx_tx_reset_wlvif(struct wl1271 *wl, struct wl12xx_vif *wlvif); 239void wl12xx_tx_reset_wlvif(struct wl1271 *wl, struct wl12xx_vif *wlvif);
210void wl12xx_tx_reset(struct wl1271 *wl, bool reset_tx_queues); 240void wl12xx_tx_reset(struct wl1271 *wl);
211void wl1271_tx_flush(struct wl1271 *wl); 241void wl1271_tx_flush(struct wl1271 *wl);
212u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum ieee80211_band band); 242u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum ieee80211_band band);
213u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set, 243u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set,
@@ -223,6 +253,21 @@ bool wl12xx_is_dummy_packet(struct wl1271 *wl, struct sk_buff *skb);
223void wl12xx_rearm_rx_streaming(struct wl1271 *wl, unsigned long *active_hlids); 253void wl12xx_rearm_rx_streaming(struct wl1271 *wl, unsigned long *active_hlids);
224unsigned int wlcore_calc_packet_alignment(struct wl1271 *wl, 254unsigned int wlcore_calc_packet_alignment(struct wl1271 *wl,
225 unsigned int packet_length); 255 unsigned int packet_length);
256void wl1271_free_tx_id(struct wl1271 *wl, int id);
257void wlcore_stop_queue_locked(struct wl1271 *wl, u8 queue,
258 enum wlcore_queue_stop_reason reason);
259void wlcore_stop_queue(struct wl1271 *wl, u8 queue,
260 enum wlcore_queue_stop_reason reason);
261void wlcore_wake_queue(struct wl1271 *wl, u8 queue,
262 enum wlcore_queue_stop_reason reason);
263void wlcore_stop_queues(struct wl1271 *wl,
264 enum wlcore_queue_stop_reason reason);
265void wlcore_wake_queues(struct wl1271 *wl,
266 enum wlcore_queue_stop_reason reason);
267void wlcore_reset_stopped_queues(struct wl1271 *wl);
268bool wlcore_is_queue_stopped_by_reason(struct wl1271 *wl, u8 queue,
269 enum wlcore_queue_stop_reason reason);
270bool wlcore_is_queue_stopped(struct wl1271 *wl, u8 queue);
226 271
227/* from main.c */ 272/* from main.c */
228void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid); 273void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid);
diff --git a/drivers/net/wireless/ti/wlcore/wlcore.h b/drivers/net/wireless/ti/wlcore/wlcore.h
index 0b3f0b586f4..e796974df59 100644
--- a/drivers/net/wireless/ti/wlcore/wlcore.h
+++ b/drivers/net/wireless/ti/wlcore/wlcore.h
@@ -24,8 +24,9 @@
24 24
25#include <linux/platform_device.h> 25#include <linux/platform_device.h>
26 26
27#include "wl12xx.h" 27#include "wlcore_i.h"
28#include "event.h" 28#include "event.h"
29#include "boot.h"
29 30
30/* The maximum number of Tx descriptors in all chip families */ 31/* The maximum number of Tx descriptors in all chip families */
31#define WLCORE_MAX_TX_DESCRIPTORS 32 32#define WLCORE_MAX_TX_DESCRIPTORS 32
@@ -33,14 +34,16 @@
33/* forward declaration */ 34/* forward declaration */
34struct wl1271_tx_hw_descr; 35struct wl1271_tx_hw_descr;
35enum wl_rx_buf_align; 36enum wl_rx_buf_align;
37struct wl1271_rx_descriptor;
36 38
37struct wlcore_ops { 39struct wlcore_ops {
38 int (*identify_chip)(struct wl1271 *wl); 40 int (*identify_chip)(struct wl1271 *wl);
39 int (*identify_fw)(struct wl1271 *wl); 41 int (*identify_fw)(struct wl1271 *wl);
40 int (*boot)(struct wl1271 *wl); 42 int (*boot)(struct wl1271 *wl);
41 void (*trigger_cmd)(struct wl1271 *wl, int cmd_box_addr, 43 int (*plt_init)(struct wl1271 *wl);
42 void *buf, size_t len); 44 int (*trigger_cmd)(struct wl1271 *wl, int cmd_box_addr,
43 void (*ack_event)(struct wl1271 *wl); 45 void *buf, size_t len);
46 int (*ack_event)(struct wl1271 *wl);
44 u32 (*calc_tx_blocks)(struct wl1271 *wl, u32 len, u32 spare_blks); 47 u32 (*calc_tx_blocks)(struct wl1271 *wl, u32 len, u32 spare_blks);
45 void (*set_tx_desc_blocks)(struct wl1271 *wl, 48 void (*set_tx_desc_blocks)(struct wl1271 *wl,
46 struct wl1271_tx_hw_descr *desc, 49 struct wl1271_tx_hw_descr *desc,
@@ -50,17 +53,34 @@ struct wlcore_ops {
50 struct sk_buff *skb); 53 struct sk_buff *skb);
51 enum wl_rx_buf_align (*get_rx_buf_align)(struct wl1271 *wl, 54 enum wl_rx_buf_align (*get_rx_buf_align)(struct wl1271 *wl,
52 u32 rx_desc); 55 u32 rx_desc);
53 void (*prepare_read)(struct wl1271 *wl, u32 rx_desc, u32 len); 56 int (*prepare_read)(struct wl1271 *wl, u32 rx_desc, u32 len);
54 u32 (*get_rx_packet_len)(struct wl1271 *wl, void *rx_data, 57 u32 (*get_rx_packet_len)(struct wl1271 *wl, void *rx_data,
55 u32 data_len); 58 u32 data_len);
56 void (*tx_delayed_compl)(struct wl1271 *wl); 59 int (*tx_delayed_compl)(struct wl1271 *wl);
57 void (*tx_immediate_compl)(struct wl1271 *wl); 60 void (*tx_immediate_compl)(struct wl1271 *wl);
58 int (*hw_init)(struct wl1271 *wl); 61 int (*hw_init)(struct wl1271 *wl);
59 int (*init_vif)(struct wl1271 *wl, struct wl12xx_vif *wlvif); 62 int (*init_vif)(struct wl1271 *wl, struct wl12xx_vif *wlvif);
60 u32 (*sta_get_ap_rate_mask)(struct wl1271 *wl, 63 u32 (*sta_get_ap_rate_mask)(struct wl1271 *wl,
61 struct wl12xx_vif *wlvif); 64 struct wl12xx_vif *wlvif);
62 s8 (*get_pg_ver)(struct wl1271 *wl); 65 int (*get_pg_ver)(struct wl1271 *wl, s8 *ver);
63 void (*get_mac)(struct wl1271 *wl); 66 int (*get_mac)(struct wl1271 *wl);
67 void (*set_tx_desc_csum)(struct wl1271 *wl,
68 struct wl1271_tx_hw_descr *desc,
69 struct sk_buff *skb);
70 void (*set_rx_csum)(struct wl1271 *wl,
71 struct wl1271_rx_descriptor *desc,
72 struct sk_buff *skb);
73 u32 (*ap_get_mimo_wide_rate_mask)(struct wl1271 *wl,
74 struct wl12xx_vif *wlvif);
75 int (*debugfs_init)(struct wl1271 *wl, struct dentry *rootdir);
76 int (*handle_static_data)(struct wl1271 *wl,
77 struct wl1271_static_data *static_data);
78 int (*get_spare_blocks)(struct wl1271 *wl, bool is_gem);
79 int (*set_key)(struct wl1271 *wl, enum set_key_cmd cmd,
80 struct ieee80211_vif *vif,
81 struct ieee80211_sta *sta,
82 struct ieee80211_key_conf *key_conf);
83 u32 (*pre_pkt_send)(struct wl1271 *wl, u32 buf_offset, u32 last_len);
64}; 84};
65 85
66enum wlcore_partitions { 86enum wlcore_partitions {
@@ -109,6 +129,15 @@ enum wlcore_registers {
109 REG_TABLE_LEN, 129 REG_TABLE_LEN,
110}; 130};
111 131
132struct wl1271_stats {
133 void *fw_stats;
134 unsigned long fw_stats_update;
135 size_t fw_stats_len;
136
137 unsigned int retry_count;
138 unsigned int excessive_retries;
139};
140
112struct wl1271 { 141struct wl1271 {
113 struct ieee80211_hw *hw; 142 struct ieee80211_hw *hw;
114 bool mac80211_registered; 143 bool mac80211_registered;
@@ -121,7 +150,6 @@ struct wl1271 {
121 150
122 void (*set_power)(bool enable); 151 void (*set_power)(bool enable);
123 int irq; 152 int irq;
124 int ref_clock;
125 153
126 spinlock_t wl_lock; 154 spinlock_t wl_lock;
127 155
@@ -186,7 +214,7 @@ struct wl1271 {
186 214
187 /* Frames scheduled for transmission, not handled yet */ 215 /* Frames scheduled for transmission, not handled yet */
188 int tx_queue_count[NUM_TX_QUEUES]; 216 int tx_queue_count[NUM_TX_QUEUES];
189 long stopped_queues_map; 217 unsigned long queue_stop_reasons[NUM_TX_QUEUES];
190 218
191 /* Frames received, not handled yet by mac80211 */ 219 /* Frames received, not handled yet by mac80211 */
192 struct sk_buff_head deferred_rx_queue; 220 struct sk_buff_head deferred_rx_queue;
@@ -205,9 +233,6 @@ struct wl1271 {
205 /* FW Rx counter */ 233 /* FW Rx counter */
206 u32 rx_counter; 234 u32 rx_counter;
207 235
208 /* Rx memory pool address */
209 struct wl1271_rx_mem_pool_addr rx_mem_pool_addr;
210
211 /* Intermediate buffer, used for packet aggregation */ 236 /* Intermediate buffer, used for packet aggregation */
212 u8 *aggr_buf; 237 u8 *aggr_buf;
213 238
@@ -228,6 +253,7 @@ struct wl1271 {
228 253
229 /* Hardware recovery work */ 254 /* Hardware recovery work */
230 struct work_struct recovery_work; 255 struct work_struct recovery_work;
256 bool watchdog_recovery;
231 257
232 /* Pointer that holds DMA-friendly block for the mailbox */ 258 /* Pointer that holds DMA-friendly block for the mailbox */
233 struct event_mailbox *mbox; 259 struct event_mailbox *mbox;
@@ -263,7 +289,8 @@ struct wl1271 {
263 u32 buffer_cmd; 289 u32 buffer_cmd;
264 u32 buffer_busyword[WL1271_BUSY_WORD_CNT]; 290 u32 buffer_busyword[WL1271_BUSY_WORD_CNT];
265 291
266 struct wl_fw_status *fw_status; 292 struct wl_fw_status_1 *fw_status_1;
293 struct wl_fw_status_2 *fw_status_2;
267 struct wl1271_tx_hw_res_if *tx_res_if; 294 struct wl1271_tx_hw_res_if *tx_res_if;
268 295
269 /* Current chipset configuration */ 296 /* Current chipset configuration */
@@ -279,8 +306,6 @@ struct wl1271 {
279 /* bands supported by this instance of wl12xx */ 306 /* bands supported by this instance of wl12xx */
280 struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS]; 307 struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS];
281 308
282 int tcxo_clock;
283
284 /* 309 /*
285 * wowlan trigger was configured during suspend. 310 * wowlan trigger was configured during suspend.
286 * (currently, only "ANY" trigger is supported) 311 * (currently, only "ANY" trigger is supported)
@@ -333,10 +358,8 @@ struct wl1271 {
333 358
334 /* number of TX descriptors the HW supports. */ 359 /* number of TX descriptors the HW supports. */
335 u32 num_tx_desc; 360 u32 num_tx_desc;
336 361 /* number of RX descriptors the HW supports. */
337 /* spare Tx blocks for normal/GEM operating modes */ 362 u32 num_rx_desc;
338 u32 normal_tx_spare;
339 u32 gem_tx_spare;
340 363
341 /* translate HW Tx rates to standard rate-indices */ 364 /* translate HW Tx rates to standard rate-indices */
342 const u8 **band_rate_to_idx; 365 const u8 **band_rate_to_idx;
@@ -348,19 +371,42 @@ struct wl1271 {
348 u8 hw_min_ht_rate; 371 u8 hw_min_ht_rate;
349 372
350 /* HW HT (11n) capabilities */ 373 /* HW HT (11n) capabilities */
351 struct ieee80211_sta_ht_cap ht_cap; 374 struct ieee80211_sta_ht_cap ht_cap[IEEE80211_NUM_BANDS];
352 375
353 /* size of the private FW status data */ 376 /* size of the private FW status data */
354 size_t fw_status_priv_len; 377 size_t fw_status_priv_len;
355 378
356 /* RX Data filter rule state - enabled/disabled */ 379 /* RX Data filter rule state - enabled/disabled */
357 bool rx_filter_enabled[WL1271_MAX_RX_FILTERS]; 380 bool rx_filter_enabled[WL1271_MAX_RX_FILTERS];
381
382 /* size of the private static data */
383 size_t static_data_priv_len;
384
385 /* the current channel type */
386 enum nl80211_channel_type channel_type;
387
388 /* mutex for protecting the tx_flush function */
389 struct mutex flush_mutex;
390
391 /* sleep auth value currently configured to FW */
392 int sleep_auth;
358}; 393};
359 394
360int __devinit wlcore_probe(struct wl1271 *wl, struct platform_device *pdev); 395int __devinit wlcore_probe(struct wl1271 *wl, struct platform_device *pdev);
361int __devexit wlcore_remove(struct platform_device *pdev); 396int __devexit wlcore_remove(struct platform_device *pdev);
362struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size); 397struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size);
363int wlcore_free_hw(struct wl1271 *wl); 398int wlcore_free_hw(struct wl1271 *wl);
399int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
400 struct ieee80211_vif *vif,
401 struct ieee80211_sta *sta,
402 struct ieee80211_key_conf *key_conf);
403
404static inline void
405wlcore_set_ht_cap(struct wl1271 *wl, enum ieee80211_band band,
406 struct ieee80211_sta_ht_cap *ht_cap)
407{
408 memcpy(&wl->ht_cap[band], ht_cap, sizeof(*ht_cap));
409}
364 410
365/* Firmware image load chunk size */ 411/* Firmware image load chunk size */
366#define CHUNK_SIZE 16384 412#define CHUNK_SIZE 16384
@@ -385,6 +431,12 @@ int wlcore_free_hw(struct wl1271 *wl);
385/* Some firmwares may not support ELP */ 431/* Some firmwares may not support ELP */
386#define WLCORE_QUIRK_NO_ELP BIT(6) 432#define WLCORE_QUIRK_NO_ELP BIT(6)
387 433
434/* pad only the last frame in the aggregate buffer */
435#define WLCORE_QUIRK_TX_PAD_LAST_FRAME BIT(7)
436
437/* extra header space is required for TKIP */
438#define WLCORE_QUIRK_TKIP_HEADER_SPACE BIT(8)
439
388/* TODO: move to the lower drivers when all usages are abstracted */ 440/* TODO: move to the lower drivers when all usages are abstracted */
389#define CHIP_ID_1271_PG10 (0x4030101) 441#define CHIP_ID_1271_PG10 (0x4030101)
390#define CHIP_ID_1271_PG20 (0x4030111) 442#define CHIP_ID_1271_PG20 (0x4030111)
diff --git a/drivers/net/wireless/ti/wlcore/wl12xx.h b/drivers/net/wireless/ti/wlcore/wlcore_i.h
index f12bdf74518..4273a21cdde 100644
--- a/drivers/net/wireless/ti/wlcore/wl12xx.h
+++ b/drivers/net/wireless/ti/wlcore/wlcore_i.h
@@ -22,8 +22,8 @@
22 * 22 *
23 */ 23 */
24 24
25#ifndef __WL12XX_H__ 25#ifndef __WLCORE_I_H__
26#define __WL12XX_H__ 26#define __WLCORE_I_H__
27 27
28#include <linux/mutex.h> 28#include <linux/mutex.h>
29#include <linux/completion.h> 29#include <linux/completion.h>
@@ -89,7 +89,7 @@
89#define WL1271_AP_BSS_INDEX 0 89#define WL1271_AP_BSS_INDEX 0
90#define WL1271_AP_DEF_BEACON_EXP 20 90#define WL1271_AP_DEF_BEACON_EXP 20
91 91
92#define WL1271_AGGR_BUFFER_SIZE (4 * PAGE_SIZE) 92#define WL1271_AGGR_BUFFER_SIZE (5 * PAGE_SIZE)
93 93
94enum wl1271_state { 94enum wl1271_state {
95 WL1271_STATE_OFF, 95 WL1271_STATE_OFF,
@@ -132,16 +132,7 @@ struct wl1271_chip {
132 unsigned int fw_ver[NUM_FW_VER]; 132 unsigned int fw_ver[NUM_FW_VER];
133}; 133};
134 134
135struct wl1271_stats {
136 struct acx_statistics *fw_stats;
137 unsigned long fw_stats_update;
138
139 unsigned int retry_count;
140 unsigned int excessive_retries;
141};
142
143#define NUM_TX_QUEUES 4 135#define NUM_TX_QUEUES 4
144#define NUM_RX_PKT_DESC 8
145 136
146#define AP_MAX_STATIONS 8 137#define AP_MAX_STATIONS 8
147 138
@@ -159,13 +150,26 @@ struct wl_fw_packet_counters {
159} __packed; 150} __packed;
160 151
161/* FW status registers */ 152/* FW status registers */
162struct wl_fw_status { 153struct wl_fw_status_1 {
163 __le32 intr; 154 __le32 intr;
164 u8 fw_rx_counter; 155 u8 fw_rx_counter;
165 u8 drv_rx_counter; 156 u8 drv_rx_counter;
166 u8 reserved; 157 u8 reserved;
167 u8 tx_results_counter; 158 u8 tx_results_counter;
168 __le32 rx_pkt_descs[NUM_RX_PKT_DESC]; 159 __le32 rx_pkt_descs[0];
160} __packed;
161
162/*
163 * Each HW arch has a different number of Rx descriptors.
164 * The length of the status depends on it, since it holds an array
165 * of descriptors.
166 */
167#define WLCORE_FW_STATUS_1_LEN(num_rx_desc) \
168 (sizeof(struct wl_fw_status_1) + \
169 (sizeof(((struct wl_fw_status_1 *)0)->rx_pkt_descs[0])) * \
170 num_rx_desc)
171
172struct wl_fw_status_2 {
169 __le32 fw_localtime; 173 __le32 fw_localtime;
170 174
171 /* 175 /*
@@ -194,11 +198,6 @@ struct wl_fw_status {
194 u8 priv[0]; 198 u8 priv[0];
195} __packed; 199} __packed;
196 200
197struct wl1271_rx_mem_pool_addr {
198 u32 addr;
199 u32 addr_extra;
200};
201
202#define WL1271_MAX_CHANNELS 64 201#define WL1271_MAX_CHANNELS 64
203struct wl1271_scan { 202struct wl1271_scan {
204 struct cfg80211_scan_request *req; 203 struct cfg80211_scan_request *req;
@@ -210,10 +209,10 @@ struct wl1271_scan {
210}; 209};
211 210
212struct wl1271_if_operations { 211struct wl1271_if_operations {
213 void (*read)(struct device *child, int addr, void *buf, size_t len, 212 int __must_check (*read)(struct device *child, int addr, void *buf,
214 bool fixed); 213 size_t len, bool fixed);
215 void (*write)(struct device *child, int addr, void *buf, size_t len, 214 int __must_check (*write)(struct device *child, int addr, void *buf,
216 bool fixed); 215 size_t len, bool fixed);
217 void (*reset)(struct device *child); 216 void (*reset)(struct device *child);
218 void (*init)(struct device *child); 217 void (*init)(struct device *child);
219 int (*power)(struct device *child, bool enable); 218 int (*power)(struct device *child, bool enable);
@@ -248,6 +247,7 @@ enum wl12xx_flags {
248 WL1271_FLAG_RECOVERY_IN_PROGRESS, 247 WL1271_FLAG_RECOVERY_IN_PROGRESS,
249 WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, 248 WL1271_FLAG_VIF_CHANGE_IN_PROGRESS,
250 WL1271_FLAG_INTENDED_FW_RECOVERY, 249 WL1271_FLAG_INTENDED_FW_RECOVERY,
250 WL1271_FLAG_SDIO_FAILED,
251}; 251};
252 252
253enum wl12xx_vif_flags { 253enum wl12xx_vif_flags {
@@ -367,6 +367,7 @@ struct wl12xx_vif {
367 /* The current band */ 367 /* The current band */
368 enum ieee80211_band band; 368 enum ieee80211_band band;
369 int channel; 369 int channel;
370 enum nl80211_channel_type channel_type;
370 371
371 u32 bitrate_masks[IEEE80211_NUM_BANDS]; 372 u32 bitrate_masks[IEEE80211_NUM_BANDS];
372 u32 basic_rate_set; 373 u32 basic_rate_set;
@@ -417,9 +418,6 @@ struct wl12xx_vif {
417 struct work_struct rx_streaming_disable_work; 418 struct work_struct rx_streaming_disable_work;
418 struct timer_list rx_streaming_timer; 419 struct timer_list rx_streaming_timer;
419 420
420 /* does the current role use GEM for encryption (AP or STA) */
421 bool is_gem;
422
423 /* 421 /*
424 * This struct must be last! 422 * This struct must be last!
425 * data that has to be saved acrossed reconfigs (e.g. recovery) 423 * data that has to be saved acrossed reconfigs (e.g. recovery)
@@ -501,7 +499,8 @@ void wl1271_rx_filter_flatten_fields(struct wl12xx_rx_filter *filter,
501/* Macros to handle wl1271.sta_rate_set */ 499/* Macros to handle wl1271.sta_rate_set */
502#define HW_BG_RATES_MASK 0xffff 500#define HW_BG_RATES_MASK 0xffff
503#define HW_HT_RATES_OFFSET 16 501#define HW_HT_RATES_OFFSET 16
502#define HW_MIMO_RATES_OFFSET 24
504 503
505#define WL12XX_HW_BLOCK_SIZE 256 504#define WL12XX_HW_BLOCK_SIZE 256
506 505
507#endif 506#endif /* __WLCORE_I_H__ */
diff --git a/drivers/net/wireless/zd1211rw/zd_chip.h b/drivers/net/wireless/zd1211rw/zd_chip.h
index 117c4123943..7ab922209b2 100644
--- a/drivers/net/wireless/zd1211rw/zd_chip.h
+++ b/drivers/net/wireless/zd1211rw/zd_chip.h
@@ -827,7 +827,7 @@ int zd_ioread32v_locked(struct zd_chip *chip, u32 *values,
827static inline int zd_ioread32_locked(struct zd_chip *chip, u32 *value, 827static inline int zd_ioread32_locked(struct zd_chip *chip, u32 *value,
828 const zd_addr_t addr) 828 const zd_addr_t addr)
829{ 829{
830 return zd_ioread32v_locked(chip, value, (const zd_addr_t *)&addr, 1); 830 return zd_ioread32v_locked(chip, value, &addr, 1);
831} 831}
832 832
833static inline int zd_iowrite16_locked(struct zd_chip *chip, u16 value, 833static inline int zd_iowrite16_locked(struct zd_chip *chip, u16 value,
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.h b/drivers/net/wireless/zd1211rw/zd_usb.h
index 99193b456a7..45e3bb28a01 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.h
+++ b/drivers/net/wireless/zd1211rw/zd_usb.h
@@ -274,7 +274,7 @@ int zd_usb_ioread16v(struct zd_usb *usb, u16 *values,
274static inline int zd_usb_ioread16(struct zd_usb *usb, u16 *value, 274static inline int zd_usb_ioread16(struct zd_usb *usb, u16 *value,
275 const zd_addr_t addr) 275 const zd_addr_t addr)
276{ 276{
277 return zd_usb_ioread16v(usb, value, (const zd_addr_t *)&addr, 1); 277 return zd_usb_ioread16v(usb, value, &addr, 1);
278} 278}
279 279
280void zd_usb_iowrite16v_async_start(struct zd_usb *usb); 280void zd_usb_iowrite16v_async_start(struct zd_usb *usb);
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index f4a6fcaeffb..682633bfe00 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -1363,8 +1363,6 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
1363 INVALID_PENDING_IDX); 1363 INVALID_PENDING_IDX);
1364 } 1364 }
1365 1365
1366 __skb_queue_tail(&netbk->tx_queue, skb);
1367
1368 netbk->pending_cons++; 1366 netbk->pending_cons++;
1369 1367
1370 request_gop = xen_netbk_get_requests(netbk, vif, 1368 request_gop = xen_netbk_get_requests(netbk, vif,
@@ -1376,6 +1374,8 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
1376 } 1374 }
1377 gop = request_gop; 1375 gop = request_gop;
1378 1376
1377 __skb_queue_tail(&netbk->tx_queue, skb);
1378
1379 vif->tx.req_cons = idx; 1379 vif->tx.req_cons = idx;
1380 xen_netbk_check_rx_xenvif(vif); 1380 xen_netbk_check_rx_xenvif(vif);
1381 1381
diff --git a/drivers/nfc/pn533.c b/drivers/nfc/pn533.c
index 19110f0eb15..9ac829e22e7 100644
--- a/drivers/nfc/pn533.c
+++ b/drivers/nfc/pn533.c
@@ -45,6 +45,9 @@ static const struct usb_device_id pn533_table[] = {
45}; 45};
46MODULE_DEVICE_TABLE(usb, pn533_table); 46MODULE_DEVICE_TABLE(usb, pn533_table);
47 47
48/* How much time we spend listening for initiators */
49#define PN533_LISTEN_TIME 2
50
48/* frame definitions */ 51/* frame definitions */
49#define PN533_FRAME_TAIL_SIZE 2 52#define PN533_FRAME_TAIL_SIZE 2
50#define PN533_FRAME_SIZE(f) (sizeof(struct pn533_frame) + f->datalen + \ 53#define PN533_FRAME_SIZE(f) (sizeof(struct pn533_frame) + f->datalen + \
@@ -74,6 +77,10 @@ MODULE_DEVICE_TABLE(usb, pn533_table);
74#define PN533_CMD_IN_RELEASE 0x52 77#define PN533_CMD_IN_RELEASE 0x52
75#define PN533_CMD_IN_JUMP_FOR_DEP 0x56 78#define PN533_CMD_IN_JUMP_FOR_DEP 0x56
76 79
80#define PN533_CMD_TG_INIT_AS_TARGET 0x8c
81#define PN533_CMD_TG_GET_DATA 0x86
82#define PN533_CMD_TG_SET_DATA 0x8e
83
77#define PN533_CMD_RESPONSE(cmd) (cmd + 1) 84#define PN533_CMD_RESPONSE(cmd) (cmd + 1)
78 85
79/* PN533 Return codes */ 86/* PN533 Return codes */
@@ -81,6 +88,9 @@ MODULE_DEVICE_TABLE(usb, pn533_table);
81#define PN533_CMD_MI_MASK 0x40 88#define PN533_CMD_MI_MASK 0x40
82#define PN533_CMD_RET_SUCCESS 0x00 89#define PN533_CMD_RET_SUCCESS 0x00
83 90
91/* PN533 status codes */
92#define PN533_STATUS_TARGET_RELEASED 0x29
93
84struct pn533; 94struct pn533;
85 95
86typedef int (*pn533_cmd_complete_t) (struct pn533 *dev, void *arg, 96typedef int (*pn533_cmd_complete_t) (struct pn533 *dev, void *arg,
@@ -97,8 +107,14 @@ struct pn533_fw_version {
97}; 107};
98 108
99/* PN533_CMD_RF_CONFIGURATION */ 109/* PN533_CMD_RF_CONFIGURATION */
110#define PN533_CFGITEM_TIMING 0x02
100#define PN533_CFGITEM_MAX_RETRIES 0x05 111#define PN533_CFGITEM_MAX_RETRIES 0x05
101 112
113#define PN533_CONFIG_TIMING_102 0xb
114#define PN533_CONFIG_TIMING_204 0xc
115#define PN533_CONFIG_TIMING_409 0xd
116#define PN533_CONFIG_TIMING_819 0xe
117
102#define PN533_CONFIG_MAX_RETRIES_NO_RETRY 0x00 118#define PN533_CONFIG_MAX_RETRIES_NO_RETRY 0x00
103#define PN533_CONFIG_MAX_RETRIES_ENDLESS 0xFF 119#define PN533_CONFIG_MAX_RETRIES_ENDLESS 0xFF
104 120
@@ -108,6 +124,12 @@ struct pn533_config_max_retries {
108 u8 mx_rty_passive_act; 124 u8 mx_rty_passive_act;
109} __packed; 125} __packed;
110 126
127struct pn533_config_timing {
128 u8 rfu;
129 u8 atr_res_timeout;
130 u8 dep_timeout;
131} __packed;
132
111/* PN533_CMD_IN_LIST_PASSIVE_TARGET */ 133/* PN533_CMD_IN_LIST_PASSIVE_TARGET */
112 134
113/* felica commands opcode */ 135/* felica commands opcode */
@@ -144,6 +166,7 @@ enum {
144 PN533_POLL_MOD_424KBPS_FELICA, 166 PN533_POLL_MOD_424KBPS_FELICA,
145 PN533_POLL_MOD_106KBPS_JEWEL, 167 PN533_POLL_MOD_106KBPS_JEWEL,
146 PN533_POLL_MOD_847KBPS_B, 168 PN533_POLL_MOD_847KBPS_B,
169 PN533_LISTEN_MOD,
147 170
148 __PN533_POLL_MOD_AFTER_LAST, 171 __PN533_POLL_MOD_AFTER_LAST,
149}; 172};
@@ -211,6 +234,9 @@ const struct pn533_poll_modulations poll_mod[] = {
211 }, 234 },
212 .len = 3, 235 .len = 3,
213 }, 236 },
237 [PN533_LISTEN_MOD] = {
238 .len = 0,
239 },
214}; 240};
215 241
216/* PN533_CMD_IN_ATR */ 242/* PN533_CMD_IN_ATR */
@@ -237,7 +263,7 @@ struct pn533_cmd_jump_dep {
237 u8 active; 263 u8 active;
238 u8 baud; 264 u8 baud;
239 u8 next; 265 u8 next;
240 u8 gt[]; 266 u8 data[];
241} __packed; 267} __packed;
242 268
243struct pn533_cmd_jump_dep_response { 269struct pn533_cmd_jump_dep_response {
@@ -253,6 +279,29 @@ struct pn533_cmd_jump_dep_response {
253 u8 gt[]; 279 u8 gt[];
254} __packed; 280} __packed;
255 281
282
283/* PN533_TG_INIT_AS_TARGET */
284#define PN533_INIT_TARGET_PASSIVE 0x1
285#define PN533_INIT_TARGET_DEP 0x2
286
287#define PN533_INIT_TARGET_RESP_FRAME_MASK 0x3
288#define PN533_INIT_TARGET_RESP_ACTIVE 0x1
289#define PN533_INIT_TARGET_RESP_DEP 0x4
290
291struct pn533_cmd_init_target {
292 u8 mode;
293 u8 mifare[6];
294 u8 felica[18];
295 u8 nfcid3[10];
296 u8 gb_len;
297 u8 gb[];
298} __packed;
299
300struct pn533_cmd_init_target_response {
301 u8 mode;
302 u8 cmd[];
303} __packed;
304
256struct pn533 { 305struct pn533 {
257 struct usb_device *udev; 306 struct usb_device *udev;
258 struct usb_interface *interface; 307 struct usb_interface *interface;
@@ -270,22 +319,31 @@ struct pn533 {
270 319
271 struct workqueue_struct *wq; 320 struct workqueue_struct *wq;
272 struct work_struct cmd_work; 321 struct work_struct cmd_work;
322 struct work_struct poll_work;
273 struct work_struct mi_work; 323 struct work_struct mi_work;
324 struct work_struct tg_work;
325 struct timer_list listen_timer;
274 struct pn533_frame *wq_in_frame; 326 struct pn533_frame *wq_in_frame;
275 int wq_in_error; 327 int wq_in_error;
328 int cancel_listen;
276 329
277 pn533_cmd_complete_t cmd_complete; 330 pn533_cmd_complete_t cmd_complete;
278 void *cmd_complete_arg; 331 void *cmd_complete_arg;
279 struct semaphore cmd_lock; 332 struct mutex cmd_lock;
280 u8 cmd; 333 u8 cmd;
281 334
282 struct pn533_poll_modulations *poll_mod_active[PN533_POLL_MOD_MAX + 1]; 335 struct pn533_poll_modulations *poll_mod_active[PN533_POLL_MOD_MAX + 1];
283 u8 poll_mod_count; 336 u8 poll_mod_count;
284 u8 poll_mod_curr; 337 u8 poll_mod_curr;
285 u32 poll_protocols; 338 u32 poll_protocols;
339 u32 listen_protocols;
340
341 u8 *gb;
342 size_t gb_len;
286 343
287 u8 tgt_available_prots; 344 u8 tgt_available_prots;
288 u8 tgt_active_prot; 345 u8 tgt_active_prot;
346 u8 tgt_mode;
289}; 347};
290 348
291struct pn533_frame { 349struct pn533_frame {
@@ -405,7 +463,7 @@ static void pn533_wq_cmd_complete(struct work_struct *work)
405 PN533_FRAME_CMD_PARAMS_LEN(in_frame)); 463 PN533_FRAME_CMD_PARAMS_LEN(in_frame));
406 464
407 if (rc != -EINPROGRESS) 465 if (rc != -EINPROGRESS)
408 up(&dev->cmd_lock); 466 mutex_unlock(&dev->cmd_lock);
409} 467}
410 468
411static void pn533_recv_response(struct urb *urb) 469static void pn533_recv_response(struct urb *urb)
@@ -583,7 +641,7 @@ static int pn533_send_cmd_frame_async(struct pn533 *dev,
583 641
584 nfc_dev_dbg(&dev->interface->dev, "%s", __func__); 642 nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
585 643
586 if (down_trylock(&dev->cmd_lock)) 644 if (!mutex_trylock(&dev->cmd_lock))
587 return -EBUSY; 645 return -EBUSY;
588 646
589 rc = __pn533_send_cmd_frame_async(dev, out_frame, in_frame, 647 rc = __pn533_send_cmd_frame_async(dev, out_frame, in_frame,
@@ -593,7 +651,7 @@ static int pn533_send_cmd_frame_async(struct pn533 *dev,
593 651
594 return 0; 652 return 0;
595error: 653error:
596 up(&dev->cmd_lock); 654 mutex_unlock(&dev->cmd_lock);
597 return rc; 655 return rc;
598} 656}
599 657
@@ -963,6 +1021,11 @@ static int pn533_target_found(struct pn533 *dev,
963 return 0; 1021 return 0;
964} 1022}
965 1023
1024static inline void pn533_poll_next_mod(struct pn533 *dev)
1025{
1026 dev->poll_mod_curr = (dev->poll_mod_curr + 1) % dev->poll_mod_count;
1027}
1028
966static void pn533_poll_reset_mod_list(struct pn533 *dev) 1029static void pn533_poll_reset_mod_list(struct pn533 *dev)
967{ 1030{
968 dev->poll_mod_count = 0; 1031 dev->poll_mod_count = 0;
@@ -975,102 +1038,283 @@ static void pn533_poll_add_mod(struct pn533 *dev, u8 mod_index)
975 dev->poll_mod_count++; 1038 dev->poll_mod_count++;
976} 1039}
977 1040
978static void pn533_poll_create_mod_list(struct pn533 *dev, u32 protocols) 1041static void pn533_poll_create_mod_list(struct pn533 *dev,
1042 u32 im_protocols, u32 tm_protocols)
979{ 1043{
980 pn533_poll_reset_mod_list(dev); 1044 pn533_poll_reset_mod_list(dev);
981 1045
982 if (protocols & NFC_PROTO_MIFARE_MASK 1046 if (im_protocols & NFC_PROTO_MIFARE_MASK
983 || protocols & NFC_PROTO_ISO14443_MASK 1047 || im_protocols & NFC_PROTO_ISO14443_MASK
984 || protocols & NFC_PROTO_NFC_DEP_MASK) 1048 || im_protocols & NFC_PROTO_NFC_DEP_MASK)
985 pn533_poll_add_mod(dev, PN533_POLL_MOD_106KBPS_A); 1049 pn533_poll_add_mod(dev, PN533_POLL_MOD_106KBPS_A);
986 1050
987 if (protocols & NFC_PROTO_FELICA_MASK 1051 if (im_protocols & NFC_PROTO_FELICA_MASK
988 || protocols & NFC_PROTO_NFC_DEP_MASK) { 1052 || im_protocols & NFC_PROTO_NFC_DEP_MASK) {
989 pn533_poll_add_mod(dev, PN533_POLL_MOD_212KBPS_FELICA); 1053 pn533_poll_add_mod(dev, PN533_POLL_MOD_212KBPS_FELICA);
990 pn533_poll_add_mod(dev, PN533_POLL_MOD_424KBPS_FELICA); 1054 pn533_poll_add_mod(dev, PN533_POLL_MOD_424KBPS_FELICA);
991 } 1055 }
992 1056
993 if (protocols & NFC_PROTO_JEWEL_MASK) 1057 if (im_protocols & NFC_PROTO_JEWEL_MASK)
994 pn533_poll_add_mod(dev, PN533_POLL_MOD_106KBPS_JEWEL); 1058 pn533_poll_add_mod(dev, PN533_POLL_MOD_106KBPS_JEWEL);
995 1059
996 if (protocols & NFC_PROTO_ISO14443_MASK) 1060 if (im_protocols & NFC_PROTO_ISO14443_MASK)
997 pn533_poll_add_mod(dev, PN533_POLL_MOD_847KBPS_B); 1061 pn533_poll_add_mod(dev, PN533_POLL_MOD_847KBPS_B);
1062
1063 if (tm_protocols)
1064 pn533_poll_add_mod(dev, PN533_LISTEN_MOD);
1065}
1066
1067static int pn533_start_poll_complete(struct pn533 *dev, void *arg,
1068 u8 *params, int params_len)
1069{
1070 struct pn533_poll_response *resp;
1071 int rc;
1072
1073 nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
1074
1075 resp = (struct pn533_poll_response *) params;
1076 if (resp->nbtg) {
1077 rc = pn533_target_found(dev, resp, params_len);
1078
1079 /* We must stop the poll after a valid target found */
1080 if (rc == 0) {
1081 pn533_poll_reset_mod_list(dev);
1082 return 0;
1083 }
1084 }
1085
1086 return -EAGAIN;
998} 1087}
999 1088
1000static void pn533_start_poll_frame(struct pn533_frame *frame, 1089static int pn533_init_target_frame(struct pn533_frame *frame,
1001 struct pn533_poll_modulations *mod) 1090 u8 *gb, size_t gb_len)
1002{ 1091{
1092 struct pn533_cmd_init_target *cmd;
1093 size_t cmd_len;
1094 u8 felica_params[18] = {0x1, 0xfe, /* DEP */
1095 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, /* random */
1096 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
1097 0xff, 0xff}; /* System code */
1098 u8 mifare_params[6] = {0x1, 0x1, /* SENS_RES */
1099 0x0, 0x0, 0x0,
1100 0x40}; /* SEL_RES for DEP */
1101
1102 cmd_len = sizeof(struct pn533_cmd_init_target) + gb_len + 1;
1103 cmd = kzalloc(cmd_len, GFP_KERNEL);
1104 if (cmd == NULL)
1105 return -ENOMEM;
1106
1107 pn533_tx_frame_init(frame, PN533_CMD_TG_INIT_AS_TARGET);
1108
1109 /* DEP support only */
1110 cmd->mode |= PN533_INIT_TARGET_DEP;
1111
1112 /* Felica params */
1113 memcpy(cmd->felica, felica_params, 18);
1114 get_random_bytes(cmd->felica + 2, 6);
1115
1116 /* NFCID3 */
1117 memset(cmd->nfcid3, 0, 10);
1118 memcpy(cmd->nfcid3, cmd->felica, 8);
1003 1119
1004 pn533_tx_frame_init(frame, PN533_CMD_IN_LIST_PASSIVE_TARGET); 1120 /* MIFARE params */
1121 memcpy(cmd->mifare, mifare_params, 6);
1005 1122
1006 memcpy(PN533_FRAME_CMD_PARAMS_PTR(frame), &mod->data, mod->len); 1123 /* General bytes */
1007 frame->datalen += mod->len; 1124 cmd->gb_len = gb_len;
1125 memcpy(cmd->gb, gb, gb_len);
1126
1127 /* Len Tk */
1128 cmd->gb[gb_len] = 0;
1129
1130 memcpy(PN533_FRAME_CMD_PARAMS_PTR(frame), cmd, cmd_len);
1131
1132 frame->datalen += cmd_len;
1008 1133
1009 pn533_tx_frame_finish(frame); 1134 pn533_tx_frame_finish(frame);
1135
1136 kfree(cmd);
1137
1138 return 0;
1010} 1139}
1011 1140
1012static int pn533_start_poll_complete(struct pn533 *dev, void *arg, 1141#define PN533_CMD_DATAEXCH_HEAD_LEN (sizeof(struct pn533_frame) + 3)
1013 u8 *params, int params_len) 1142#define PN533_CMD_DATAEXCH_DATA_MAXLEN 262
1143static int pn533_tm_get_data_complete(struct pn533 *dev, void *arg,
1144 u8 *params, int params_len)
1014{ 1145{
1015 struct pn533_poll_response *resp; 1146 struct sk_buff *skb_resp = arg;
1016 struct pn533_poll_modulations *next_mod; 1147 struct pn533_frame *in_frame = (struct pn533_frame *) skb_resp->data;
1017 int rc;
1018 1148
1019 nfc_dev_dbg(&dev->interface->dev, "%s", __func__); 1149 nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
1020 1150
1021 if (params_len == -ENOENT) { 1151 if (params_len < 0) {
1022 nfc_dev_dbg(&dev->interface->dev, "Polling operation has been" 1152 nfc_dev_err(&dev->interface->dev,
1023 " stopped"); 1153 "Error %d when starting as a target",
1024 goto stop_poll; 1154 params_len);
1155
1156 return params_len;
1025 } 1157 }
1026 1158
1159 if (params_len > 0 && params[0] != 0) {
1160 nfc_tm_deactivated(dev->nfc_dev);
1161
1162 dev->tgt_mode = 0;
1163
1164 kfree_skb(skb_resp);
1165 return 0;
1166 }
1167
1168 skb_put(skb_resp, PN533_FRAME_SIZE(in_frame));
1169 skb_pull(skb_resp, PN533_CMD_DATAEXCH_HEAD_LEN);
1170 skb_trim(skb_resp, skb_resp->len - PN533_FRAME_TAIL_SIZE);
1171
1172 return nfc_tm_data_received(dev->nfc_dev, skb_resp);
1173}
1174
1175static void pn533_wq_tg_get_data(struct work_struct *work)
1176{
1177 struct pn533 *dev = container_of(work, struct pn533, tg_work);
1178 struct pn533_frame *in_frame;
1179 struct sk_buff *skb_resp;
1180 size_t skb_resp_len;
1181
1182 nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
1183
1184 skb_resp_len = PN533_CMD_DATAEXCH_HEAD_LEN +
1185 PN533_CMD_DATAEXCH_DATA_MAXLEN +
1186 PN533_FRAME_TAIL_SIZE;
1187
1188 skb_resp = nfc_alloc_recv_skb(skb_resp_len, GFP_KERNEL);
1189 if (!skb_resp)
1190 return;
1191
1192 in_frame = (struct pn533_frame *)skb_resp->data;
1193
1194 pn533_tx_frame_init(dev->out_frame, PN533_CMD_TG_GET_DATA);
1195 pn533_tx_frame_finish(dev->out_frame);
1196
1197 pn533_send_cmd_frame_async(dev, dev->out_frame, in_frame,
1198 skb_resp_len,
1199 pn533_tm_get_data_complete,
1200 skb_resp, GFP_KERNEL);
1201
1202 return;
1203}
1204
1205#define ATR_REQ_GB_OFFSET 17
1206static int pn533_init_target_complete(struct pn533 *dev, void *arg,
1207 u8 *params, int params_len)
1208{
1209 struct pn533_cmd_init_target_response *resp;
1210 u8 frame, comm_mode = NFC_COMM_PASSIVE, *gb;
1211 size_t gb_len;
1212 int rc;
1213
1214 nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
1215
1027 if (params_len < 0) { 1216 if (params_len < 0) {
1028 nfc_dev_err(&dev->interface->dev, "Error %d when running poll", 1217 nfc_dev_err(&dev->interface->dev,
1029 params_len); 1218 "Error %d when starting as a target",
1030 goto stop_poll; 1219 params_len);
1220
1221 return params_len;
1031 } 1222 }
1032 1223
1033 resp = (struct pn533_poll_response *) params; 1224 if (params_len < ATR_REQ_GB_OFFSET + 1)
1034 if (resp->nbtg) { 1225 return -EINVAL;
1035 rc = pn533_target_found(dev, resp, params_len);
1036 1226
1037 /* We must stop the poll after a valid target found */ 1227 resp = (struct pn533_cmd_init_target_response *) params;
1038 if (rc == 0) 1228
1039 goto stop_poll; 1229 nfc_dev_dbg(&dev->interface->dev, "Target mode 0x%x param len %d\n",
1230 resp->mode, params_len);
1231
1232 frame = resp->mode & PN533_INIT_TARGET_RESP_FRAME_MASK;
1233 if (frame == PN533_INIT_TARGET_RESP_ACTIVE)
1234 comm_mode = NFC_COMM_ACTIVE;
1040 1235
1041 if (rc != -EAGAIN) 1236 /* Again, only DEP */
1042 nfc_dev_err(&dev->interface->dev, "The target found is" 1237 if ((resp->mode & PN533_INIT_TARGET_RESP_DEP) == 0)
1043 " not valid - continuing to poll"); 1238 return -EOPNOTSUPP;
1239
1240 gb = resp->cmd + ATR_REQ_GB_OFFSET;
1241 gb_len = params_len - (ATR_REQ_GB_OFFSET + 1);
1242
1243 rc = nfc_tm_activated(dev->nfc_dev, NFC_PROTO_NFC_DEP_MASK,
1244 comm_mode, gb, gb_len);
1245 if (rc < 0) {
1246 nfc_dev_err(&dev->interface->dev,
1247 "Error when signaling target activation");
1248 return rc;
1044 } 1249 }
1045 1250
1046 dev->poll_mod_curr = (dev->poll_mod_curr + 1) % dev->poll_mod_count; 1251 dev->tgt_mode = 1;
1047 1252
1048 next_mod = dev->poll_mod_active[dev->poll_mod_curr]; 1253 queue_work(dev->wq, &dev->tg_work);
1049 1254
1050 nfc_dev_dbg(&dev->interface->dev, "Polling next modulation (0x%x)", 1255 return 0;
1051 dev->poll_mod_curr); 1256}
1257
1258static void pn533_listen_mode_timer(unsigned long data)
1259{
1260 struct pn533 *dev = (struct pn533 *) data;
1261
1262 nfc_dev_dbg(&dev->interface->dev, "Listen mode timeout");
1263
1264 /* An ack will cancel the last issued command (poll) */
1265 pn533_send_ack(dev, GFP_ATOMIC);
1266
1267 dev->cancel_listen = 1;
1268
1269 mutex_unlock(&dev->cmd_lock);
1270
1271 pn533_poll_next_mod(dev);
1272
1273 queue_work(dev->wq, &dev->poll_work);
1274}
1052 1275
1053 pn533_start_poll_frame(dev->out_frame, next_mod); 1276static int pn533_poll_complete(struct pn533 *dev, void *arg,
1277 u8 *params, int params_len)
1278{
1279 struct pn533_poll_modulations *cur_mod;
1280 int rc;
1054 1281
1055 /* Don't need to down the semaphore again */ 1282 nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
1056 rc = __pn533_send_cmd_frame_async(dev, dev->out_frame, dev->in_frame, 1283
1057 dev->in_maxlen, pn533_start_poll_complete, 1284 if (params_len == -ENOENT) {
1058 NULL, GFP_ATOMIC); 1285 if (dev->poll_mod_count != 0)
1286 return 0;
1287
1288 nfc_dev_err(&dev->interface->dev,
1289 "Polling operation has been stopped");
1059 1290
1060 if (rc == -EPERM) {
1061 nfc_dev_dbg(&dev->interface->dev, "Cannot poll next modulation"
1062 " because poll has been stopped");
1063 goto stop_poll; 1291 goto stop_poll;
1064 } 1292 }
1065 1293
1066 if (rc) { 1294 if (params_len < 0) {
1067 nfc_dev_err(&dev->interface->dev, "Error %d when trying to poll" 1295 nfc_dev_err(&dev->interface->dev,
1068 " next modulation", rc); 1296 "Error %d when running poll", params_len);
1297
1069 goto stop_poll; 1298 goto stop_poll;
1070 } 1299 }
1071 1300
1072 /* Inform caller function to do not up the semaphore */ 1301 cur_mod = dev->poll_mod_active[dev->poll_mod_curr];
1073 return -EINPROGRESS; 1302
1303 if (cur_mod->len == 0) {
1304 del_timer(&dev->listen_timer);
1305
1306 return pn533_init_target_complete(dev, arg, params, params_len);
1307 } else {
1308 rc = pn533_start_poll_complete(dev, arg, params, params_len);
1309 if (!rc)
1310 return rc;
1311 }
1312
1313 pn533_poll_next_mod(dev);
1314
1315 queue_work(dev->wq, &dev->poll_work);
1316
1317 return 0;
1074 1318
1075stop_poll: 1319stop_poll:
1076 pn533_poll_reset_mod_list(dev); 1320 pn533_poll_reset_mod_list(dev);
@@ -1078,61 +1322,104 @@ stop_poll:
1078 return 0; 1322 return 0;
1079} 1323}
1080 1324
1081static int pn533_start_poll(struct nfc_dev *nfc_dev, u32 protocols) 1325static void pn533_build_poll_frame(struct pn533 *dev,
1326 struct pn533_frame *frame,
1327 struct pn533_poll_modulations *mod)
1082{ 1328{
1083 struct pn533 *dev = nfc_get_drvdata(nfc_dev); 1329 nfc_dev_dbg(&dev->interface->dev, "mod len %d\n", mod->len);
1084 struct pn533_poll_modulations *start_mod;
1085 int rc;
1086 1330
1087 nfc_dev_dbg(&dev->interface->dev, "%s - protocols=0x%x", __func__, 1331 if (mod->len == 0) {
1088 protocols); 1332 /* Listen mode */
1333 pn533_init_target_frame(frame, dev->gb, dev->gb_len);
1334 } else {
1335 /* Polling mode */
1336 pn533_tx_frame_init(frame, PN533_CMD_IN_LIST_PASSIVE_TARGET);
1089 1337
1090 if (dev->poll_mod_count) { 1338 memcpy(PN533_FRAME_CMD_PARAMS_PTR(frame), &mod->data, mod->len);
1091 nfc_dev_err(&dev->interface->dev, "Polling operation already" 1339 frame->datalen += mod->len;
1092 " active");
1093 return -EBUSY;
1094 }
1095 1340
1096 if (dev->tgt_active_prot) { 1341 pn533_tx_frame_finish(frame);
1097 nfc_dev_err(&dev->interface->dev, "Cannot poll with a target"
1098 " already activated");
1099 return -EBUSY;
1100 } 1342 }
1343}
1101 1344
1102 pn533_poll_create_mod_list(dev, protocols); 1345static int pn533_send_poll_frame(struct pn533 *dev)
1346{
1347 struct pn533_poll_modulations *cur_mod;
1348 int rc;
1103 1349
1104 if (!dev->poll_mod_count) { 1350 cur_mod = dev->poll_mod_active[dev->poll_mod_curr];
1105 nfc_dev_err(&dev->interface->dev, "No valid protocols" 1351
1106 " specified"); 1352 pn533_build_poll_frame(dev, dev->out_frame, cur_mod);
1107 rc = -EINVAL; 1353
1108 goto error; 1354 rc = pn533_send_cmd_frame_async(dev, dev->out_frame, dev->in_frame,
1355 dev->in_maxlen, pn533_poll_complete,
1356 NULL, GFP_KERNEL);
1357 if (rc)
1358 nfc_dev_err(&dev->interface->dev, "Polling loop error %d", rc);
1359
1360 return rc;
1361}
1362
1363static void pn533_wq_poll(struct work_struct *work)
1364{
1365 struct pn533 *dev = container_of(work, struct pn533, poll_work);
1366 struct pn533_poll_modulations *cur_mod;
1367 int rc;
1368
1369 cur_mod = dev->poll_mod_active[dev->poll_mod_curr];
1370
1371 nfc_dev_dbg(&dev->interface->dev,
1372 "%s cancel_listen %d modulation len %d",
1373 __func__, dev->cancel_listen, cur_mod->len);
1374
1375 if (dev->cancel_listen == 1) {
1376 dev->cancel_listen = 0;
1377 usb_kill_urb(dev->in_urb);
1109 } 1378 }
1110 1379
1111 nfc_dev_dbg(&dev->interface->dev, "It will poll %d modulations types", 1380 rc = pn533_send_poll_frame(dev);
1112 dev->poll_mod_count); 1381 if (rc)
1382 return;
1113 1383
1114 dev->poll_mod_curr = 0; 1384 if (cur_mod->len == 0 && dev->poll_mod_count > 1)
1115 start_mod = dev->poll_mod_active[dev->poll_mod_curr]; 1385 mod_timer(&dev->listen_timer, jiffies + PN533_LISTEN_TIME * HZ);
1116 1386
1117 pn533_start_poll_frame(dev->out_frame, start_mod); 1387 return;
1388}
1118 1389
1119 rc = pn533_send_cmd_frame_async(dev, dev->out_frame, dev->in_frame, 1390static int pn533_start_poll(struct nfc_dev *nfc_dev,
1120 dev->in_maxlen, pn533_start_poll_complete, 1391 u32 im_protocols, u32 tm_protocols)
1121 NULL, GFP_KERNEL); 1392{
1393 struct pn533 *dev = nfc_get_drvdata(nfc_dev);
1122 1394
1123 if (rc) { 1395 nfc_dev_dbg(&dev->interface->dev,
1124 nfc_dev_err(&dev->interface->dev, "Error %d when trying to" 1396 "%s: im protocols 0x%x tm protocols 0x%x",
1125 " start poll", rc); 1397 __func__, im_protocols, tm_protocols);
1126 goto error; 1398
1399 if (dev->tgt_active_prot) {
1400 nfc_dev_err(&dev->interface->dev,
1401 "Cannot poll with a target already activated");
1402 return -EBUSY;
1127 } 1403 }
1128 1404
1129 dev->poll_protocols = protocols; 1405 if (dev->tgt_mode) {
1406 nfc_dev_err(&dev->interface->dev,
1407 "Cannot poll while already being activated");
1408 return -EBUSY;
1409 }
1130 1410
1131 return 0; 1411 if (tm_protocols) {
1412 dev->gb = nfc_get_local_general_bytes(nfc_dev, &dev->gb_len);
1413 if (dev->gb == NULL)
1414 tm_protocols = 0;
1415 }
1132 1416
1133error: 1417 dev->poll_mod_curr = 0;
1134 pn533_poll_reset_mod_list(dev); 1418 pn533_poll_create_mod_list(dev, im_protocols, tm_protocols);
1135 return rc; 1419 dev->poll_protocols = im_protocols;
1420 dev->listen_protocols = tm_protocols;
1421
1422 return pn533_send_poll_frame(dev);
1136} 1423}
1137 1424
1138static void pn533_stop_poll(struct nfc_dev *nfc_dev) 1425static void pn533_stop_poll(struct nfc_dev *nfc_dev)
@@ -1141,6 +1428,8 @@ static void pn533_stop_poll(struct nfc_dev *nfc_dev)
1141 1428
1142 nfc_dev_dbg(&dev->interface->dev, "%s", __func__); 1429 nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
1143 1430
1431 del_timer(&dev->listen_timer);
1432
1144 if (!dev->poll_mod_count) { 1433 if (!dev->poll_mod_count) {
1145 nfc_dev_dbg(&dev->interface->dev, "Polling operation was not" 1434 nfc_dev_dbg(&dev->interface->dev, "Polling operation was not"
1146 " running"); 1435 " running");
@@ -1152,6 +1441,8 @@ static void pn533_stop_poll(struct nfc_dev *nfc_dev)
1152 1441
1153 /* prevent pn533_start_poll_complete to issue a new poll meanwhile */ 1442 /* prevent pn533_start_poll_complete to issue a new poll meanwhile */
1154 usb_kill_urb(dev->in_urb); 1443 usb_kill_urb(dev->in_urb);
1444
1445 pn533_poll_reset_mod_list(dev);
1155} 1446}
1156 1447
1157static int pn533_activate_target_nfcdep(struct pn533 *dev) 1448static int pn533_activate_target_nfcdep(struct pn533 *dev)
@@ -1349,13 +1640,29 @@ static int pn533_in_dep_link_up_complete(struct pn533 *dev, void *arg,
1349 return 0; 1640 return 0;
1350} 1641}
1351 1642
1643static int pn533_mod_to_baud(struct pn533 *dev)
1644{
1645 switch (dev->poll_mod_curr) {
1646 case PN533_POLL_MOD_106KBPS_A:
1647 return 0;
1648 case PN533_POLL_MOD_212KBPS_FELICA:
1649 return 1;
1650 case PN533_POLL_MOD_424KBPS_FELICA:
1651 return 2;
1652 default:
1653 return -EINVAL;
1654 }
1655}
1656
1657#define PASSIVE_DATA_LEN 5
1352static int pn533_dep_link_up(struct nfc_dev *nfc_dev, struct nfc_target *target, 1658static int pn533_dep_link_up(struct nfc_dev *nfc_dev, struct nfc_target *target,
1353 u8 comm_mode, u8* gb, size_t gb_len) 1659 u8 comm_mode, u8* gb, size_t gb_len)
1354{ 1660{
1355 struct pn533 *dev = nfc_get_drvdata(nfc_dev); 1661 struct pn533 *dev = nfc_get_drvdata(nfc_dev);
1356 struct pn533_cmd_jump_dep *cmd; 1662 struct pn533_cmd_jump_dep *cmd;
1357 u8 cmd_len; 1663 u8 cmd_len, *data_ptr;
1358 int rc; 1664 u8 passive_data[PASSIVE_DATA_LEN] = {0x00, 0xff, 0xff, 0x00, 0x3};
1665 int rc, baud;
1359 1666
1360 nfc_dev_dbg(&dev->interface->dev, "%s", __func__); 1667 nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
1361 1668
@@ -1371,7 +1678,17 @@ static int pn533_dep_link_up(struct nfc_dev *nfc_dev, struct nfc_target *target,
1371 return -EBUSY; 1678 return -EBUSY;
1372 } 1679 }
1373 1680
1681 baud = pn533_mod_to_baud(dev);
1682 if (baud < 0) {
1683 nfc_dev_err(&dev->interface->dev,
1684 "Invalid curr modulation %d", dev->poll_mod_curr);
1685 return baud;
1686 }
1687
1374 cmd_len = sizeof(struct pn533_cmd_jump_dep) + gb_len; 1688 cmd_len = sizeof(struct pn533_cmd_jump_dep) + gb_len;
1689 if (comm_mode == NFC_COMM_PASSIVE)
1690 cmd_len += PASSIVE_DATA_LEN;
1691
1375 cmd = kzalloc(cmd_len, GFP_KERNEL); 1692 cmd = kzalloc(cmd_len, GFP_KERNEL);
1376 if (cmd == NULL) 1693 if (cmd == NULL)
1377 return -ENOMEM; 1694 return -ENOMEM;
@@ -1379,10 +1696,18 @@ static int pn533_dep_link_up(struct nfc_dev *nfc_dev, struct nfc_target *target,
1379 pn533_tx_frame_init(dev->out_frame, PN533_CMD_IN_JUMP_FOR_DEP); 1696 pn533_tx_frame_init(dev->out_frame, PN533_CMD_IN_JUMP_FOR_DEP);
1380 1697
1381 cmd->active = !comm_mode; 1698 cmd->active = !comm_mode;
1382 cmd->baud = 0; 1699 cmd->next = 0;
1700 cmd->baud = baud;
1701 data_ptr = cmd->data;
1702 if (comm_mode == NFC_COMM_PASSIVE && cmd->baud > 0) {
1703 memcpy(data_ptr, passive_data, PASSIVE_DATA_LEN);
1704 cmd->next |= 1;
1705 data_ptr += PASSIVE_DATA_LEN;
1706 }
1707
1383 if (gb != NULL && gb_len > 0) { 1708 if (gb != NULL && gb_len > 0) {
1384 cmd->next = 4; /* We have some Gi */ 1709 cmd->next |= 4; /* We have some Gi */
1385 memcpy(cmd->gt, gb, gb_len); 1710 memcpy(data_ptr, gb, gb_len);
1386 } else { 1711 } else {
1387 cmd->next = 0; 1712 cmd->next = 0;
1388 } 1713 }
@@ -1407,15 +1732,25 @@ out:
1407 1732
1408static int pn533_dep_link_down(struct nfc_dev *nfc_dev) 1733static int pn533_dep_link_down(struct nfc_dev *nfc_dev)
1409{ 1734{
1410 pn533_deactivate_target(nfc_dev, 0); 1735 struct pn533 *dev = nfc_get_drvdata(nfc_dev);
1736
1737 pn533_poll_reset_mod_list(dev);
1738
1739 if (dev->tgt_mode || dev->tgt_active_prot) {
1740 pn533_send_ack(dev, GFP_KERNEL);
1741 usb_kill_urb(dev->in_urb);
1742 }
1743
1744 dev->tgt_active_prot = 0;
1745 dev->tgt_mode = 0;
1746
1747 skb_queue_purge(&dev->resp_q);
1411 1748
1412 return 0; 1749 return 0;
1413} 1750}
1414 1751
1415#define PN533_CMD_DATAEXCH_HEAD_LEN (sizeof(struct pn533_frame) + 3) 1752static int pn533_build_tx_frame(struct pn533 *dev, struct sk_buff *skb,
1416#define PN533_CMD_DATAEXCH_DATA_MAXLEN 262 1753 bool target)
1417
1418static int pn533_data_exchange_tx_frame(struct pn533 *dev, struct sk_buff *skb)
1419{ 1754{
1420 int payload_len = skb->len; 1755 int payload_len = skb->len;
1421 struct pn533_frame *out_frame; 1756 struct pn533_frame *out_frame;
@@ -1432,14 +1767,20 @@ static int pn533_data_exchange_tx_frame(struct pn533 *dev, struct sk_buff *skb)
1432 return -ENOSYS; 1767 return -ENOSYS;
1433 } 1768 }
1434 1769
1435 skb_push(skb, PN533_CMD_DATAEXCH_HEAD_LEN); 1770 if (target == true) {
1436 out_frame = (struct pn533_frame *) skb->data; 1771 skb_push(skb, PN533_CMD_DATAEXCH_HEAD_LEN);
1772 out_frame = (struct pn533_frame *) skb->data;
1437 1773
1438 pn533_tx_frame_init(out_frame, PN533_CMD_IN_DATA_EXCHANGE); 1774 pn533_tx_frame_init(out_frame, PN533_CMD_IN_DATA_EXCHANGE);
1775 tg = 1;
1776 memcpy(PN533_FRAME_CMD_PARAMS_PTR(out_frame), &tg, sizeof(u8));
1777 out_frame->datalen += sizeof(u8);
1778 } else {
1779 skb_push(skb, PN533_CMD_DATAEXCH_HEAD_LEN - 1);
1780 out_frame = (struct pn533_frame *) skb->data;
1781 pn533_tx_frame_init(out_frame, PN533_CMD_TG_SET_DATA);
1782 }
1439 1783
1440 tg = 1;
1441 memcpy(PN533_FRAME_CMD_PARAMS_PTR(out_frame), &tg, sizeof(u8));
1442 out_frame->datalen += sizeof(u8);
1443 1784
1444 /* The data is already in the out_frame, just update the datalen */ 1785 /* The data is already in the out_frame, just update the datalen */
1445 out_frame->datalen += payload_len; 1786 out_frame->datalen += payload_len;
@@ -1550,9 +1891,9 @@ error:
1550 return 0; 1891 return 0;
1551} 1892}
1552 1893
1553static int pn533_data_exchange(struct nfc_dev *nfc_dev, 1894static int pn533_transceive(struct nfc_dev *nfc_dev,
1554 struct nfc_target *target, struct sk_buff *skb, 1895 struct nfc_target *target, struct sk_buff *skb,
1555 data_exchange_cb_t cb, void *cb_context) 1896 data_exchange_cb_t cb, void *cb_context)
1556{ 1897{
1557 struct pn533 *dev = nfc_get_drvdata(nfc_dev); 1898 struct pn533 *dev = nfc_get_drvdata(nfc_dev);
1558 struct pn533_frame *out_frame, *in_frame; 1899 struct pn533_frame *out_frame, *in_frame;
@@ -1570,7 +1911,7 @@ static int pn533_data_exchange(struct nfc_dev *nfc_dev,
1570 goto error; 1911 goto error;
1571 } 1912 }
1572 1913
1573 rc = pn533_data_exchange_tx_frame(dev, skb); 1914 rc = pn533_build_tx_frame(dev, skb, true);
1574 if (rc) 1915 if (rc)
1575 goto error; 1916 goto error;
1576 1917
@@ -1618,6 +1959,63 @@ error:
1618 return rc; 1959 return rc;
1619} 1960}
1620 1961
1962static int pn533_tm_send_complete(struct pn533 *dev, void *arg,
1963 u8 *params, int params_len)
1964{
1965 nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
1966
1967 if (params_len < 0) {
1968 nfc_dev_err(&dev->interface->dev,
1969 "Error %d when sending data",
1970 params_len);
1971
1972 return params_len;
1973 }
1974
1975 if (params_len > 0 && params[0] != 0) {
1976 nfc_tm_deactivated(dev->nfc_dev);
1977
1978 dev->tgt_mode = 0;
1979
1980 return 0;
1981 }
1982
1983 queue_work(dev->wq, &dev->tg_work);
1984
1985 return 0;
1986}
1987
1988static int pn533_tm_send(struct nfc_dev *nfc_dev, struct sk_buff *skb)
1989{
1990 struct pn533 *dev = nfc_get_drvdata(nfc_dev);
1991 struct pn533_frame *out_frame;
1992 int rc;
1993
1994 nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
1995
1996 rc = pn533_build_tx_frame(dev, skb, false);
1997 if (rc)
1998 goto error;
1999
2000 out_frame = (struct pn533_frame *) skb->data;
2001
2002 rc = pn533_send_cmd_frame_async(dev, out_frame, dev->in_frame,
2003 dev->in_maxlen, pn533_tm_send_complete,
2004 NULL, GFP_KERNEL);
2005 if (rc) {
2006 nfc_dev_err(&dev->interface->dev,
2007 "Error %d when trying to send data", rc);
2008 goto error;
2009 }
2010
2011 return 0;
2012
2013error:
2014 kfree_skb(skb);
2015
2016 return rc;
2017}
2018
1621static void pn533_wq_mi_recv(struct work_struct *work) 2019static void pn533_wq_mi_recv(struct work_struct *work)
1622{ 2020{
1623 struct pn533 *dev = container_of(work, struct pn533, mi_work); 2021 struct pn533 *dev = container_of(work, struct pn533, mi_work);
@@ -1638,7 +2036,7 @@ static void pn533_wq_mi_recv(struct work_struct *work)
1638 2036
1639 skb_reserve(skb_cmd, PN533_CMD_DATAEXCH_HEAD_LEN); 2037 skb_reserve(skb_cmd, PN533_CMD_DATAEXCH_HEAD_LEN);
1640 2038
1641 rc = pn533_data_exchange_tx_frame(dev, skb_cmd); 2039 rc = pn533_build_tx_frame(dev, skb_cmd, true);
1642 if (rc) 2040 if (rc)
1643 goto error_frame; 2041 goto error_frame;
1644 2042
@@ -1677,7 +2075,7 @@ error_cmd:
1677 2075
1678 kfree(arg); 2076 kfree(arg);
1679 2077
1680 up(&dev->cmd_lock); 2078 mutex_unlock(&dev->cmd_lock);
1681} 2079}
1682 2080
1683static int pn533_set_configuration(struct pn533 *dev, u8 cfgitem, u8 *cfgdata, 2081static int pn533_set_configuration(struct pn533 *dev, u8 cfgitem, u8 *cfgdata,
@@ -1712,7 +2110,8 @@ struct nfc_ops pn533_nfc_ops = {
1712 .stop_poll = pn533_stop_poll, 2110 .stop_poll = pn533_stop_poll,
1713 .activate_target = pn533_activate_target, 2111 .activate_target = pn533_activate_target,
1714 .deactivate_target = pn533_deactivate_target, 2112 .deactivate_target = pn533_deactivate_target,
1715 .data_exchange = pn533_data_exchange, 2113 .im_transceive = pn533_transceive,
2114 .tm_send = pn533_tm_send,
1716}; 2115};
1717 2116
1718static int pn533_probe(struct usb_interface *interface, 2117static int pn533_probe(struct usb_interface *interface,
@@ -1723,6 +2122,7 @@ static int pn533_probe(struct usb_interface *interface,
1723 struct usb_host_interface *iface_desc; 2122 struct usb_host_interface *iface_desc;
1724 struct usb_endpoint_descriptor *endpoint; 2123 struct usb_endpoint_descriptor *endpoint;
1725 struct pn533_config_max_retries max_retries; 2124 struct pn533_config_max_retries max_retries;
2125 struct pn533_config_timing timing;
1726 int in_endpoint = 0; 2126 int in_endpoint = 0;
1727 int out_endpoint = 0; 2127 int out_endpoint = 0;
1728 int rc = -ENOMEM; 2128 int rc = -ENOMEM;
@@ -1735,7 +2135,7 @@ static int pn533_probe(struct usb_interface *interface,
1735 2135
1736 dev->udev = usb_get_dev(interface_to_usbdev(interface)); 2136 dev->udev = usb_get_dev(interface_to_usbdev(interface));
1737 dev->interface = interface; 2137 dev->interface = interface;
1738 sema_init(&dev->cmd_lock, 1); 2138 mutex_init(&dev->cmd_lock);
1739 2139
1740 iface_desc = interface->cur_altsetting; 2140 iface_desc = interface->cur_altsetting;
1741 for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) { 2141 for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
@@ -1779,12 +2179,18 @@ static int pn533_probe(struct usb_interface *interface,
1779 2179
1780 INIT_WORK(&dev->cmd_work, pn533_wq_cmd_complete); 2180 INIT_WORK(&dev->cmd_work, pn533_wq_cmd_complete);
1781 INIT_WORK(&dev->mi_work, pn533_wq_mi_recv); 2181 INIT_WORK(&dev->mi_work, pn533_wq_mi_recv);
2182 INIT_WORK(&dev->tg_work, pn533_wq_tg_get_data);
2183 INIT_WORK(&dev->poll_work, pn533_wq_poll);
1782 dev->wq = alloc_workqueue("pn533", 2184 dev->wq = alloc_workqueue("pn533",
1783 WQ_NON_REENTRANT | WQ_UNBOUND | WQ_MEM_RECLAIM, 2185 WQ_NON_REENTRANT | WQ_UNBOUND | WQ_MEM_RECLAIM,
1784 1); 2186 1);
1785 if (dev->wq == NULL) 2187 if (dev->wq == NULL)
1786 goto error; 2188 goto error;
1787 2189
2190 init_timer(&dev->listen_timer);
2191 dev->listen_timer.data = (unsigned long) dev;
2192 dev->listen_timer.function = pn533_listen_mode_timer;
2193
1788 skb_queue_head_init(&dev->resp_q); 2194 skb_queue_head_init(&dev->resp_q);
1789 2195
1790 usb_set_intfdata(interface, dev); 2196 usb_set_intfdata(interface, dev);
@@ -1830,13 +2236,29 @@ static int pn533_probe(struct usb_interface *interface,
1830 if (rc) { 2236 if (rc) {
1831 nfc_dev_err(&dev->interface->dev, "Error on setting MAX_RETRIES" 2237 nfc_dev_err(&dev->interface->dev, "Error on setting MAX_RETRIES"
1832 " config"); 2238 " config");
1833 goto free_nfc_dev; 2239 goto unregister_nfc_dev;
2240 }
2241
2242 timing.rfu = PN533_CONFIG_TIMING_102;
2243 timing.atr_res_timeout = PN533_CONFIG_TIMING_204;
2244 timing.dep_timeout = PN533_CONFIG_TIMING_409;
2245
2246 rc = pn533_set_configuration(dev, PN533_CFGITEM_TIMING,
2247 (u8 *) &timing, sizeof(timing));
2248 if (rc) {
2249 nfc_dev_err(&dev->interface->dev,
2250 "Error on setting RF timings");
2251 goto unregister_nfc_dev;
1834 } 2252 }
1835 2253
1836 return 0; 2254 return 0;
1837 2255
2256unregister_nfc_dev:
2257 nfc_unregister_device(dev->nfc_dev);
2258
1838free_nfc_dev: 2259free_nfc_dev:
1839 nfc_free_device(dev->nfc_dev); 2260 nfc_free_device(dev->nfc_dev);
2261
1840destroy_wq: 2262destroy_wq:
1841 destroy_workqueue(dev->wq); 2263 destroy_workqueue(dev->wq);
1842error: 2264error:
@@ -1865,6 +2287,8 @@ static void pn533_disconnect(struct usb_interface *interface)
1865 2287
1866 skb_queue_purge(&dev->resp_q); 2288 skb_queue_purge(&dev->resp_q);
1867 2289
2290 del_timer(&dev->listen_timer);
2291
1868 kfree(dev->in_frame); 2292 kfree(dev->in_frame);
1869 usb_free_urb(dev->in_urb); 2293 usb_free_urb(dev->in_urb);
1870 kfree(dev->out_frame); 2294 kfree(dev->out_frame);
diff --git a/drivers/nfc/pn544_hci.c b/drivers/nfc/pn544_hci.c
index 281f18c2fb8..457eac35dc7 100644
--- a/drivers/nfc/pn544_hci.c
+++ b/drivers/nfc/pn544_hci.c
@@ -576,7 +576,8 @@ static int pn544_hci_xmit(struct nfc_shdlc *shdlc, struct sk_buff *skb)
576 return pn544_hci_i2c_write(client, skb->data, skb->len); 576 return pn544_hci_i2c_write(client, skb->data, skb->len);
577} 577}
578 578
579static int pn544_hci_start_poll(struct nfc_shdlc *shdlc, u32 protocols) 579static int pn544_hci_start_poll(struct nfc_shdlc *shdlc,
580 u32 im_protocols, u32 tm_protocols)
580{ 581{
581 struct nfc_hci_dev *hdev = nfc_shdlc_get_hci_dev(shdlc); 582 struct nfc_hci_dev *hdev = nfc_shdlc_get_hci_dev(shdlc);
582 u8 phases = 0; 583 u8 phases = 0;
@@ -584,7 +585,8 @@ static int pn544_hci_start_poll(struct nfc_shdlc *shdlc, u32 protocols)
584 u8 duration[2]; 585 u8 duration[2];
585 u8 activated; 586 u8 activated;
586 587
587 pr_info(DRIVER_DESC ": %s protocols = %d\n", __func__, protocols); 588 pr_info(DRIVER_DESC ": %s protocols 0x%x 0x%x\n",
589 __func__, im_protocols, tm_protocols);
588 590
589 r = nfc_hci_send_event(hdev, NFC_HCI_RF_READER_A_GATE, 591 r = nfc_hci_send_event(hdev, NFC_HCI_RF_READER_A_GATE,
590 NFC_HCI_EVT_END_OPERATION, NULL, 0); 592 NFC_HCI_EVT_END_OPERATION, NULL, 0);
@@ -604,10 +606,10 @@ static int pn544_hci_start_poll(struct nfc_shdlc *shdlc, u32 protocols)
604 if (r < 0) 606 if (r < 0)
605 return r; 607 return r;
606 608
607 if (protocols & (NFC_PROTO_ISO14443_MASK | NFC_PROTO_MIFARE_MASK | 609 if (im_protocols & (NFC_PROTO_ISO14443_MASK | NFC_PROTO_MIFARE_MASK |
608 NFC_PROTO_JEWEL_MASK)) 610 NFC_PROTO_JEWEL_MASK))
609 phases |= 1; /* Type A */ 611 phases |= 1; /* Type A */
610 if (protocols & NFC_PROTO_FELICA_MASK) { 612 if (im_protocols & NFC_PROTO_FELICA_MASK) {
611 phases |= (1 << 2); /* Type F 212 */ 613 phases |= (1 << 2); /* Type F 212 */
612 phases |= (1 << 3); /* Type F 424 */ 614 phases |= (1 << 3); /* Type F 424 */
613 } 615 }
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
index 2574abde8d9..8e6c25f3504 100644
--- a/drivers/of/of_mdio.c
+++ b/drivers/of/of_mdio.c
@@ -57,6 +57,7 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
57 const __be32 *paddr; 57 const __be32 *paddr;
58 u32 addr; 58 u32 addr;
59 int len; 59 int len;
60 bool is_c45;
60 61
61 /* A PHY must have a reg property in the range [0-31] */ 62 /* A PHY must have a reg property in the range [0-31] */
62 paddr = of_get_property(child, "reg", &len); 63 paddr = of_get_property(child, "reg", &len);
@@ -79,11 +80,18 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
79 mdio->irq[addr] = PHY_POLL; 80 mdio->irq[addr] = PHY_POLL;
80 } 81 }
81 82
82 phy = get_phy_device(mdio, addr); 83 is_c45 = of_device_is_compatible(child,
84 "ethernet-phy-ieee802.3-c45");
85 phy = get_phy_device(mdio, addr, is_c45);
86
83 if (!phy || IS_ERR(phy)) { 87 if (!phy || IS_ERR(phy)) {
84 dev_err(&mdio->dev, "error probing PHY at address %i\n", 88 phy = phy_device_create(mdio, addr, 0, false, NULL);
85 addr); 89 if (!phy || IS_ERR(phy)) {
86 continue; 90 dev_err(&mdio->dev,
91 "error creating PHY at address %i\n",
92 addr);
93 continue;
94 }
87 } 95 }
88 96
89 /* Associate the OF node with the device structure so it 97 /* Associate the OF node with the device structure so it
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 7be5e977569..73ac63d901c 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -2700,10 +2700,11 @@ int inline qeth_l3_get_cast_type(struct qeth_card *card, struct sk_buff *skb)
2700 rcu_read_lock(); 2700 rcu_read_lock();
2701 dst = skb_dst(skb); 2701 dst = skb_dst(skb);
2702 if (dst) 2702 if (dst)
2703 n = dst_get_neighbour_noref(dst); 2703 n = dst_neigh_lookup_skb(dst, skb);
2704 if (n) { 2704 if (n) {
2705 cast_type = n->type; 2705 cast_type = n->type;
2706 rcu_read_unlock(); 2706 rcu_read_unlock();
2707 neigh_release(n);
2707 if ((cast_type == RTN_BROADCAST) || 2708 if ((cast_type == RTN_BROADCAST) ||
2708 (cast_type == RTN_MULTICAST) || 2709 (cast_type == RTN_MULTICAST) ||
2709 (cast_type == RTN_ANYCAST)) 2710 (cast_type == RTN_ANYCAST))
diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h
index 0578fa0dc14..42969e8a45b 100644
--- a/drivers/scsi/bnx2fc/bnx2fc.h
+++ b/drivers/scsi/bnx2fc/bnx2fc.h
@@ -59,6 +59,7 @@
59#include "57xx_hsi_bnx2fc.h" 59#include "57xx_hsi_bnx2fc.h"
60#include "bnx2fc_debug.h" 60#include "bnx2fc_debug.h"
61#include "../../net/ethernet/broadcom/cnic_if.h" 61#include "../../net/ethernet/broadcom/cnic_if.h"
62#include "../../net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h"
62#include "bnx2fc_constants.h" 63#include "bnx2fc_constants.h"
63 64
64#define BNX2FC_NAME "bnx2fc" 65#define BNX2FC_NAME "bnx2fc"
@@ -84,6 +85,8 @@
84#define BNX2FC_NUM_MAX_SESS 1024 85#define BNX2FC_NUM_MAX_SESS 1024
85#define BNX2FC_NUM_MAX_SESS_LOG (ilog2(BNX2FC_NUM_MAX_SESS)) 86#define BNX2FC_NUM_MAX_SESS_LOG (ilog2(BNX2FC_NUM_MAX_SESS))
86 87
88#define BNX2FC_MAX_NPIV 256
89
87#define BNX2FC_MAX_OUTSTANDING_CMNDS 2048 90#define BNX2FC_MAX_OUTSTANDING_CMNDS 2048
88#define BNX2FC_CAN_QUEUE BNX2FC_MAX_OUTSTANDING_CMNDS 91#define BNX2FC_CAN_QUEUE BNX2FC_MAX_OUTSTANDING_CMNDS
89#define BNX2FC_ELSTM_XIDS BNX2FC_CAN_QUEUE 92#define BNX2FC_ELSTM_XIDS BNX2FC_CAN_QUEUE
@@ -206,6 +209,7 @@ struct bnx2fc_hba {
206 struct fcoe_statistics_params *stats_buffer; 209 struct fcoe_statistics_params *stats_buffer;
207 dma_addr_t stats_buf_dma; 210 dma_addr_t stats_buf_dma;
208 struct completion stat_req_done; 211 struct completion stat_req_done;
212 struct fcoe_capabilities fcoe_cap;
209 213
210 /*destroy handling */ 214 /*destroy handling */
211 struct timer_list destroy_timer; 215 struct timer_list destroy_timer;
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index f52f668fd24..05fe6620b3f 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -1326,6 +1326,7 @@ static void bnx2fc_hba_destroy(struct bnx2fc_hba *hba)
1326static struct bnx2fc_hba *bnx2fc_hba_create(struct cnic_dev *cnic) 1326static struct bnx2fc_hba *bnx2fc_hba_create(struct cnic_dev *cnic)
1327{ 1327{
1328 struct bnx2fc_hba *hba; 1328 struct bnx2fc_hba *hba;
1329 struct fcoe_capabilities *fcoe_cap;
1329 int rc; 1330 int rc;
1330 1331
1331 hba = kzalloc(sizeof(*hba), GFP_KERNEL); 1332 hba = kzalloc(sizeof(*hba), GFP_KERNEL);
@@ -1361,6 +1362,21 @@ static struct bnx2fc_hba *bnx2fc_hba_create(struct cnic_dev *cnic)
1361 printk(KERN_ERR PFX "em_config:bnx2fc_cmd_mgr_alloc failed\n"); 1362 printk(KERN_ERR PFX "em_config:bnx2fc_cmd_mgr_alloc failed\n");
1362 goto cmgr_err; 1363 goto cmgr_err;
1363 } 1364 }
1365 fcoe_cap = &hba->fcoe_cap;
1366
1367 fcoe_cap->capability1 = BNX2FC_TM_MAX_SQES <<
1368 FCOE_IOS_PER_CONNECTION_SHIFT;
1369 fcoe_cap->capability1 |= BNX2FC_NUM_MAX_SESS <<
1370 FCOE_LOGINS_PER_PORT_SHIFT;
1371 fcoe_cap->capability2 = BNX2FC_MAX_OUTSTANDING_CMNDS <<
1372 FCOE_NUMBER_OF_EXCHANGES_SHIFT;
1373 fcoe_cap->capability2 |= BNX2FC_MAX_NPIV <<
1374 FCOE_NPIV_WWN_PER_PORT_SHIFT;
1375 fcoe_cap->capability3 = BNX2FC_NUM_MAX_SESS <<
1376 FCOE_TARGETS_SUPPORTED_SHIFT;
1377 fcoe_cap->capability3 |= BNX2FC_MAX_OUTSTANDING_CMNDS <<
1378 FCOE_OUTSTANDING_COMMANDS_SHIFT;
1379 fcoe_cap->capability4 = FCOE_CAPABILITY4_STATEFUL;
1364 1380
1365 init_waitqueue_head(&hba->shutdown_wait); 1381 init_waitqueue_head(&hba->shutdown_wait);
1366 init_waitqueue_head(&hba->destroy_wait); 1382 init_waitqueue_head(&hba->destroy_wait);
@@ -1691,6 +1707,32 @@ static void bnx2fc_unbind_pcidev(struct bnx2fc_hba *hba)
1691 hba->pcidev = NULL; 1707 hba->pcidev = NULL;
1692} 1708}
1693 1709
1710/**
1711 * bnx2fc_ulp_get_stats - cnic callback to populate FCoE stats
1712 *
1713 * @handle: transport handle pointing to adapter struture
1714 */
1715static int bnx2fc_ulp_get_stats(void *handle)
1716{
1717 struct bnx2fc_hba *hba = handle;
1718 struct cnic_dev *cnic;
1719 struct fcoe_stats_info *stats_addr;
1720
1721 if (!hba)
1722 return -EINVAL;
1723
1724 cnic = hba->cnic;
1725 stats_addr = &cnic->stats_addr->fcoe_stat;
1726 if (!stats_addr)
1727 return -EINVAL;
1728
1729 strncpy(stats_addr->version, BNX2FC_VERSION,
1730 sizeof(stats_addr->version));
1731 stats_addr->txq_size = BNX2FC_SQ_WQES_MAX;
1732 stats_addr->rxq_size = BNX2FC_CQ_WQES_MAX;
1733
1734 return 0;
1735}
1694 1736
1695 1737
1696/** 1738/**
@@ -1944,6 +1986,7 @@ static void bnx2fc_ulp_init(struct cnic_dev *dev)
1944 adapter_count++; 1986 adapter_count++;
1945 mutex_unlock(&bnx2fc_dev_lock); 1987 mutex_unlock(&bnx2fc_dev_lock);
1946 1988
1989 dev->fcoe_cap = &hba->fcoe_cap;
1947 clear_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic); 1990 clear_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic);
1948 rc = dev->register_device(dev, CNIC_ULP_FCOE, 1991 rc = dev->register_device(dev, CNIC_ULP_FCOE,
1949 (void *) hba); 1992 (void *) hba);
@@ -2643,4 +2686,5 @@ static struct cnic_ulp_ops bnx2fc_cnic_cb = {
2643 .cnic_stop = bnx2fc_ulp_stop, 2686 .cnic_stop = bnx2fc_ulp_stop,
2644 .indicate_kcqes = bnx2fc_indicate_kcqe, 2687 .indicate_kcqes = bnx2fc_indicate_kcqe,
2645 .indicate_netevent = bnx2fc_indicate_netevent, 2688 .indicate_netevent = bnx2fc_indicate_netevent,
2689 .cnic_get_stats = bnx2fc_ulp_get_stats,
2646}; 2690};
diff --git a/drivers/scsi/bnx2i/57xx_iscsi_hsi.h b/drivers/scsi/bnx2i/57xx_iscsi_hsi.h
index dc0a08e69c8..f2db5fe7bdc 100644
--- a/drivers/scsi/bnx2i/57xx_iscsi_hsi.h
+++ b/drivers/scsi/bnx2i/57xx_iscsi_hsi.h
@@ -267,7 +267,13 @@ struct bnx2i_cmd_request {
267 * task statistics for write response 267 * task statistics for write response
268 */ 268 */
269struct bnx2i_write_resp_task_stat { 269struct bnx2i_write_resp_task_stat {
270 u32 num_data_ins; 270#if defined(__BIG_ENDIAN)
271 u16 num_r2ts;
272 u16 num_data_outs;
273#elif defined(__LITTLE_ENDIAN)
274 u16 num_data_outs;
275 u16 num_r2ts;
276#endif
271}; 277};
272 278
273/* 279/*
@@ -275,11 +281,11 @@ struct bnx2i_write_resp_task_stat {
275 */ 281 */
276struct bnx2i_read_resp_task_stat { 282struct bnx2i_read_resp_task_stat {
277#if defined(__BIG_ENDIAN) 283#if defined(__BIG_ENDIAN)
278 u16 num_data_outs; 284 u16 reserved;
279 u16 num_r2ts; 285 u16 num_data_ins;
280#elif defined(__LITTLE_ENDIAN) 286#elif defined(__LITTLE_ENDIAN)
281 u16 num_r2ts; 287 u16 num_data_ins;
282 u16 num_data_outs; 288 u16 reserved;
283#endif 289#endif
284}; 290};
285 291
diff --git a/drivers/scsi/bnx2i/bnx2i.h b/drivers/scsi/bnx2i/bnx2i.h
index 0c53c28dc3d..2e326145825 100644
--- a/drivers/scsi/bnx2i/bnx2i.h
+++ b/drivers/scsi/bnx2i/bnx2i.h
@@ -44,6 +44,8 @@
44#include "57xx_iscsi_hsi.h" 44#include "57xx_iscsi_hsi.h"
45#include "57xx_iscsi_constants.h" 45#include "57xx_iscsi_constants.h"
46 46
47#include "../../net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h"
48
47#define BNX2_ISCSI_DRIVER_NAME "bnx2i" 49#define BNX2_ISCSI_DRIVER_NAME "bnx2i"
48 50
49#define BNX2I_MAX_ADAPTERS 8 51#define BNX2I_MAX_ADAPTERS 8
@@ -126,6 +128,43 @@
126#define REG_WR(__hba, offset, val) \ 128#define REG_WR(__hba, offset, val) \
127 writel(val, __hba->regview + offset) 129 writel(val, __hba->regview + offset)
128 130
131#ifdef CONFIG_32BIT
132#define GET_STATS_64(__hba, dst, field) \
133 do { \
134 spin_lock_bh(&__hba->stat_lock); \
135 dst->field##_lo = __hba->stats.field##_lo; \
136 dst->field##_hi = __hba->stats.field##_hi; \
137 spin_unlock_bh(&__hba->stat_lock); \
138 } while (0)
139
140#define ADD_STATS_64(__hba, field, len) \
141 do { \
142 if (spin_trylock(&__hba->stat_lock)) { \
143 if (__hba->stats.field##_lo + len < \
144 __hba->stats.field##_lo) \
145 __hba->stats.field##_hi++; \
146 __hba->stats.field##_lo += len; \
147 spin_unlock(&__hba->stat_lock); \
148 } \
149 } while (0)
150
151#else
152#define GET_STATS_64(__hba, dst, field) \
153 do { \
154 u64 val, *out; \
155 \
156 val = __hba->bnx2i_stats.field; \
157 out = (u64 *)&__hba->stats.field##_lo; \
158 *out = cpu_to_le64(val); \
159 out = (u64 *)&dst->field##_lo; \
160 *out = cpu_to_le64(val); \
161 } while (0)
162
163#define ADD_STATS_64(__hba, field, len) \
164 do { \
165 __hba->bnx2i_stats.field += len; \
166 } while (0)
167#endif
129 168
130/** 169/**
131 * struct generic_pdu_resc - login pdu resource structure 170 * struct generic_pdu_resc - login pdu resource structure
@@ -288,6 +327,15 @@ struct iscsi_cid_queue {
288 struct bnx2i_conn **conn_cid_tbl; 327 struct bnx2i_conn **conn_cid_tbl;
289}; 328};
290 329
330
331struct bnx2i_stats_info {
332 u64 rx_pdus;
333 u64 rx_bytes;
334 u64 tx_pdus;
335 u64 tx_bytes;
336};
337
338
291/** 339/**
292 * struct bnx2i_hba - bnx2i adapter structure 340 * struct bnx2i_hba - bnx2i adapter structure
293 * 341 *
@@ -341,6 +389,8 @@ struct iscsi_cid_queue {
341 * @ctx_ccell_tasks: captures number of ccells and tasks supported by 389 * @ctx_ccell_tasks: captures number of ccells and tasks supported by
342 * currently offloaded connection, used to decode 390 * currently offloaded connection, used to decode
343 * context memory 391 * context memory
392 * @stat_lock: spin lock used by the statistic collector (32 bit)
393 * @stats: local iSCSI statistic collection place holder
344 * 394 *
345 * Adapter Data Structure 395 * Adapter Data Structure
346 */ 396 */
@@ -426,6 +476,12 @@ struct bnx2i_hba {
426 u32 num_sess_opened; 476 u32 num_sess_opened;
427 u32 num_conn_opened; 477 u32 num_conn_opened;
428 unsigned int ctx_ccell_tasks; 478 unsigned int ctx_ccell_tasks;
479
480#ifdef CONFIG_32BIT
481 spinlock_t stat_lock;
482#endif
483 struct bnx2i_stats_info bnx2i_stats;
484 struct iscsi_stats_info stats;
429}; 485};
430 486
431 487
@@ -749,6 +805,8 @@ extern void bnx2i_ulp_init(struct cnic_dev *dev);
749extern void bnx2i_ulp_exit(struct cnic_dev *dev); 805extern void bnx2i_ulp_exit(struct cnic_dev *dev);
750extern void bnx2i_start(void *handle); 806extern void bnx2i_start(void *handle);
751extern void bnx2i_stop(void *handle); 807extern void bnx2i_stop(void *handle);
808extern int bnx2i_get_stats(void *handle);
809
752extern struct bnx2i_hba *get_adapter_list_head(void); 810extern struct bnx2i_hba *get_adapter_list_head(void);
753 811
754struct bnx2i_conn *bnx2i_get_conn_from_id(struct bnx2i_hba *hba, 812struct bnx2i_conn *bnx2i_get_conn_from_id(struct bnx2i_hba *hba,
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index ece47e50228..49e8b056ca1 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -1350,6 +1350,7 @@ int bnx2i_process_scsi_cmd_resp(struct iscsi_session *session,
1350 struct cqe *cqe) 1350 struct cqe *cqe)
1351{ 1351{
1352 struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data; 1352 struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
1353 struct bnx2i_hba *hba = bnx2i_conn->hba;
1353 struct bnx2i_cmd_response *resp_cqe; 1354 struct bnx2i_cmd_response *resp_cqe;
1354 struct bnx2i_cmd *bnx2i_cmd; 1355 struct bnx2i_cmd *bnx2i_cmd;
1355 struct iscsi_task *task; 1356 struct iscsi_task *task;
@@ -1367,16 +1368,26 @@ int bnx2i_process_scsi_cmd_resp(struct iscsi_session *session,
1367 1368
1368 if (bnx2i_cmd->req.op_attr & ISCSI_CMD_REQUEST_READ) { 1369 if (bnx2i_cmd->req.op_attr & ISCSI_CMD_REQUEST_READ) {
1369 conn->datain_pdus_cnt += 1370 conn->datain_pdus_cnt +=
1370 resp_cqe->task_stat.read_stat.num_data_outs; 1371 resp_cqe->task_stat.read_stat.num_data_ins;
1371 conn->rxdata_octets += 1372 conn->rxdata_octets +=
1372 bnx2i_cmd->req.total_data_transfer_length; 1373 bnx2i_cmd->req.total_data_transfer_length;
1374 ADD_STATS_64(hba, rx_pdus,
1375 resp_cqe->task_stat.read_stat.num_data_ins);
1376 ADD_STATS_64(hba, rx_bytes,
1377 bnx2i_cmd->req.total_data_transfer_length);
1373 } else { 1378 } else {
1374 conn->dataout_pdus_cnt += 1379 conn->dataout_pdus_cnt +=
1375 resp_cqe->task_stat.read_stat.num_data_outs; 1380 resp_cqe->task_stat.write_stat.num_data_outs;
1376 conn->r2t_pdus_cnt += 1381 conn->r2t_pdus_cnt +=
1377 resp_cqe->task_stat.read_stat.num_r2ts; 1382 resp_cqe->task_stat.write_stat.num_r2ts;
1378 conn->txdata_octets += 1383 conn->txdata_octets +=
1379 bnx2i_cmd->req.total_data_transfer_length; 1384 bnx2i_cmd->req.total_data_transfer_length;
1385 ADD_STATS_64(hba, tx_pdus,
1386 resp_cqe->task_stat.write_stat.num_data_outs);
1387 ADD_STATS_64(hba, tx_bytes,
1388 bnx2i_cmd->req.total_data_transfer_length);
1389 ADD_STATS_64(hba, rx_pdus,
1390 resp_cqe->task_stat.write_stat.num_r2ts);
1380 } 1391 }
1381 bnx2i_iscsi_unmap_sg_list(bnx2i_cmd); 1392 bnx2i_iscsi_unmap_sg_list(bnx2i_cmd);
1382 1393
@@ -1961,6 +1972,7 @@ static int bnx2i_process_new_cqes(struct bnx2i_conn *bnx2i_conn)
1961{ 1972{
1962 struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data; 1973 struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
1963 struct iscsi_session *session = conn->session; 1974 struct iscsi_session *session = conn->session;
1975 struct bnx2i_hba *hba = bnx2i_conn->hba;
1964 struct qp_info *qp; 1976 struct qp_info *qp;
1965 struct bnx2i_nop_in_msg *nopin; 1977 struct bnx2i_nop_in_msg *nopin;
1966 int tgt_async_msg; 1978 int tgt_async_msg;
@@ -1973,7 +1985,7 @@ static int bnx2i_process_new_cqes(struct bnx2i_conn *bnx2i_conn)
1973 1985
1974 if (!qp->cq_virt) { 1986 if (!qp->cq_virt) {
1975 printk(KERN_ALERT "bnx2i (%s): cq resr freed in bh execution!", 1987 printk(KERN_ALERT "bnx2i (%s): cq resr freed in bh execution!",
1976 bnx2i_conn->hba->netdev->name); 1988 hba->netdev->name);
1977 goto out; 1989 goto out;
1978 } 1990 }
1979 while (1) { 1991 while (1) {
@@ -1985,9 +1997,9 @@ static int bnx2i_process_new_cqes(struct bnx2i_conn *bnx2i_conn)
1985 if (nopin->op_code == ISCSI_OP_NOOP_IN && 1997 if (nopin->op_code == ISCSI_OP_NOOP_IN &&
1986 nopin->itt == (u16) RESERVED_ITT) { 1998 nopin->itt == (u16) RESERVED_ITT) {
1987 printk(KERN_ALERT "bnx2i: Unsolicited " 1999 printk(KERN_ALERT "bnx2i: Unsolicited "
1988 "NOP-In detected for suspended " 2000 "NOP-In detected for suspended "
1989 "connection dev=%s!\n", 2001 "connection dev=%s!\n",
1990 bnx2i_conn->hba->netdev->name); 2002 hba->netdev->name);
1991 bnx2i_unsol_pdu_adjust_rq(bnx2i_conn); 2003 bnx2i_unsol_pdu_adjust_rq(bnx2i_conn);
1992 goto cqe_out; 2004 goto cqe_out;
1993 } 2005 }
@@ -2001,7 +2013,7 @@ static int bnx2i_process_new_cqes(struct bnx2i_conn *bnx2i_conn)
2001 /* Run the kthread engine only for data cmds 2013 /* Run the kthread engine only for data cmds
2002 All other cmds will be completed in this bh! */ 2014 All other cmds will be completed in this bh! */
2003 bnx2i_queue_scsi_cmd_resp(session, bnx2i_conn, nopin); 2015 bnx2i_queue_scsi_cmd_resp(session, bnx2i_conn, nopin);
2004 break; 2016 goto done;
2005 case ISCSI_OP_LOGIN_RSP: 2017 case ISCSI_OP_LOGIN_RSP:
2006 bnx2i_process_login_resp(session, bnx2i_conn, 2018 bnx2i_process_login_resp(session, bnx2i_conn,
2007 qp->cq_cons_qe); 2019 qp->cq_cons_qe);
@@ -2044,11 +2056,15 @@ static int bnx2i_process_new_cqes(struct bnx2i_conn *bnx2i_conn)
2044 printk(KERN_ALERT "bnx2i: unknown opcode 0x%x\n", 2056 printk(KERN_ALERT "bnx2i: unknown opcode 0x%x\n",
2045 nopin->op_code); 2057 nopin->op_code);
2046 } 2058 }
2059
2060 ADD_STATS_64(hba, rx_pdus, 1);
2061 ADD_STATS_64(hba, rx_bytes, nopin->data_length);
2062done:
2047 if (!tgt_async_msg) { 2063 if (!tgt_async_msg) {
2048 if (!atomic_read(&bnx2i_conn->ep->num_active_cmds)) 2064 if (!atomic_read(&bnx2i_conn->ep->num_active_cmds))
2049 printk(KERN_ALERT "bnx2i (%s): no active cmd! " 2065 printk(KERN_ALERT "bnx2i (%s): no active cmd! "
2050 "op 0x%x\n", 2066 "op 0x%x\n",
2051 bnx2i_conn->hba->netdev->name, 2067 hba->netdev->name,
2052 nopin->op_code); 2068 nopin->op_code);
2053 else 2069 else
2054 atomic_dec(&bnx2i_conn->ep->num_active_cmds); 2070 atomic_dec(&bnx2i_conn->ep->num_active_cmds);
@@ -2692,6 +2708,7 @@ struct cnic_ulp_ops bnx2i_cnic_cb = {
2692 .cm_remote_close = bnx2i_cm_remote_close, 2708 .cm_remote_close = bnx2i_cm_remote_close,
2693 .cm_remote_abort = bnx2i_cm_remote_abort, 2709 .cm_remote_abort = bnx2i_cm_remote_abort,
2694 .iscsi_nl_send_msg = bnx2i_send_nl_mesg, 2710 .iscsi_nl_send_msg = bnx2i_send_nl_mesg,
2711 .cnic_get_stats = bnx2i_get_stats,
2695 .owner = THIS_MODULE 2712 .owner = THIS_MODULE
2696}; 2713};
2697 2714
diff --git a/drivers/scsi/bnx2i/bnx2i_init.c b/drivers/scsi/bnx2i/bnx2i_init.c
index 8b6816706ee..b17637aab9a 100644
--- a/drivers/scsi/bnx2i/bnx2i_init.c
+++ b/drivers/scsi/bnx2i/bnx2i_init.c
@@ -381,6 +381,46 @@ void bnx2i_ulp_exit(struct cnic_dev *dev)
381 381
382 382
383/** 383/**
384 * bnx2i_get_stats - Retrieve various statistic from iSCSI offload
385 * @handle: bnx2i_hba
386 *
387 * function callback exported via bnx2i - cnic driver interface to
388 * retrieve various iSCSI offload related statistics.
389 */
390int bnx2i_get_stats(void *handle)
391{
392 struct bnx2i_hba *hba = handle;
393 struct iscsi_stats_info *stats;
394
395 if (!hba)
396 return -EINVAL;
397
398 stats = (struct iscsi_stats_info *)hba->cnic->stats_addr;
399
400 if (!stats)
401 return -ENOMEM;
402
403 strlcpy(stats->version, DRV_MODULE_VERSION, sizeof(stats->version));
404 memcpy(stats->mac_add1 + 2, hba->cnic->mac_addr, ETH_ALEN);
405
406 stats->max_frame_size = hba->netdev->mtu;
407 stats->txq_size = hba->max_sqes;
408 stats->rxq_size = hba->max_cqes;
409
410 stats->txq_avg_depth = 0;
411 stats->rxq_avg_depth = 0;
412
413 GET_STATS_64(hba, stats, rx_pdus);
414 GET_STATS_64(hba, stats, rx_bytes);
415
416 GET_STATS_64(hba, stats, tx_pdus);
417 GET_STATS_64(hba, stats, tx_bytes);
418
419 return 0;
420}
421
422
423/**
384 * bnx2i_percpu_thread_create - Create a receive thread for an 424 * bnx2i_percpu_thread_create - Create a receive thread for an
385 * online CPU 425 * online CPU
386 * 426 *
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
index f8d516b5316..b40ac017012 100644
--- a/drivers/scsi/bnx2i/bnx2i_iscsi.c
+++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
@@ -874,6 +874,11 @@ struct bnx2i_hba *bnx2i_alloc_hba(struct cnic_dev *cnic)
874 hba->conn_ctx_destroy_tmo = 2 * HZ; 874 hba->conn_ctx_destroy_tmo = 2 * HZ;
875 } 875 }
876 876
877#ifdef CONFIG_32BIT
878 spin_lock_init(&hba->stat_lock);
879#endif
880 memset(&hba->stats, 0, sizeof(struct iscsi_stats_info));
881
877 if (iscsi_host_add(shost, &hba->pcidev->dev)) 882 if (iscsi_host_add(shost, &hba->pcidev->dev))
878 goto free_dump_mem; 883 goto free_dump_mem;
879 return hba; 884 return hba;
@@ -1181,12 +1186,18 @@ static int
1181bnx2i_mtask_xmit(struct iscsi_conn *conn, struct iscsi_task *task) 1186bnx2i_mtask_xmit(struct iscsi_conn *conn, struct iscsi_task *task)
1182{ 1187{
1183 struct bnx2i_conn *bnx2i_conn = conn->dd_data; 1188 struct bnx2i_conn *bnx2i_conn = conn->dd_data;
1189 struct bnx2i_hba *hba = bnx2i_conn->hba;
1184 struct bnx2i_cmd *cmd = task->dd_data; 1190 struct bnx2i_cmd *cmd = task->dd_data;
1185 1191
1186 memset(bnx2i_conn->gen_pdu.req_buf, 0, ISCSI_DEF_MAX_RECV_SEG_LEN); 1192 memset(bnx2i_conn->gen_pdu.req_buf, 0, ISCSI_DEF_MAX_RECV_SEG_LEN);
1187 1193
1188 bnx2i_setup_cmd_wqe_template(cmd); 1194 bnx2i_setup_cmd_wqe_template(cmd);
1189 bnx2i_conn->gen_pdu.req_buf_size = task->data_count; 1195 bnx2i_conn->gen_pdu.req_buf_size = task->data_count;
1196
1197 /* Tx PDU/data length count */
1198 ADD_STATS_64(hba, tx_pdus, 1);
1199 ADD_STATS_64(hba, tx_bytes, task->data_count);
1200
1190 if (task->data_count) { 1201 if (task->data_count) {
1191 memcpy(bnx2i_conn->gen_pdu.req_buf, task->data, 1202 memcpy(bnx2i_conn->gen_pdu.req_buf, task->data,
1192 task->data_count); 1203 task->data_count);
diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
index 36739da8bc1..49692a1ac44 100644
--- a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
+++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
@@ -966,7 +966,8 @@ static int init_act_open(struct cxgbi_sock *csk)
966 csk->saddr.sin_addr.s_addr = chba->ipv4addr; 966 csk->saddr.sin_addr.s_addr = chba->ipv4addr;
967 967
968 csk->rss_qid = 0; 968 csk->rss_qid = 0;
969 csk->l2t = t3_l2t_get(t3dev, dst, ndev); 969 csk->l2t = t3_l2t_get(t3dev, dst, ndev,
970 &csk->daddr.sin_addr.s_addr);
970 if (!csk->l2t) { 971 if (!csk->l2t) {
971 pr_err("NO l2t available.\n"); 972 pr_err("NO l2t available.\n");
972 return -EINVAL; 973 return -EINVAL;
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index 5a4a3bfc60c..cc9a06897f3 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -1142,7 +1142,7 @@ static int init_act_open(struct cxgbi_sock *csk)
1142 cxgbi_sock_set_flag(csk, CTPF_HAS_ATID); 1142 cxgbi_sock_set_flag(csk, CTPF_HAS_ATID);
1143 cxgbi_sock_get(csk); 1143 cxgbi_sock_get(csk);
1144 1144
1145 n = dst_get_neighbour_noref(csk->dst); 1145 n = dst_neigh_lookup(csk->dst, &csk->daddr.sin_addr.s_addr);
1146 if (!n) { 1146 if (!n) {
1147 pr_err("%s, can't get neighbour of csk->dst.\n", ndev->name); 1147 pr_err("%s, can't get neighbour of csk->dst.\n", ndev->name);
1148 goto rel_resource; 1148 goto rel_resource;
@@ -1182,9 +1182,12 @@ static int init_act_open(struct cxgbi_sock *csk)
1182 1182
1183 cxgbi_sock_set_state(csk, CTP_ACTIVE_OPEN); 1183 cxgbi_sock_set_state(csk, CTP_ACTIVE_OPEN);
1184 send_act_open_req(csk, skb, csk->l2t); 1184 send_act_open_req(csk, skb, csk->l2t);
1185 neigh_release(n);
1185 return 0; 1186 return 0;
1186 1187
1187rel_resource: 1188rel_resource:
1189 if (n)
1190 neigh_release(n);
1188 if (skb) 1191 if (skb)
1189 __kfree_skb(skb); 1192 __kfree_skb(skb);
1190 return -EINVAL; 1193 return -EINVAL;
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index d9253db1d0e..b44c1cff311 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -494,7 +494,7 @@ static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr)
494 goto err_out; 494 goto err_out;
495 } 495 }
496 dst = &rt->dst; 496 dst = &rt->dst;
497 n = dst_get_neighbour_noref(dst); 497 n = dst_neigh_lookup(dst, &daddr->sin_addr.s_addr);
498 if (!n) { 498 if (!n) {
499 err = -ENODEV; 499 err = -ENODEV;
500 goto rel_rt; 500 goto rel_rt;
@@ -506,7 +506,7 @@ static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr)
506 &daddr->sin_addr.s_addr, ntohs(daddr->sin_port), 506 &daddr->sin_addr.s_addr, ntohs(daddr->sin_port),
507 ndev->name); 507 ndev->name);
508 err = -ENETUNREACH; 508 err = -ENETUNREACH;
509 goto rel_rt; 509 goto rel_neigh;
510 } 510 }
511 511
512 if (ndev->flags & IFF_LOOPBACK) { 512 if (ndev->flags & IFF_LOOPBACK) {
@@ -521,7 +521,7 @@ static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr)
521 pr_info("dst %pI4, %s, NOT cxgbi device.\n", 521 pr_info("dst %pI4, %s, NOT cxgbi device.\n",
522 &daddr->sin_addr.s_addr, ndev->name); 522 &daddr->sin_addr.s_addr, ndev->name);
523 err = -ENETUNREACH; 523 err = -ENETUNREACH;
524 goto rel_rt; 524 goto rel_neigh;
525 } 525 }
526 log_debug(1 << CXGBI_DBG_SOCK, 526 log_debug(1 << CXGBI_DBG_SOCK,
527 "route to %pI4 :%u, ndev p#%d,%s, cdev 0x%p.\n", 527 "route to %pI4 :%u, ndev p#%d,%s, cdev 0x%p.\n",
@@ -531,7 +531,7 @@ static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr)
531 csk = cxgbi_sock_create(cdev); 531 csk = cxgbi_sock_create(cdev);
532 if (!csk) { 532 if (!csk) {
533 err = -ENOMEM; 533 err = -ENOMEM;
534 goto rel_rt; 534 goto rel_neigh;
535 } 535 }
536 csk->cdev = cdev; 536 csk->cdev = cdev;
537 csk->port_id = port; 537 csk->port_id = port;
@@ -541,9 +541,13 @@ static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr)
541 csk->daddr.sin_port = daddr->sin_port; 541 csk->daddr.sin_port = daddr->sin_port;
542 csk->daddr.sin_family = daddr->sin_family; 542 csk->daddr.sin_family = daddr->sin_family;
543 csk->saddr.sin_addr.s_addr = fl4.saddr; 543 csk->saddr.sin_addr.s_addr = fl4.saddr;
544 neigh_release(n);
544 545
545 return csk; 546 return csk;
546 547
548rel_neigh:
549 neigh_release(n);
550
547rel_rt: 551rel_rt:
548 ip_rt_put(rt); 552 ip_rt_put(rt);
549 if (csk) 553 if (csk)
diff --git a/drivers/scsi/scsi_netlink.c b/drivers/scsi/scsi_netlink.c
index c77628afbf9..8818dd681c1 100644
--- a/drivers/scsi/scsi_netlink.c
+++ b/drivers/scsi/scsi_netlink.c
@@ -486,6 +486,10 @@ void
486scsi_netlink_init(void) 486scsi_netlink_init(void)
487{ 487{
488 int error; 488 int error;
489 struct netlink_kernel_cfg cfg = {
490 .input = scsi_nl_rcv_msg,
491 .groups = SCSI_NL_GRP_CNT,
492 };
489 493
490 INIT_LIST_HEAD(&scsi_nl_drivers); 494 INIT_LIST_HEAD(&scsi_nl_drivers);
491 495
@@ -497,8 +501,7 @@ scsi_netlink_init(void)
497 } 501 }
498 502
499 scsi_nl_sock = netlink_kernel_create(&init_net, NETLINK_SCSITRANSPORT, 503 scsi_nl_sock = netlink_kernel_create(&init_net, NETLINK_SCSITRANSPORT,
500 SCSI_NL_GRP_CNT, scsi_nl_rcv_msg, NULL, 504 THIS_MODULE, &cfg);
501 THIS_MODULE);
502 if (!scsi_nl_sock) { 505 if (!scsi_nl_sock) {
503 printk(KERN_ERR "%s: register of receive handler failed\n", 506 printk(KERN_ERR "%s: register of receive handler failed\n",
504 __func__); 507 __func__);
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 1cf640e575d..6042954d8f3 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -2936,7 +2936,10 @@ EXPORT_SYMBOL_GPL(iscsi_unregister_transport);
2936static __init int iscsi_transport_init(void) 2936static __init int iscsi_transport_init(void)
2937{ 2937{
2938 int err; 2938 int err;
2939 2939 struct netlink_kernel_cfg cfg = {
2940 .groups = 1,
2941 .input = iscsi_if_rx,
2942 };
2940 printk(KERN_INFO "Loading iSCSI transport class v%s.\n", 2943 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
2941 ISCSI_TRANSPORT_VERSION); 2944 ISCSI_TRANSPORT_VERSION);
2942 2945
@@ -2966,8 +2969,8 @@ static __init int iscsi_transport_init(void)
2966 if (err) 2969 if (err)
2967 goto unregister_conn_class; 2970 goto unregister_conn_class;
2968 2971
2969 nls = netlink_kernel_create(&init_net, NETLINK_ISCSI, 1, iscsi_if_rx, 2972 nls = netlink_kernel_create(&init_net, NETLINK_ISCSI,
2970 NULL, THIS_MODULE); 2973 THIS_MODULE, &cfg);
2971 if (!nls) { 2974 if (!nls) {
2972 err = -ENOBUFS; 2975 err = -ENOBUFS;
2973 goto unregister_session_class; 2976 goto unregister_session_class;
diff --git a/drivers/ssb/b43_pci_bridge.c b/drivers/ssb/b43_pci_bridge.c
index f551e537614..266aa1648a0 100644
--- a/drivers/ssb/b43_pci_bridge.c
+++ b/drivers/ssb/b43_pci_bridge.c
@@ -36,6 +36,7 @@ static const struct pci_device_id b43_pci_bridge_tbl[] = {
36 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4328) }, 36 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4328) },
37 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4329) }, 37 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4329) },
38 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x432b) }, 38 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x432b) },
39 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x432c) },
39 { 0, }, 40 { 0, },
40}; 41};
41MODULE_DEVICE_TABLE(pci, b43_pci_bridge_tbl); 42MODULE_DEVICE_TABLE(pci, b43_pci_bridge_tbl);
diff --git a/drivers/ssb/scan.c b/drivers/ssb/scan.c
index 266c7c5c86d..ab4627cf111 100644
--- a/drivers/ssb/scan.c
+++ b/drivers/ssb/scan.c
@@ -90,6 +90,8 @@ const char *ssb_core_name(u16 coreid)
90 return "ARM 1176"; 90 return "ARM 1176";
91 case SSB_DEV_ARM_7TDMI: 91 case SSB_DEV_ARM_7TDMI:
92 return "ARM 7TDMI"; 92 return "ARM 7TDMI";
93 case SSB_DEV_ARM_CM3:
94 return "ARM Cortex M3";
93 } 95 }
94 return "UNKNOWN"; 96 return "UNKNOWN";
95} 97}
diff --git a/drivers/staging/gdm72xx/netlink_k.c b/drivers/staging/gdm72xx/netlink_k.c
index 51665132c61..87c3a07ed80 100644
--- a/drivers/staging/gdm72xx/netlink_k.c
+++ b/drivers/staging/gdm72xx/netlink_k.c
@@ -88,13 +88,15 @@ struct sock *netlink_init(int unit, void (*cb)(struct net_device *dev, u16 type,
88 void *msg, int len)) 88 void *msg, int len))
89{ 89{
90 struct sock *sock; 90 struct sock *sock;
91 struct netlink_kernel_cfg cfg = {
92 .input = netlink_rcv,
93 };
91 94
92#if !defined(DEFINE_MUTEX) 95#if !defined(DEFINE_MUTEX)
93 init_MUTEX(&netlink_mutex); 96 init_MUTEX(&netlink_mutex);
94#endif 97#endif
95 98
96 sock = netlink_kernel_create(&init_net, unit, 0, netlink_rcv, NULL, 99 sock = netlink_kernel_create(&init_net, unit, THIS_MODULE, &cfg);
97 THIS_MODULE);
98 100
99 if (sock) 101 if (sock)
100 rcv_cb = cb; 102 rcv_cb = cb;
@@ -127,8 +129,12 @@ int netlink_send(struct sock *sock, int group, u16 type, void *msg, int len)
127 } 129 }
128 130
129 seq++; 131 seq++;
130 nlh = NLMSG_PUT(skb, 0, seq, type, len); 132 nlh = nlmsg_put(skb, 0, seq, type, len, 0);
131 memcpy(NLMSG_DATA(nlh), msg, len); 133 if (!nlh) {
134 kfree_skb(skb);
135 return -EMSGSIZE;
136 }
137 memcpy(nlmsg_data(nlh), msg, len);
132 138
133 NETLINK_CB(skb).pid = 0; 139 NETLINK_CB(skb).pid = 0;
134 NETLINK_CB(skb).dst_group = 0; 140 NETLINK_CB(skb).dst_group = 0;
@@ -144,7 +150,5 @@ int netlink_send(struct sock *sock, int group, u16 type, void *msg, int len)
144 } 150 }
145 ret = 0; 151 ret = 0;
146 } 152 }
147
148nlmsg_failure:
149 return ret; 153 return ret;
150} 154}
diff --git a/include/linux/bcma/bcma.h b/include/linux/bcma/bcma.h
index 8deaf6d050c..12334f9190c 100644
--- a/include/linux/bcma/bcma.h
+++ b/include/linux/bcma/bcma.h
@@ -70,6 +70,13 @@ struct bcma_host_ops {
70 70
71/* Core-ID values. */ 71/* Core-ID values. */
72#define BCMA_CORE_OOB_ROUTER 0x367 /* Out of band */ 72#define BCMA_CORE_OOB_ROUTER 0x367 /* Out of band */
73#define BCMA_CORE_4706_CHIPCOMMON 0x500
74#define BCMA_CORE_4706_SOC_RAM 0x50E
75#define BCMA_CORE_4706_MAC_GBIT 0x52D
76#define BCMA_CORE_AMEMC 0x52E /* DDR1/2 memory controller core */
77#define BCMA_CORE_ALTA 0x534 /* I2S core */
78#define BCMA_CORE_4706_MAC_GBIT_COMMON 0x5DC
79#define BCMA_CORE_DDR23_PHY 0x5DD
73#define BCMA_CORE_INVALID 0x700 80#define BCMA_CORE_INVALID 0x700
74#define BCMA_CORE_CHIPCOMMON 0x800 81#define BCMA_CORE_CHIPCOMMON 0x800
75#define BCMA_CORE_ILINE20 0x801 82#define BCMA_CORE_ILINE20 0x801
diff --git a/include/linux/can.h b/include/linux/can.h
index 9a19bcb3eea..018055efc03 100644
--- a/include/linux/can.h
+++ b/include/linux/can.h
@@ -21,7 +21,7 @@
21/* special address description flags for the CAN_ID */ 21/* special address description flags for the CAN_ID */
22#define CAN_EFF_FLAG 0x80000000U /* EFF/SFF is set in the MSB */ 22#define CAN_EFF_FLAG 0x80000000U /* EFF/SFF is set in the MSB */
23#define CAN_RTR_FLAG 0x40000000U /* remote transmission request */ 23#define CAN_RTR_FLAG 0x40000000U /* remote transmission request */
24#define CAN_ERR_FLAG 0x20000000U /* error frame */ 24#define CAN_ERR_FLAG 0x20000000U /* error message frame */
25 25
26/* valid bits in CAN ID for frame formats */ 26/* valid bits in CAN ID for frame formats */
27#define CAN_SFF_MASK 0x000007FFU /* standard frame format (SFF) */ 27#define CAN_SFF_MASK 0x000007FFU /* standard frame format (SFF) */
@@ -32,32 +32,84 @@
32 * Controller Area Network Identifier structure 32 * Controller Area Network Identifier structure
33 * 33 *
34 * bit 0-28 : CAN identifier (11/29 bit) 34 * bit 0-28 : CAN identifier (11/29 bit)
35 * bit 29 : error frame flag (0 = data frame, 1 = error frame) 35 * bit 29 : error message frame flag (0 = data frame, 1 = error message)
36 * bit 30 : remote transmission request flag (1 = rtr frame) 36 * bit 30 : remote transmission request flag (1 = rtr frame)
37 * bit 31 : frame format flag (0 = standard 11 bit, 1 = extended 29 bit) 37 * bit 31 : frame format flag (0 = standard 11 bit, 1 = extended 29 bit)
38 */ 38 */
39typedef __u32 canid_t; 39typedef __u32 canid_t;
40 40
41#define CAN_SFF_ID_BITS 11
42#define CAN_EFF_ID_BITS 29
43
41/* 44/*
42 * Controller Area Network Error Frame Mask structure 45 * Controller Area Network Error Message Frame Mask structure
43 * 46 *
44 * bit 0-28 : error class mask (see include/linux/can/error.h) 47 * bit 0-28 : error class mask (see include/linux/can/error.h)
45 * bit 29-31 : set to zero 48 * bit 29-31 : set to zero
46 */ 49 */
47typedef __u32 can_err_mask_t; 50typedef __u32 can_err_mask_t;
48 51
52/* CAN payload length and DLC definitions according to ISO 11898-1 */
53#define CAN_MAX_DLC 8
54#define CAN_MAX_DLEN 8
55
56/* CAN FD payload length and DLC definitions according to ISO 11898-7 */
57#define CANFD_MAX_DLC 15
58#define CANFD_MAX_DLEN 64
59
49/** 60/**
50 * struct can_frame - basic CAN frame structure 61 * struct can_frame - basic CAN frame structure
51 * @can_id: the CAN ID of the frame and CAN_*_FLAG flags, see above. 62 * @can_id: CAN ID of the frame and CAN_*_FLAG flags, see canid_t definition
52 * @can_dlc: the data length field of the CAN frame 63 * @can_dlc: frame payload length in byte (0 .. 8) aka data length code
53 * @data: the CAN frame payload. 64 * N.B. the DLC field from ISO 11898-1 Chapter 8.4.2.3 has a 1:1
65 * mapping of the 'data length code' to the real payload length
66 * @data: CAN frame payload (up to 8 byte)
54 */ 67 */
55struct can_frame { 68struct can_frame {
56 canid_t can_id; /* 32 bit CAN_ID + EFF/RTR/ERR flags */ 69 canid_t can_id; /* 32 bit CAN_ID + EFF/RTR/ERR flags */
57 __u8 can_dlc; /* data length code: 0 .. 8 */ 70 __u8 can_dlc; /* frame payload length in byte (0 .. CAN_MAX_DLEN) */
58 __u8 data[8] __attribute__((aligned(8))); 71 __u8 data[CAN_MAX_DLEN] __attribute__((aligned(8)));
72};
73
74/*
75 * defined bits for canfd_frame.flags
76 *
77 * As the default for CAN FD should be to support the high data rate in the
78 * payload section of the frame (HDR) and to support up to 64 byte in the
79 * data section (EDL) the bits are only set in the non-default case.
80 * Btw. as long as there's no real implementation for CAN FD network driver
81 * these bits are only preliminary.
82 *
83 * RX: NOHDR/NOEDL - info about received CAN FD frame
84 * ESI - bit from originating CAN controller
85 * TX: NOHDR/NOEDL - control per-frame settings if supported by CAN controller
86 * ESI - bit is set by local CAN controller
87 */
88#define CANFD_NOHDR 0x01 /* frame without high data rate */
89#define CANFD_NOEDL 0x02 /* frame without extended data length */
90#define CANFD_ESI 0x04 /* error state indicator */
91
92/**
93 * struct canfd_frame - CAN flexible data rate frame structure
94 * @can_id: CAN ID of the frame and CAN_*_FLAG flags, see canid_t definition
95 * @len: frame payload length in byte (0 .. CANFD_MAX_DLEN)
96 * @flags: additional flags for CAN FD
97 * @__res0: reserved / padding
98 * @__res1: reserved / padding
99 * @data: CAN FD frame payload (up to CANFD_MAX_DLEN byte)
100 */
101struct canfd_frame {
102 canid_t can_id; /* 32 bit CAN_ID + EFF/RTR/ERR flags */
103 __u8 len; /* frame payload length in byte */
104 __u8 flags; /* additional flags for CAN FD */
105 __u8 __res0; /* reserved / padding */
106 __u8 __res1; /* reserved / padding */
107 __u8 data[CANFD_MAX_DLEN] __attribute__((aligned(8)));
59}; 108};
60 109
110#define CAN_MTU (sizeof(struct can_frame))
111#define CANFD_MTU (sizeof(struct canfd_frame))
112
61/* particular protocols of the protocol family PF_CAN */ 113/* particular protocols of the protocol family PF_CAN */
62#define CAN_RAW 1 /* RAW sockets */ 114#define CAN_RAW 1 /* RAW sockets */
63#define CAN_BCM 2 /* Broadcast Manager */ 115#define CAN_BCM 2 /* Broadcast Manager */
@@ -97,7 +149,7 @@ struct sockaddr_can {
97 * <received_can_id> & mask == can_id & mask 149 * <received_can_id> & mask == can_id & mask
98 * 150 *
99 * The filter can be inverted (CAN_INV_FILTER bit set in can_id) or it can 151 * The filter can be inverted (CAN_INV_FILTER bit set in can_id) or it can
100 * filter for error frames (CAN_ERR_FLAG bit set in mask). 152 * filter for error message frames (CAN_ERR_FLAG bit set in mask).
101 */ 153 */
102struct can_filter { 154struct can_filter {
103 canid_t can_id; 155 canid_t can_id;
diff --git a/include/linux/can/core.h b/include/linux/can/core.h
index 0ccc1cd28b9..78c6c52073a 100644
--- a/include/linux/can/core.h
+++ b/include/linux/can/core.h
@@ -17,10 +17,10 @@
17#include <linux/skbuff.h> 17#include <linux/skbuff.h>
18#include <linux/netdevice.h> 18#include <linux/netdevice.h>
19 19
20#define CAN_VERSION "20090105" 20#define CAN_VERSION "20120528"
21 21
22/* increment this number each time you change some user-space interface */ 22/* increment this number each time you change some user-space interface */
23#define CAN_ABI_VERSION "8" 23#define CAN_ABI_VERSION "9"
24 24
25#define CAN_VERSION_STRING "rev " CAN_VERSION " abi " CAN_ABI_VERSION 25#define CAN_VERSION_STRING "rev " CAN_VERSION " abi " CAN_ABI_VERSION
26 26
diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h
index 5d2efe7e3f1..ee5a771fb20 100644
--- a/include/linux/can/dev.h
+++ b/include/linux/can/dev.h
@@ -61,23 +61,40 @@ struct can_priv {
61 * To be used in the CAN netdriver receive path to ensure conformance with 61 * To be used in the CAN netdriver receive path to ensure conformance with
62 * ISO 11898-1 Chapter 8.4.2.3 (DLC field) 62 * ISO 11898-1 Chapter 8.4.2.3 (DLC field)
63 */ 63 */
64#define get_can_dlc(i) (min_t(__u8, (i), 8)) 64#define get_can_dlc(i) (min_t(__u8, (i), CAN_MAX_DLC))
65#define get_canfd_dlc(i) (min_t(__u8, (i), CANFD_MAX_DLC))
65 66
66/* Drop a given socketbuffer if it does not contain a valid CAN frame. */ 67/* Drop a given socketbuffer if it does not contain a valid CAN frame. */
67static inline int can_dropped_invalid_skb(struct net_device *dev, 68static inline int can_dropped_invalid_skb(struct net_device *dev,
68 struct sk_buff *skb) 69 struct sk_buff *skb)
69{ 70{
70 const struct can_frame *cf = (struct can_frame *)skb->data; 71 const struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
71 72
72 if (unlikely(skb->len != sizeof(*cf) || cf->can_dlc > 8)) { 73 if (skb->protocol == htons(ETH_P_CAN)) {
73 kfree_skb(skb); 74 if (unlikely(skb->len != CAN_MTU ||
74 dev->stats.tx_dropped++; 75 cfd->len > CAN_MAX_DLEN))
75 return 1; 76 goto inval_skb;
76 } 77 } else if (skb->protocol == htons(ETH_P_CANFD)) {
78 if (unlikely(skb->len != CANFD_MTU ||
79 cfd->len > CANFD_MAX_DLEN))
80 goto inval_skb;
81 } else
82 goto inval_skb;
77 83
78 return 0; 84 return 0;
85
86inval_skb:
87 kfree_skb(skb);
88 dev->stats.tx_dropped++;
89 return 1;
79} 90}
80 91
92/* get data length from can_dlc with sanitized can_dlc */
93u8 can_dlc2len(u8 can_dlc);
94
95/* map the sanitized data length to an appropriate data length code */
96u8 can_len2dlc(u8 len);
97
81struct net_device *alloc_candev(int sizeof_priv, unsigned int echo_skb_max); 98struct net_device *alloc_candev(int sizeof_priv, unsigned int echo_skb_max);
82void free_candev(struct net_device *dev); 99void free_candev(struct net_device *dev);
83 100
diff --git a/include/linux/can/error.h b/include/linux/can/error.h
index 63e855ea6b8..7b7148bded7 100644
--- a/include/linux/can/error.h
+++ b/include/linux/can/error.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * linux/can/error.h 2 * linux/can/error.h
3 * 3 *
4 * Definitions of the CAN error frame to be filtered and passed to the user. 4 * Definitions of the CAN error messages to be filtered and passed to the user.
5 * 5 *
6 * Author: Oliver Hartkopp <oliver.hartkopp@volkswagen.de> 6 * Author: Oliver Hartkopp <oliver.hartkopp@volkswagen.de>
7 * Copyright (c) 2002-2007 Volkswagen Group Electronic Research 7 * Copyright (c) 2002-2007 Volkswagen Group Electronic Research
@@ -12,7 +12,7 @@
12#ifndef CAN_ERROR_H 12#ifndef CAN_ERROR_H
13#define CAN_ERROR_H 13#define CAN_ERROR_H
14 14
15#define CAN_ERR_DLC 8 /* dlc for error frames */ 15#define CAN_ERR_DLC 8 /* dlc for error message frames */
16 16
17/* error class (mask) in can_id */ 17/* error class (mask) in can_id */
18#define CAN_ERR_TX_TIMEOUT 0x00000001U /* TX timeout (by netdevice driver) */ 18#define CAN_ERR_TX_TIMEOUT 0x00000001U /* TX timeout (by netdevice driver) */
diff --git a/include/linux/can/raw.h b/include/linux/can/raw.h
index 781f3a3701b..a814062b071 100644
--- a/include/linux/can/raw.h
+++ b/include/linux/can/raw.h
@@ -23,7 +23,8 @@ enum {
23 CAN_RAW_FILTER = 1, /* set 0 .. n can_filter(s) */ 23 CAN_RAW_FILTER = 1, /* set 0 .. n can_filter(s) */
24 CAN_RAW_ERR_FILTER, /* set filter for error frames */ 24 CAN_RAW_ERR_FILTER, /* set filter for error frames */
25 CAN_RAW_LOOPBACK, /* local loopback (default:on) */ 25 CAN_RAW_LOOPBACK, /* local loopback (default:on) */
26 CAN_RAW_RECV_OWN_MSGS /* receive my own msgs (default:off) */ 26 CAN_RAW_RECV_OWN_MSGS, /* receive my own msgs (default:off) */
27 CAN_RAW_FD_FRAMES, /* allow CAN FD frames (default:off) */
27}; 28};
28 29
29#endif 30#endif
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
index 3d406e0ede6..98a27cccedf 100644
--- a/include/linux/etherdevice.h
+++ b/include/linux/etherdevice.h
@@ -138,6 +138,17 @@ static inline void random_ether_addr(u8 *addr)
138} 138}
139 139
140/** 140/**
141 * eth_broadcast_addr - Assign broadcast address
142 * @addr: Pointer to a six-byte array containing the Ethernet address
143 *
144 * Assign the broadcast address to the given address array.
145 */
146static inline void eth_broadcast_addr(u8 *addr)
147{
148 memset(addr, 0xff, ETH_ALEN);
149}
150
151/**
141 * eth_hw_addr_random - Generate software assigned random Ethernet and 152 * eth_hw_addr_random - Generate software assigned random Ethernet and
142 * set device flag 153 * set device flag
143 * @dev: pointer to net_device structure 154 * @dev: pointer to net_device structure
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index e17fa714058..21eff418091 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -137,6 +137,35 @@ struct ethtool_eeprom {
137}; 137};
138 138
139/** 139/**
140 * struct ethtool_eee - Energy Efficient Ethernet information
141 * @cmd: ETHTOOL_{G,S}EEE
142 * @supported: Mask of %SUPPORTED_* flags for the speed/duplex combinations
143 * for which there is EEE support.
144 * @advertised: Mask of %ADVERTISED_* flags for the speed/duplex combinations
145 * advertised as eee capable.
146 * @lp_advertised: Mask of %ADVERTISED_* flags for the speed/duplex
147 * combinations advertised by the link partner as eee capable.
148 * @eee_active: Result of the eee auto negotiation.
149 * @eee_enabled: EEE configured mode (enabled/disabled).
150 * @tx_lpi_enabled: Whether the interface should assert its tx lpi, given
151 * that eee was negotiated.
152 * @tx_lpi_timer: Time in microseconds the interface delays prior to asserting
153 * its tx lpi (after reaching 'idle' state). Effective only when eee
154 * was negotiated and tx_lpi_enabled was set.
155 */
156struct ethtool_eee {
157 __u32 cmd;
158 __u32 supported;
159 __u32 advertised;
160 __u32 lp_advertised;
161 __u32 eee_active;
162 __u32 eee_enabled;
163 __u32 tx_lpi_enabled;
164 __u32 tx_lpi_timer;
165 __u32 reserved[2];
166};
167
168/**
140 * struct ethtool_modinfo - plugin module eeprom information 169 * struct ethtool_modinfo - plugin module eeprom information
141 * @cmd: %ETHTOOL_GMODULEINFO 170 * @cmd: %ETHTOOL_GMODULEINFO
142 * @type: Standard the module information conforms to %ETH_MODULE_SFF_xxxx 171 * @type: Standard the module information conforms to %ETH_MODULE_SFF_xxxx
@@ -945,6 +974,8 @@ static inline u32 ethtool_rxfh_indir_default(u32 index, u32 n_rx_rings)
945 * @get_module_info: Get the size and type of the eeprom contained within 974 * @get_module_info: Get the size and type of the eeprom contained within
946 * a plug-in module. 975 * a plug-in module.
947 * @get_module_eeprom: Get the eeprom information from the plug-in module 976 * @get_module_eeprom: Get the eeprom information from the plug-in module
977 * @get_eee: Get Energy-Efficient (EEE) supported and status.
978 * @set_eee: Set EEE status (enable/disable) as well as LPI timers.
948 * 979 *
949 * All operations are optional (i.e. the function pointer may be set 980 * All operations are optional (i.e. the function pointer may be set
950 * to %NULL) and callers must take this into account. Callers must 981 * to %NULL) and callers must take this into account. Callers must
@@ -1011,6 +1042,8 @@ struct ethtool_ops {
1011 struct ethtool_modinfo *); 1042 struct ethtool_modinfo *);
1012 int (*get_module_eeprom)(struct net_device *, 1043 int (*get_module_eeprom)(struct net_device *,
1013 struct ethtool_eeprom *, u8 *); 1044 struct ethtool_eeprom *, u8 *);
1045 int (*get_eee)(struct net_device *, struct ethtool_eee *);
1046 int (*set_eee)(struct net_device *, struct ethtool_eee *);
1014 1047
1015 1048
1016}; 1049};
@@ -1089,6 +1122,8 @@ struct ethtool_ops {
1089#define ETHTOOL_GET_TS_INFO 0x00000041 /* Get time stamping and PHC info */ 1122#define ETHTOOL_GET_TS_INFO 0x00000041 /* Get time stamping and PHC info */
1090#define ETHTOOL_GMODULEINFO 0x00000042 /* Get plug-in module information */ 1123#define ETHTOOL_GMODULEINFO 0x00000042 /* Get plug-in module information */
1091#define ETHTOOL_GMODULEEEPROM 0x00000043 /* Get plug-in module eeprom */ 1124#define ETHTOOL_GMODULEEEPROM 0x00000043 /* Get plug-in module eeprom */
1125#define ETHTOOL_GEEE 0x00000044 /* Get EEE settings */
1126#define ETHTOOL_SEEE 0x00000045 /* Set EEE settings */
1092 1127
1093/* compatibility with older code */ 1128/* compatibility with older code */
1094#define SPARC_ETH_GSET ETHTOOL_GSET 1129#define SPARC_ETH_GSET ETHTOOL_GSET
@@ -1118,6 +1153,10 @@ struct ethtool_ops {
1118#define SUPPORTED_10000baseR_FEC (1 << 20) 1153#define SUPPORTED_10000baseR_FEC (1 << 20)
1119#define SUPPORTED_20000baseMLD2_Full (1 << 21) 1154#define SUPPORTED_20000baseMLD2_Full (1 << 21)
1120#define SUPPORTED_20000baseKR2_Full (1 << 22) 1155#define SUPPORTED_20000baseKR2_Full (1 << 22)
1156#define SUPPORTED_40000baseKR4_Full (1 << 23)
1157#define SUPPORTED_40000baseCR4_Full (1 << 24)
1158#define SUPPORTED_40000baseSR4_Full (1 << 25)
1159#define SUPPORTED_40000baseLR4_Full (1 << 26)
1121 1160
1122/* Indicates what features are advertised by the interface. */ 1161/* Indicates what features are advertised by the interface. */
1123#define ADVERTISED_10baseT_Half (1 << 0) 1162#define ADVERTISED_10baseT_Half (1 << 0)
@@ -1143,6 +1182,10 @@ struct ethtool_ops {
1143#define ADVERTISED_10000baseR_FEC (1 << 20) 1182#define ADVERTISED_10000baseR_FEC (1 << 20)
1144#define ADVERTISED_20000baseMLD2_Full (1 << 21) 1183#define ADVERTISED_20000baseMLD2_Full (1 << 21)
1145#define ADVERTISED_20000baseKR2_Full (1 << 22) 1184#define ADVERTISED_20000baseKR2_Full (1 << 22)
1185#define ADVERTISED_40000baseKR4_Full (1 << 23)
1186#define ADVERTISED_40000baseCR4_Full (1 << 24)
1187#define ADVERTISED_40000baseSR4_Full (1 << 25)
1188#define ADVERTISED_40000baseLR4_Full (1 << 26)
1146 1189
1147/* The following are all involved in forcing a particular link 1190/* The following are all involved in forcing a particular link
1148 * mode for the device for setting things. When getting the 1191 * mode for the device for setting things. When getting the
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index ce9af891851..318fc1f705b 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -568,6 +568,26 @@ struct ieee80211s_hdr {
568#define MESH_FLAGS_PS_DEEP 0x4 568#define MESH_FLAGS_PS_DEEP 0x4
569 569
570/** 570/**
571 * enum ieee80211_preq_flags - mesh PREQ element flags
572 *
573 * @IEEE80211_PREQ_PROACTIVE_PREP_FLAG: proactive PREP subfield
574 */
575enum ieee80211_preq_flags {
576 IEEE80211_PREQ_PROACTIVE_PREP_FLAG = 1<<2,
577};
578
579/**
580 * enum ieee80211_preq_target_flags - mesh PREQ element per target flags
581 *
582 * @IEEE80211_PREQ_TO_FLAG: target only subfield
583 * @IEEE80211_PREQ_USN_FLAG: unknown target HWMP sequence number subfield
584 */
585enum ieee80211_preq_target_flags {
586 IEEE80211_PREQ_TO_FLAG = 1<<0,
587 IEEE80211_PREQ_USN_FLAG = 1<<2,
588};
589
590/**
571 * struct ieee80211_quiet_ie 591 * struct ieee80211_quiet_ie
572 * 592 *
573 * This structure refers to "Quiet information element" 593 * This structure refers to "Quiet information element"
@@ -1443,7 +1463,7 @@ enum ieee80211_tdls_actioncode {
1443 * 1463 *
1444 * @IEEE80211_SYNC_METHOD_NEIGHBOR_OFFSET: the default synchronization method 1464 * @IEEE80211_SYNC_METHOD_NEIGHBOR_OFFSET: the default synchronization method
1445 * @IEEE80211_SYNC_METHOD_VENDOR: a vendor specific synchronization method 1465 * @IEEE80211_SYNC_METHOD_VENDOR: a vendor specific synchronization method
1446 * that will be specified in a vendor specific information element 1466 * that will be specified in a vendor specific information element
1447 */ 1467 */
1448enum { 1468enum {
1449 IEEE80211_SYNC_METHOD_NEIGHBOR_OFFSET = 1, 1469 IEEE80211_SYNC_METHOD_NEIGHBOR_OFFSET = 1,
@@ -1455,7 +1475,7 @@ enum {
1455 * 1475 *
1456 * @IEEE80211_PATH_PROTOCOL_HWMP: the default path selection protocol 1476 * @IEEE80211_PATH_PROTOCOL_HWMP: the default path selection protocol
1457 * @IEEE80211_PATH_PROTOCOL_VENDOR: a vendor specific protocol that will 1477 * @IEEE80211_PATH_PROTOCOL_VENDOR: a vendor specific protocol that will
1458 * be specified in a vendor specific information element 1478 * be specified in a vendor specific information element
1459 */ 1479 */
1460enum { 1480enum {
1461 IEEE80211_PATH_PROTOCOL_HWMP = 1, 1481 IEEE80211_PATH_PROTOCOL_HWMP = 1,
@@ -1467,13 +1487,35 @@ enum {
1467 * 1487 *
1468 * @IEEE80211_PATH_METRIC_AIRTIME: the default path selection metric 1488 * @IEEE80211_PATH_METRIC_AIRTIME: the default path selection metric
1469 * @IEEE80211_PATH_METRIC_VENDOR: a vendor specific metric that will be 1489 * @IEEE80211_PATH_METRIC_VENDOR: a vendor specific metric that will be
1470 * specified in a vendor specific information element 1490 * specified in a vendor specific information element
1471 */ 1491 */
1472enum { 1492enum {
1473 IEEE80211_PATH_METRIC_AIRTIME = 1, 1493 IEEE80211_PATH_METRIC_AIRTIME = 1,
1474 IEEE80211_PATH_METRIC_VENDOR = 255, 1494 IEEE80211_PATH_METRIC_VENDOR = 255,
1475}; 1495};
1476 1496
1497/**
1498 * enum ieee80211_root_mode_identifier - root mesh STA mode identifier
1499 *
 1500 * These attributes are used by dot11MeshHWMPRootMode to set root mesh STA mode
 1501 *
 1502 * @IEEE80211_ROOTMODE_NO_ROOT: the mesh STA is not a root mesh STA (default)
 1503 * @IEEE80211_ROOTMODE_ROOT: the mesh STA is a root mesh STA if greater than
 1504 *	this value
 1505 * @IEEE80211_PROACTIVE_PREQ_NO_PREP: the mesh STA is a root mesh STA that
 1506 *	supports the proactive PREQ with proactive PREP subfield set to 0
 1507 * @IEEE80211_PROACTIVE_PREQ_WITH_PREP: the mesh STA is a root mesh STA that
 1508 *	supports the proactive PREQ with proactive PREP subfield set to 1
 1509 * @IEEE80211_PROACTIVE_RANN: the mesh STA is a root mesh STA that supports
 1510 *	the proactive RANN
1511 */
1512enum ieee80211_root_mode_identifier {
1513 IEEE80211_ROOTMODE_NO_ROOT = 0,
1514 IEEE80211_ROOTMODE_ROOT = 1,
1515 IEEE80211_PROACTIVE_PREQ_NO_PREP = 2,
1516 IEEE80211_PROACTIVE_PREQ_WITH_PREP = 3,
1517 IEEE80211_PROACTIVE_RANN = 4,
1518};
1477 1519
1478/* 1520/*
1479 * IEEE 802.11-2007 7.3.2.9 Country information element 1521 * IEEE 802.11-2007 7.3.2.9 Country information element
@@ -1589,6 +1631,10 @@ enum ieee80211_sa_query_action {
1589 1631
1590#define WLAN_OUI_WFA 0x506f9a 1632#define WLAN_OUI_WFA 0x506f9a
1591#define WLAN_OUI_TYPE_WFA_P2P 9 1633#define WLAN_OUI_TYPE_WFA_P2P 9
1634#define WLAN_OUI_MICROSOFT 0x0050f2
1635#define WLAN_OUI_TYPE_MICROSOFT_WPA 1
1636#define WLAN_OUI_TYPE_MICROSOFT_WMM 2
1637#define WLAN_OUI_TYPE_MICROSOFT_WPS 4
1592 1638
1593/* 1639/*
1594 * WMM/802.11e Tspec Element 1640 * WMM/802.11e Tspec Element
diff --git a/include/linux/if.h b/include/linux/if.h
index f995c663c49..1ec407b01e4 100644
--- a/include/linux/if.h
+++ b/include/linux/if.h
@@ -81,6 +81,8 @@
81#define IFF_UNICAST_FLT 0x20000 /* Supports unicast filtering */ 81#define IFF_UNICAST_FLT 0x20000 /* Supports unicast filtering */
82#define IFF_TEAM_PORT 0x40000 /* device used as team port */ 82#define IFF_TEAM_PORT 0x40000 /* device used as team port */
83#define IFF_SUPP_NOFCS 0x80000 /* device supports sending custom FCS */ 83#define IFF_SUPP_NOFCS 0x80000 /* device supports sending custom FCS */
84#define IFF_LIVE_ADDR_CHANGE 0x100000 /* device supports hardware address
85 * change when it's running */
84 86
85 87
86#define IF_GET_IFACE 0x0001 /* for querying only */ 88#define IF_GET_IFACE 0x0001 /* for querying only */
diff --git a/include/linux/if_ether.h b/include/linux/if_ether.h
index 56d907a2c80..167ce5b363d 100644
--- a/include/linux/if_ether.h
+++ b/include/linux/if_ether.h
@@ -105,7 +105,8 @@
105#define ETH_P_WAN_PPP 0x0007 /* Dummy type for WAN PPP frames*/ 105#define ETH_P_WAN_PPP 0x0007 /* Dummy type for WAN PPP frames*/
106#define ETH_P_PPP_MP 0x0008 /* Dummy type for PPP MP frames */ 106#define ETH_P_PPP_MP 0x0008 /* Dummy type for PPP MP frames */
107#define ETH_P_LOCALTALK 0x0009 /* Localtalk pseudo type */ 107#define ETH_P_LOCALTALK 0x0009 /* Localtalk pseudo type */
108#define ETH_P_CAN 0x000C /* Controller Area Network */ 108#define ETH_P_CAN 0x000C /* CAN: Controller Area Network */
109#define ETH_P_CANFD 0x000D /* CANFD: CAN flexible data rate*/
109#define ETH_P_PPPTALK 0x0010 /* Dummy type for Atalk over PPP*/ 110#define ETH_P_PPPTALK 0x0010 /* Dummy type for Atalk over PPP*/
110#define ETH_P_TR_802_2 0x0011 /* 802.2 frames */ 111#define ETH_P_TR_802_2 0x0011 /* 802.2 frames */
111#define ETH_P_MOBITEX 0x0015 /* Mobitex (kaz@cafe.net) */ 112#define ETH_P_MOBITEX 0x0015 /* Mobitex (kaz@cafe.net) */
diff --git a/include/linux/if_team.h b/include/linux/if_team.h
index 8185f57a9c7..99efd60fa8c 100644
--- a/include/linux/if_team.h
+++ b/include/linux/if_team.h
@@ -60,9 +60,11 @@ struct team_port {
60 unsigned int mtu; 60 unsigned int mtu;
61 } orig; 61 } orig;
62 62
63 struct rcu_head rcu; 63 long mode_priv[0];
64}; 64};
65 65
66extern bool team_port_enabled(struct team_port *port);
67
66struct team_mode_ops { 68struct team_mode_ops {
67 int (*init)(struct team *team); 69 int (*init)(struct team *team);
68 void (*exit)(struct team *team); 70 void (*exit)(struct team *team);
@@ -73,6 +75,8 @@ struct team_mode_ops {
73 int (*port_enter)(struct team *team, struct team_port *port); 75 int (*port_enter)(struct team *team, struct team_port *port);
74 void (*port_leave)(struct team *team, struct team_port *port); 76 void (*port_leave)(struct team *team, struct team_port *port);
75 void (*port_change_mac)(struct team *team, struct team_port *port); 77 void (*port_change_mac)(struct team *team, struct team_port *port);
78 void (*port_enabled)(struct team *team, struct team_port *port);
79 void (*port_disabled)(struct team *team, struct team_port *port);
76}; 80};
77 81
78enum team_option_type { 82enum team_option_type {
@@ -82,6 +86,11 @@ enum team_option_type {
82 TEAM_OPTION_TYPE_BOOL, 86 TEAM_OPTION_TYPE_BOOL,
83}; 87};
84 88
89struct team_option_inst_info {
90 u32 array_index;
91 struct team_port *port; /* != NULL if per-port */
92};
93
85struct team_gsetter_ctx { 94struct team_gsetter_ctx {
86 union { 95 union {
87 u32 u32_val; 96 u32 u32_val;
@@ -92,23 +101,28 @@ struct team_gsetter_ctx {
92 } bin_val; 101 } bin_val;
93 bool bool_val; 102 bool bool_val;
94 } data; 103 } data;
95 struct team_port *port; 104 struct team_option_inst_info *info;
96}; 105};
97 106
98struct team_option { 107struct team_option {
99 struct list_head list; 108 struct list_head list;
100 const char *name; 109 const char *name;
101 bool per_port; 110 bool per_port;
111 unsigned int array_size; /* != 0 means the option is array */
102 enum team_option_type type; 112 enum team_option_type type;
113 int (*init)(struct team *team, struct team_option_inst_info *info);
103 int (*getter)(struct team *team, struct team_gsetter_ctx *ctx); 114 int (*getter)(struct team *team, struct team_gsetter_ctx *ctx);
104 int (*setter)(struct team *team, struct team_gsetter_ctx *ctx); 115 int (*setter)(struct team *team, struct team_gsetter_ctx *ctx);
105}; 116};
106 117
118extern void team_option_inst_set_change(struct team_option_inst_info *opt_inst_info);
119extern void team_options_change_check(struct team *team);
120
107struct team_mode { 121struct team_mode {
108 struct list_head list;
109 const char *kind; 122 const char *kind;
110 struct module *owner; 123 struct module *owner;
111 size_t priv_size; 124 size_t priv_size;
125 size_t port_priv_size;
112 const struct team_mode_ops *ops; 126 const struct team_mode_ops *ops;
113}; 127};
114 128
@@ -178,8 +192,8 @@ extern int team_options_register(struct team *team,
178extern void team_options_unregister(struct team *team, 192extern void team_options_unregister(struct team *team,
179 const struct team_option *option, 193 const struct team_option *option,
180 size_t option_count); 194 size_t option_count);
181extern int team_mode_register(struct team_mode *mode); 195extern int team_mode_register(const struct team_mode *mode);
182extern int team_mode_unregister(struct team_mode *mode); 196extern void team_mode_unregister(const struct team_mode *mode);
183 197
184#endif /* __KERNEL__ */ 198#endif /* __KERNEL__ */
185 199
@@ -241,6 +255,7 @@ enum {
241 TEAM_ATTR_OPTION_DATA, /* dynamic */ 255 TEAM_ATTR_OPTION_DATA, /* dynamic */
242 TEAM_ATTR_OPTION_REMOVED, /* flag */ 256 TEAM_ATTR_OPTION_REMOVED, /* flag */
243 TEAM_ATTR_OPTION_PORT_IFINDEX, /* u32 */ /* for per-port options */ 257 TEAM_ATTR_OPTION_PORT_IFINDEX, /* u32 */ /* for per-port options */
258 TEAM_ATTR_OPTION_ARRAY_INDEX, /* u32 */ /* for array options */
244 259
245 __TEAM_ATTR_OPTION_MAX, 260 __TEAM_ATTR_OPTION_MAX,
246 TEAM_ATTR_OPTION_MAX = __TEAM_ATTR_OPTION_MAX - 1, 261 TEAM_ATTR_OPTION_MAX = __TEAM_ATTR_OPTION_MAX - 1,
diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h
index 597f4a9f324..67f9ddacb70 100644
--- a/include/linux/inetdevice.h
+++ b/include/linux/inetdevice.h
@@ -38,6 +38,7 @@ enum
38 IPV4_DEVCONF_ACCEPT_LOCAL, 38 IPV4_DEVCONF_ACCEPT_LOCAL,
39 IPV4_DEVCONF_SRC_VMARK, 39 IPV4_DEVCONF_SRC_VMARK,
40 IPV4_DEVCONF_PROXY_ARP_PVLAN, 40 IPV4_DEVCONF_PROXY_ARP_PVLAN,
41 IPV4_DEVCONF_ROUTE_LOCALNET,
41 __IPV4_DEVCONF_MAX 42 __IPV4_DEVCONF_MAX
42}; 43};
43 44
@@ -131,6 +132,7 @@ static inline void ipv4_devconf_setall(struct in_device *in_dev)
131#define IN_DEV_PROMOTE_SECONDARIES(in_dev) \ 132#define IN_DEV_PROMOTE_SECONDARIES(in_dev) \
132 IN_DEV_ORCONF((in_dev), \ 133 IN_DEV_ORCONF((in_dev), \
133 PROMOTE_SECONDARIES) 134 PROMOTE_SECONDARIES)
135#define IN_DEV_ROUTE_LOCALNET(in_dev) IN_DEV_ORCONF(in_dev, ROUTE_LOCALNET)
134 136
135#define IN_DEV_RX_REDIRECTS(in_dev) \ 137#define IN_DEV_RX_REDIRECTS(in_dev) \
136 ((IN_DEV_FORWARD(in_dev) && \ 138 ((IN_DEV_FORWARD(in_dev) && \
diff --git a/include/linux/ks8851_mll.h b/include/linux/ks8851_mll.h
new file mode 100644
index 00000000000..e9ccfb59ed3
--- /dev/null
+++ b/include/linux/ks8851_mll.h
@@ -0,0 +1,33 @@
1/*
 2 * ks8851_mll platform data struct definition
3 * Copyright (c) 2012 BTicino S.p.A.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
17 */
18
19#ifndef _LINUX_KS8851_MLL_H
20#define _LINUX_KS8851_MLL_H
21
22#include <linux/if_ether.h>
23
24/**
25 * struct ks8851_mll_platform_data - Platform data of the KS8851_MLL network driver
 26 * @macaddr: The MAC address of the device, set to all 0:s to use the one in
 27 * the chip.
28 */
29struct ks8851_mll_platform_data {
30 u8 mac_addr[ETH_ALEN];
31};
32
33#endif
diff --git a/include/linux/mdio.h b/include/linux/mdio.h
index dfb947959ec..7cccafe50e7 100644
--- a/include/linux/mdio.h
+++ b/include/linux/mdio.h
@@ -43,7 +43,11 @@
43#define MDIO_PKGID2 15 43#define MDIO_PKGID2 15
44#define MDIO_AN_ADVERTISE 16 /* AN advertising (base page) */ 44#define MDIO_AN_ADVERTISE 16 /* AN advertising (base page) */
45#define MDIO_AN_LPA 19 /* AN LP abilities (base page) */ 45#define MDIO_AN_LPA 19 /* AN LP abilities (base page) */
46#define MDIO_PCS_EEE_ABLE 20 /* EEE Capability register */
47#define MDIO_PCS_EEE_WK_ERR 22 /* EEE wake error counter */
46#define MDIO_PHYXS_LNSTAT 24 /* PHY XGXS lane state */ 48#define MDIO_PHYXS_LNSTAT 24 /* PHY XGXS lane state */
49#define MDIO_AN_EEE_ADV 60 /* EEE advertisement */
50#define MDIO_AN_EEE_LPABLE 61 /* EEE link partner ability */
47 51
48/* Media-dependent registers. */ 52/* Media-dependent registers. */
49#define MDIO_PMA_10GBT_SWAPPOL 130 /* 10GBASE-T pair swap & polarity */ 53#define MDIO_PMA_10GBT_SWAPPOL 130 /* 10GBASE-T pair swap & polarity */
@@ -56,7 +60,6 @@
56#define MDIO_PCS_10GBRT_STAT2 33 /* 10GBASE-R/-T PCS status 2 */ 60#define MDIO_PCS_10GBRT_STAT2 33 /* 10GBASE-R/-T PCS status 2 */
57#define MDIO_AN_10GBT_CTRL 32 /* 10GBASE-T auto-negotiation control */ 61#define MDIO_AN_10GBT_CTRL 32 /* 10GBASE-T auto-negotiation control */
58#define MDIO_AN_10GBT_STAT 33 /* 10GBASE-T auto-negotiation status */ 62#define MDIO_AN_10GBT_STAT 33 /* 10GBASE-T auto-negotiation status */
59#define MDIO_AN_EEE_ADV 60 /* EEE advertisement */
60 63
61/* LASI (Link Alarm Status Interrupt) registers, defined by XENPAK MSA. */ 64/* LASI (Link Alarm Status Interrupt) registers, defined by XENPAK MSA. */
62#define MDIO_PMA_LASI_RXCTRL 0x9000 /* RX_ALARM control */ 65#define MDIO_PMA_LASI_RXCTRL 0x9000 /* RX_ALARM control */
@@ -82,6 +85,7 @@
82#define MDIO_AN_CTRL1_RESTART BMCR_ANRESTART 85#define MDIO_AN_CTRL1_RESTART BMCR_ANRESTART
83#define MDIO_AN_CTRL1_ENABLE BMCR_ANENABLE 86#define MDIO_AN_CTRL1_ENABLE BMCR_ANENABLE
84#define MDIO_AN_CTRL1_XNP 0x2000 /* Enable extended next page */ 87#define MDIO_AN_CTRL1_XNP 0x2000 /* Enable extended next page */
88#define MDIO_PCS_CTRL1_CLKSTOP_EN 0x400 /* Stop the clock during LPI */
85 89
86/* 10 Gb/s */ 90/* 10 Gb/s */
87#define MDIO_CTRL1_SPEED10G (MDIO_CTRL1_SPEEDSELEXT | 0x00) 91#define MDIO_CTRL1_SPEED10G (MDIO_CTRL1_SPEEDSELEXT | 0x00)
@@ -237,9 +241,25 @@
237#define MDIO_AN_10GBT_STAT_MS 0x4000 /* Master/slave config */ 241#define MDIO_AN_10GBT_STAT_MS 0x4000 /* Master/slave config */
238#define MDIO_AN_10GBT_STAT_MSFLT 0x8000 /* Master/slave config fault */ 242#define MDIO_AN_10GBT_STAT_MSFLT 0x8000 /* Master/slave config fault */
239 243
240/* AN EEE Advertisement register. */ 244/* EEE Supported/Advertisement/LP Advertisement registers.
241#define MDIO_AN_EEE_ADV_100TX 0x0002 /* Advertise 100TX EEE cap */ 245 *
242#define MDIO_AN_EEE_ADV_1000T 0x0004 /* Advertise 1000T EEE cap */ 246 * EEE capability Register (3.20), Advertisement (7.60) and
247 * Link partner ability (7.61) registers have and can use the same identical
248 * bit masks.
249 */
250#define MDIO_AN_EEE_ADV_100TX 0x0002 /* Advertise 100TX EEE cap */
251#define MDIO_AN_EEE_ADV_1000T 0x0004 /* Advertise 1000T EEE cap */
 252/* Note: the two defines above can potentially be used by user-land
 253 * and so we cannot remove them now.
254 * So, we define the new generic MDIO_EEE_100TX and MDIO_EEE_1000T macros
255 * using the previous ones (that can be considered obsolete).
256 */
257#define MDIO_EEE_100TX MDIO_AN_EEE_ADV_100TX /* 100TX EEE cap */
258#define MDIO_EEE_1000T MDIO_AN_EEE_ADV_1000T /* 1000T EEE cap */
259#define MDIO_EEE_10GT 0x0008 /* 10GT EEE cap */
260#define MDIO_EEE_1000KX 0x0010 /* 1000KX EEE cap */
261#define MDIO_EEE_10GKX4 0x0020 /* 10G KX4 EEE cap */
262#define MDIO_EEE_10GKR 0x0040 /* 10G KR EEE cap */
243 263
244/* LASI RX_ALARM control/status registers. */ 264/* LASI RX_ALARM control/status registers. */
245#define MDIO_PMA_LASI_RX_PHYXSLFLT 0x0001 /* PHY XS RX local fault */ 265#define MDIO_PMA_LASI_RX_PHYXSLFLT 0x0001 /* PHY XS RX local fault */
diff --git a/include/linux/mii.h b/include/linux/mii.h
index 2783eca629a..8ef3a7a1159 100644
--- a/include/linux/mii.h
+++ b/include/linux/mii.h
@@ -21,6 +21,8 @@
21#define MII_EXPANSION 0x06 /* Expansion register */ 21#define MII_EXPANSION 0x06 /* Expansion register */
22#define MII_CTRL1000 0x09 /* 1000BASE-T control */ 22#define MII_CTRL1000 0x09 /* 1000BASE-T control */
23#define MII_STAT1000 0x0a /* 1000BASE-T status */ 23#define MII_STAT1000 0x0a /* 1000BASE-T status */
24#define MII_MMD_CTRL 0x0d /* MMD Access Control Register */
25#define MII_MMD_DATA 0x0e /* MMD Access Data Register */
24#define MII_ESTATUS 0x0f /* Extended Status */ 26#define MII_ESTATUS 0x0f /* Extended Status */
25#define MII_DCOUNTER 0x12 /* Disconnect counter */ 27#define MII_DCOUNTER 0x12 /* Disconnect counter */
26#define MII_FCSCOUNTER 0x13 /* False carrier counter */ 28#define MII_FCSCOUNTER 0x13 /* False carrier counter */
@@ -141,6 +143,13 @@
141#define FLOW_CTRL_TX 0x01 143#define FLOW_CTRL_TX 0x01
142#define FLOW_CTRL_RX 0x02 144#define FLOW_CTRL_RX 0x02
143 145
146/* MMD Access Control register fields */
147#define MII_MMD_CTRL_DEVAD_MASK 0x1f /* Mask MMD DEVAD*/
148#define MII_MMD_CTRL_ADDR 0x0000 /* Address */
149#define MII_MMD_CTRL_NOINCR 0x4000 /* no post increment */
150#define MII_MMD_CTRL_INCR_RDWT 0x8000 /* post increment on reads & writes */
151#define MII_MMD_CTRL_INCR_ON_WT 0xC000 /* post increment on writes only */
152
144/* This structure is used in all SIOCxMIIxxx ioctl calls */ 153/* This structure is used in all SIOCxMIIxxx ioctl calls */
145struct mii_ioctl_data { 154struct mii_ioctl_data {
146 __u16 phy_id; 155 __u16 phy_id;
diff --git a/include/linux/mlx4/cmd.h b/include/linux/mlx4/cmd.h
index 1f3860a8a10..26069518625 100644
--- a/include/linux/mlx4/cmd.h
+++ b/include/linux/mlx4/cmd.h
@@ -154,6 +154,10 @@ enum {
154 /* set port opcode modifiers */ 154 /* set port opcode modifiers */
155 MLX4_SET_PORT_PRIO2TC = 0x8, 155 MLX4_SET_PORT_PRIO2TC = 0x8,
156 MLX4_SET_PORT_SCHEDULER = 0x9, 156 MLX4_SET_PORT_SCHEDULER = 0x9,
157
158 /* register/delete flow steering network rules */
159 MLX4_QP_FLOW_STEERING_ATTACH = 0x65,
160 MLX4_QP_FLOW_STEERING_DETACH = 0x66,
157}; 161};
158 162
159enum { 163enum {
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 6a8f002b8ed..6f0d133cc7a 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -70,6 +70,36 @@ enum {
70 MLX4_MFUNC_EQE_MASK = (MLX4_MFUNC_MAX_EQES - 1) 70 MLX4_MFUNC_EQE_MASK = (MLX4_MFUNC_MAX_EQES - 1)
71}; 71};
72 72
 73/* Driver supports 3 different device methods to manage traffic steering:
74 * -device managed - High level API for ib and eth flow steering. FW is
75 * managing flow steering tables.
76 * - B0 steering mode - Common low level API for ib and (if supported) eth.
77 * - A0 steering mode - Limited low level API for eth. In case of IB,
78 * B0 mode is in use.
79 */
80enum {
81 MLX4_STEERING_MODE_A0,
82 MLX4_STEERING_MODE_B0,
83 MLX4_STEERING_MODE_DEVICE_MANAGED
84};
85
86static inline const char *mlx4_steering_mode_str(int steering_mode)
87{
88 switch (steering_mode) {
89 case MLX4_STEERING_MODE_A0:
90 return "A0 steering";
91
92 case MLX4_STEERING_MODE_B0:
93 return "B0 steering";
94
95 case MLX4_STEERING_MODE_DEVICE_MANAGED:
96 return "Device managed flow steering";
97
98 default:
99 return "Unrecognize steering mode";
100 }
101}
102
73enum { 103enum {
74 MLX4_DEV_CAP_FLAG_RC = 1LL << 0, 104 MLX4_DEV_CAP_FLAG_RC = 1LL << 0,
75 MLX4_DEV_CAP_FLAG_UC = 1LL << 1, 105 MLX4_DEV_CAP_FLAG_UC = 1LL << 1,
@@ -102,7 +132,8 @@ enum {
102enum { 132enum {
103 MLX4_DEV_CAP_FLAG2_RSS = 1LL << 0, 133 MLX4_DEV_CAP_FLAG2_RSS = 1LL << 0,
104 MLX4_DEV_CAP_FLAG2_RSS_TOP = 1LL << 1, 134 MLX4_DEV_CAP_FLAG2_RSS_TOP = 1LL << 1,
105 MLX4_DEV_CAP_FLAG2_RSS_XOR = 1LL << 2 135 MLX4_DEV_CAP_FLAG2_RSS_XOR = 1LL << 2,
136 MLX4_DEV_CAP_FLAG2_FS_EN = 1LL << 3
106}; 137};
107 138
108#define MLX4_ATTR_EXTENDED_PORT_INFO cpu_to_be16(0xff90) 139#define MLX4_ATTR_EXTENDED_PORT_INFO cpu_to_be16(0xff90)
@@ -295,6 +326,8 @@ struct mlx4_caps {
295 int num_amgms; 326 int num_amgms;
296 int reserved_mcgs; 327 int reserved_mcgs;
297 int num_qp_per_mgm; 328 int num_qp_per_mgm;
329 int steering_mode;
330 int fs_log_max_ucast_qp_range_size;
298 int num_pds; 331 int num_pds;
299 int reserved_pds; 332 int reserved_pds;
300 int max_xrcds; 333 int max_xrcds;
@@ -509,6 +542,8 @@ struct mlx4_dev {
509 u8 rev_id; 542 u8 rev_id;
510 char board_id[MLX4_BOARD_ID_LEN]; 543 char board_id[MLX4_BOARD_ID_LEN];
511 int num_vfs; 544 int num_vfs;
545 u64 regid_promisc_array[MLX4_MAX_PORTS + 1];
546 u64 regid_allmulti_array[MLX4_MAX_PORTS + 1];
512}; 547};
513 548
514struct mlx4_init_port_param { 549struct mlx4_init_port_param {
@@ -623,9 +658,99 @@ int mlx4_unicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
623int mlx4_unicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], 658int mlx4_unicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
624 enum mlx4_protocol prot); 659 enum mlx4_protocol prot);
625int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], 660int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
626 int block_mcast_loopback, enum mlx4_protocol protocol); 661 u8 port, int block_mcast_loopback,
662 enum mlx4_protocol protocol, u64 *reg_id);
627int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], 663int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
628 enum mlx4_protocol protocol); 664 enum mlx4_protocol protocol, u64 reg_id);
665
666enum {
667 MLX4_DOMAIN_UVERBS = 0x1000,
668 MLX4_DOMAIN_ETHTOOL = 0x2000,
669 MLX4_DOMAIN_RFS = 0x3000,
670 MLX4_DOMAIN_NIC = 0x5000,
671};
672
673enum mlx4_net_trans_rule_id {
674 MLX4_NET_TRANS_RULE_ID_ETH = 0,
675 MLX4_NET_TRANS_RULE_ID_IB,
676 MLX4_NET_TRANS_RULE_ID_IPV6,
677 MLX4_NET_TRANS_RULE_ID_IPV4,
678 MLX4_NET_TRANS_RULE_ID_TCP,
679 MLX4_NET_TRANS_RULE_ID_UDP,
680 MLX4_NET_TRANS_RULE_NUM, /* should be last */
681};
682
683enum mlx4_net_trans_promisc_mode {
684 MLX4_FS_PROMISC_NONE = 0,
685 MLX4_FS_PROMISC_UPLINK,
686 /* For future use. Not implemented yet */
687 MLX4_FS_PROMISC_FUNCTION_PORT,
688 MLX4_FS_PROMISC_ALL_MULTI,
689};
690
691struct mlx4_spec_eth {
692 u8 dst_mac[6];
693 u8 dst_mac_msk[6];
694 u8 src_mac[6];
695 u8 src_mac_msk[6];
696 u8 ether_type_enable;
697 __be16 ether_type;
698 __be16 vlan_id_msk;
699 __be16 vlan_id;
700};
701
702struct mlx4_spec_tcp_udp {
703 __be16 dst_port;
704 __be16 dst_port_msk;
705 __be16 src_port;
706 __be16 src_port_msk;
707};
708
709struct mlx4_spec_ipv4 {
710 __be32 dst_ip;
711 __be32 dst_ip_msk;
712 __be32 src_ip;
713 __be32 src_ip_msk;
714};
715
716struct mlx4_spec_ib {
717 __be32 r_qpn;
718 __be32 qpn_msk;
719 u8 dst_gid[16];
720 u8 dst_gid_msk[16];
721};
722
723struct mlx4_spec_list {
724 struct list_head list;
725 enum mlx4_net_trans_rule_id id;
726 union {
727 struct mlx4_spec_eth eth;
728 struct mlx4_spec_ib ib;
729 struct mlx4_spec_ipv4 ipv4;
730 struct mlx4_spec_tcp_udp tcp_udp;
731 };
732};
733
734enum mlx4_net_trans_hw_rule_queue {
735 MLX4_NET_TRANS_Q_FIFO,
736 MLX4_NET_TRANS_Q_LIFO,
737};
738
739struct mlx4_net_trans_rule {
740 struct list_head list;
741 enum mlx4_net_trans_hw_rule_queue queue_mode;
742 bool exclusive;
743 bool allow_loopback;
744 enum mlx4_net_trans_promisc_mode promisc_mode;
745 u8 port;
746 u16 priority;
747 u32 qpn;
748};
749
750int mlx4_flow_steer_promisc_add(struct mlx4_dev *dev, u8 port, u32 qpn,
751 enum mlx4_net_trans_promisc_mode mode);
752int mlx4_flow_steer_promisc_remove(struct mlx4_dev *dev, u8 port,
753 enum mlx4_net_trans_promisc_mode mode);
629int mlx4_multicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port); 754int mlx4_multicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port);
630int mlx4_multicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port); 755int mlx4_multicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port);
631int mlx4_unicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port); 756int mlx4_unicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port);
@@ -668,4 +793,8 @@ int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port);
668int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx); 793int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx);
669void mlx4_counter_free(struct mlx4_dev *dev, u32 idx); 794void mlx4_counter_free(struct mlx4_dev *dev, u32 idx);
670 795
796int mlx4_flow_attach(struct mlx4_dev *dev,
797 struct mlx4_net_trans_rule *rule, u64 *reg_id);
798int mlx4_flow_detach(struct mlx4_dev *dev, u64 reg_id);
799
671#endif /* MLX4_DEVICE_H */ 800#endif /* MLX4_DEVICE_H */
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index d94cb143151..ab0251d541a 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1046,10 +1046,9 @@ struct net_device {
1046 */ 1046 */
1047 char name[IFNAMSIZ]; 1047 char name[IFNAMSIZ];
1048 1048
1049 struct pm_qos_request pm_qos_req; 1049 /* device name hash chain, please keep it close to name[] */
1050
1051 /* device name hash chain */
1052 struct hlist_node name_hlist; 1050 struct hlist_node name_hlist;
1051
1053 /* snmp alias */ 1052 /* snmp alias */
1054 char *ifalias; 1053 char *ifalias;
1055 1054
@@ -1322,6 +1321,8 @@ struct net_device {
1322 1321
1323 /* group the device belongs to */ 1322 /* group the device belongs to */
1324 int group; 1323 int group;
1324
1325 struct pm_qos_request pm_qos_req;
1325}; 1326};
1326#define to_net_dev(d) container_of(d, struct net_device, dev) 1327#define to_net_dev(d) container_of(d, struct net_device, dev)
1327 1328
@@ -1626,6 +1627,7 @@ extern int dev_alloc_name(struct net_device *dev, const char *name);
1626extern int dev_open(struct net_device *dev); 1627extern int dev_open(struct net_device *dev);
1627extern int dev_close(struct net_device *dev); 1628extern int dev_close(struct net_device *dev);
1628extern void dev_disable_lro(struct net_device *dev); 1629extern void dev_disable_lro(struct net_device *dev);
1630extern int dev_loopback_xmit(struct sk_buff *newskb);
1629extern int dev_queue_xmit(struct sk_buff *skb); 1631extern int dev_queue_xmit(struct sk_buff *skb);
1630extern int register_netdevice(struct net_device *dev); 1632extern int register_netdevice(struct net_device *dev);
1631extern void unregister_netdevice_queue(struct net_device *dev, 1633extern void unregister_netdevice_queue(struct net_device *dev,
@@ -2117,6 +2119,9 @@ static inline int netif_copy_real_num_queues(struct net_device *to_dev,
2117#endif 2119#endif
2118} 2120}
2119 2121
2122#define DEFAULT_MAX_NUM_RSS_QUEUES (8)
2123extern int netif_get_num_default_rss_queues(void);
2124
2120/* Use this variant when it is known for sure that it 2125/* Use this variant when it is known for sure that it
2121 * is executing from hardware interrupt context or with hardware interrupts 2126 * is executing from hardware interrupt context or with hardware interrupts
2122 * disabled. 2127 * disabled.
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index ff9c84c29b2..c613cf0d788 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -94,6 +94,16 @@ static inline int nf_inet_addr_cmp(const union nf_inet_addr *a1,
94 a1->all[3] == a2->all[3]; 94 a1->all[3] == a2->all[3];
95} 95}
96 96
97static inline void nf_inet_addr_mask(const union nf_inet_addr *a1,
98 union nf_inet_addr *result,
99 const union nf_inet_addr *mask)
100{
101 result->all[0] = a1->all[0] & mask->all[0];
102 result->all[1] = a1->all[1] & mask->all[1];
103 result->all[2] = a1->all[2] & mask->all[2];
104 result->all[3] = a1->all[3] & mask->all[3];
105}
106
97extern void netfilter_init(void); 107extern void netfilter_init(void);
98 108
99/* Largest hook number + 1 */ 109/* Largest hook number + 1 */
@@ -383,6 +393,22 @@ nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family)
383extern void (*ip_ct_attach)(struct sk_buff *, struct sk_buff *) __rcu; 393extern void (*ip_ct_attach)(struct sk_buff *, struct sk_buff *) __rcu;
384extern void nf_ct_attach(struct sk_buff *, struct sk_buff *); 394extern void nf_ct_attach(struct sk_buff *, struct sk_buff *);
385extern void (*nf_ct_destroy)(struct nf_conntrack *) __rcu; 395extern void (*nf_ct_destroy)(struct nf_conntrack *) __rcu;
396
397struct nf_conn;
398struct nlattr;
399
400struct nfq_ct_hook {
401 size_t (*build_size)(const struct nf_conn *ct);
402 int (*build)(struct sk_buff *skb, struct nf_conn *ct);
403 int (*parse)(const struct nlattr *attr, struct nf_conn *ct);
404};
405extern struct nfq_ct_hook __rcu *nfq_ct_hook;
406
407struct nfq_ct_nat_hook {
408 void (*seq_adjust)(struct sk_buff *skb, struct nf_conn *ct,
409 u32 ctinfo, int off);
410};
411extern struct nfq_ct_nat_hook __rcu *nfq_ct_nat_hook;
386#else 412#else
387static inline void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb) {} 413static inline void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb) {}
388#endif 414#endif
diff --git a/include/linux/netfilter/Kbuild b/include/linux/netfilter/Kbuild
index 1697036336b..874ae8f2706 100644
--- a/include/linux/netfilter/Kbuild
+++ b/include/linux/netfilter/Kbuild
@@ -10,6 +10,7 @@ header-y += nfnetlink.h
10header-y += nfnetlink_acct.h 10header-y += nfnetlink_acct.h
11header-y += nfnetlink_compat.h 11header-y += nfnetlink_compat.h
12header-y += nfnetlink_conntrack.h 12header-y += nfnetlink_conntrack.h
13header-y += nfnetlink_cthelper.h
13header-y += nfnetlink_cttimeout.h 14header-y += nfnetlink_cttimeout.h
14header-y += nfnetlink_log.h 15header-y += nfnetlink_log.h
15header-y += nfnetlink_queue.h 16header-y += nfnetlink_queue.h
diff --git a/include/linux/netfilter/nf_conntrack_sip.h b/include/linux/netfilter/nf_conntrack_sip.h
index 0ce91d56a5f..0dfc8b7210a 100644
--- a/include/linux/netfilter/nf_conntrack_sip.h
+++ b/include/linux/netfilter/nf_conntrack_sip.h
@@ -2,6 +2,8 @@
2#define __NF_CONNTRACK_SIP_H__ 2#define __NF_CONNTRACK_SIP_H__
3#ifdef __KERNEL__ 3#ifdef __KERNEL__
4 4
5#include <net/netfilter/nf_conntrack_expect.h>
6
5#define SIP_PORT 5060 7#define SIP_PORT 5060
6#define SIP_TIMEOUT 3600 8#define SIP_TIMEOUT 3600
7 9
diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
index a1048c1587d..18341cdb244 100644
--- a/include/linux/netfilter/nfnetlink.h
+++ b/include/linux/netfilter/nfnetlink.h
@@ -50,7 +50,8 @@ struct nfgenmsg {
50#define NFNL_SUBSYS_IPSET 6 50#define NFNL_SUBSYS_IPSET 6
51#define NFNL_SUBSYS_ACCT 7 51#define NFNL_SUBSYS_ACCT 7
52#define NFNL_SUBSYS_CTNETLINK_TIMEOUT 8 52#define NFNL_SUBSYS_CTNETLINK_TIMEOUT 8
53#define NFNL_SUBSYS_COUNT 9 53#define NFNL_SUBSYS_CTHELPER 9
54#define NFNL_SUBSYS_COUNT 10
54 55
55#ifdef __KERNEL__ 56#ifdef __KERNEL__
56 57
diff --git a/include/linux/netfilter/nfnetlink_conntrack.h b/include/linux/netfilter/nfnetlink_conntrack.h
index e58e4b93c10..f649f7423ca 100644
--- a/include/linux/netfilter/nfnetlink_conntrack.h
+++ b/include/linux/netfilter/nfnetlink_conntrack.h
@@ -7,6 +7,8 @@ enum cntl_msg_types {
7 IPCTNL_MSG_CT_GET, 7 IPCTNL_MSG_CT_GET,
8 IPCTNL_MSG_CT_DELETE, 8 IPCTNL_MSG_CT_DELETE,
9 IPCTNL_MSG_CT_GET_CTRZERO, 9 IPCTNL_MSG_CT_GET_CTRZERO,
10 IPCTNL_MSG_CT_GET_STATS_CPU,
11 IPCTNL_MSG_CT_GET_STATS,
10 12
11 IPCTNL_MSG_MAX 13 IPCTNL_MSG_MAX
12}; 14};
@@ -15,6 +17,7 @@ enum ctnl_exp_msg_types {
15 IPCTNL_MSG_EXP_NEW, 17 IPCTNL_MSG_EXP_NEW,
16 IPCTNL_MSG_EXP_GET, 18 IPCTNL_MSG_EXP_GET,
17 IPCTNL_MSG_EXP_DELETE, 19 IPCTNL_MSG_EXP_DELETE,
20 IPCTNL_MSG_EXP_GET_STATS_CPU,
18 21
19 IPCTNL_MSG_EXP_MAX 22 IPCTNL_MSG_EXP_MAX
20}; 23};
@@ -191,6 +194,7 @@ enum ctattr_expect_nat {
191enum ctattr_help { 194enum ctattr_help {
192 CTA_HELP_UNSPEC, 195 CTA_HELP_UNSPEC,
193 CTA_HELP_NAME, 196 CTA_HELP_NAME,
197 CTA_HELP_INFO,
194 __CTA_HELP_MAX 198 __CTA_HELP_MAX
195}; 199};
196#define CTA_HELP_MAX (__CTA_HELP_MAX - 1) 200#define CTA_HELP_MAX (__CTA_HELP_MAX - 1)
@@ -202,4 +206,39 @@ enum ctattr_secctx {
202}; 206};
203#define CTA_SECCTX_MAX (__CTA_SECCTX_MAX - 1) 207#define CTA_SECCTX_MAX (__CTA_SECCTX_MAX - 1)
204 208
209enum ctattr_stats_cpu {
210 CTA_STATS_UNSPEC,
211 CTA_STATS_SEARCHED,
212 CTA_STATS_FOUND,
213 CTA_STATS_NEW,
214 CTA_STATS_INVALID,
215 CTA_STATS_IGNORE,
216 CTA_STATS_DELETE,
217 CTA_STATS_DELETE_LIST,
218 CTA_STATS_INSERT,
219 CTA_STATS_INSERT_FAILED,
220 CTA_STATS_DROP,
221 CTA_STATS_EARLY_DROP,
222 CTA_STATS_ERROR,
223 CTA_STATS_SEARCH_RESTART,
224 __CTA_STATS_MAX,
225};
226#define CTA_STATS_MAX (__CTA_STATS_MAX - 1)
227
228enum ctattr_stats_global {
229 CTA_STATS_GLOBAL_UNSPEC,
230 CTA_STATS_GLOBAL_ENTRIES,
231 __CTA_STATS_GLOBAL_MAX,
232};
233#define CTA_STATS_GLOBAL_MAX (__CTA_STATS_GLOBAL_MAX - 1)
234
235enum ctattr_expect_stats {
236 CTA_STATS_EXP_UNSPEC,
237 CTA_STATS_EXP_NEW,
238 CTA_STATS_EXP_CREATE,
239 CTA_STATS_EXP_DELETE,
240 __CTA_STATS_EXP_MAX,
241};
242#define CTA_STATS_EXP_MAX (__CTA_STATS_EXP_MAX - 1)
243
205#endif /* _IPCONNTRACK_NETLINK_H */ 244#endif /* _IPCONNTRACK_NETLINK_H */
diff --git a/include/linux/netfilter/nfnetlink_cthelper.h b/include/linux/netfilter/nfnetlink_cthelper.h
new file mode 100644
index 00000000000..33659f6fad3
--- /dev/null
+++ b/include/linux/netfilter/nfnetlink_cthelper.h
@@ -0,0 +1,55 @@
1#ifndef _NFNL_CTHELPER_H_
2#define _NFNL_CTHELPER_H_
3
4#define NFCT_HELPER_STATUS_DISABLED 0
5#define NFCT_HELPER_STATUS_ENABLED 1
6
7enum nfnl_acct_msg_types {
8 NFNL_MSG_CTHELPER_NEW,
9 NFNL_MSG_CTHELPER_GET,
10 NFNL_MSG_CTHELPER_DEL,
11 NFNL_MSG_CTHELPER_MAX
12};
13
14enum nfnl_cthelper_type {
15 NFCTH_UNSPEC,
16 NFCTH_NAME,
17 NFCTH_TUPLE,
18 NFCTH_QUEUE_NUM,
19 NFCTH_POLICY,
20 NFCTH_PRIV_DATA_LEN,
21 NFCTH_STATUS,
22 __NFCTH_MAX
23};
24#define NFCTH_MAX (__NFCTH_MAX - 1)
25
26enum nfnl_cthelper_policy_type {
27 NFCTH_POLICY_SET_UNSPEC,
28 NFCTH_POLICY_SET_NUM,
29 NFCTH_POLICY_SET,
30 NFCTH_POLICY_SET1 = NFCTH_POLICY_SET,
31 NFCTH_POLICY_SET2,
32 NFCTH_POLICY_SET3,
33 NFCTH_POLICY_SET4,
34 __NFCTH_POLICY_SET_MAX
35};
36#define NFCTH_POLICY_SET_MAX (__NFCTH_POLICY_SET_MAX - 1)
37
38enum nfnl_cthelper_pol_type {
39 NFCTH_POLICY_UNSPEC,
40 NFCTH_POLICY_NAME,
41 NFCTH_POLICY_EXPECT_MAX,
42 NFCTH_POLICY_EXPECT_TIMEOUT,
43 __NFCTH_POLICY_MAX
44};
45#define NFCTH_POLICY_MAX (__NFCTH_POLICY_MAX - 1)
46
47enum nfnl_cthelper_tuple_type {
48 NFCTH_TUPLE_UNSPEC,
49 NFCTH_TUPLE_L3PROTONUM,
50 NFCTH_TUPLE_L4PROTONUM,
51 __NFCTH_TUPLE_MAX,
52};
53#define NFCTH_TUPLE_MAX (__NFCTH_TUPLE_MAX - 1)
54
55#endif /* _NFNL_CTHELPER_H */
diff --git a/include/linux/netfilter/nfnetlink_queue.h b/include/linux/netfilter/nfnetlink_queue.h
index 24b32e6c009..3b1c1360aed 100644
--- a/include/linux/netfilter/nfnetlink_queue.h
+++ b/include/linux/netfilter/nfnetlink_queue.h
@@ -42,6 +42,8 @@ enum nfqnl_attr_type {
42 NFQA_IFINDEX_PHYSOUTDEV, /* __u32 ifindex */ 42 NFQA_IFINDEX_PHYSOUTDEV, /* __u32 ifindex */
43 NFQA_HWADDR, /* nfqnl_msg_packet_hw */ 43 NFQA_HWADDR, /* nfqnl_msg_packet_hw */
44 NFQA_PAYLOAD, /* opaque data payload */ 44 NFQA_PAYLOAD, /* opaque data payload */
45 NFQA_CT, /* nf_conntrack_netlink.h */
46 NFQA_CT_INFO, /* enum ip_conntrack_info */
45 47
46 __NFQA_MAX 48 __NFQA_MAX
47}; 49};
@@ -84,8 +86,15 @@ enum nfqnl_attr_config {
84 NFQA_CFG_CMD, /* nfqnl_msg_config_cmd */ 86 NFQA_CFG_CMD, /* nfqnl_msg_config_cmd */
85 NFQA_CFG_PARAMS, /* nfqnl_msg_config_params */ 87 NFQA_CFG_PARAMS, /* nfqnl_msg_config_params */
86 NFQA_CFG_QUEUE_MAXLEN, /* __u32 */ 88 NFQA_CFG_QUEUE_MAXLEN, /* __u32 */
89 NFQA_CFG_MASK, /* identify which flags to change */
90 NFQA_CFG_FLAGS, /* value of these flags (__u32) */
87 __NFQA_CFG_MAX 91 __NFQA_CFG_MAX
88}; 92};
89#define NFQA_CFG_MAX (__NFQA_CFG_MAX-1) 93#define NFQA_CFG_MAX (__NFQA_CFG_MAX-1)
90 94
95/* Flags for NFQA_CFG_FLAGS */
96#define NFQA_CFG_F_FAIL_OPEN (1 << 0)
97#define NFQA_CFG_F_CONNTRACK (1 << 1)
98#define NFQA_CFG_F_MAX (1 << 2)
99
91#endif /* _NFNETLINK_QUEUE_H */ 100#endif /* _NFNETLINK_QUEUE_H */
diff --git a/include/linux/netfilter/xt_connlimit.h b/include/linux/netfilter/xt_connlimit.h
index d1366f05d1b..f1656096121 100644
--- a/include/linux/netfilter/xt_connlimit.h
+++ b/include/linux/netfilter/xt_connlimit.h
@@ -22,13 +22,8 @@ struct xt_connlimit_info {
22#endif 22#endif
23 }; 23 };
24 unsigned int limit; 24 unsigned int limit;
25 union { 25 /* revision 1 */
26 /* revision 0 */ 26 __u32 flags;
27 unsigned int inverse;
28
29 /* revision 1 */
30 __u32 flags;
31 };
32 27
33 /* Used internally by the kernel */ 28 /* Used internally by the kernel */
34 struct xt_connlimit_data *data __attribute__((aligned(8))); 29 struct xt_connlimit_data *data __attribute__((aligned(8)));
diff --git a/include/linux/netfilter/xt_recent.h b/include/linux/netfilter/xt_recent.h
index 83318e01425..6ef36c113e8 100644
--- a/include/linux/netfilter/xt_recent.h
+++ b/include/linux/netfilter/xt_recent.h
@@ -32,4 +32,14 @@ struct xt_recent_mtinfo {
32 __u8 side; 32 __u8 side;
33}; 33};
34 34
35struct xt_recent_mtinfo_v1 {
36 __u32 seconds;
37 __u32 hit_count;
38 __u8 check_set;
39 __u8 invert;
40 char name[XT_RECENT_NAME_LEN];
41 __u8 side;
42 union nf_inet_addr mask;
43};
44
35#endif /* _LINUX_NETFILTER_XT_RECENT_H */ 45#endif /* _LINUX_NETFILTER_XT_RECENT_H */
diff --git a/include/linux/netfilter_ipv4.h b/include/linux/netfilter_ipv4.h
index fa0946c549d..e2b12801378 100644
--- a/include/linux/netfilter_ipv4.h
+++ b/include/linux/netfilter_ipv4.h
@@ -66,6 +66,7 @@ enum nf_ip_hook_priorities {
66 NF_IP_PRI_SECURITY = 50, 66 NF_IP_PRI_SECURITY = 50,
67 NF_IP_PRI_NAT_SRC = 100, 67 NF_IP_PRI_NAT_SRC = 100,
68 NF_IP_PRI_SELINUX_LAST = 225, 68 NF_IP_PRI_SELINUX_LAST = 225,
69 NF_IP_PRI_CONNTRACK_HELPER = 300,
69 NF_IP_PRI_CONNTRACK_CONFIRM = INT_MAX, 70 NF_IP_PRI_CONNTRACK_CONFIRM = INT_MAX,
70 NF_IP_PRI_LAST = INT_MAX, 71 NF_IP_PRI_LAST = INT_MAX,
71}; 72};
diff --git a/include/linux/netfilter_ipv4/Kbuild b/include/linux/netfilter_ipv4/Kbuild
index c61b8fb1a9e..8ba0c5b72ea 100644
--- a/include/linux/netfilter_ipv4/Kbuild
+++ b/include/linux/netfilter_ipv4/Kbuild
@@ -5,7 +5,6 @@ header-y += ipt_LOG.h
5header-y += ipt_REJECT.h 5header-y += ipt_REJECT.h
6header-y += ipt_TTL.h 6header-y += ipt_TTL.h
7header-y += ipt_ULOG.h 7header-y += ipt_ULOG.h
8header-y += ipt_addrtype.h
9header-y += ipt_ah.h 8header-y += ipt_ah.h
10header-y += ipt_ecn.h 9header-y += ipt_ecn.h
11header-y += ipt_ttl.h 10header-y += ipt_ttl.h
diff --git a/include/linux/netfilter_ipv4/ipt_addrtype.h b/include/linux/netfilter_ipv4/ipt_addrtype.h
deleted file mode 100644
index 0da42237c8d..00000000000
--- a/include/linux/netfilter_ipv4/ipt_addrtype.h
+++ /dev/null
@@ -1,27 +0,0 @@
1#ifndef _IPT_ADDRTYPE_H
2#define _IPT_ADDRTYPE_H
3
4#include <linux/types.h>
5
6enum {
7 IPT_ADDRTYPE_INVERT_SOURCE = 0x0001,
8 IPT_ADDRTYPE_INVERT_DEST = 0x0002,
9 IPT_ADDRTYPE_LIMIT_IFACE_IN = 0x0004,
10 IPT_ADDRTYPE_LIMIT_IFACE_OUT = 0x0008,
11};
12
13struct ipt_addrtype_info_v1 {
14 __u16 source; /* source-type mask */
15 __u16 dest; /* dest-type mask */
16 __u32 flags;
17};
18
19/* revision 0 */
20struct ipt_addrtype_info {
21 __u16 source; /* source-type mask */
22 __u16 dest; /* dest-type mask */
23 __u32 invert_source;
24 __u32 invert_dest;
25};
26
27#endif
diff --git a/include/linux/netfilter_ipv6.h b/include/linux/netfilter_ipv6.h
index 57c025127f1..7c8a513ce7a 100644
--- a/include/linux/netfilter_ipv6.h
+++ b/include/linux/netfilter_ipv6.h
@@ -71,6 +71,7 @@ enum nf_ip6_hook_priorities {
71 NF_IP6_PRI_SECURITY = 50, 71 NF_IP6_PRI_SECURITY = 50,
72 NF_IP6_PRI_NAT_SRC = 100, 72 NF_IP6_PRI_NAT_SRC = 100,
73 NF_IP6_PRI_SELINUX_LAST = 225, 73 NF_IP6_PRI_SELINUX_LAST = 225,
74 NF_IP6_PRI_CONNTRACK_HELPER = 300,
74 NF_IP6_PRI_LAST = INT_MAX, 75 NF_IP6_PRI_LAST = INT_MAX,
75}; 76};
76 77
diff --git a/include/linux/netlink.h b/include/linux/netlink.h
index 0f628ffa420..f74dd133788 100644
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -174,11 +174,17 @@ struct netlink_skb_parms {
174extern void netlink_table_grab(void); 174extern void netlink_table_grab(void);
175extern void netlink_table_ungrab(void); 175extern void netlink_table_ungrab(void);
176 176
177extern struct sock *netlink_kernel_create(struct net *net, 177/* optional Netlink kernel configuration parameters */
178 int unit,unsigned int groups, 178struct netlink_kernel_cfg {
179 void (*input)(struct sk_buff *skb), 179 unsigned int groups;
180 struct mutex *cb_mutex, 180 void (*input)(struct sk_buff *skb);
181 struct module *module); 181 struct mutex *cb_mutex;
182 void (*bind)(int group);
183};
184
185extern struct sock *netlink_kernel_create(struct net *net, int unit,
186 struct module *module,
187 struct netlink_kernel_cfg *cfg);
182extern void netlink_kernel_release(struct sock *sk); 188extern void netlink_kernel_release(struct sock *sk);
183extern int __netlink_change_ngroups(struct sock *sk, unsigned int groups); 189extern int __netlink_change_ngroups(struct sock *sk, unsigned int groups);
184extern int netlink_change_ngroups(struct sock *sk, unsigned int groups); 190extern int netlink_change_ngroups(struct sock *sk, unsigned int groups);
@@ -241,14 +247,6 @@ struct netlink_notify {
241struct nlmsghdr * 247struct nlmsghdr *
242__nlmsg_put(struct sk_buff *skb, u32 pid, u32 seq, int type, int len, int flags); 248__nlmsg_put(struct sk_buff *skb, u32 pid, u32 seq, int type, int len, int flags);
243 249
244#define NLMSG_NEW(skb, pid, seq, type, len, flags) \
245({ if (unlikely(skb_tailroom(skb) < (int)NLMSG_SPACE(len))) \
246 goto nlmsg_failure; \
247 __nlmsg_put(skb, pid, seq, type, len, flags); })
248
249#define NLMSG_PUT(skb, pid, seq, type, len) \
250 NLMSG_NEW(skb, pid, seq, type, len, 0)
251
252struct netlink_dump_control { 250struct netlink_dump_control {
253 int (*dump)(struct sk_buff *skb, struct netlink_callback *); 251 int (*dump)(struct sk_buff *skb, struct netlink_callback *);
254 int (*done)(struct netlink_callback*); 252 int (*done)(struct netlink_callback*);
diff --git a/include/linux/nfc.h b/include/linux/nfc.h
index 0ae9b5857c8..f4e6dd915b1 100644
--- a/include/linux/nfc.h
+++ b/include/linux/nfc.h
@@ -56,6 +56,10 @@
56 * %NFC_ATTR_PROTOCOLS) 56 * %NFC_ATTR_PROTOCOLS)
57 * @NFC_EVENT_DEVICE_REMOVED: event emitted when a device is removed 57 * @NFC_EVENT_DEVICE_REMOVED: event emitted when a device is removed
58 * (it sends %NFC_ATTR_DEVICE_INDEX) 58 * (it sends %NFC_ATTR_DEVICE_INDEX)
59 * @NFC_EVENT_TM_ACTIVATED: event emitted when the adapter is activated in
60 * target mode.
61 * @NFC_EVENT_DEVICE_DEACTIVATED: event emitted when the adapter is deactivated
62 * from target mode.
59 */ 63 */
60enum nfc_commands { 64enum nfc_commands {
61 NFC_CMD_UNSPEC, 65 NFC_CMD_UNSPEC,
@@ -71,6 +75,8 @@ enum nfc_commands {
71 NFC_EVENT_DEVICE_ADDED, 75 NFC_EVENT_DEVICE_ADDED,
72 NFC_EVENT_DEVICE_REMOVED, 76 NFC_EVENT_DEVICE_REMOVED,
73 NFC_EVENT_TARGET_LOST, 77 NFC_EVENT_TARGET_LOST,
78 NFC_EVENT_TM_ACTIVATED,
79 NFC_EVENT_TM_DEACTIVATED,
74/* private: internal use only */ 80/* private: internal use only */
75 __NFC_CMD_AFTER_LAST 81 __NFC_CMD_AFTER_LAST
76}; 82};
@@ -94,6 +100,8 @@ enum nfc_commands {
94 * @NFC_ATTR_TARGET_SENSF_RES: NFC-F targets extra information, max 18 bytes 100 * @NFC_ATTR_TARGET_SENSF_RES: NFC-F targets extra information, max 18 bytes
95 * @NFC_ATTR_COMM_MODE: Passive or active mode 101 * @NFC_ATTR_COMM_MODE: Passive or active mode
96 * @NFC_ATTR_RF_MODE: Initiator or target 102 * @NFC_ATTR_RF_MODE: Initiator or target
103 * @NFC_ATTR_IM_PROTOCOLS: Initiator mode protocols to poll for
104 * @NFC_ATTR_TM_PROTOCOLS: Target mode protocols to listen for
97 */ 105 */
98enum nfc_attrs { 106enum nfc_attrs {
99 NFC_ATTR_UNSPEC, 107 NFC_ATTR_UNSPEC,
@@ -109,6 +117,8 @@ enum nfc_attrs {
109 NFC_ATTR_COMM_MODE, 117 NFC_ATTR_COMM_MODE,
110 NFC_ATTR_RF_MODE, 118 NFC_ATTR_RF_MODE,
111 NFC_ATTR_DEVICE_POWERED, 119 NFC_ATTR_DEVICE_POWERED,
120 NFC_ATTR_IM_PROTOCOLS,
121 NFC_ATTR_TM_PROTOCOLS,
112/* private: internal use only */ 122/* private: internal use only */
113 __NFC_ATTR_AFTER_LAST 123 __NFC_ATTR_AFTER_LAST
114}; 124};
@@ -118,6 +128,7 @@ enum nfc_attrs {
118#define NFC_NFCID1_MAXSIZE 10 128#define NFC_NFCID1_MAXSIZE 10
119#define NFC_SENSB_RES_MAXSIZE 12 129#define NFC_SENSB_RES_MAXSIZE 12
120#define NFC_SENSF_RES_MAXSIZE 18 130#define NFC_SENSF_RES_MAXSIZE 18
131#define NFC_GB_MAXSIZE 48
121 132
122/* NFC protocols */ 133/* NFC protocols */
123#define NFC_PROTO_JEWEL 1 134#define NFC_PROTO_JEWEL 1
@@ -135,6 +146,7 @@ enum nfc_attrs {
135/* NFC RF modes */ 146/* NFC RF modes */
136#define NFC_RF_INITIATOR 0 147#define NFC_RF_INITIATOR 0
137#define NFC_RF_TARGET 1 148#define NFC_RF_TARGET 1
149#define NFC_RF_NONE 2
138 150
139/* NFC protocols masks used in bitsets */ 151/* NFC protocols masks used in bitsets */
140#define NFC_PROTO_JEWEL_MASK (1 << NFC_PROTO_JEWEL) 152#define NFC_PROTO_JEWEL_MASK (1 << NFC_PROTO_JEWEL)
diff --git a/include/linux/nl80211.h b/include/linux/nl80211.h
index a6959f72745..c0fc5d27733 100644
--- a/include/linux/nl80211.h
+++ b/include/linux/nl80211.h
@@ -170,6 +170,8 @@
170 * %NL80211_ATTR_CIPHER_GROUP, %NL80211_ATTR_WPA_VERSIONS, 170 * %NL80211_ATTR_CIPHER_GROUP, %NL80211_ATTR_WPA_VERSIONS,
171 * %NL80211_ATTR_AKM_SUITES, %NL80211_ATTR_PRIVACY, 171 * %NL80211_ATTR_AKM_SUITES, %NL80211_ATTR_PRIVACY,
172 * %NL80211_ATTR_AUTH_TYPE and %NL80211_ATTR_INACTIVITY_TIMEOUT. 172 * %NL80211_ATTR_AUTH_TYPE and %NL80211_ATTR_INACTIVITY_TIMEOUT.
173 * The channel to use can be set on the interface or be given using the
174 * %NL80211_ATTR_WIPHY_FREQ and %NL80211_ATTR_WIPHY_CHANNEL_TYPE attrs.
173 * @NL80211_CMD_NEW_BEACON: old alias for %NL80211_CMD_START_AP 175 * @NL80211_CMD_NEW_BEACON: old alias for %NL80211_CMD_START_AP
174 * @NL80211_CMD_STOP_AP: Stop AP operation on the given interface 176 * @NL80211_CMD_STOP_AP: Stop AP operation on the given interface
175 * @NL80211_CMD_DEL_BEACON: old alias for %NL80211_CMD_STOP_AP 177 * @NL80211_CMD_DEL_BEACON: old alias for %NL80211_CMD_STOP_AP
@@ -275,6 +277,12 @@
275 * @NL80211_CMD_NEW_SURVEY_RESULTS: survey data notification (as a reply to 277 * @NL80211_CMD_NEW_SURVEY_RESULTS: survey data notification (as a reply to
276 * NL80211_CMD_GET_SURVEY and on the "scan" multicast group) 278 * NL80211_CMD_GET_SURVEY and on the "scan" multicast group)
277 * 279 *
280 * @NL80211_CMD_SET_PMKSA: Add a PMKSA cache entry, using %NL80211_ATTR_MAC
281 * (for the BSSID) and %NL80211_ATTR_PMKID.
282 * @NL80211_CMD_DEL_PMKSA: Delete a PMKSA cache entry, using %NL80211_ATTR_MAC
283 * (for the BSSID) and %NL80211_ATTR_PMKID.
284 * @NL80211_CMD_FLUSH_PMKSA: Flush all PMKSA cache entries.
285 *
278 * @NL80211_CMD_REG_CHANGE: indicates to userspace the regulatory domain 286 * @NL80211_CMD_REG_CHANGE: indicates to userspace the regulatory domain
279 * has been changed and provides details of the request information 287 * has been changed and provides details of the request information
280 * that caused the change such as who initiated the regulatory request 288 * that caused the change such as who initiated the regulatory request
@@ -454,6 +462,10 @@
454 * the frame. 462 * the frame.
455 * @NL80211_CMD_ACTION_TX_STATUS: Alias for @NL80211_CMD_FRAME_TX_STATUS for 463 * @NL80211_CMD_ACTION_TX_STATUS: Alias for @NL80211_CMD_FRAME_TX_STATUS for
456 * backward compatibility. 464 * backward compatibility.
465 *
466 * @NL80211_CMD_SET_POWER_SAVE: Set powersave, using %NL80211_ATTR_PS_STATE
467 * @NL80211_CMD_GET_POWER_SAVE: Get powersave status in %NL80211_ATTR_PS_STATE
468 *
457 * @NL80211_CMD_SET_CQM: Connection quality monitor configuration. This command 469 * @NL80211_CMD_SET_CQM: Connection quality monitor configuration. This command
458 * is used to configure connection quality monitoring notification trigger 470 * is used to configure connection quality monitoring notification trigger
459 * levels. 471 * levels.
@@ -769,6 +781,13 @@ enum nl80211_commands {
769 * section 7.3.2.25.1, e.g. 0x000FAC04) 781 * section 7.3.2.25.1, e.g. 0x000FAC04)
770 * @NL80211_ATTR_KEY_SEQ: transmit key sequence number (IV/PN) for TKIP and 782 * @NL80211_ATTR_KEY_SEQ: transmit key sequence number (IV/PN) for TKIP and
771 * CCMP keys, each six bytes in little endian 783 * CCMP keys, each six bytes in little endian
784 * @NL80211_ATTR_KEY_DEFAULT: Flag attribute indicating the key is default key
785 * @NL80211_ATTR_KEY_DEFAULT_MGMT: Flag attribute indicating the key is the
786 * default management key
787 * @NL80211_ATTR_CIPHER_SUITES_PAIRWISE: For crypto settings for connect or
788 * other commands, indicates which pairwise cipher suites are used
789 * @NL80211_ATTR_CIPHER_SUITE_GROUP: For crypto settings for connect or
790 * other commands, indicates which group cipher suite is used
772 * 791 *
773 * @NL80211_ATTR_BEACON_INTERVAL: beacon interval in TU 792 * @NL80211_ATTR_BEACON_INTERVAL: beacon interval in TU
774 * @NL80211_ATTR_DTIM_PERIOD: DTIM period for beaconing 793 * @NL80211_ATTR_DTIM_PERIOD: DTIM period for beaconing
@@ -1004,6 +1023,8 @@ enum nl80211_commands {
1004 * @NL80211_ATTR_ACK: Flag attribute indicating that the frame was 1023 * @NL80211_ATTR_ACK: Flag attribute indicating that the frame was
1005 * acknowledged by the recipient. 1024 * acknowledged by the recipient.
1006 * 1025 *
1026 * @NL80211_ATTR_PS_STATE: powersave state, using &enum nl80211_ps_state values.
1027 *
1007 * @NL80211_ATTR_CQM: connection quality monitor configuration in a 1028 * @NL80211_ATTR_CQM: connection quality monitor configuration in a
1008 * nested attribute with %NL80211_ATTR_CQM_* sub-attributes. 1029 * nested attribute with %NL80211_ATTR_CQM_* sub-attributes.
1009 * 1030 *
@@ -1061,7 +1082,7 @@ enum nl80211_commands {
1061 * flag isn't set, the frame will be rejected. This is also used as an 1082 * flag isn't set, the frame will be rejected. This is also used as an
1062 * nl80211 capability flag. 1083 * nl80211 capability flag.
1063 * 1084 *
1064 * @NL80211_ATTR_BSS_HTOPMODE: HT operation mode (u16) 1085 * @NL80211_ATTR_BSS_HT_OPMODE: HT operation mode (u16)
1065 * 1086 *
1066 * @NL80211_ATTR_KEY_DEFAULT_TYPES: A nested attribute containing flags 1087 * @NL80211_ATTR_KEY_DEFAULT_TYPES: A nested attribute containing flags
1067 * attributes, specifying what a key should be set as default as. 1088 * attributes, specifying what a key should be set as default as.
@@ -1085,10 +1106,10 @@ enum nl80211_commands {
1085 * indicate which WoW triggers should be enabled. This is also 1106 * indicate which WoW triggers should be enabled. This is also
1086 * used by %NL80211_CMD_GET_WOWLAN to get the currently enabled WoWLAN 1107 * used by %NL80211_CMD_GET_WOWLAN to get the currently enabled WoWLAN
1087 * triggers. 1108 * triggers.
1088 1109 *
1089 * @NL80211_ATTR_SCHED_SCAN_INTERVAL: Interval between scheduled scan 1110 * @NL80211_ATTR_SCHED_SCAN_INTERVAL: Interval between scheduled scan
1090 * cycles, in msecs. 1111 * cycles, in msecs.
1091 1112 *
1092 * @NL80211_ATTR_SCHED_SCAN_MATCH: Nested attribute with one or more 1113 * @NL80211_ATTR_SCHED_SCAN_MATCH: Nested attribute with one or more
1093 * sets of attributes to match during scheduled scans. Only BSSs 1114 * sets of attributes to match during scheduled scans. Only BSSs
1094 * that match any of the sets will be reported. These are 1115 * that match any of the sets will be reported. These are
@@ -1115,7 +1136,7 @@ enum nl80211_commands {
1115 * are managed in software: interfaces of these types aren't subject to 1136 * are managed in software: interfaces of these types aren't subject to
1116 * any restrictions in their number or combinations. 1137 * any restrictions in their number or combinations.
1117 * 1138 *
1118 * @%NL80211_ATTR_REKEY_DATA: nested attribute containing the information 1139 * @NL80211_ATTR_REKEY_DATA: nested attribute containing the information
1119 * necessary for GTK rekeying in the device, see &enum nl80211_rekey_data. 1140 * necessary for GTK rekeying in the device, see &enum nl80211_rekey_data.
1120 * 1141 *
1121 * @NL80211_ATTR_SCAN_SUPP_RATES: rates per to be advertised as supported in scan, 1142 * @NL80211_ATTR_SCAN_SUPP_RATES: rates per to be advertised as supported in scan,
@@ -1182,7 +1203,6 @@ enum nl80211_commands {
1182 * @NL80211_ATTR_FEATURE_FLAGS: This u32 attribute contains flags from 1203 * @NL80211_ATTR_FEATURE_FLAGS: This u32 attribute contains flags from
1183 * &enum nl80211_feature_flags and is advertised in wiphy information. 1204 * &enum nl80211_feature_flags and is advertised in wiphy information.
1184 * @NL80211_ATTR_PROBE_RESP_OFFLOAD: Indicates that the HW responds to probe 1205 * @NL80211_ATTR_PROBE_RESP_OFFLOAD: Indicates that the HW responds to probe
1185 *
1186 * requests while operating in AP-mode. 1206 * requests while operating in AP-mode.
1187 * This attribute holds a bitmap of the supported protocols for 1207 * This attribute holds a bitmap of the supported protocols for
1188 * offloading (see &enum nl80211_probe_resp_offload_support_attr). 1208 * offloading (see &enum nl80211_probe_resp_offload_support_attr).
@@ -1520,6 +1540,11 @@ enum nl80211_attrs {
1520#define NL80211_MAX_NR_CIPHER_SUITES 5 1540#define NL80211_MAX_NR_CIPHER_SUITES 5
1521#define NL80211_MAX_NR_AKM_SUITES 2 1541#define NL80211_MAX_NR_AKM_SUITES 2
1522 1542
1543#define NL80211_MIN_REMAIN_ON_CHANNEL_TIME 10
1544
1545/* default RSSI threshold for scan results if none specified. */
1546#define NL80211_SCAN_RSSI_THOLD_OFF -300
1547
1523/** 1548/**
1524 * enum nl80211_iftype - (virtual) interface types 1549 * enum nl80211_iftype - (virtual) interface types
1525 * 1550 *
@@ -1952,6 +1977,8 @@ enum nl80211_reg_rule_attr {
1952 * @__NL80211_SCHED_SCAN_MATCH_ATTR_INVALID: attribute number 0 is reserved 1977 * @__NL80211_SCHED_SCAN_MATCH_ATTR_INVALID: attribute number 0 is reserved
1953 * @NL80211_SCHED_SCAN_MATCH_ATTR_SSID: SSID to be used for matching, 1978 * @NL80211_SCHED_SCAN_MATCH_ATTR_SSID: SSID to be used for matching,
1954 * only report BSS with matching SSID. 1979 * only report BSS with matching SSID.
1980 * @NL80211_SCHED_SCAN_MATCH_ATTR_RSSI: RSSI threshold (in dBm) for reporting a
1981 * BSS in scan results. Filtering is turned off if not specified.
1955 * @NL80211_SCHED_SCAN_MATCH_ATTR_MAX: highest scheduled scan filter 1982 * @NL80211_SCHED_SCAN_MATCH_ATTR_MAX: highest scheduled scan filter
1956 * attribute number currently defined 1983 * attribute number currently defined
1957 * @__NL80211_SCHED_SCAN_MATCH_ATTR_AFTER_LAST: internal use 1984 * @__NL80211_SCHED_SCAN_MATCH_ATTR_AFTER_LAST: internal use
@@ -1959,7 +1986,8 @@ enum nl80211_reg_rule_attr {
1959enum nl80211_sched_scan_match_attr { 1986enum nl80211_sched_scan_match_attr {
1960 __NL80211_SCHED_SCAN_MATCH_ATTR_INVALID, 1987 __NL80211_SCHED_SCAN_MATCH_ATTR_INVALID,
1961 1988
1962 NL80211_ATTR_SCHED_SCAN_MATCH_SSID, 1989 NL80211_SCHED_SCAN_MATCH_ATTR_SSID,
1990 NL80211_SCHED_SCAN_MATCH_ATTR_RSSI,
1963 1991
1964 /* keep last */ 1992 /* keep last */
1965 __NL80211_SCHED_SCAN_MATCH_ATTR_AFTER_LAST, 1993 __NL80211_SCHED_SCAN_MATCH_ATTR_AFTER_LAST,
@@ -1967,6 +1995,9 @@ enum nl80211_sched_scan_match_attr {
1967 __NL80211_SCHED_SCAN_MATCH_ATTR_AFTER_LAST - 1 1995 __NL80211_SCHED_SCAN_MATCH_ATTR_AFTER_LAST - 1
1968}; 1996};
1969 1997
1998/* only for backward compatibility */
1999#define NL80211_ATTR_SCHED_SCAN_MATCH_SSID NL80211_SCHED_SCAN_MATCH_ATTR_SSID
2000
1970/** 2001/**
1971 * enum nl80211_reg_rule_flags - regulatory rule flags 2002 * enum nl80211_reg_rule_flags - regulatory rule flags
1972 * 2003 *
@@ -2086,78 +2117,91 @@ enum nl80211_mntr_flags {
2086 * @__NL80211_MESHCONF_INVALID: internal use 2117 * @__NL80211_MESHCONF_INVALID: internal use
2087 * 2118 *
2088 * @NL80211_MESHCONF_RETRY_TIMEOUT: specifies the initial retry timeout in 2119 * @NL80211_MESHCONF_RETRY_TIMEOUT: specifies the initial retry timeout in
2089 * millisecond units, used by the Peer Link Open message 2120 * millisecond units, used by the Peer Link Open message
2090 * 2121 *
2091 * @NL80211_MESHCONF_CONFIRM_TIMEOUT: specifies the initial confirm timeout, in 2122 * @NL80211_MESHCONF_CONFIRM_TIMEOUT: specifies the initial confirm timeout, in
2092 * millisecond units, used by the peer link management to close a peer link 2123 * millisecond units, used by the peer link management to close a peer link
2093 * 2124 *
2094 * @NL80211_MESHCONF_HOLDING_TIMEOUT: specifies the holding timeout, in 2125 * @NL80211_MESHCONF_HOLDING_TIMEOUT: specifies the holding timeout, in
2095 * millisecond units 2126 * millisecond units
2096 * 2127 *
2097 * @NL80211_MESHCONF_MAX_PEER_LINKS: maximum number of peer links allowed 2128 * @NL80211_MESHCONF_MAX_PEER_LINKS: maximum number of peer links allowed
2098 * on this mesh interface 2129 * on this mesh interface
2099 * 2130 *
2100 * @NL80211_MESHCONF_MAX_RETRIES: specifies the maximum number of peer link 2131 * @NL80211_MESHCONF_MAX_RETRIES: specifies the maximum number of peer link
2101 * open retries that can be sent to establish a new peer link instance in a 2132 * open retries that can be sent to establish a new peer link instance in a
2102 * mesh 2133 * mesh
2103 * 2134 *
2104 * @NL80211_MESHCONF_TTL: specifies the value of TTL field set at a source mesh 2135 * @NL80211_MESHCONF_TTL: specifies the value of TTL field set at a source mesh
2105 * point. 2136 * point.
2106 * 2137 *
2107 * @NL80211_MESHCONF_AUTO_OPEN_PLINKS: whether we should automatically 2138 * @NL80211_MESHCONF_AUTO_OPEN_PLINKS: whether we should automatically
2108 * open peer links when we detect compatible mesh peers. 2139 * open peer links when we detect compatible mesh peers.
2109 * 2140 *
2110 * @NL80211_MESHCONF_HWMP_MAX_PREQ_RETRIES: the number of action frames 2141 * @NL80211_MESHCONF_HWMP_MAX_PREQ_RETRIES: the number of action frames
2111 * containing a PREQ that an MP can send to a particular destination (path 2142 * containing a PREQ that an MP can send to a particular destination (path
2112 * target) 2143 * target)
2113 * 2144 *
2114 * @NL80211_MESHCONF_PATH_REFRESH_TIME: how frequently to refresh mesh paths 2145 * @NL80211_MESHCONF_PATH_REFRESH_TIME: how frequently to refresh mesh paths
2115 * (in milliseconds) 2146 * (in milliseconds)
2116 * 2147 *
2117 * @NL80211_MESHCONF_MIN_DISCOVERY_TIMEOUT: minimum length of time to wait 2148 * @NL80211_MESHCONF_MIN_DISCOVERY_TIMEOUT: minimum length of time to wait
2118 * until giving up on a path discovery (in milliseconds) 2149 * until giving up on a path discovery (in milliseconds)
2119 * 2150 *
2120 * @NL80211_MESHCONF_HWMP_ACTIVE_PATH_TIMEOUT: The time (in TUs) for which mesh 2151 * @NL80211_MESHCONF_HWMP_ACTIVE_PATH_TIMEOUT: The time (in TUs) for which mesh
2121 * points receiving a PREQ shall consider the forwarding information from the 2152 * points receiving a PREQ shall consider the forwarding information from
2122 * root to be valid. (TU = time unit) 2153 * the root to be valid. (TU = time unit)
2123 * 2154 *
2124 * @NL80211_MESHCONF_HWMP_PREQ_MIN_INTERVAL: The minimum interval of time (in 2155 * @NL80211_MESHCONF_HWMP_PREQ_MIN_INTERVAL: The minimum interval of time (in
2125 * TUs) during which an MP can send only one action frame containing a PREQ 2156 * TUs) during which an MP can send only one action frame containing a PREQ
2126 * reference element 2157 * reference element
2127 * 2158 *
2128 * @NL80211_MESHCONF_HWMP_NET_DIAM_TRVS_TIME: The interval of time (in TUs) 2159 * @NL80211_MESHCONF_HWMP_NET_DIAM_TRVS_TIME: The interval of time (in TUs)
2129 * that it takes for an HWMP information element to propagate across the mesh 2160 * that it takes for an HWMP information element to propagate across the
2161 * mesh
2130 * 2162 *
2131 * @NL80211_MESHCONF_HWMP_ROOTMODE: whether root mode is enabled or not 2163 * @NL80211_MESHCONF_HWMP_ROOTMODE: whether root mode is enabled or not
2132 * 2164 *
2133 * @NL80211_MESHCONF_ELEMENT_TTL: specifies the value of TTL field set at a 2165 * @NL80211_MESHCONF_ELEMENT_TTL: specifies the value of TTL field set at a
2134 * source mesh point for path selection elements. 2166 * source mesh point for path selection elements.
2135 * 2167 *
2136 * @NL80211_MESHCONF_HWMP_RANN_INTERVAL: The interval of time (in TUs) between 2168 * @NL80211_MESHCONF_HWMP_RANN_INTERVAL: The interval of time (in TUs) between
2137 * root announcements are transmitted. 2169 * root announcements are transmitted.
2138 * 2170 *
2139 * @NL80211_MESHCONF_GATE_ANNOUNCEMENTS: Advertise that this mesh station has 2171 * @NL80211_MESHCONF_GATE_ANNOUNCEMENTS: Advertise that this mesh station has
2140 * access to a broader network beyond the MBSS. This is done via Root 2172 * access to a broader network beyond the MBSS. This is done via Root
2141 * Announcement frames. 2173 * Announcement frames.
2142 * 2174 *
2143 * @NL80211_MESHCONF_HWMP_PERR_MIN_INTERVAL: The minimum interval of time (in 2175 * @NL80211_MESHCONF_HWMP_PERR_MIN_INTERVAL: The minimum interval of time (in
2144 * TUs) during which a mesh STA can send only one Action frame containing a 2176 * TUs) during which a mesh STA can send only one Action frame containing a
2145 * PERR element. 2177 * PERR element.
2146 * 2178 *
2147 * @NL80211_MESHCONF_FORWARDING: set Mesh STA as forwarding or non-forwarding 2179 * @NL80211_MESHCONF_FORWARDING: set Mesh STA as forwarding or non-forwarding
2148 * or forwarding entity (default is TRUE - forwarding entity) 2180 * or forwarding entity (default is TRUE - forwarding entity)
2149 * 2181 *
2150 * @NL80211_MESHCONF_RSSI_THRESHOLD: RSSI threshold in dBm. This specifies the 2182 * @NL80211_MESHCONF_RSSI_THRESHOLD: RSSI threshold in dBm. This specifies the
2151 * threshold for average signal strength of candidate station to establish 2183 * threshold for average signal strength of candidate station to establish
2152 * a peer link. 2184 * a peer link.
2153 *
2154 * @NL80211_MESHCONF_ATTR_MAX: highest possible mesh configuration attribute
2155 * 2185 *
2156 * @NL80211_MESHCONF_SYNC_OFFSET_MAX_NEIGHBOR: maximum number of neighbors 2186 * @NL80211_MESHCONF_SYNC_OFFSET_MAX_NEIGHBOR: maximum number of neighbors
2157 * to synchronize to for 11s default synchronization method (see 11C.12.2.2) 2187 * to synchronize to for 11s default synchronization method
2188 * (see 11C.12.2.2)
2158 * 2189 *
2159 * @NL80211_MESHCONF_HT_OPMODE: set mesh HT protection mode. 2190 * @NL80211_MESHCONF_HT_OPMODE: set mesh HT protection mode.
2160 * 2191 *
2192 * @NL80211_MESHCONF_ATTR_MAX: highest possible mesh configuration attribute
2193 *
2194 * @NL80211_MESHCONF_HWMP_PATH_TO_ROOT_TIMEOUT: The time (in TUs) for
2195 * which mesh STAs receiving a proactive PREQ shall consider the forwarding
2196 * information to the root mesh STA to be valid.
2197 *
2198 * @NL80211_MESHCONF_HWMP_ROOT_INTERVAL: The interval of time (in TUs) between
2199 * proactive PREQs are transmitted.
2200 *
2201 * @NL80211_MESHCONF_HWMP_CONFIRMATION_INTERVAL: The minimum interval of time
2202 * (in TUs) during which a mesh STA can send only one Action frame
2203 * containing a PREQ element for root path confirmation.
2204 *
2161 * @__NL80211_MESHCONF_ATTR_AFTER_LAST: internal use 2205 * @__NL80211_MESHCONF_ATTR_AFTER_LAST: internal use
2162 */ 2206 */
2163enum nl80211_meshconf_params { 2207enum nl80211_meshconf_params {
@@ -2184,6 +2228,9 @@ enum nl80211_meshconf_params {
2184 NL80211_MESHCONF_RSSI_THRESHOLD, 2228 NL80211_MESHCONF_RSSI_THRESHOLD,
2185 NL80211_MESHCONF_SYNC_OFFSET_MAX_NEIGHBOR, 2229 NL80211_MESHCONF_SYNC_OFFSET_MAX_NEIGHBOR,
2186 NL80211_MESHCONF_HT_OPMODE, 2230 NL80211_MESHCONF_HT_OPMODE,
2231 NL80211_MESHCONF_HWMP_PATH_TO_ROOT_TIMEOUT,
2232 NL80211_MESHCONF_HWMP_ROOT_INTERVAL,
2233 NL80211_MESHCONF_HWMP_CONFIRMATION_INTERVAL,
2187 2234
2188 /* keep last */ 2235 /* keep last */
2189 __NL80211_MESHCONF_ATTR_AFTER_LAST, 2236 __NL80211_MESHCONF_ATTR_AFTER_LAST,
@@ -2199,34 +2246,36 @@ enum nl80211_meshconf_params {
2199 * @__NL80211_MESH_SETUP_INVALID: Internal use 2246 * @__NL80211_MESH_SETUP_INVALID: Internal use
2200 * 2247 *
2201 * @NL80211_MESH_SETUP_ENABLE_VENDOR_PATH_SEL: Enable this option to use a 2248 * @NL80211_MESH_SETUP_ENABLE_VENDOR_PATH_SEL: Enable this option to use a
2202 * vendor specific path selection algorithm or disable it to use the default 2249 * vendor specific path selection algorithm or disable it to use the
2203 * HWMP. 2250 * default HWMP.
2204 * 2251 *
2205 * @NL80211_MESH_SETUP_ENABLE_VENDOR_METRIC: Enable this option to use a 2252 * @NL80211_MESH_SETUP_ENABLE_VENDOR_METRIC: Enable this option to use a
2206 * vendor specific path metric or disable it to use the default Airtime 2253 * vendor specific path metric or disable it to use the default Airtime
2207 * metric. 2254 * metric.
2208 * 2255 *
2209 * @NL80211_MESH_SETUP_IE: Information elements for this mesh, for instance, a 2256 * @NL80211_MESH_SETUP_IE: Information elements for this mesh, for instance, a
2210 * robust security network ie, or a vendor specific information element that 2257 * robust security network ie, or a vendor specific information element
2211 * vendors will use to identify the path selection methods and metrics in use. 2258 * that vendors will use to identify the path selection methods and
2259 * metrics in use.
2212 * 2260 *
2213 * @NL80211_MESH_SETUP_USERSPACE_AUTH: Enable this option if an authentication 2261 * @NL80211_MESH_SETUP_USERSPACE_AUTH: Enable this option if an authentication
2214 * daemon will be authenticating mesh candidates. 2262 * daemon will be authenticating mesh candidates.
2215 * 2263 *
2216 * @NL80211_MESH_SETUP_USERSPACE_AMPE: Enable this option if an authentication 2264 * @NL80211_MESH_SETUP_USERSPACE_AMPE: Enable this option if an authentication
2217 * daemon will be securing peer link frames. AMPE is a secured version of Mesh 2265 * daemon will be securing peer link frames. AMPE is a secured version of
2218 * Peering Management (MPM) and is implemented with the assistance of a 2266 * Mesh Peering Management (MPM) and is implemented with the assistance of
2219 * userspace daemon. When this flag is set, the kernel will send peer 2267 * a userspace daemon. When this flag is set, the kernel will send peer
2220 * management frames to a userspace daemon that will implement AMPE 2268 * management frames to a userspace daemon that will implement AMPE
2221 * functionality (security capabilities selection, key confirmation, and key 2269 * functionality (security capabilities selection, key confirmation, and
2222 * management). When the flag is unset (default), the kernel can autonomously 2270 * key management). When the flag is unset (default), the kernel can
2223 * complete (unsecured) mesh peering without the need of a userspace daemon. 2271 * autonomously complete (unsecured) mesh peering without the need of a
2224 * 2272 * userspace daemon.
2225 * @NL80211_MESH_SETUP_ATTR_MAX: highest possible mesh setup attribute number
2226 * 2273 *
2227 * @NL80211_MESH_SETUP_ENABLE_VENDOR_SYNC: Enable this option to use a 2274 * @NL80211_MESH_SETUP_ENABLE_VENDOR_SYNC: Enable this option to use a
2228 * vendor specific synchronization method or disable it to use the default 2275 * vendor specific synchronization method or disable it to use the default
2229 * neighbor offset synchronization 2276 * neighbor offset synchronization
2277 *
2278 * @NL80211_MESH_SETUP_ATTR_MAX: highest possible mesh setup attribute number
2230 * 2279 *
2231 * @__NL80211_MESH_SETUP_ATTR_AFTER_LAST: Internal use 2280 * @__NL80211_MESH_SETUP_ATTR_AFTER_LAST: Internal use
2232 */ 2281 */
@@ -2496,6 +2545,11 @@ enum nl80211_band {
2496 NL80211_BAND_5GHZ, 2545 NL80211_BAND_5GHZ,
2497}; 2546};
2498 2547
2548/**
2549 * enum nl80211_ps_state - powersave state
2550 * @NL80211_PS_DISABLED: powersave is disabled
2551 * @NL80211_PS_ENABLED: powersave is enabled
2552 */
2499enum nl80211_ps_state { 2553enum nl80211_ps_state {
2500 NL80211_PS_DISABLED, 2554 NL80211_PS_DISABLED,
2501 NL80211_PS_ENABLED, 2555 NL80211_PS_ENABLED,
@@ -2534,10 +2588,14 @@ enum nl80211_attr_cqm {
2534 * configured threshold 2588 * configured threshold
2535 * @NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH: The RSSI is higher than the 2589 * @NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH: The RSSI is higher than the
2536 * configured threshold 2590 * configured threshold
2591 * @NL80211_CQM_RSSI_BEACON_LOSS_EVENT: The device experienced beacon loss.
2592 * (Note that deauth/disassoc will still follow if the AP is not
2593 * available. This event might get used as roaming event, etc.)
2537 */ 2594 */
2538enum nl80211_cqm_rssi_threshold_event { 2595enum nl80211_cqm_rssi_threshold_event {
2539 NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW, 2596 NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW,
2540 NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH, 2597 NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH,
2598 NL80211_CQM_RSSI_BEACON_LOSS_EVENT,
2541}; 2599};
2542 2600
2543 2601
diff --git a/include/linux/nl802154.h b/include/linux/nl802154.h
index 5a3db3aa5f1..fd4f2d1cdf6 100644
--- a/include/linux/nl802154.h
+++ b/include/linux/nl802154.h
@@ -130,18 +130,8 @@ enum {
130enum { 130enum {
131 __IEEE802154_DEV_INVALID = -1, 131 __IEEE802154_DEV_INVALID = -1,
132 132
133 /* TODO: 133 IEEE802154_DEV_WPAN,
134 * Nowadays three device types supported by this stack at linux-zigbee 134 IEEE802154_DEV_MONITOR,
135 * project: WPAN = 0, MONITOR = 1 and SMAC = 2.
136 *
137 * Since this stack implementation exists many years, it's definitely
138 * bad idea to change the assigned values due to they are already used
139 * by third-party userspace software like: iz-tools, wireshark...
140 *
141 * Currently only monitor device is added and initialized by '1' for
142 * compatibility.
143 */
144 IEEE802154_DEV_MONITOR = 1,
145 135
146 __IEEE802154_DEV_MAX, 136 __IEEE802154_DEV_MAX,
147}; 137};
diff --git a/include/linux/phy.h b/include/linux/phy.h
index c291cae8ce3..93b3cf77f56 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -243,6 +243,15 @@ enum phy_state {
243 PHY_RESUMING 243 PHY_RESUMING
244}; 244};
245 245
246/**
247 * struct phy_c45_device_ids - 802.3-c45 Device Identifiers
248 * @devices_in_package: Bit vector of devices present.
249 * @device_ids: The device identifer for each present device.
250 */
251struct phy_c45_device_ids {
252 u32 devices_in_package;
253 u32 device_ids[8];
254};
246 255
247/* phy_device: An instance of a PHY 256/* phy_device: An instance of a PHY
248 * 257 *
@@ -250,6 +259,8 @@ enum phy_state {
250 * bus: Pointer to the bus this PHY is on 259 * bus: Pointer to the bus this PHY is on
251 * dev: driver model device structure for this PHY 260 * dev: driver model device structure for this PHY
252 * phy_id: UID for this device found during discovery 261 * phy_id: UID for this device found during discovery
262 * c45_ids: 802.3-c45 Device Identifers if is_c45.
263 * is_c45: Set to true if this phy uses clause 45 addressing.
253 * state: state of the PHY for management purposes 264 * state: state of the PHY for management purposes
254 * dev_flags: Device-specific flags used by the PHY driver. 265 * dev_flags: Device-specific flags used by the PHY driver.
255 * addr: Bus address of PHY 266 * addr: Bus address of PHY
@@ -285,6 +296,9 @@ struct phy_device {
285 296
286 u32 phy_id; 297 u32 phy_id;
287 298
299 struct phy_c45_device_ids c45_ids;
300 bool is_c45;
301
288 enum phy_state state; 302 enum phy_state state;
289 303
290 u32 dev_flags; 304 u32 dev_flags;
@@ -412,6 +426,12 @@ struct phy_driver {
412 /* Clears up any memory if needed */ 426 /* Clears up any memory if needed */
413 void (*remove)(struct phy_device *phydev); 427 void (*remove)(struct phy_device *phydev);
414 428
429 /* Returns true if this is a suitable driver for the given
430 * phydev. If NULL, matching is based on phy_id and
431 * phy_id_mask.
432 */
433 int (*match_phy_device)(struct phy_device *phydev);
434
415 /* Handles ethtool queries for hardware time stamping. */ 435 /* Handles ethtool queries for hardware time stamping. */
416 int (*ts_info)(struct phy_device *phydev, struct ethtool_ts_info *ti); 436 int (*ts_info)(struct phy_device *phydev, struct ethtool_ts_info *ti);
417 437
@@ -480,7 +500,9 @@ static inline int phy_write(struct phy_device *phydev, u32 regnum, u16 val)
480 return mdiobus_write(phydev->bus, phydev->addr, regnum, val); 500 return mdiobus_write(phydev->bus, phydev->addr, regnum, val);
481} 501}
482 502
483struct phy_device* get_phy_device(struct mii_bus *bus, int addr); 503struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id,
504 bool is_c45, struct phy_c45_device_ids *c45_ids);
505struct phy_device *get_phy_device(struct mii_bus *bus, int addr, bool is_c45);
484int phy_device_register(struct phy_device *phy); 506int phy_device_register(struct phy_device *phy);
485int phy_init_hw(struct phy_device *phydev); 507int phy_init_hw(struct phy_device *phydev);
486struct phy_device * phy_attach(struct net_device *dev, 508struct phy_device * phy_attach(struct net_device *dev,
@@ -511,7 +533,9 @@ int genphy_read_status(struct phy_device *phydev);
511int genphy_suspend(struct phy_device *phydev); 533int genphy_suspend(struct phy_device *phydev);
512int genphy_resume(struct phy_device *phydev); 534int genphy_resume(struct phy_device *phydev);
513void phy_driver_unregister(struct phy_driver *drv); 535void phy_driver_unregister(struct phy_driver *drv);
536void phy_drivers_unregister(struct phy_driver *drv, int n);
514int phy_driver_register(struct phy_driver *new_driver); 537int phy_driver_register(struct phy_driver *new_driver);
538int phy_drivers_register(struct phy_driver *new_driver, int n);
515void phy_state_machine(struct work_struct *work); 539void phy_state_machine(struct work_struct *work);
516void phy_start_machine(struct phy_device *phydev, 540void phy_start_machine(struct phy_device *phydev,
517 void (*handler)(struct net_device *)); 541 void (*handler)(struct net_device *));
@@ -532,6 +556,11 @@ int phy_register_fixup_for_uid(u32 phy_uid, u32 phy_uid_mask,
532 int (*run)(struct phy_device *)); 556 int (*run)(struct phy_device *));
533int phy_scan_fixups(struct phy_device *phydev); 557int phy_scan_fixups(struct phy_device *phydev);
534 558
559int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable);
560int phy_get_eee_err(struct phy_device *phydev);
561int phy_ethtool_set_eee(struct phy_device *phydev, struct ethtool_eee *data);
562int phy_ethtool_get_eee(struct phy_device *phydev, struct ethtool_eee *data);
563
535int __init mdio_bus_init(void); 564int __init mdio_bus_init(void);
536void mdio_bus_exit(void); 565void mdio_bus_exit(void);
537 566
diff --git a/include/linux/pkt_cls.h b/include/linux/pkt_cls.h
index defbde203d0..38fbd4bc20a 100644
--- a/include/linux/pkt_cls.h
+++ b/include/linux/pkt_cls.h
@@ -451,8 +451,9 @@ enum {
451#define TCF_EM_U32 3 451#define TCF_EM_U32 3
452#define TCF_EM_META 4 452#define TCF_EM_META 4
453#define TCF_EM_TEXT 5 453#define TCF_EM_TEXT 5
454#define TCF_EM_VLAN 6 454#define TCF_EM_VLAN 6
455#define TCF_EM_MAX 6 455#define TCF_EM_CANID 7
456#define TCF_EM_MAX 7
456 457
457enum { 458enum {
458 TCF_EM_PROG_TC 459 TCF_EM_PROG_TC
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h
index 2c1de8982c8..db71c4ad862 100644
--- a/include/linux/rtnetlink.h
+++ b/include/linux/rtnetlink.h
@@ -612,12 +612,6 @@ struct tcamsg {
612#include <linux/mutex.h> 612#include <linux/mutex.h>
613#include <linux/netdevice.h> 613#include <linux/netdevice.h>
614 614
615static __inline__ int rtattr_strcmp(const struct rtattr *rta, const char *str)
616{
617 int len = strlen(str) + 1;
618 return len > rta->rta_len || memcmp(RTA_DATA(rta), str, len);
619}
620
621extern int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, u32 group, int echo); 615extern int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, u32 group, int echo);
622extern int rtnl_unicast(struct sk_buff *skb, struct net *net, u32 pid); 616extern int rtnl_unicast(struct sk_buff *skb, struct net *net, u32 pid);
623extern void rtnl_notify(struct sk_buff *skb, struct net *net, u32 pid, 617extern void rtnl_notify(struct sk_buff *skb, struct net *net, u32 pid,
@@ -625,124 +619,7 @@ extern void rtnl_notify(struct sk_buff *skb, struct net *net, u32 pid,
625extern void rtnl_set_sk_err(struct net *net, u32 group, int error); 619extern void rtnl_set_sk_err(struct net *net, u32 group, int error);
626extern int rtnetlink_put_metrics(struct sk_buff *skb, u32 *metrics); 620extern int rtnetlink_put_metrics(struct sk_buff *skb, u32 *metrics);
627extern int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst, 621extern int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst,
628 u32 id, u32 ts, u32 tsage, long expires, 622 u32 id, long expires, u32 error);
629 u32 error);
630
631extern void __rta_fill(struct sk_buff *skb, int attrtype, int attrlen, const void *data);
632
633#define RTA_PUT(skb, attrtype, attrlen, data) \
634({ if (unlikely(skb_tailroom(skb) < (int)RTA_SPACE(attrlen))) \
635 goto rtattr_failure; \
636 __rta_fill(skb, attrtype, attrlen, data); })
637
638#define RTA_APPEND(skb, attrlen, data) \
639({ if (unlikely(skb_tailroom(skb) < (int)(attrlen))) \
640 goto rtattr_failure; \
641 memcpy(skb_put(skb, attrlen), data, attrlen); })
642
643#define RTA_PUT_NOHDR(skb, attrlen, data) \
644({ RTA_APPEND(skb, RTA_ALIGN(attrlen), data); \
645 memset(skb_tail_pointer(skb) - (RTA_ALIGN(attrlen) - attrlen), 0, \
646 RTA_ALIGN(attrlen) - attrlen); })
647
648#define RTA_PUT_U8(skb, attrtype, value) \
649({ u8 _tmp = (value); \
650 RTA_PUT(skb, attrtype, sizeof(u8), &_tmp); })
651
652#define RTA_PUT_U16(skb, attrtype, value) \
653({ u16 _tmp = (value); \
654 RTA_PUT(skb, attrtype, sizeof(u16), &_tmp); })
655
656#define RTA_PUT_U32(skb, attrtype, value) \
657({ u32 _tmp = (value); \
658 RTA_PUT(skb, attrtype, sizeof(u32), &_tmp); })
659
660#define RTA_PUT_U64(skb, attrtype, value) \
661({ u64 _tmp = (value); \
662 RTA_PUT(skb, attrtype, sizeof(u64), &_tmp); })
663
664#define RTA_PUT_SECS(skb, attrtype, value) \
665 RTA_PUT_U64(skb, attrtype, (value) / HZ)
666
667#define RTA_PUT_MSECS(skb, attrtype, value) \
668 RTA_PUT_U64(skb, attrtype, jiffies_to_msecs(value))
669
670#define RTA_PUT_STRING(skb, attrtype, value) \
671 RTA_PUT(skb, attrtype, strlen(value) + 1, value)
672
673#define RTA_PUT_FLAG(skb, attrtype) \
674 RTA_PUT(skb, attrtype, 0, NULL);
675
676#define RTA_NEST(skb, type) \
677({ struct rtattr *__start = (struct rtattr *)skb_tail_pointer(skb); \
678 RTA_PUT(skb, type, 0, NULL); \
679 __start; })
680
681#define RTA_NEST_END(skb, start) \
682({ (start)->rta_len = skb_tail_pointer(skb) - (unsigned char *)(start); \
683 (skb)->len; })
684
685#define RTA_NEST_COMPAT(skb, type, attrlen, data) \
686({ struct rtattr *__start = (struct rtattr *)skb_tail_pointer(skb); \
687 RTA_PUT(skb, type, attrlen, data); \
688 RTA_NEST(skb, type); \
689 __start; })
690
691#define RTA_NEST_COMPAT_END(skb, start) \
692({ struct rtattr *__nest = (void *)(start) + NLMSG_ALIGN((start)->rta_len); \
693 (start)->rta_len = skb_tail_pointer(skb) - (unsigned char *)(start); \
694 RTA_NEST_END(skb, __nest); \
695 (skb)->len; })
696
697#define RTA_NEST_CANCEL(skb, start) \
698({ if (start) \
699 skb_trim(skb, (unsigned char *) (start) - (skb)->data); \
700 -1; })
701
702#define RTA_GET_U8(rta) \
703({ if (!rta || RTA_PAYLOAD(rta) < sizeof(u8)) \
704 goto rtattr_failure; \
705 *(u8 *) RTA_DATA(rta); })
706
707#define RTA_GET_U16(rta) \
708({ if (!rta || RTA_PAYLOAD(rta) < sizeof(u16)) \
709 goto rtattr_failure; \
710 *(u16 *) RTA_DATA(rta); })
711
712#define RTA_GET_U32(rta) \
713({ if (!rta || RTA_PAYLOAD(rta) < sizeof(u32)) \
714 goto rtattr_failure; \
715 *(u32 *) RTA_DATA(rta); })
716
717#define RTA_GET_U64(rta) \
718({ u64 _tmp; \
719 if (!rta || RTA_PAYLOAD(rta) < sizeof(u64)) \
720 goto rtattr_failure; \
721 memcpy(&_tmp, RTA_DATA(rta), sizeof(_tmp)); \
722 _tmp; })
723
724#define RTA_GET_FLAG(rta) (!!(rta))
725
726#define RTA_GET_SECS(rta) ((unsigned long) RTA_GET_U64(rta) * HZ)
727#define RTA_GET_MSECS(rta) (msecs_to_jiffies((unsigned long) RTA_GET_U64(rta)))
728
729static inline struct rtattr *
730__rta_reserve(struct sk_buff *skb, int attrtype, int attrlen)
731{
732 struct rtattr *rta;
733 int size = RTA_LENGTH(attrlen);
734
735 rta = (struct rtattr*)skb_put(skb, RTA_ALIGN(size));
736 rta->rta_type = attrtype;
737 rta->rta_len = size;
738 memset(RTA_DATA(rta) + attrlen, 0, RTA_ALIGN(size) - size);
739 return rta;
740}
741
742#define __RTA_PUT(skb, attrtype, attrlen) \
743({ if (unlikely(skb_tailroom(skb) < (int)RTA_SPACE(attrlen))) \
744 goto rtattr_failure; \
745 __rta_reserve(skb, attrtype, attrlen); })
746 623
747extern void rtmsg_ifinfo(int type, struct net_device *dev, unsigned change); 624extern void rtmsg_ifinfo(int type, struct net_device *dev, unsigned change);
748 625
@@ -794,13 +671,6 @@ extern void __rtnl_unlock(void);
794 } \ 671 } \
795} while(0) 672} while(0)
796 673
797static inline u32 rtm_get_table(struct rtattr **rta, u8 table)
798{
799 return RTA_GET_U32(rta[RTA_TABLE-1]);
800rtattr_failure:
801 return table;
802}
803
804extern int ndo_dflt_fdb_dump(struct sk_buff *skb, 674extern int ndo_dflt_fdb_dump(struct sk_buff *skb,
805 struct netlink_callback *cb, 675 struct netlink_callback *cb,
806 struct net_device *dev, 676 struct net_device *dev,
diff --git a/include/linux/sock_diag.h b/include/linux/sock_diag.h
index db4bae78bda..6793fac5eab 100644
--- a/include/linux/sock_diag.h
+++ b/include/linux/sock_diag.h
@@ -18,6 +18,7 @@ enum {
18 SK_MEMINFO_FWD_ALLOC, 18 SK_MEMINFO_FWD_ALLOC,
19 SK_MEMINFO_WMEM_QUEUED, 19 SK_MEMINFO_WMEM_QUEUED,
20 SK_MEMINFO_OPTMEM, 20 SK_MEMINFO_OPTMEM,
21 SK_MEMINFO_BACKLOG,
21 22
22 SK_MEMINFO_VARS, 23 SK_MEMINFO_VARS,
23}; 24};
diff --git a/include/linux/spi/at86rf230.h b/include/linux/spi/at86rf230.h
new file mode 100644
index 00000000000..b2b1afbb320
--- /dev/null
+++ b/include/linux/spi/at86rf230.h
@@ -0,0 +1,31 @@
1/*
2 * AT86RF230/RF231 driver
3 *
4 * Copyright (C) 2009-2012 Siemens AG
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2
8 * as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Written by:
20 * Dmitry Eremin-Solenikov <dmitry.baryshkov@siemens.com>
21 */
22#ifndef AT86RF230_H
23#define AT86RF230_H
24
25struct at86rf230_platform_data {
26 int rstn;
27 int slp_tr;
28 int dig2;
29};
30
31#endif
diff --git a/include/linux/ssb/ssb.h b/include/linux/ssb/ssb.h
index bc14bd738ad..bb674c02f30 100644
--- a/include/linux/ssb/ssb.h
+++ b/include/linux/ssb/ssb.h
@@ -243,6 +243,7 @@ struct ssb_bus_ops {
243#define SSB_DEV_MINI_MACPHY 0x823 243#define SSB_DEV_MINI_MACPHY 0x823
244#define SSB_DEV_ARM_1176 0x824 244#define SSB_DEV_ARM_1176 0x824
245#define SSB_DEV_ARM_7TDMI 0x825 245#define SSB_DEV_ARM_7TDMI 0x825
246#define SSB_DEV_ARM_CM3 0x82A
246 247
247/* Vendor-ID values */ 248/* Vendor-ID values */
248#define SSB_VENDOR_BROADCOM 0x4243 249#define SSB_VENDOR_BROADCOM 0x4243
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 5f359dbfcdc..2de9cf46f9f 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -507,7 +507,7 @@ struct tcp_timewait_sock {
507 u32 tw_ts_recent; 507 u32 tw_ts_recent;
508 long tw_ts_recent_stamp; 508 long tw_ts_recent_stamp;
509#ifdef CONFIG_TCP_MD5SIG 509#ifdef CONFIG_TCP_MD5SIG
510 struct tcp_md5sig_key *tw_md5_key; 510 struct tcp_md5sig_key *tw_md5_key;
511#endif 511#endif
512 /* Few sockets in timewait have cookies; in that case, then this 512 /* Few sockets in timewait have cookies; in that case, then this
513 * object holds a reference to them (tw_cookie_values->kref). 513 * object holds a reference to them (tw_cookie_values->kref).
diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h
index 76f439647c4..f87cf622317 100644
--- a/include/linux/usb/usbnet.h
+++ b/include/linux/usb/usbnet.h
@@ -66,9 +66,8 @@ struct usbnet {
66# define EVENT_STS_SPLIT 3 66# define EVENT_STS_SPLIT 3
67# define EVENT_LINK_RESET 4 67# define EVENT_LINK_RESET 4
68# define EVENT_RX_PAUSED 5 68# define EVENT_RX_PAUSED 5
69# define EVENT_DEV_WAKING 6 69# define EVENT_DEV_ASLEEP 6
70# define EVENT_DEV_ASLEEP 7 70# define EVENT_DEV_OPEN 7
71# define EVENT_DEV_OPEN 8
72}; 71};
73 72
74static inline struct usb_driver *driver_of(struct usb_interface *intf) 73static inline struct usb_driver *driver_of(struct usb_interface *intf)
diff --git a/include/net/af_unix.h b/include/net/af_unix.h
index 2ee33da36a7..b5f8988e428 100644
--- a/include/net/af_unix.h
+++ b/include/net/af_unix.h
@@ -14,10 +14,11 @@ extern struct sock *unix_get_socket(struct file *filp);
14extern struct sock *unix_peer_get(struct sock *); 14extern struct sock *unix_peer_get(struct sock *);
15 15
16#define UNIX_HASH_SIZE 256 16#define UNIX_HASH_SIZE 256
17#define UNIX_HASH_BITS 8
17 18
18extern unsigned int unix_tot_inflight; 19extern unsigned int unix_tot_inflight;
19extern spinlock_t unix_table_lock; 20extern spinlock_t unix_table_lock;
20extern struct hlist_head unix_socket_table[UNIX_HASH_SIZE + 1]; 21extern struct hlist_head unix_socket_table[2 * UNIX_HASH_SIZE];
21 22
22struct unix_address { 23struct unix_address {
23 atomic_t refcnt; 24 atomic_t refcnt;
diff --git a/include/net/arp.h b/include/net/arp.h
index 4a1f3fb562e..4617d984113 100644
--- a/include/net/arp.h
+++ b/include/net/arp.h
@@ -15,24 +15,34 @@ static inline u32 arp_hashfn(u32 key, const struct net_device *dev, u32 hash_rnd
15 return val * hash_rnd; 15 return val * hash_rnd;
16} 16}
17 17
18static inline struct neighbour *__ipv4_neigh_lookup(struct net_device *dev, u32 key) 18static inline struct neighbour *__ipv4_neigh_lookup_noref(struct net_device *dev, u32 key)
19{ 19{
20 struct neigh_hash_table *nht; 20 struct neigh_hash_table *nht = rcu_dereference_bh(arp_tbl.nht);
21 struct neighbour *n; 21 struct neighbour *n;
22 u32 hash_val; 22 u32 hash_val;
23 23
24 rcu_read_lock_bh(); 24 if (dev->flags & (IFF_LOOPBACK | IFF_POINTOPOINT))
25 nht = rcu_dereference_bh(arp_tbl.nht); 25 key = 0;
26
26 hash_val = arp_hashfn(key, dev, nht->hash_rnd[0]) >> (32 - nht->hash_shift); 27 hash_val = arp_hashfn(key, dev, nht->hash_rnd[0]) >> (32 - nht->hash_shift);
27 for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]); 28 for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]);
28 n != NULL; 29 n != NULL;
29 n = rcu_dereference_bh(n->next)) { 30 n = rcu_dereference_bh(n->next)) {
30 if (n->dev == dev && *(u32 *)n->primary_key == key) { 31 if (n->dev == dev && *(u32 *)n->primary_key == key)
31 if (!atomic_inc_not_zero(&n->refcnt)) 32 return n;
32 n = NULL;
33 break;
34 }
35 } 33 }
34
35 return NULL;
36}
37
38static inline struct neighbour *__ipv4_neigh_lookup(struct net_device *dev, u32 key)
39{
40 struct neighbour *n;
41
42 rcu_read_lock_bh();
43 n = __ipv4_neigh_lookup_noref(dev, key);
44 if (n && !atomic_inc_not_zero(&n->refcnt))
45 n = NULL;
36 rcu_read_unlock_bh(); 46 rcu_read_unlock_bh();
37 47
38 return n; 48 return n;
diff --git a/include/net/bluetooth/a2mp.h b/include/net/bluetooth/a2mp.h
new file mode 100644
index 00000000000..6a76e0a0705
--- /dev/null
+++ b/include/net/bluetooth/a2mp.h
@@ -0,0 +1,126 @@
1/*
2 Copyright (c) 2010,2011 Code Aurora Forum. All rights reserved.
3 Copyright (c) 2011,2012 Intel Corp.
4
5 This program is free software; you can redistribute it and/or modify
6 it under the terms of the GNU General Public License version 2 and
7 only version 2 as published by the Free Software Foundation.
8
9 This program is distributed in the hope that it will be useful,
10 but WITHOUT ANY WARRANTY; without even the implied warranty of
11 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 GNU General Public License for more details.
13*/
14
15#ifndef __A2MP_H
16#define __A2MP_H
17
18#include <net/bluetooth/l2cap.h>
19
20#define A2MP_FEAT_EXT 0x8000
21
22struct amp_mgr {
23 struct l2cap_conn *l2cap_conn;
24 struct l2cap_chan *a2mp_chan;
25 struct kref kref;
26 __u8 ident;
27 __u8 handle;
28 unsigned long flags;
29};
30
31struct a2mp_cmd {
32 __u8 code;
33 __u8 ident;
34 __le16 len;
35 __u8 data[0];
36} __packed;
37
38/* A2MP command codes */
39#define A2MP_COMMAND_REJ 0x01
40struct a2mp_cmd_rej {
41 __le16 reason;
42 __u8 data[0];
43} __packed;
44
45#define A2MP_DISCOVER_REQ 0x02
46struct a2mp_discov_req {
47 __le16 mtu;
48 __le16 ext_feat;
49} __packed;
50
51struct a2mp_cl {
52 __u8 id;
53 __u8 type;
54 __u8 status;
55} __packed;
56
57#define A2MP_DISCOVER_RSP 0x03
58struct a2mp_discov_rsp {
59 __le16 mtu;
60 __le16 ext_feat;
61 struct a2mp_cl cl[0];
62} __packed;
63
64#define A2MP_CHANGE_NOTIFY 0x04
65#define A2MP_CHANGE_RSP 0x05
66
67#define A2MP_GETINFO_REQ 0x06
68struct a2mp_info_req {
69 __u8 id;
70} __packed;
71
72#define A2MP_GETINFO_RSP 0x07
73struct a2mp_info_rsp {
74 __u8 id;
75 __u8 status;
76 __le32 total_bw;
77 __le32 max_bw;
78 __le32 min_latency;
79 __le16 pal_cap;
80 __le16 assoc_size;
81} __packed;
82
83#define A2MP_GETAMPASSOC_REQ 0x08
84struct a2mp_amp_assoc_req {
85 __u8 id;
86} __packed;
87
88#define A2MP_GETAMPASSOC_RSP 0x09
89struct a2mp_amp_assoc_rsp {
90 __u8 id;
91 __u8 status;
92 __u8 amp_assoc[0];
93} __packed;
94
95#define A2MP_CREATEPHYSLINK_REQ 0x0A
96#define A2MP_DISCONNPHYSLINK_REQ 0x0C
97struct a2mp_physlink_req {
98 __u8 local_id;
99 __u8 remote_id;
100 __u8 amp_assoc[0];
101} __packed;
102
103#define A2MP_CREATEPHYSLINK_RSP 0x0B
104#define A2MP_DISCONNPHYSLINK_RSP 0x0D
105struct a2mp_physlink_rsp {
106 __u8 local_id;
107 __u8 remote_id;
108 __u8 status;
109} __packed;
110
111/* A2MP response status */
112#define A2MP_STATUS_SUCCESS 0x00
113#define A2MP_STATUS_INVALID_CTRL_ID 0x01
114#define A2MP_STATUS_UNABLE_START_LINK_CREATION 0x02
115#define A2MP_STATUS_NO_PHYSICAL_LINK_EXISTS 0x02
116#define A2MP_STATUS_COLLISION_OCCURED 0x03
117#define A2MP_STATUS_DISCONN_REQ_RECVD 0x04
118#define A2MP_STATUS_PHYS_LINK_EXISTS 0x05
119#define A2MP_STATUS_SECURITY_VIOLATION 0x06
120
121void amp_mgr_get(struct amp_mgr *mgr);
122int amp_mgr_put(struct amp_mgr *mgr);
123struct l2cap_chan *a2mp_channel_create(struct l2cap_conn *conn,
124 struct sk_buff *skb);
125
126#endif /* __A2MP_H */
diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h
index 961669b648f..565d4bee1e4 100644
--- a/include/net/bluetooth/bluetooth.h
+++ b/include/net/bluetooth/bluetooth.h
@@ -1,4 +1,4 @@
1/* 1/*
2 BlueZ - Bluetooth protocol stack for Linux 2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated 3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 4
@@ -12,22 +12,19 @@
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. 13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY 14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES 15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, 20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS 21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED. 22 SOFTWARE IS DISCLAIMED.
23*/ 23*/
24 24
25#ifndef __BLUETOOTH_H 25#ifndef __BLUETOOTH_H
26#define __BLUETOOTH_H 26#define __BLUETOOTH_H
27 27
28#include <asm/types.h>
29#include <asm/byteorder.h>
30#include <linux/list.h>
31#include <linux/poll.h> 28#include <linux/poll.h>
32#include <net/sock.h> 29#include <net/sock.h>
33 30
@@ -168,8 +165,8 @@ typedef struct {
168#define BDADDR_LE_PUBLIC 0x01 165#define BDADDR_LE_PUBLIC 0x01
169#define BDADDR_LE_RANDOM 0x02 166#define BDADDR_LE_RANDOM 0x02
170 167
171#define BDADDR_ANY (&(bdaddr_t) {{0, 0, 0, 0, 0, 0}}) 168#define BDADDR_ANY (&(bdaddr_t) {{0, 0, 0, 0, 0, 0} })
172#define BDADDR_LOCAL (&(bdaddr_t) {{0, 0, 0, 0xff, 0xff, 0xff}}) 169#define BDADDR_LOCAL (&(bdaddr_t) {{0, 0, 0, 0xff, 0xff, 0xff} })
173 170
174/* Copy, swap, convert BD Address */ 171/* Copy, swap, convert BD Address */
175static inline int bacmp(bdaddr_t *ba1, bdaddr_t *ba2) 172static inline int bacmp(bdaddr_t *ba1, bdaddr_t *ba2)
@@ -215,7 +212,7 @@ int bt_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
215 struct msghdr *msg, size_t len, int flags); 212 struct msghdr *msg, size_t len, int flags);
216int bt_sock_stream_recvmsg(struct kiocb *iocb, struct socket *sock, 213int bt_sock_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
217 struct msghdr *msg, size_t len, int flags); 214 struct msghdr *msg, size_t len, int flags);
218uint bt_sock_poll(struct file * file, struct socket *sock, poll_table *wait); 215uint bt_sock_poll(struct file *file, struct socket *sock, poll_table *wait);
219int bt_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg); 216int bt_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
220int bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo); 217int bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo);
221 218
@@ -225,12 +222,12 @@ struct sock *bt_accept_dequeue(struct sock *parent, struct socket *newsock);
225 222
226/* Skb helpers */ 223/* Skb helpers */
227struct l2cap_ctrl { 224struct l2cap_ctrl {
228 unsigned int sframe : 1, 225 unsigned int sframe:1,
229 poll : 1, 226 poll:1,
230 final : 1, 227 final:1,
231 fcs : 1, 228 fcs:1,
232 sar : 2, 229 sar:2,
233 super : 2; 230 super:2;
234 __u16 reqseq; 231 __u16 reqseq;
235 __u16 txseq; 232 __u16 txseq;
236 __u8 retries; 233 __u8 retries;
@@ -249,7 +246,8 @@ static inline struct sk_buff *bt_skb_alloc(unsigned int len, gfp_t how)
249{ 246{
250 struct sk_buff *skb; 247 struct sk_buff *skb;
251 248
252 if ((skb = alloc_skb(len + BT_SKB_RESERVE, how))) { 249 skb = alloc_skb(len + BT_SKB_RESERVE, how);
250 if (skb) {
253 skb_reserve(skb, BT_SKB_RESERVE); 251 skb_reserve(skb, BT_SKB_RESERVE);
254 bt_cb(skb)->incoming = 0; 252 bt_cb(skb)->incoming = 0;
255 } 253 }
@@ -261,7 +259,8 @@ static inline struct sk_buff *bt_skb_send_alloc(struct sock *sk,
261{ 259{
262 struct sk_buff *skb; 260 struct sk_buff *skb;
263 261
264 if ((skb = sock_alloc_send_skb(sk, len + BT_SKB_RESERVE, nb, err))) { 262 skb = sock_alloc_send_skb(sk, len + BT_SKB_RESERVE, nb, err);
263 if (skb) {
265 skb_reserve(skb, BT_SKB_RESERVE); 264 skb_reserve(skb, BT_SKB_RESERVE);
266 bt_cb(skb)->incoming = 0; 265 bt_cb(skb)->incoming = 0;
267 } 266 }
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index 3def64ba77f..2a6b0b8b712 100644
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -30,6 +30,9 @@
30#define HCI_MAX_EVENT_SIZE 260 30#define HCI_MAX_EVENT_SIZE 260
31#define HCI_MAX_FRAME_SIZE (HCI_MAX_ACL_SIZE + 4) 31#define HCI_MAX_FRAME_SIZE (HCI_MAX_ACL_SIZE + 4)
32 32
33#define HCI_LINK_KEY_SIZE 16
34#define HCI_AMP_LINK_KEY_SIZE (2 * HCI_LINK_KEY_SIZE)
35
33/* HCI dev events */ 36/* HCI dev events */
34#define HCI_DEV_REG 1 37#define HCI_DEV_REG 1
35#define HCI_DEV_UNREG 2 38#define HCI_DEV_UNREG 2
@@ -56,9 +59,12 @@
56#define HCI_BREDR 0x00 59#define HCI_BREDR 0x00
57#define HCI_AMP 0x01 60#define HCI_AMP 0x01
58 61
62/* First BR/EDR Controller shall have ID = 0 */
63#define HCI_BREDR_ID 0
64
59/* HCI device quirks */ 65/* HCI device quirks */
60enum { 66enum {
61 HCI_QUIRK_NO_RESET, 67 HCI_QUIRK_RESET_ON_CLOSE,
62 HCI_QUIRK_RAW_DEVICE, 68 HCI_QUIRK_RAW_DEVICE,
63 HCI_QUIRK_FIXUP_BUFFER_SIZE 69 HCI_QUIRK_FIXUP_BUFFER_SIZE
64}; 70};
@@ -133,10 +139,8 @@ enum {
133#define HCIINQUIRY _IOR('H', 240, int) 139#define HCIINQUIRY _IOR('H', 240, int)
134 140
135/* HCI timeouts */ 141/* HCI timeouts */
136#define HCI_CONNECT_TIMEOUT (40000) /* 40 seconds */
137#define HCI_DISCONN_TIMEOUT (2000) /* 2 seconds */ 142#define HCI_DISCONN_TIMEOUT (2000) /* 2 seconds */
138#define HCI_PAIRING_TIMEOUT (60000) /* 60 seconds */ 143#define HCI_PAIRING_TIMEOUT (60000) /* 60 seconds */
139#define HCI_IDLE_TIMEOUT (6000) /* 6 seconds */
140#define HCI_INIT_TIMEOUT (10000) /* 10 seconds */ 144#define HCI_INIT_TIMEOUT (10000) /* 10 seconds */
141#define HCI_CMD_TIMEOUT (1000) /* 1 seconds */ 145#define HCI_CMD_TIMEOUT (1000) /* 1 seconds */
142#define HCI_ACL_TX_TIMEOUT (45000) /* 45 seconds */ 146#define HCI_ACL_TX_TIMEOUT (45000) /* 45 seconds */
@@ -371,7 +375,7 @@ struct hci_cp_reject_conn_req {
371#define HCI_OP_LINK_KEY_REPLY 0x040b 375#define HCI_OP_LINK_KEY_REPLY 0x040b
372struct hci_cp_link_key_reply { 376struct hci_cp_link_key_reply {
373 bdaddr_t bdaddr; 377 bdaddr_t bdaddr;
374 __u8 link_key[16]; 378 __u8 link_key[HCI_LINK_KEY_SIZE];
375} __packed; 379} __packed;
376 380
377#define HCI_OP_LINK_KEY_NEG_REPLY 0x040c 381#define HCI_OP_LINK_KEY_NEG_REPLY 0x040c
@@ -523,6 +527,28 @@ struct hci_cp_io_capability_neg_reply {
523 __u8 reason; 527 __u8 reason;
524} __packed; 528} __packed;
525 529
530#define HCI_OP_CREATE_PHY_LINK 0x0435
531struct hci_cp_create_phy_link {
532 __u8 phy_handle;
533 __u8 key_len;
534 __u8 key_type;
535 __u8 key[HCI_AMP_LINK_KEY_SIZE];
536} __packed;
537
538#define HCI_OP_ACCEPT_PHY_LINK 0x0436
539struct hci_cp_accept_phy_link {
540 __u8 phy_handle;
541 __u8 key_len;
542 __u8 key_type;
543 __u8 key[HCI_AMP_LINK_KEY_SIZE];
544} __packed;
545
546#define HCI_OP_DISCONN_PHY_LINK 0x0437
547struct hci_cp_disconn_phy_link {
548 __u8 phy_handle;
549 __u8 reason;
550} __packed;
551
526#define HCI_OP_SNIFF_MODE 0x0803 552#define HCI_OP_SNIFF_MODE 0x0803
527struct hci_cp_sniff_mode { 553struct hci_cp_sniff_mode {
528 __le16 handle; 554 __le16 handle;
@@ -818,6 +844,31 @@ struct hci_rp_read_local_amp_info {
818 __le32 be_flush_to; 844 __le32 be_flush_to;
819} __packed; 845} __packed;
820 846
847#define HCI_OP_READ_LOCAL_AMP_ASSOC 0x140a
848struct hci_cp_read_local_amp_assoc {
849 __u8 phy_handle;
850 __le16 len_so_far;
851 __le16 max_len;
852} __packed;
853struct hci_rp_read_local_amp_assoc {
854 __u8 status;
855 __u8 phy_handle;
856 __le16 rem_len;
857 __u8 frag[0];
858} __packed;
859
860#define HCI_OP_WRITE_REMOTE_AMP_ASSOC 0x140b
861struct hci_cp_write_remote_amp_assoc {
862 __u8 phy_handle;
863 __le16 len_so_far;
864 __le16 rem_len;
865 __u8 frag[0];
866} __packed;
867struct hci_rp_write_remote_amp_assoc {
868 __u8 status;
869 __u8 phy_handle;
870} __packed;
871
821#define HCI_OP_LE_SET_EVENT_MASK 0x2001 872#define HCI_OP_LE_SET_EVENT_MASK 0x2001
822struct hci_cp_le_set_event_mask { 873struct hci_cp_le_set_event_mask {
823 __u8 mask[8]; 874 __u8 mask[8];
@@ -1048,7 +1099,7 @@ struct hci_ev_link_key_req {
1048#define HCI_EV_LINK_KEY_NOTIFY 0x18 1099#define HCI_EV_LINK_KEY_NOTIFY 0x18
1049struct hci_ev_link_key_notify { 1100struct hci_ev_link_key_notify {
1050 bdaddr_t bdaddr; 1101 bdaddr_t bdaddr;
1051 __u8 link_key[16]; 1102 __u8 link_key[HCI_LINK_KEY_SIZE];
1052 __u8 key_type; 1103 __u8 key_type;
1053} __packed; 1104} __packed;
1054 1105
@@ -1196,6 +1247,39 @@ struct hci_ev_le_meta {
1196 __u8 subevent; 1247 __u8 subevent;
1197} __packed; 1248} __packed;
1198 1249
1250#define HCI_EV_PHY_LINK_COMPLETE 0x40
1251struct hci_ev_phy_link_complete {
1252 __u8 status;
1253 __u8 phy_handle;
1254} __packed;
1255
1256#define HCI_EV_CHANNEL_SELECTED 0x41
1257struct hci_ev_channel_selected {
1258 __u8 phy_handle;
1259} __packed;
1260
1261#define HCI_EV_DISCONN_PHY_LINK_COMPLETE 0x42
1262struct hci_ev_disconn_phy_link_complete {
1263 __u8 status;
1264 __u8 phy_handle;
1265 __u8 reason;
1266} __packed;
1267
1268#define HCI_EV_LOGICAL_LINK_COMPLETE 0x45
1269struct hci_ev_logical_link_complete {
1270 __u8 status;
1271 __le16 handle;
1272 __u8 phy_handle;
1273 __u8 flow_spec_id;
1274} __packed;
1275
1276#define HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE 0x46
1277struct hci_ev_disconn_logical_link_complete {
1278 __u8 status;
1279 __le16 handle;
1280 __u8 reason;
1281} __packed;
1282
1199#define HCI_EV_NUM_COMP_BLOCKS 0x48 1283#define HCI_EV_NUM_COMP_BLOCKS 0x48
1200struct hci_comp_blocks_info { 1284struct hci_comp_blocks_info {
1201 __le16 handle; 1285 __le16 handle;
@@ -1296,7 +1380,6 @@ struct hci_sco_hdr {
1296 __u8 dlen; 1380 __u8 dlen;
1297} __packed; 1381} __packed;
1298 1382
1299#include <linux/skbuff.h>
1300static inline struct hci_event_hdr *hci_event_hdr(const struct sk_buff *skb) 1383static inline struct hci_event_hdr *hci_event_hdr(const struct sk_buff *skb)
1301{ 1384{
1302 return (struct hci_event_hdr *) skb->data; 1385 return (struct hci_event_hdr *) skb->data;
@@ -1313,12 +1396,12 @@ static inline struct hci_sco_hdr *hci_sco_hdr(const struct sk_buff *skb)
1313} 1396}
1314 1397
1315/* Command opcode pack/unpack */ 1398/* Command opcode pack/unpack */
1316#define hci_opcode_pack(ogf, ocf) (__u16) ((ocf & 0x03ff)|(ogf << 10)) 1399#define hci_opcode_pack(ogf, ocf) ((__u16) ((ocf & 0x03ff)|(ogf << 10)))
1317#define hci_opcode_ogf(op) (op >> 10) 1400#define hci_opcode_ogf(op) (op >> 10)
1318#define hci_opcode_ocf(op) (op & 0x03ff) 1401#define hci_opcode_ocf(op) (op & 0x03ff)
1319 1402
1320/* ACL handle and flags pack/unpack */ 1403/* ACL handle and flags pack/unpack */
1321#define hci_handle_pack(h, f) (__u16) ((h & 0x0fff)|(f << 12)) 1404#define hci_handle_pack(h, f) ((__u16) ((h & 0x0fff)|(f << 12)))
1322#define hci_handle(h) (h & 0x0fff) 1405#define hci_handle(h) (h & 0x0fff)
1323#define hci_flags(h) (h >> 12) 1406#define hci_flags(h) (h >> 12)
1324 1407
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index 9fc7728f94e..20fd57367dd 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -25,7 +25,6 @@
25#ifndef __HCI_CORE_H 25#ifndef __HCI_CORE_H
26#define __HCI_CORE_H 26#define __HCI_CORE_H
27 27
28#include <linux/interrupt.h>
29#include <net/bluetooth/hci.h> 28#include <net/bluetooth/hci.h>
30 29
31/* HCI priority */ 30/* HCI priority */
@@ -65,7 +64,7 @@ struct discovery_state {
65 DISCOVERY_RESOLVING, 64 DISCOVERY_RESOLVING,
66 DISCOVERY_STOPPING, 65 DISCOVERY_STOPPING,
67 } state; 66 } state;
68 struct list_head all; /* All devices found during inquiry */ 67 struct list_head all; /* All devices found during inquiry */
69 struct list_head unknown; /* Name state not known */ 68 struct list_head unknown; /* Name state not known */
70 struct list_head resolve; /* Name needs to be resolved */ 69 struct list_head resolve; /* Name needs to be resolved */
71 __u32 timestamp; 70 __u32 timestamp;
@@ -105,7 +104,7 @@ struct link_key {
105 struct list_head list; 104 struct list_head list;
106 bdaddr_t bdaddr; 105 bdaddr_t bdaddr;
107 u8 type; 106 u8 type;
108 u8 val[16]; 107 u8 val[HCI_LINK_KEY_SIZE];
109 u8 pin_len; 108 u8 pin_len;
110}; 109};
111 110
@@ -333,6 +332,7 @@ struct hci_conn {
333 void *l2cap_data; 332 void *l2cap_data;
334 void *sco_data; 333 void *sco_data;
335 void *smp_conn; 334 void *smp_conn;
335 struct amp_mgr *amp_mgr;
336 336
337 struct hci_conn *link; 337 struct hci_conn *link;
338 338
@@ -360,7 +360,8 @@ extern int l2cap_connect_cfm(struct hci_conn *hcon, u8 status);
360extern int l2cap_disconn_ind(struct hci_conn *hcon); 360extern int l2cap_disconn_ind(struct hci_conn *hcon);
361extern int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason); 361extern int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason);
362extern int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt); 362extern int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt);
363extern int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags); 363extern int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb,
364 u16 flags);
364 365
365extern int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr); 366extern int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr);
366extern int sco_connect_cfm(struct hci_conn *hcon, __u8 status); 367extern int sco_connect_cfm(struct hci_conn *hcon, __u8 status);
@@ -429,8 +430,8 @@ enum {
429static inline bool hci_conn_ssp_enabled(struct hci_conn *conn) 430static inline bool hci_conn_ssp_enabled(struct hci_conn *conn)
430{ 431{
431 struct hci_dev *hdev = conn->hdev; 432 struct hci_dev *hdev = conn->hdev;
432 return (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) && 433 return test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
433 test_bit(HCI_CONN_SSP_ENABLED, &conn->flags)); 434 test_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
434} 435}
435 436
436static inline void hci_conn_hash_init(struct hci_dev *hdev) 437static inline void hci_conn_hash_init(struct hci_dev *hdev)
@@ -640,6 +641,19 @@ static inline void hci_set_drvdata(struct hci_dev *hdev, void *data)
640 dev_set_drvdata(&hdev->dev, data); 641 dev_set_drvdata(&hdev->dev, data);
641} 642}
642 643
644/* hci_dev_list shall be locked */
645static inline uint8_t __hci_num_ctrl(void)
646{
647 uint8_t count = 0;
648 struct list_head *p;
649
650 list_for_each(p, &hci_dev_list) {
651 count++;
652 }
653
654 return count;
655}
656
643struct hci_dev *hci_dev_get(int index); 657struct hci_dev *hci_dev_get(int index);
644struct hci_dev *hci_get_route(bdaddr_t *src, bdaddr_t *dst); 658struct hci_dev *hci_get_route(bdaddr_t *src, bdaddr_t *dst);
645 659
@@ -661,7 +675,8 @@ int hci_get_conn_info(struct hci_dev *hdev, void __user *arg);
661int hci_get_auth_info(struct hci_dev *hdev, void __user *arg); 675int hci_get_auth_info(struct hci_dev *hdev, void __user *arg);
662int hci_inquiry(void __user *arg); 676int hci_inquiry(void __user *arg);
663 677
664struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr); 678struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
679 bdaddr_t *bdaddr);
665int hci_blacklist_clear(struct hci_dev *hdev); 680int hci_blacklist_clear(struct hci_dev *hdev);
666int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type); 681int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type);
667int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type); 682int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type);
diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
index 1c7d1cd5e67..d80e3f0691b 100644
--- a/include/net/bluetooth/l2cap.h
+++ b/include/net/bluetooth/l2cap.h
@@ -40,11 +40,11 @@
40#define L2CAP_DEFAULT_MONITOR_TO 12000 /* 12 seconds */ 40#define L2CAP_DEFAULT_MONITOR_TO 12000 /* 12 seconds */
41#define L2CAP_DEFAULT_MAX_PDU_SIZE 1009 /* Sized for 3-DH5 packet */ 41#define L2CAP_DEFAULT_MAX_PDU_SIZE 1009 /* Sized for 3-DH5 packet */
42#define L2CAP_DEFAULT_ACK_TO 200 42#define L2CAP_DEFAULT_ACK_TO 200
43#define L2CAP_LE_DEFAULT_MTU 23
44#define L2CAP_DEFAULT_MAX_SDU_SIZE 0xFFFF 43#define L2CAP_DEFAULT_MAX_SDU_SIZE 0xFFFF
45#define L2CAP_DEFAULT_SDU_ITIME 0xFFFFFFFF 44#define L2CAP_DEFAULT_SDU_ITIME 0xFFFFFFFF
46#define L2CAP_DEFAULT_ACC_LAT 0xFFFFFFFF 45#define L2CAP_DEFAULT_ACC_LAT 0xFFFFFFFF
47#define L2CAP_BREDR_MAX_PAYLOAD 1019 /* 3-DH5 packet */ 46#define L2CAP_BREDR_MAX_PAYLOAD 1019 /* 3-DH5 packet */
47#define L2CAP_LE_MIN_MTU 23
48 48
49#define L2CAP_DISC_TIMEOUT msecs_to_jiffies(100) 49#define L2CAP_DISC_TIMEOUT msecs_to_jiffies(100)
50#define L2CAP_DISC_REJ_TIMEOUT msecs_to_jiffies(5000) 50#define L2CAP_DISC_REJ_TIMEOUT msecs_to_jiffies(5000)
@@ -52,6 +52,8 @@
52#define L2CAP_CONN_TIMEOUT msecs_to_jiffies(40000) 52#define L2CAP_CONN_TIMEOUT msecs_to_jiffies(40000)
53#define L2CAP_INFO_TIMEOUT msecs_to_jiffies(4000) 53#define L2CAP_INFO_TIMEOUT msecs_to_jiffies(4000)
54 54
55#define L2CAP_A2MP_DEFAULT_MTU 670
56
55/* L2CAP socket address */ 57/* L2CAP socket address */
56struct sockaddr_l2 { 58struct sockaddr_l2 {
57 sa_family_t l2_family; 59 sa_family_t l2_family;
@@ -229,9 +231,14 @@ struct l2cap_conn_rsp {
229 __le16 status; 231 __le16 status;
230} __packed; 232} __packed;
231 233
234/* protocol/service multiplexer (PSM) */
235#define L2CAP_PSM_SDP 0x0001
236#define L2CAP_PSM_RFCOMM 0x0003
237
232/* channel indentifier */ 238/* channel indentifier */
233#define L2CAP_CID_SIGNALING 0x0001 239#define L2CAP_CID_SIGNALING 0x0001
234#define L2CAP_CID_CONN_LESS 0x0002 240#define L2CAP_CID_CONN_LESS 0x0002
241#define L2CAP_CID_A2MP 0x0003
235#define L2CAP_CID_LE_DATA 0x0004 242#define L2CAP_CID_LE_DATA 0x0004
236#define L2CAP_CID_LE_SIGNALING 0x0005 243#define L2CAP_CID_LE_SIGNALING 0x0005
237#define L2CAP_CID_SMP 0x0006 244#define L2CAP_CID_SMP 0x0006
@@ -271,6 +278,9 @@ struct l2cap_conf_rsp {
271#define L2CAP_CONF_PENDING 0x0004 278#define L2CAP_CONF_PENDING 0x0004
272#define L2CAP_CONF_EFS_REJECT 0x0005 279#define L2CAP_CONF_EFS_REJECT 0x0005
273 280
281/* configuration req/rsp continuation flag */
282#define L2CAP_CONF_FLAG_CONTINUATION 0x0001
283
274struct l2cap_conf_opt { 284struct l2cap_conf_opt {
275 __u8 type; 285 __u8 type;
276 __u8 len; 286 __u8 len;
@@ -419,11 +429,6 @@ struct l2cap_seq_list {
419#define L2CAP_SEQ_LIST_CLEAR 0xFFFF 429#define L2CAP_SEQ_LIST_CLEAR 0xFFFF
420#define L2CAP_SEQ_LIST_TAIL 0x8000 430#define L2CAP_SEQ_LIST_TAIL 0x8000
421 431
422struct srej_list {
423 __u16 tx_seq;
424 struct list_head list;
425};
426
427struct l2cap_chan { 432struct l2cap_chan {
428 struct sock *sk; 433 struct sock *sk;
429 434
@@ -475,14 +480,12 @@ struct l2cap_chan {
475 __u16 expected_ack_seq; 480 __u16 expected_ack_seq;
476 __u16 expected_tx_seq; 481 __u16 expected_tx_seq;
477 __u16 buffer_seq; 482 __u16 buffer_seq;
478 __u16 buffer_seq_srej;
479 __u16 srej_save_reqseq; 483 __u16 srej_save_reqseq;
480 __u16 last_acked_seq; 484 __u16 last_acked_seq;
481 __u16 frames_sent; 485 __u16 frames_sent;
482 __u16 unacked_frames; 486 __u16 unacked_frames;
483 __u8 retry_count; 487 __u8 retry_count;
484 __u16 srej_queue_next; 488 __u16 srej_queue_next;
485 __u8 num_acked;
486 __u16 sdu_len; 489 __u16 sdu_len;
487 struct sk_buff *sdu; 490 struct sk_buff *sdu;
488 struct sk_buff *sdu_last_frag; 491 struct sk_buff *sdu_last_frag;
@@ -515,7 +518,6 @@ struct l2cap_chan {
515 struct sk_buff_head srej_q; 518 struct sk_buff_head srej_q;
516 struct l2cap_seq_list srej_list; 519 struct l2cap_seq_list srej_list;
517 struct l2cap_seq_list retrans_list; 520 struct l2cap_seq_list retrans_list;
518 struct list_head srej_l;
519 521
520 struct list_head list; 522 struct list_head list;
521 struct list_head global_l; 523 struct list_head global_l;
@@ -528,10 +530,14 @@ struct l2cap_chan {
528struct l2cap_ops { 530struct l2cap_ops {
529 char *name; 531 char *name;
530 532
531 struct l2cap_chan *(*new_connection) (void *data); 533 struct l2cap_chan *(*new_connection) (struct l2cap_chan *chan);
532 int (*recv) (void *data, struct sk_buff *skb); 534 int (*recv) (struct l2cap_chan * chan,
533 void (*close) (void *data); 535 struct sk_buff *skb);
534 void (*state_change) (void *data, int state); 536 void (*teardown) (struct l2cap_chan *chan, int err);
537 void (*close) (struct l2cap_chan *chan);
538 void (*state_change) (struct l2cap_chan *chan,
539 int state);
540 void (*ready) (struct l2cap_chan *chan);
535 struct sk_buff *(*alloc_skb) (struct l2cap_chan *chan, 541 struct sk_buff *(*alloc_skb) (struct l2cap_chan *chan,
536 unsigned long len, int nb); 542 unsigned long len, int nb);
537}; 543};
@@ -575,6 +581,7 @@ struct l2cap_conn {
575#define L2CAP_CHAN_RAW 1 581#define L2CAP_CHAN_RAW 1
576#define L2CAP_CHAN_CONN_LESS 2 582#define L2CAP_CHAN_CONN_LESS 2
577#define L2CAP_CHAN_CONN_ORIENTED 3 583#define L2CAP_CHAN_CONN_ORIENTED 3
584#define L2CAP_CHAN_CONN_FIX_A2MP 4
578 585
579/* ----- L2CAP socket info ----- */ 586/* ----- L2CAP socket info ----- */
580#define l2cap_pi(sk) ((struct l2cap_pinfo *) sk) 587#define l2cap_pi(sk) ((struct l2cap_pinfo *) sk)
@@ -597,6 +604,7 @@ enum {
597 CONF_EWS_RECV, 604 CONF_EWS_RECV,
598 CONF_LOC_CONF_PEND, 605 CONF_LOC_CONF_PEND,
599 CONF_REM_CONF_PEND, 606 CONF_REM_CONF_PEND,
607 CONF_NOT_COMPLETE,
600}; 608};
601 609
602#define L2CAP_CONF_MAX_CONF_REQ 2 610#define L2CAP_CONF_MAX_CONF_REQ 2
@@ -713,11 +721,7 @@ static inline bool l2cap_clear_timer(struct l2cap_chan *chan,
713 721
714#define __set_chan_timer(c, t) l2cap_set_timer(c, &c->chan_timer, (t)) 722#define __set_chan_timer(c, t) l2cap_set_timer(c, &c->chan_timer, (t))
715#define __clear_chan_timer(c) l2cap_clear_timer(c, &c->chan_timer) 723#define __clear_chan_timer(c) l2cap_clear_timer(c, &c->chan_timer)
716#define __set_retrans_timer(c) l2cap_set_timer(c, &c->retrans_timer, \
717 msecs_to_jiffies(L2CAP_DEFAULT_RETRANS_TO));
718#define __clear_retrans_timer(c) l2cap_clear_timer(c, &c->retrans_timer) 724#define __clear_retrans_timer(c) l2cap_clear_timer(c, &c->retrans_timer)
719#define __set_monitor_timer(c) l2cap_set_timer(c, &c->monitor_timer, \
720 msecs_to_jiffies(L2CAP_DEFAULT_MONITOR_TO));
721#define __clear_monitor_timer(c) l2cap_clear_timer(c, &c->monitor_timer) 725#define __clear_monitor_timer(c) l2cap_clear_timer(c, &c->monitor_timer)
722#define __set_ack_timer(c) l2cap_set_timer(c, &chan->ack_timer, \ 726#define __set_ack_timer(c) l2cap_set_timer(c, &chan->ack_timer, \
723 msecs_to_jiffies(L2CAP_DEFAULT_ACK_TO)); 727 msecs_to_jiffies(L2CAP_DEFAULT_ACK_TO));
@@ -736,173 +740,17 @@ static inline __u16 __next_seq(struct l2cap_chan *chan, __u16 seq)
736 return (seq + 1) % (chan->tx_win_max + 1); 740 return (seq + 1) % (chan->tx_win_max + 1);
737} 741}
738 742
739static inline int l2cap_tx_window_full(struct l2cap_chan *ch) 743static inline struct l2cap_chan *l2cap_chan_no_new_connection(struct l2cap_chan *chan)
740{
741 int sub;
742
743 sub = (ch->next_tx_seq - ch->expected_ack_seq) % 64;
744
745 if (sub < 0)
746 sub += 64;
747
748 return sub == ch->remote_tx_win;
749}
750
751static inline __u16 __get_reqseq(struct l2cap_chan *chan, __u32 ctrl)
752{
753 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
754 return (ctrl & L2CAP_EXT_CTRL_REQSEQ) >>
755 L2CAP_EXT_CTRL_REQSEQ_SHIFT;
756 else
757 return (ctrl & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
758}
759
760static inline __u32 __set_reqseq(struct l2cap_chan *chan, __u32 reqseq)
761{ 744{
762 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) 745 return NULL;
763 return (reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT) &
764 L2CAP_EXT_CTRL_REQSEQ;
765 else
766 return (reqseq << L2CAP_CTRL_REQSEQ_SHIFT) & L2CAP_CTRL_REQSEQ;
767} 746}
768 747
769static inline __u16 __get_txseq(struct l2cap_chan *chan, __u32 ctrl) 748static inline void l2cap_chan_no_teardown(struct l2cap_chan *chan, int err)
770{ 749{
771 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
772 return (ctrl & L2CAP_EXT_CTRL_TXSEQ) >>
773 L2CAP_EXT_CTRL_TXSEQ_SHIFT;
774 else
775 return (ctrl & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
776} 750}
777 751
778static inline __u32 __set_txseq(struct l2cap_chan *chan, __u32 txseq) 752static inline void l2cap_chan_no_ready(struct l2cap_chan *chan)
779{ 753{
780 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
781 return (txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT) &
782 L2CAP_EXT_CTRL_TXSEQ;
783 else
784 return (txseq << L2CAP_CTRL_TXSEQ_SHIFT) & L2CAP_CTRL_TXSEQ;
785}
786
787static inline bool __is_sframe(struct l2cap_chan *chan, __u32 ctrl)
788{
789 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
790 return ctrl & L2CAP_EXT_CTRL_FRAME_TYPE;
791 else
792 return ctrl & L2CAP_CTRL_FRAME_TYPE;
793}
794
795static inline __u32 __set_sframe(struct l2cap_chan *chan)
796{
797 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
798 return L2CAP_EXT_CTRL_FRAME_TYPE;
799 else
800 return L2CAP_CTRL_FRAME_TYPE;
801}
802
803static inline __u8 __get_ctrl_sar(struct l2cap_chan *chan, __u32 ctrl)
804{
805 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
806 return (ctrl & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
807 else
808 return (ctrl & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
809}
810
811static inline __u32 __set_ctrl_sar(struct l2cap_chan *chan, __u32 sar)
812{
813 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
814 return (sar << L2CAP_EXT_CTRL_SAR_SHIFT) & L2CAP_EXT_CTRL_SAR;
815 else
816 return (sar << L2CAP_CTRL_SAR_SHIFT) & L2CAP_CTRL_SAR;
817}
818
819static inline bool __is_sar_start(struct l2cap_chan *chan, __u32 ctrl)
820{
821 return __get_ctrl_sar(chan, ctrl) == L2CAP_SAR_START;
822}
823
824static inline __u32 __get_sar_mask(struct l2cap_chan *chan)
825{
826 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
827 return L2CAP_EXT_CTRL_SAR;
828 else
829 return L2CAP_CTRL_SAR;
830}
831
832static inline __u8 __get_ctrl_super(struct l2cap_chan *chan, __u32 ctrl)
833{
834 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
835 return (ctrl & L2CAP_EXT_CTRL_SUPERVISE) >>
836 L2CAP_EXT_CTRL_SUPER_SHIFT;
837 else
838 return (ctrl & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
839}
840
841static inline __u32 __set_ctrl_super(struct l2cap_chan *chan, __u32 super)
842{
843 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
844 return (super << L2CAP_EXT_CTRL_SUPER_SHIFT) &
845 L2CAP_EXT_CTRL_SUPERVISE;
846 else
847 return (super << L2CAP_CTRL_SUPER_SHIFT) &
848 L2CAP_CTRL_SUPERVISE;
849}
850
851static inline __u32 __set_ctrl_final(struct l2cap_chan *chan)
852{
853 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
854 return L2CAP_EXT_CTRL_FINAL;
855 else
856 return L2CAP_CTRL_FINAL;
857}
858
859static inline bool __is_ctrl_final(struct l2cap_chan *chan, __u32 ctrl)
860{
861 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
862 return ctrl & L2CAP_EXT_CTRL_FINAL;
863 else
864 return ctrl & L2CAP_CTRL_FINAL;
865}
866
867static inline __u32 __set_ctrl_poll(struct l2cap_chan *chan)
868{
869 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
870 return L2CAP_EXT_CTRL_POLL;
871 else
872 return L2CAP_CTRL_POLL;
873}
874
875static inline bool __is_ctrl_poll(struct l2cap_chan *chan, __u32 ctrl)
876{
877 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
878 return ctrl & L2CAP_EXT_CTRL_POLL;
879 else
880 return ctrl & L2CAP_CTRL_POLL;
881}
882
883static inline __u32 __get_control(struct l2cap_chan *chan, void *p)
884{
885 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
886 return get_unaligned_le32(p);
887 else
888 return get_unaligned_le16(p);
889}
890
891static inline void __put_control(struct l2cap_chan *chan, __u32 control,
892 void *p)
893{
894 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
895 return put_unaligned_le32(control, p);
896 else
897 return put_unaligned_le16(control, p);
898}
899
900static inline __u8 __ctrl_size(struct l2cap_chan *chan)
901{
902 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
903 return L2CAP_EXT_HDR_SIZE - L2CAP_HDR_SIZE;
904 else
905 return L2CAP_ENH_HDR_SIZE - L2CAP_HDR_SIZE;
906} 754}
907 755
908extern bool disable_ertm; 756extern bool disable_ertm;
@@ -926,5 +774,8 @@ int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
926void l2cap_chan_busy(struct l2cap_chan *chan, int busy); 774void l2cap_chan_busy(struct l2cap_chan *chan, int busy);
927int l2cap_chan_check_security(struct l2cap_chan *chan); 775int l2cap_chan_check_security(struct l2cap_chan *chan);
928void l2cap_chan_set_defaults(struct l2cap_chan *chan); 776void l2cap_chan_set_defaults(struct l2cap_chan *chan);
777int l2cap_ertm_init(struct l2cap_chan *chan);
778void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan);
779void l2cap_chan_del(struct l2cap_chan *chan, int err);
929 780
930#endif /* __L2CAP_H */ 781#endif /* __L2CAP_H */
diff --git a/include/net/caif/caif_hsi.h b/include/net/caif/caif_hsi.h
index 439dadc8102..bcb9cc3ce98 100644
--- a/include/net/caif/caif_hsi.h
+++ b/include/net/caif/caif_hsi.h
@@ -93,25 +93,25 @@ struct cfhsi_desc {
93#endif 93#endif
94 94
95/* Structure implemented by the CAIF HSI driver. */ 95/* Structure implemented by the CAIF HSI driver. */
96struct cfhsi_drv { 96struct cfhsi_cb_ops {
97 void (*tx_done_cb) (struct cfhsi_drv *drv); 97 void (*tx_done_cb) (struct cfhsi_cb_ops *drv);
98 void (*rx_done_cb) (struct cfhsi_drv *drv); 98 void (*rx_done_cb) (struct cfhsi_cb_ops *drv);
99 void (*wake_up_cb) (struct cfhsi_drv *drv); 99 void (*wake_up_cb) (struct cfhsi_cb_ops *drv);
100 void (*wake_down_cb) (struct cfhsi_drv *drv); 100 void (*wake_down_cb) (struct cfhsi_cb_ops *drv);
101}; 101};
102 102
103/* Structure implemented by HSI device. */ 103/* Structure implemented by HSI device. */
104struct cfhsi_dev { 104struct cfhsi_ops {
105 int (*cfhsi_up) (struct cfhsi_dev *dev); 105 int (*cfhsi_up) (struct cfhsi_ops *dev);
106 int (*cfhsi_down) (struct cfhsi_dev *dev); 106 int (*cfhsi_down) (struct cfhsi_ops *dev);
107 int (*cfhsi_tx) (u8 *ptr, int len, struct cfhsi_dev *dev); 107 int (*cfhsi_tx) (u8 *ptr, int len, struct cfhsi_ops *dev);
108 int (*cfhsi_rx) (u8 *ptr, int len, struct cfhsi_dev *dev); 108 int (*cfhsi_rx) (u8 *ptr, int len, struct cfhsi_ops *dev);
109 int (*cfhsi_wake_up) (struct cfhsi_dev *dev); 109 int (*cfhsi_wake_up) (struct cfhsi_ops *dev);
110 int (*cfhsi_wake_down) (struct cfhsi_dev *dev); 110 int (*cfhsi_wake_down) (struct cfhsi_ops *dev);
111 int (*cfhsi_get_peer_wake) (struct cfhsi_dev *dev, bool *status); 111 int (*cfhsi_get_peer_wake) (struct cfhsi_ops *dev, bool *status);
112 int (*cfhsi_fifo_occupancy)(struct cfhsi_dev *dev, size_t *occupancy); 112 int (*cfhsi_fifo_occupancy) (struct cfhsi_ops *dev, size_t *occupancy);
113 int (*cfhsi_rx_cancel)(struct cfhsi_dev *dev); 113 int (*cfhsi_rx_cancel)(struct cfhsi_ops *dev);
114 struct cfhsi_drv *drv; 114 struct cfhsi_cb_ops *cb_ops;
115}; 115};
116 116
117/* Structure holds status of received CAIF frames processing */ 117/* Structure holds status of received CAIF frames processing */
@@ -132,17 +132,26 @@ enum {
132 CFHSI_PRIO_LAST, 132 CFHSI_PRIO_LAST,
133}; 133};
134 134
135struct cfhsi_config {
136 u32 inactivity_timeout;
137 u32 aggregation_timeout;
138 u32 head_align;
139 u32 tail_align;
140 u32 q_high_mark;
141 u32 q_low_mark;
142};
143
135/* Structure implemented by CAIF HSI drivers. */ 144/* Structure implemented by CAIF HSI drivers. */
136struct cfhsi { 145struct cfhsi {
137 struct caif_dev_common cfdev; 146 struct caif_dev_common cfdev;
138 struct net_device *ndev; 147 struct net_device *ndev;
139 struct platform_device *pdev; 148 struct platform_device *pdev;
140 struct sk_buff_head qhead[CFHSI_PRIO_LAST]; 149 struct sk_buff_head qhead[CFHSI_PRIO_LAST];
141 struct cfhsi_drv drv; 150 struct cfhsi_cb_ops cb_ops;
142 struct cfhsi_dev *dev; 151 struct cfhsi_ops *ops;
143 int tx_state; 152 int tx_state;
144 struct cfhsi_rx_state rx_state; 153 struct cfhsi_rx_state rx_state;
145 unsigned long inactivity_timeout; 154 struct cfhsi_config cfg;
146 int rx_len; 155 int rx_len;
147 u8 *rx_ptr; 156 u8 *rx_ptr;
148 u8 *tx_buf; 157 u8 *tx_buf;
@@ -150,8 +159,6 @@ struct cfhsi {
150 u8 *rx_flip_buf; 159 u8 *rx_flip_buf;
151 spinlock_t lock; 160 spinlock_t lock;
152 int flow_off_sent; 161 int flow_off_sent;
153 u32 q_low_mark;
154 u32 q_high_mark;
155 struct list_head list; 162 struct list_head list;
156 struct work_struct wake_up_work; 163 struct work_struct wake_up_work;
157 struct work_struct wake_down_work; 164 struct work_struct wake_down_work;
@@ -164,13 +171,31 @@ struct cfhsi {
164 struct timer_list rx_slowpath_timer; 171 struct timer_list rx_slowpath_timer;
165 172
166 /* TX aggregation */ 173 /* TX aggregation */
167 unsigned long aggregation_timeout;
168 int aggregation_len; 174 int aggregation_len;
169 struct timer_list aggregation_timer; 175 struct timer_list aggregation_timer;
170 176
171 unsigned long bits; 177 unsigned long bits;
172}; 178};
173
174extern struct platform_driver cfhsi_driver; 179extern struct platform_driver cfhsi_driver;
175 180
181/**
182 * enum ifla_caif_hsi - CAIF HSI NetlinkRT parameters.
183 * @IFLA_CAIF_HSI_INACTIVITY_TOUT: Inactivity timeout before
184 * taking the HSI wakeline down, in milliseconds.
185 * When using RT Netlink to create, destroy or configure a CAIF HSI interface,
186 * enum ifla_caif_hsi is used to specify the configuration attributes.
187 */
188enum ifla_caif_hsi {
189 __IFLA_CAIF_HSI_UNSPEC,
190 __IFLA_CAIF_HSI_INACTIVITY_TOUT,
191 __IFLA_CAIF_HSI_AGGREGATION_TOUT,
192 __IFLA_CAIF_HSI_HEAD_ALIGN,
193 __IFLA_CAIF_HSI_TAIL_ALIGN,
194 __IFLA_CAIF_HSI_QHIGH_WATERMARK,
195 __IFLA_CAIF_HSI_QLOW_WATERMARK,
196 __IFLA_CAIF_HSI_MAX
197};
198
199extern struct cfhsi_ops *cfhsi_get_ops(void);
200
176#endif /* CAIF_HSI_H_ */ 201#endif /* CAIF_HSI_H_ */
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 0289d4ce707..061c01957e5 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -404,6 +404,8 @@ struct cfg80211_beacon_data {
404 * 404 *
405 * Used to configure an AP interface. 405 * Used to configure an AP interface.
406 * 406 *
407 * @channel: the channel to start the AP on
408 * @channel_type: the channel type to use
407 * @beacon: beacon data 409 * @beacon: beacon data
408 * @beacon_interval: beacon interval 410 * @beacon_interval: beacon interval
409 * @dtim_period: DTIM period 411 * @dtim_period: DTIM period
@@ -417,6 +419,9 @@ struct cfg80211_beacon_data {
417 * @inactivity_timeout: time in seconds to determine station's inactivity. 419 * @inactivity_timeout: time in seconds to determine station's inactivity.
418 */ 420 */
419struct cfg80211_ap_settings { 421struct cfg80211_ap_settings {
422 struct ieee80211_channel *channel;
423 enum nl80211_channel_type channel_type;
424
420 struct cfg80211_beacon_data beacon; 425 struct cfg80211_beacon_data beacon;
421 426
422 int beacon_interval, dtim_period; 427 int beacon_interval, dtim_period;
@@ -622,10 +627,10 @@ struct sta_bss_parameters {
622 * @llid: mesh local link id 627 * @llid: mesh local link id
623 * @plid: mesh peer link id 628 * @plid: mesh peer link id
624 * @plink_state: mesh peer link state 629 * @plink_state: mesh peer link state
625 * @signal: the signal strength, type depends on the wiphy's signal_type 630 * @signal: The signal strength, type depends on the wiphy's signal_type.
626 NOTE: For CFG80211_SIGNAL_TYPE_MBM, value is expressed in _dBm_. 631 * For CFG80211_SIGNAL_TYPE_MBM, value is expressed in _dBm_.
627 * @signal_avg: avg signal strength, type depends on the wiphy's signal_type 632 * @signal_avg: Average signal strength, type depends on the wiphy's signal_type.
628 NOTE: For CFG80211_SIGNAL_TYPE_MBM, value is expressed in _dBm_. 633 * For CFG80211_SIGNAL_TYPE_MBM, value is expressed in _dBm_.
629 * @txrate: current unicast bitrate from this station 634 * @txrate: current unicast bitrate from this station
630 * @rxrate: current unicast bitrate to this station 635 * @rxrate: current unicast bitrate to this station
631 * @rx_packets: packets received from this station 636 * @rx_packets: packets received from this station
@@ -785,47 +790,101 @@ struct bss_parameters {
785 int ht_opmode; 790 int ht_opmode;
786}; 791};
787 792
788/* 793/**
789 * struct mesh_config - 802.11s mesh configuration 794 * struct mesh_config - 802.11s mesh configuration
790 * 795 *
791 * These parameters can be changed while the mesh is active. 796 * These parameters can be changed while the mesh is active.
797 *
798 * @dot11MeshRetryTimeout: the initial retry timeout in millisecond units used
799 * by the Mesh Peering Open message
800 * @dot11MeshConfirmTimeout: the initial retry timeout in millisecond units
801 * used by the Mesh Peering Open message
802 * @dot11MeshHoldingTimeout: the confirm timeout in millisecond units used by
803 * the mesh peering management to close a mesh peering
804 * @dot11MeshMaxPeerLinks: the maximum number of peer links allowed on this
805 * mesh interface
806 * @dot11MeshMaxRetries: the maximum number of peer link open retries that can
807 * be sent to establish a new peer link instance in a mesh
808 * @dot11MeshTTL: the value of TTL field set at a source mesh STA
809 * @element_ttl: the value of TTL field set at a mesh STA for path selection
810 * elements
811 * @auto_open_plinks: whether we should automatically open peer links when we
812 * detect compatible mesh peers
813 * @dot11MeshNbrOffsetMaxNeighbor: the maximum number of neighbors to
814 * synchronize to for 11s default synchronization method
815 * @dot11MeshHWMPmaxPREQretries: the number of action frames containing a PREQ
816 * that an originator mesh STA can send to a particular path target
817 * @path_refresh_time: how frequently to refresh mesh paths in milliseconds
818 * @min_discovery_timeout: the minimum length of time to wait until giving up on
819 * a path discovery in milliseconds
820 * @dot11MeshHWMPactivePathTimeout: the time (in TUs) for which mesh STAs
821 * receiving a PREQ shall consider the forwarding information from the
822 * root to be valid. (TU = time unit)
823 * @dot11MeshHWMPpreqMinInterval: the minimum interval of time (in TUs) during
824 * which a mesh STA can send only one action frame containing a PREQ
825 * element
826 * @dot11MeshHWMPperrMinInterval: the minimum interval of time (in TUs) during
827 * which a mesh STA can send only one Action frame containing a PERR
828 * element
829 * @dot11MeshHWMPnetDiameterTraversalTime: the interval of time (in TUs) that
830 * it takes for an HWMP information element to propagate across the mesh
831 * @dot11MeshHWMPRootMode: the configuration of a mesh STA as root mesh STA
832 * @dot11MeshHWMPRannInterval: the interval of time (in TUs) between root
833 * announcements are transmitted
834 * @dot11MeshGateAnnouncementProtocol: whether to advertise that this mesh
835 * station has access to a broader network beyond the MBSS. (This is
836 * missnamed in draft 12.0: dot11MeshGateAnnouncementProtocol set to true
837 * only means that the station will announce others it's a mesh gate, but
838 * not necessarily using the gate announcement protocol. Still keeping the
839 * same nomenclature to be in sync with the spec)
840 * @dot11MeshForwarding: whether the Mesh STA is forwarding or non-forwarding
841 * entity (default is TRUE - forwarding entity)
842 * @rssi_threshold: the threshold for average signal strength of candidate
843 * station to establish a peer link
844 * @ht_opmode: mesh HT protection mode
845 *
846 * @dot11MeshHWMPactivePathToRootTimeout: The time (in TUs) for which mesh STAs
847 * receiving a proactive PREQ shall consider the forwarding information to
848 * the root mesh STA to be valid.
849 *
850 * @dot11MeshHWMProotInterval: The interval of time (in TUs) between proactive
851 * PREQs are transmitted.
852 * @dot11MeshHWMPconfirmationInterval: The minimum interval of time (in TUs)
853 * during which a mesh STA can send only one Action frame containing
854 * a PREQ element for root path confirmation.
792 */ 855 */
793struct mesh_config { 856struct mesh_config {
794 /* Timeouts in ms */
795 /* Mesh plink management parameters */
796 u16 dot11MeshRetryTimeout; 857 u16 dot11MeshRetryTimeout;
797 u16 dot11MeshConfirmTimeout; 858 u16 dot11MeshConfirmTimeout;
798 u16 dot11MeshHoldingTimeout; 859 u16 dot11MeshHoldingTimeout;
799 u16 dot11MeshMaxPeerLinks; 860 u16 dot11MeshMaxPeerLinks;
800 u8 dot11MeshMaxRetries; 861 u8 dot11MeshMaxRetries;
801 u8 dot11MeshTTL; 862 u8 dot11MeshTTL;
802 /* ttl used in path selection information elements */ 863 u8 element_ttl;
803 u8 element_ttl;
804 bool auto_open_plinks; 864 bool auto_open_plinks;
805 /* neighbor offset synchronization */
806 u32 dot11MeshNbrOffsetMaxNeighbor; 865 u32 dot11MeshNbrOffsetMaxNeighbor;
807 /* HWMP parameters */ 866 u8 dot11MeshHWMPmaxPREQretries;
808 u8 dot11MeshHWMPmaxPREQretries;
809 u32 path_refresh_time; 867 u32 path_refresh_time;
810 u16 min_discovery_timeout; 868 u16 min_discovery_timeout;
811 u32 dot11MeshHWMPactivePathTimeout; 869 u32 dot11MeshHWMPactivePathTimeout;
812 u16 dot11MeshHWMPpreqMinInterval; 870 u16 dot11MeshHWMPpreqMinInterval;
813 u16 dot11MeshHWMPperrMinInterval; 871 u16 dot11MeshHWMPperrMinInterval;
814 u16 dot11MeshHWMPnetDiameterTraversalTime; 872 u16 dot11MeshHWMPnetDiameterTraversalTime;
815 u8 dot11MeshHWMPRootMode; 873 u8 dot11MeshHWMPRootMode;
816 u16 dot11MeshHWMPRannInterval; 874 u16 dot11MeshHWMPRannInterval;
817 /* This is missnamed in draft 12.0: dot11MeshGateAnnouncementProtocol 875 bool dot11MeshGateAnnouncementProtocol;
818 * set to true only means that the station will announce others it's a
819 * mesh gate, but not necessarily using the gate announcement protocol.
820 * Still keeping the same nomenclature to be in sync with the spec. */
821 bool dot11MeshGateAnnouncementProtocol;
822 bool dot11MeshForwarding; 876 bool dot11MeshForwarding;
823 s32 rssi_threshold; 877 s32 rssi_threshold;
824 u16 ht_opmode; 878 u16 ht_opmode;
879 u32 dot11MeshHWMPactivePathToRootTimeout;
880 u16 dot11MeshHWMProotInterval;
881 u16 dot11MeshHWMPconfirmationInterval;
825}; 882};
826 883
827/** 884/**
828 * struct mesh_setup - 802.11s mesh setup configuration 885 * struct mesh_setup - 802.11s mesh setup configuration
886 * @channel: the channel to start the mesh network on
887 * @channel_type: the channel type to use
829 * @mesh_id: the mesh ID 888 * @mesh_id: the mesh ID
830 * @mesh_id_len: length of the mesh ID, at least 1 and at most 32 bytes 889 * @mesh_id_len: length of the mesh ID, at least 1 and at most 32 bytes
831 * @sync_method: which synchronization method to use 890 * @sync_method: which synchronization method to use
@@ -840,6 +899,8 @@ struct mesh_config {
840 * These parameters are fixed when the mesh is created. 899 * These parameters are fixed when the mesh is created.
841 */ 900 */
842struct mesh_setup { 901struct mesh_setup {
902 struct ieee80211_channel *channel;
903 enum nl80211_channel_type channel_type;
843 const u8 *mesh_id; 904 const u8 *mesh_id;
844 u8 mesh_id_len; 905 u8 mesh_id_len;
845 u8 sync_method; 906 u8 sync_method;
@@ -966,6 +1027,7 @@ struct cfg80211_match_set {
966 * @wiphy: the wiphy this was for 1027 * @wiphy: the wiphy this was for
967 * @dev: the interface 1028 * @dev: the interface
968 * @channels: channels to scan 1029 * @channels: channels to scan
1030 * @rssi_thold: don't report scan results below this threshold (in s32 dBm)
969 */ 1031 */
970struct cfg80211_sched_scan_request { 1032struct cfg80211_sched_scan_request {
971 struct cfg80211_ssid *ssids; 1033 struct cfg80211_ssid *ssids;
@@ -976,6 +1038,7 @@ struct cfg80211_sched_scan_request {
976 size_t ie_len; 1038 size_t ie_len;
977 struct cfg80211_match_set *match_sets; 1039 struct cfg80211_match_set *match_sets;
978 int n_match_sets; 1040 int n_match_sets;
1041 s32 rssi_thold;
979 1042
980 /* internal */ 1043 /* internal */
981 struct wiphy *wiphy; 1044 struct wiphy *wiphy;
@@ -1411,11 +1474,14 @@ struct cfg80211_gtk_rekey_data {
1411 * 1474 *
1412 * @set_txq_params: Set TX queue parameters 1475 * @set_txq_params: Set TX queue parameters
1413 * 1476 *
1414 * @set_channel: Set channel for a given wireless interface. Some devices 1477 * @libertas_set_mesh_channel: Only for backward compatibility for libertas,
1415 * may support multi-channel operation (by channel hopping) so cfg80211 1478 * as it doesn't implement join_mesh and needs to set the channel to
1416 * doesn't verify much. Note, however, that the passed netdev may be 1479 * join the mesh instead.
1417 * %NULL as well if the user requested changing the channel for the 1480 *
1418 * device itself, or for a monitor interface. 1481 * @set_monitor_channel: Set the monitor mode channel for the device. If other
1482 * interfaces are active this callback should reject the configuration.
1483 * If no interfaces are active or the device is down, the channel should
1484 * be stored for when a monitor interface becomes active.
1419 * @get_channel: Get the current operating channel, should return %NULL if 1485 * @get_channel: Get the current operating channel, should return %NULL if
1420 * there's no single defined operating channel if for example the 1486 * there's no single defined operating channel if for example the
1421 * device implements channel hopping for multi-channel virtual interfaces. 1487 * device implements channel hopping for multi-channel virtual interfaces.
@@ -1605,9 +1671,13 @@ struct cfg80211_ops {
1605 int (*set_txq_params)(struct wiphy *wiphy, struct net_device *dev, 1671 int (*set_txq_params)(struct wiphy *wiphy, struct net_device *dev,
1606 struct ieee80211_txq_params *params); 1672 struct ieee80211_txq_params *params);
1607 1673
1608 int (*set_channel)(struct wiphy *wiphy, struct net_device *dev, 1674 int (*libertas_set_mesh_channel)(struct wiphy *wiphy,
1609 struct ieee80211_channel *chan, 1675 struct net_device *dev,
1610 enum nl80211_channel_type channel_type); 1676 struct ieee80211_channel *chan);
1677
1678 int (*set_monitor_channel)(struct wiphy *wiphy,
1679 struct ieee80211_channel *chan,
1680 enum nl80211_channel_type channel_type);
1611 1681
1612 int (*scan)(struct wiphy *wiphy, struct net_device *dev, 1682 int (*scan)(struct wiphy *wiphy, struct net_device *dev,
1613 struct cfg80211_scan_request *request); 1683 struct cfg80211_scan_request *request);
@@ -2263,7 +2333,10 @@ struct cfg80211_cached_keys;
2263 * @netdev: (private) Used to reference back to the netdev 2333 * @netdev: (private) Used to reference back to the netdev
2264 * @current_bss: (private) Used by the internal configuration code 2334 * @current_bss: (private) Used by the internal configuration code
2265 * @channel: (private) Used by the internal configuration code to track 2335 * @channel: (private) Used by the internal configuration code to track
2266 * user-set AP, monitor and WDS channels for wireless extensions 2336 * the user-set AP, monitor and WDS channel
2337 * @preset_chan: (private) Used by the internal configuration code to
2338 * track the channel to be used for AP later
2339 * @preset_chantype: (private) the corresponding channel type
2267 * @bssid: (private) Used by the internal configuration code 2340 * @bssid: (private) Used by the internal configuration code
2268 * @ssid: (private) Used by the internal configuration code 2341 * @ssid: (private) Used by the internal configuration code
2269 * @ssid_len: (private) Used by the internal configuration code 2342 * @ssid_len: (private) Used by the internal configuration code
@@ -2313,7 +2386,8 @@ struct wireless_dev {
2313 spinlock_t event_lock; 2386 spinlock_t event_lock;
2314 2387
2315 struct cfg80211_internal_bss *current_bss; /* associated / joined */ 2388 struct cfg80211_internal_bss *current_bss; /* associated / joined */
2316 struct ieee80211_channel *channel; 2389 struct ieee80211_channel *preset_chan;
2390 enum nl80211_channel_type preset_chantype;
2317 2391
2318 bool ps; 2392 bool ps;
2319 int ps_timeout; 2393 int ps_timeout;
@@ -3359,11 +3433,14 @@ void cfg80211_report_obss_beacon(struct wiphy *wiphy,
3359 const u8 *frame, size_t len, 3433 const u8 *frame, size_t len,
3360 int freq, int sig_dbm, gfp_t gfp); 3434 int freq, int sig_dbm, gfp_t gfp);
3361 3435
3362/* 3436/**
3363 * cfg80211_can_beacon_sec_chan - test if ht40 on extension channel can be used 3437 * cfg80211_can_beacon_sec_chan - test if ht40 on extension channel can be used
3364 * @wiphy: the wiphy 3438 * @wiphy: the wiphy
3365 * @chan: main channel 3439 * @chan: main channel
3366 * @channel_type: HT mode 3440 * @channel_type: HT mode
3441 *
3442 * This function returns true if there is no secondary channel or the secondary
3443 * channel can be used for beaconing (i.e. is not a radar channel etc.)
3367 */ 3444 */
3368bool cfg80211_can_beacon_sec_chan(struct wiphy *wiphy, 3445bool cfg80211_can_beacon_sec_chan(struct wiphy *wiphy,
3369 struct ieee80211_channel *chan, 3446 struct ieee80211_channel *chan,
diff --git a/include/net/dn_route.h b/include/net/dn_route.h
index c507e05d172..4f7d6a18238 100644
--- a/include/net/dn_route.h
+++ b/include/net/dn_route.h
@@ -67,6 +67,8 @@ extern void dn_rt_cache_flush(int delay);
67struct dn_route { 67struct dn_route {
68 struct dst_entry dst; 68 struct dst_entry dst;
69 69
70 struct neighbour *n;
71
70 struct flowidn fld; 72 struct flowidn fld;
71 73
72 __le16 rt_saddr; 74 __le16 rt_saddr;
diff --git a/include/net/dst.h b/include/net/dst.h
index 8197eadca81..51610468c63 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -42,16 +42,16 @@ struct dst_entry {
42 struct dst_entry *from; 42 struct dst_entry *from;
43 }; 43 };
44 struct dst_entry *path; 44 struct dst_entry *path;
45 struct neighbour __rcu *_neighbour; 45 void *__pad0;
46#ifdef CONFIG_XFRM 46#ifdef CONFIG_XFRM
47 struct xfrm_state *xfrm; 47 struct xfrm_state *xfrm;
48#else 48#else
49 void *__pad1; 49 void *__pad1;
50#endif 50#endif
51 int (*input)(struct sk_buff*); 51 int (*input)(struct sk_buff *);
52 int (*output)(struct sk_buff*); 52 int (*output)(struct sk_buff *);
53 53
54 int flags; 54 unsigned short flags;
55#define DST_HOST 0x0001 55#define DST_HOST 0x0001
56#define DST_NOXFRM 0x0002 56#define DST_NOXFRM 0x0002
57#define DST_NOPOLICY 0x0004 57#define DST_NOPOLICY 0x0004
@@ -62,6 +62,8 @@ struct dst_entry {
62#define DST_FAKE_RTABLE 0x0080 62#define DST_FAKE_RTABLE 0x0080
63#define DST_XFRM_TUNNEL 0x0100 63#define DST_XFRM_TUNNEL 0x0100
64 64
65 unsigned short pending_confirm;
66
65 short error; 67 short error;
66 short obsolete; 68 short obsolete;
67 unsigned short header_len; /* more space at head required */ 69 unsigned short header_len; /* more space at head required */
@@ -94,21 +96,6 @@ struct dst_entry {
94 }; 96 };
95}; 97};
96 98
97static inline struct neighbour *dst_get_neighbour_noref(struct dst_entry *dst)
98{
99 return rcu_dereference(dst->_neighbour);
100}
101
102static inline struct neighbour *dst_get_neighbour_noref_raw(struct dst_entry *dst)
103{
104 return rcu_dereference_raw(dst->_neighbour);
105}
106
107static inline void dst_set_neighbour(struct dst_entry *dst, struct neighbour *neigh)
108{
109 rcu_assign_pointer(dst->_neighbour, neigh);
110}
111
112extern u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old); 99extern u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old);
113extern const u32 dst_default_metrics[RTAX_MAX]; 100extern const u32 dst_default_metrics[RTAX_MAX];
114 101
@@ -222,12 +209,6 @@ static inline unsigned long dst_metric_rtt(const struct dst_entry *dst, int metr
222 return msecs_to_jiffies(dst_metric(dst, metric)); 209 return msecs_to_jiffies(dst_metric(dst, metric));
223} 210}
224 211
225static inline void set_dst_metric_rtt(struct dst_entry *dst, int metric,
226 unsigned long rtt)
227{
228 dst_metric_set(dst, metric, jiffies_to_msecs(rtt));
229}
230
231static inline u32 212static inline u32
232dst_allfrag(const struct dst_entry *dst) 213dst_allfrag(const struct dst_entry *dst)
233{ 214{
@@ -241,7 +222,7 @@ dst_metric_locked(const struct dst_entry *dst, int metric)
241 return dst_metric(dst, RTAX_LOCK) & (1<<metric); 222 return dst_metric(dst, RTAX_LOCK) & (1<<metric);
242} 223}
243 224
244static inline void dst_hold(struct dst_entry * dst) 225static inline void dst_hold(struct dst_entry *dst)
245{ 226{
246 /* 227 /*
247 * If your kernel compilation stops here, please check 228 * If your kernel compilation stops here, please check
@@ -264,8 +245,7 @@ static inline void dst_use_noref(struct dst_entry *dst, unsigned long time)
264 dst->lastuse = time; 245 dst->lastuse = time;
265} 246}
266 247
267static inline 248static inline struct dst_entry *dst_clone(struct dst_entry *dst)
268struct dst_entry * dst_clone(struct dst_entry * dst)
269{ 249{
270 if (dst) 250 if (dst)
271 atomic_inc(&dst->__refcnt); 251 atomic_inc(&dst->__refcnt);
@@ -371,12 +351,13 @@ static inline struct dst_entry *skb_dst_pop(struct sk_buff *skb)
371} 351}
372 352
373extern int dst_discard(struct sk_buff *skb); 353extern int dst_discard(struct sk_buff *skb);
374extern void *dst_alloc(struct dst_ops * ops, struct net_device *dev, 354extern void *dst_alloc(struct dst_ops *ops, struct net_device *dev,
375 int initial_ref, int initial_obsolete, int flags); 355 int initial_ref, int initial_obsolete,
376extern void __dst_free(struct dst_entry * dst); 356 unsigned short flags);
377extern struct dst_entry *dst_destroy(struct dst_entry * dst); 357extern void __dst_free(struct dst_entry *dst);
358extern struct dst_entry *dst_destroy(struct dst_entry *dst);
378 359
379static inline void dst_free(struct dst_entry * dst) 360static inline void dst_free(struct dst_entry *dst)
380{ 361{
381 if (dst->obsolete > 1) 362 if (dst->obsolete > 1)
382 return; 363 return;
@@ -396,19 +377,35 @@ static inline void dst_rcu_free(struct rcu_head *head)
396 377
397static inline void dst_confirm(struct dst_entry *dst) 378static inline void dst_confirm(struct dst_entry *dst)
398{ 379{
399 if (dst) { 380 dst->pending_confirm = 1;
400 struct neighbour *n; 381}
382
383static inline int dst_neigh_output(struct dst_entry *dst, struct neighbour *n,
384 struct sk_buff *skb)
385{
386 struct hh_cache *hh;
401 387
402 rcu_read_lock(); 388 if (unlikely(dst->pending_confirm)) {
403 n = dst_get_neighbour_noref(dst); 389 n->confirmed = jiffies;
404 neigh_confirm(n); 390 dst->pending_confirm = 0;
405 rcu_read_unlock();
406 } 391 }
392
393 hh = &n->hh;
394 if ((n->nud_state & NUD_CONNECTED) && hh->hh_len)
395 return neigh_hh_output(hh, skb);
396 else
397 return n->output(n, skb);
407} 398}
408 399
409static inline struct neighbour *dst_neigh_lookup(const struct dst_entry *dst, const void *daddr) 400static inline struct neighbour *dst_neigh_lookup(const struct dst_entry *dst, const void *daddr)
410{ 401{
411 return dst->ops->neigh_lookup(dst, daddr); 402 return dst->ops->neigh_lookup(dst, NULL, daddr);
403}
404
405static inline struct neighbour *dst_neigh_lookup_skb(const struct dst_entry *dst,
406 struct sk_buff *skb)
407{
408 return dst->ops->neigh_lookup(dst, skb, NULL);
412} 409}
413 410
414static inline void dst_link_failure(struct sk_buff *skb) 411static inline void dst_link_failure(struct sk_buff *skb)
diff --git a/include/net/dst_ops.h b/include/net/dst_ops.h
index 3682a0a076c..4badc86e45d 100644
--- a/include/net/dst_ops.h
+++ b/include/net/dst_ops.h
@@ -26,7 +26,9 @@ struct dst_ops {
26 void (*link_failure)(struct sk_buff *); 26 void (*link_failure)(struct sk_buff *);
27 void (*update_pmtu)(struct dst_entry *dst, u32 mtu); 27 void (*update_pmtu)(struct dst_entry *dst, u32 mtu);
28 int (*local_out)(struct sk_buff *skb); 28 int (*local_out)(struct sk_buff *skb);
29 struct neighbour * (*neigh_lookup)(const struct dst_entry *dst, const void *daddr); 29 struct neighbour * (*neigh_lookup)(const struct dst_entry *dst,
30 struct sk_buff *skb,
31 const void *daddr);
30 32
31 struct kmem_cache *kmem_cachep; 33 struct kmem_cache *kmem_cachep;
32 34
diff --git a/include/net/fib_rules.h b/include/net/fib_rules.h
index 075f1e3a0fe..e361f488242 100644
--- a/include/net/fib_rules.h
+++ b/include/net/fib_rules.h
@@ -52,6 +52,7 @@ struct fib_rules_ops {
52 struct sk_buff *, 52 struct sk_buff *,
53 struct fib_rule_hdr *, 53 struct fib_rule_hdr *,
54 struct nlattr **); 54 struct nlattr **);
55 void (*delete)(struct fib_rule *);
55 int (*compare)(struct fib_rule *, 56 int (*compare)(struct fib_rule *,
56 struct fib_rule_hdr *, 57 struct fib_rule_hdr *,
57 struct nlattr **); 58 struct nlattr **);
diff --git a/include/net/flow.h b/include/net/flow.h
index 6c469dbdb91..ce9cb7656b4 100644
--- a/include/net/flow.h
+++ b/include/net/flow.h
@@ -20,8 +20,8 @@ struct flowi_common {
20 __u8 flowic_proto; 20 __u8 flowic_proto;
21 __u8 flowic_flags; 21 __u8 flowic_flags;
22#define FLOWI_FLAG_ANYSRC 0x01 22#define FLOWI_FLAG_ANYSRC 0x01
23#define FLOWI_FLAG_PRECOW_METRICS 0x02 23#define FLOWI_FLAG_CAN_SLEEP 0x02
24#define FLOWI_FLAG_CAN_SLEEP 0x04 24#define FLOWI_FLAG_RT_NOCACHE 0x04
25 __u32 flowic_secid; 25 __u32 flowic_secid;
26}; 26};
27 27
diff --git a/include/net/genetlink.h b/include/net/genetlink.h
index ccb68880abf..48905cd3884 100644
--- a/include/net/genetlink.h
+++ b/include/net/genetlink.h
@@ -5,6 +5,8 @@
5#include <net/netlink.h> 5#include <net/netlink.h>
6#include <net/net_namespace.h> 6#include <net/net_namespace.h>
7 7
8#define GENLMSG_DEFAULT_SIZE (NLMSG_DEFAULT_SIZE - GENL_HDRLEN)
9
8/** 10/**
9 * struct genl_multicast_group - generic netlink multicast group 11 * struct genl_multicast_group - generic netlink multicast group
10 * @name: name of the multicast group, names are per-family 12 * @name: name of the multicast group, names are per-family
diff --git a/include/net/inet6_connection_sock.h b/include/net/inet6_connection_sock.h
index 1866a676c81..df2a857e853 100644
--- a/include/net/inet6_connection_sock.h
+++ b/include/net/inet6_connection_sock.h
@@ -26,6 +26,7 @@ extern int inet6_csk_bind_conflict(const struct sock *sk,
26 const struct inet_bind_bucket *tb, bool relax); 26 const struct inet_bind_bucket *tb, bool relax);
27 27
28extern struct dst_entry* inet6_csk_route_req(struct sock *sk, 28extern struct dst_entry* inet6_csk_route_req(struct sock *sk,
29 struct flowi6 *fl6,
29 const struct request_sock *req); 30 const struct request_sock *req);
30 31
31extern struct request_sock *inet6_csk_search_req(const struct sock *sk, 32extern struct request_sock *inet6_csk_search_req(const struct sock *sk,
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index 7d83f90f203..291e7cee14e 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -43,7 +43,6 @@ struct inet_connection_sock_af_ops {
43 struct sock *(*syn_recv_sock)(struct sock *sk, struct sk_buff *skb, 43 struct sock *(*syn_recv_sock)(struct sock *sk, struct sk_buff *skb,
44 struct request_sock *req, 44 struct request_sock *req,
45 struct dst_entry *dst); 45 struct dst_entry *dst);
46 struct inet_peer *(*get_peer)(struct sock *sk, bool *release_it);
47 u16 net_header_len; 46 u16 net_header_len;
48 u16 net_frag_header_len; 47 u16 net_frag_header_len;
49 u16 sockaddr_len; 48 u16 sockaddr_len;
@@ -251,7 +250,8 @@ extern int inet_csk_get_port(struct sock *sk, unsigned short snum);
251 250
252extern struct dst_entry* inet_csk_route_req(struct sock *sk, 251extern struct dst_entry* inet_csk_route_req(struct sock *sk,
253 struct flowi4 *fl4, 252 struct flowi4 *fl4,
254 const struct request_sock *req); 253 const struct request_sock *req,
254 bool nocache);
255extern struct dst_entry* inet_csk_route_child_sock(struct sock *sk, 255extern struct dst_entry* inet_csk_route_child_sock(struct sock *sk,
256 struct sock *newsk, 256 struct sock *newsk,
257 const struct request_sock *req); 257 const struct request_sock *req);
diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h
index 808fc5f76b0..54be0287eb9 100644
--- a/include/net/inet_hashtables.h
+++ b/include/net/inet_hashtables.h
@@ -379,10 +379,10 @@ static inline struct sock *__inet_lookup_skb(struct inet_hashinfo *hashinfo,
379 const __be16 sport, 379 const __be16 sport,
380 const __be16 dport) 380 const __be16 dport)
381{ 381{
382 struct sock *sk; 382 struct sock *sk = skb_steal_sock(skb);
383 const struct iphdr *iph = ip_hdr(skb); 383 const struct iphdr *iph = ip_hdr(skb);
384 384
385 if (unlikely(sk = skb_steal_sock(skb))) 385 if (sk)
386 return sk; 386 return sk;
387 else 387 else
388 return __inet_lookup(dev_net(skb_dst(skb)->dev), hashinfo, 388 return __inet_lookup(dev_net(skb_dst(skb)->dev), hashinfo,
diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
index ae17e1352d7..924d7b98ab6 100644
--- a/include/net/inet_sock.h
+++ b/include/net/inet_sock.h
@@ -245,8 +245,6 @@ static inline __u8 inet_sk_flowi_flags(const struct sock *sk)
245 245
246 if (inet_sk(sk)->transparent || inet_sk(sk)->hdrincl) 246 if (inet_sk(sk)->transparent || inet_sk(sk)->hdrincl)
247 flags |= FLOWI_FLAG_ANYSRC; 247 flags |= FLOWI_FLAG_ANYSRC;
248 if (sk->sk_protocol == IPPROTO_TCP)
249 flags |= FLOWI_FLAG_PRECOW_METRICS;
250 return flags; 248 return flags;
251} 249}
252 250
diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
index 2040bff945d..53f464d7cdd 100644
--- a/include/net/inetpeer.h
+++ b/include/net/inetpeer.h
@@ -36,25 +36,19 @@ struct inet_peer {
36 u32 metrics[RTAX_MAX]; 36 u32 metrics[RTAX_MAX];
37 u32 rate_tokens; /* rate limiting for ICMP */ 37 u32 rate_tokens; /* rate limiting for ICMP */
38 unsigned long rate_last; 38 unsigned long rate_last;
39 unsigned long pmtu_expires;
40 u32 pmtu_orig;
41 u32 pmtu_learned;
42 struct inetpeer_addr_base redirect_learned;
43 union { 39 union {
44 struct list_head gc_list; 40 struct list_head gc_list;
45 struct rcu_head gc_rcu; 41 struct rcu_head gc_rcu;
46 }; 42 };
47 /* 43 /*
48 * Once inet_peer is queued for deletion (refcnt == -1), following fields 44 * Once inet_peer is queued for deletion (refcnt == -1), following fields
49 * are not available: rid, ip_id_count, tcp_ts, tcp_ts_stamp 45 * are not available: rid, ip_id_count
50 * We can share memory with rcu_head to help keep inet_peer small. 46 * We can share memory with rcu_head to help keep inet_peer small.
51 */ 47 */
52 union { 48 union {
53 struct { 49 struct {
54 atomic_t rid; /* Frag reception counter */ 50 atomic_t rid; /* Frag reception counter */
55 atomic_t ip_id_count; /* IP ID for the next packet */ 51 atomic_t ip_id_count; /* IP ID for the next packet */
56 __u32 tcp_ts;
57 __u32 tcp_ts_stamp;
58 }; 52 };
59 struct rcu_head rcu; 53 struct rcu_head rcu;
60 struct inet_peer *gc_next; 54 struct inet_peer *gc_next;
@@ -65,6 +59,69 @@ struct inet_peer {
65 atomic_t refcnt; 59 atomic_t refcnt;
66}; 60};
67 61
62struct inet_peer_base {
63 struct inet_peer __rcu *root;
64 seqlock_t lock;
65 u32 flush_seq;
66 int total;
67};
68
69#define INETPEER_BASE_BIT 0x1UL
70
71static inline struct inet_peer *inetpeer_ptr(unsigned long val)
72{
73 BUG_ON(val & INETPEER_BASE_BIT);
74 return (struct inet_peer *) val;
75}
76
77static inline struct inet_peer_base *inetpeer_base_ptr(unsigned long val)
78{
79 if (!(val & INETPEER_BASE_BIT))
80 return NULL;
81 val &= ~INETPEER_BASE_BIT;
82 return (struct inet_peer_base *) val;
83}
84
85static inline bool inetpeer_ptr_is_peer(unsigned long val)
86{
87 return !(val & INETPEER_BASE_BIT);
88}
89
90static inline void __inetpeer_ptr_set_peer(unsigned long *val, struct inet_peer *peer)
91{
92 /* This implicitly clears INETPEER_BASE_BIT */
93 *val = (unsigned long) peer;
94}
95
96static inline bool inetpeer_ptr_set_peer(unsigned long *ptr, struct inet_peer *peer)
97{
98 unsigned long val = (unsigned long) peer;
99 unsigned long orig = *ptr;
100
101 if (!(orig & INETPEER_BASE_BIT) ||
102 cmpxchg(ptr, orig, val) != orig)
103 return false;
104 return true;
105}
106
107static inline void inetpeer_init_ptr(unsigned long *ptr, struct inet_peer_base *base)
108{
109 *ptr = (unsigned long) base | INETPEER_BASE_BIT;
110}
111
112static inline void inetpeer_transfer_peer(unsigned long *to, unsigned long *from)
113{
114 unsigned long val = *from;
115
116 *to = val;
117 if (inetpeer_ptr_is_peer(val)) {
118 struct inet_peer *peer = inetpeer_ptr(val);
119 atomic_inc(&peer->refcnt);
120 }
121}
122
123extern void inet_peer_base_init(struct inet_peer_base *);
124
68void inet_initpeers(void) __init; 125void inet_initpeers(void) __init;
69 126
70#define INETPEER_METRICS_NEW (~(u32) 0) 127#define INETPEER_METRICS_NEW (~(u32) 0)
@@ -75,31 +132,38 @@ static inline bool inet_metrics_new(const struct inet_peer *p)
75} 132}
76 133
77/* can be called with or without local BH being disabled */ 134/* can be called with or without local BH being disabled */
78struct inet_peer *inet_getpeer(const struct inetpeer_addr *daddr, int create); 135struct inet_peer *inet_getpeer(struct inet_peer_base *base,
136 const struct inetpeer_addr *daddr,
137 int create);
79 138
80static inline struct inet_peer *inet_getpeer_v4(__be32 v4daddr, int create) 139static inline struct inet_peer *inet_getpeer_v4(struct inet_peer_base *base,
140 __be32 v4daddr,
141 int create)
81{ 142{
82 struct inetpeer_addr daddr; 143 struct inetpeer_addr daddr;
83 144
84 daddr.addr.a4 = v4daddr; 145 daddr.addr.a4 = v4daddr;
85 daddr.family = AF_INET; 146 daddr.family = AF_INET;
86 return inet_getpeer(&daddr, create); 147 return inet_getpeer(base, &daddr, create);
87} 148}
88 149
89static inline struct inet_peer *inet_getpeer_v6(const struct in6_addr *v6daddr, int create) 150static inline struct inet_peer *inet_getpeer_v6(struct inet_peer_base *base,
151 const struct in6_addr *v6daddr,
152 int create)
90{ 153{
91 struct inetpeer_addr daddr; 154 struct inetpeer_addr daddr;
92 155
93 *(struct in6_addr *)daddr.addr.a6 = *v6daddr; 156 *(struct in6_addr *)daddr.addr.a6 = *v6daddr;
94 daddr.family = AF_INET6; 157 daddr.family = AF_INET6;
95 return inet_getpeer(&daddr, create); 158 return inet_getpeer(base, &daddr, create);
96} 159}
97 160
98/* can be called from BH context or outside */ 161/* can be called from BH context or outside */
99extern void inet_putpeer(struct inet_peer *p); 162extern void inet_putpeer(struct inet_peer *p);
100extern bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout); 163extern bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout);
101 164
102extern void inetpeer_invalidate_tree(int family); 165extern void inetpeer_invalidate_tree(struct inet_peer_base *);
166extern void inetpeer_invalidate_family(int family);
103 167
104/* 168/*
105 * temporary check to make sure we dont access rid, ip_id_count, tcp_ts, 169 * temporary check to make sure we dont access rid, ip_id_count, tcp_ts,
diff --git a/include/net/ip.h b/include/net/ip.h
index 83e0619f59d..ec5cfde85e9 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -158,8 +158,9 @@ static inline __u8 ip_reply_arg_flowi_flags(const struct ip_reply_arg *arg)
158 return (arg->flags & IP_REPLY_ARG_NOSRCCHECK) ? FLOWI_FLAG_ANYSRC : 0; 158 return (arg->flags & IP_REPLY_ARG_NOSRCCHECK) ? FLOWI_FLAG_ANYSRC : 0;
159} 159}
160 160
161void ip_send_reply(struct sock *sk, struct sk_buff *skb, __be32 daddr, 161void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb, __be32 daddr,
162 const struct ip_reply_arg *arg, unsigned int len); 162 __be32 saddr, const struct ip_reply_arg *arg,
163 unsigned int len);
163 164
164struct ipv4_config { 165struct ipv4_config {
165 int log_martians; 166 int log_martians;
@@ -210,6 +211,9 @@ extern int inet_peer_threshold;
210extern int inet_peer_minttl; 211extern int inet_peer_minttl;
211extern int inet_peer_maxttl; 212extern int inet_peer_maxttl;
212 213
214/* From ip_input.c */
215extern int sysctl_ip_early_demux;
216
213/* From ip_output.c */ 217/* From ip_output.c */
214extern int sysctl_ip_dynaddr; 218extern int sysctl_ip_dynaddr;
215 219
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
index 0ae759a6c76..0fedbd8d747 100644
--- a/include/net/ip6_fib.h
+++ b/include/net/ip6_fib.h
@@ -86,6 +86,8 @@ struct fib6_table;
86struct rt6_info { 86struct rt6_info {
87 struct dst_entry dst; 87 struct dst_entry dst;
88 88
89 struct neighbour *n;
90
89 /* 91 /*
90 * Tail elements of dst_entry (__refcnt etc.) 92 * Tail elements of dst_entry (__refcnt etc.)
91 * and these elements (rarely used in hot path) are in 93 * and these elements (rarely used in hot path) are in
@@ -107,7 +109,7 @@ struct rt6_info {
107 u32 rt6i_peer_genid; 109 u32 rt6i_peer_genid;
108 110
109 struct inet6_dev *rt6i_idev; 111 struct inet6_dev *rt6i_idev;
110 struct inet_peer *rt6i_peer; 112 unsigned long _rt6i_peer;
111 113
112#ifdef CONFIG_XFRM 114#ifdef CONFIG_XFRM
113 u32 rt6i_flow_cache_genid; 115 u32 rt6i_flow_cache_genid;
@@ -118,6 +120,36 @@ struct rt6_info {
118 u8 rt6i_protocol; 120 u8 rt6i_protocol;
119}; 121};
120 122
123static inline struct inet_peer *rt6_peer_ptr(struct rt6_info *rt)
124{
125 return inetpeer_ptr(rt->_rt6i_peer);
126}
127
128static inline bool rt6_has_peer(struct rt6_info *rt)
129{
130 return inetpeer_ptr_is_peer(rt->_rt6i_peer);
131}
132
133static inline void __rt6_set_peer(struct rt6_info *rt, struct inet_peer *peer)
134{
135 __inetpeer_ptr_set_peer(&rt->_rt6i_peer, peer);
136}
137
138static inline bool rt6_set_peer(struct rt6_info *rt, struct inet_peer *peer)
139{
140 return inetpeer_ptr_set_peer(&rt->_rt6i_peer, peer);
141}
142
143static inline void rt6_init_peer(struct rt6_info *rt, struct inet_peer_base *base)
144{
145 inetpeer_init_ptr(&rt->_rt6i_peer, base);
146}
147
148static inline void rt6_transfer_peer(struct rt6_info *rt, struct rt6_info *ort)
149{
150 inetpeer_transfer_peer(&rt->_rt6i_peer, &ort->_rt6i_peer);
151}
152
121static inline struct inet6_dev *ip6_dst_idev(struct dst_entry *dst) 153static inline struct inet6_dev *ip6_dst_idev(struct dst_entry *dst)
122{ 154{
123 return ((struct rt6_info *)dst)->rt6i_idev; 155 return ((struct rt6_info *)dst)->rt6i_idev;
@@ -207,6 +239,7 @@ struct fib6_table {
207 u32 tb6_id; 239 u32 tb6_id;
208 rwlock_t tb6_lock; 240 rwlock_t tb6_lock;
209 struct fib6_node tb6_root; 241 struct fib6_node tb6_root;
242 struct inet_peer_base tb6_peers;
210}; 243};
211 244
212#define RT6_TABLE_UNSPEC RT_TABLE_UNSPEC 245#define RT6_TABLE_UNSPEC RT_TABLE_UNSPEC
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index 37c1a1ed82c..58cb3fc3487 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -53,16 +53,25 @@ static inline unsigned int rt6_flags2srcprefs(int flags)
53 return (flags >> 3) & 7; 53 return (flags >> 3) & 7;
54} 54}
55 55
56extern void rt6_bind_peer(struct rt6_info *rt, 56extern void rt6_bind_peer(struct rt6_info *rt, int create);
57 int create); 57
58static inline struct inet_peer *__rt6_get_peer(struct rt6_info *rt, int create)
59{
60 if (rt6_has_peer(rt))
61 return rt6_peer_ptr(rt);
62
63 rt6_bind_peer(rt, create);
64 return (rt6_has_peer(rt) ? rt6_peer_ptr(rt) : NULL);
65}
58 66
59static inline struct inet_peer *rt6_get_peer(struct rt6_info *rt) 67static inline struct inet_peer *rt6_get_peer(struct rt6_info *rt)
60{ 68{
61 if (rt->rt6i_peer) 69 return __rt6_get_peer(rt, 0);
62 return rt->rt6i_peer; 70}
63 71
64 rt6_bind_peer(rt, 0); 72static inline struct inet_peer *rt6_get_peer_create(struct rt6_info *rt)
65 return rt->rt6i_peer; 73{
74 return __rt6_get_peer(rt, 1);
66} 75}
67 76
68extern void ip6_route_input(struct sk_buff *skb); 77extern void ip6_route_input(struct sk_buff *skb);
@@ -131,10 +140,10 @@ extern void rt6_redirect(const struct in6_addr *dest,
131 u8 *lladdr, 140 u8 *lladdr,
132 int on_link); 141 int on_link);
133 142
134extern void rt6_pmtu_discovery(const struct in6_addr *daddr, 143extern void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu,
135 const struct in6_addr *saddr, 144 int oif, u32 mark);
136 struct net_device *dev, 145extern void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk,
137 u32 pmtu); 146 __be32 mtu);
138 147
139struct netlink_callback; 148struct netlink_callback;
140 149
diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h
index fc73e667b50..358fb86f57e 100644
--- a/include/net/ip6_tunnel.h
+++ b/include/net/ip6_tunnel.h
@@ -9,6 +9,8 @@
9#define IP6_TNL_F_CAP_XMIT 0x10000 9#define IP6_TNL_F_CAP_XMIT 0x10000
10/* capable of receiving packets */ 10/* capable of receiving packets */
11#define IP6_TNL_F_CAP_RCV 0x20000 11#define IP6_TNL_F_CAP_RCV 0x20000
12/* determine capability on a per-packet basis */
13#define IP6_TNL_F_CAP_PER_PACKET 0x40000
12 14
13/* IPv6 tunnel */ 15/* IPv6 tunnel */
14 16
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index 78df0866cc3..000c4674e18 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -19,6 +19,7 @@
19#include <net/flow.h> 19#include <net/flow.h>
20#include <linux/seq_file.h> 20#include <linux/seq_file.h>
21#include <net/fib_rules.h> 21#include <net/fib_rules.h>
22#include <net/inetpeer.h>
22 23
23struct fib_config { 24struct fib_config {
24 u8 fc_dst_len; 25 u8 fc_dst_len;
@@ -157,11 +158,12 @@ extern __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
157 FIB_RES_SADDR(net, res)) 158 FIB_RES_SADDR(net, res))
158 159
159struct fib_table { 160struct fib_table {
160 struct hlist_node tb_hlist; 161 struct hlist_node tb_hlist;
161 u32 tb_id; 162 u32 tb_id;
162 int tb_default; 163 int tb_default;
163 int tb_num_default; 164 int tb_num_default;
164 unsigned long tb_data[0]; 165 struct inet_peer_base tb_peers;
166 unsigned long tb_data[0];
165}; 167};
166 168
167extern int fib_table_lookup(struct fib_table *tb, const struct flowi4 *flp, 169extern int fib_table_lookup(struct fib_table *tb, const struct flowi4 *flp,
@@ -218,20 +220,55 @@ extern void __net_exit fib4_rules_exit(struct net *net);
218extern u32 fib_rules_tclass(const struct fib_result *res); 220extern u32 fib_rules_tclass(const struct fib_result *res);
219#endif 221#endif
220 222
221extern int fib_lookup(struct net *n, struct flowi4 *flp, struct fib_result *res);
222
223extern struct fib_table *fib_new_table(struct net *net, u32 id); 223extern struct fib_table *fib_new_table(struct net *net, u32 id);
224extern struct fib_table *fib_get_table(struct net *net, u32 id); 224extern struct fib_table *fib_get_table(struct net *net, u32 id);
225 225
226extern int __fib_lookup(struct net *net, struct flowi4 *flp,
227 struct fib_result *res);
228
229static inline int fib_lookup(struct net *net, struct flowi4 *flp,
230 struct fib_result *res)
231{
232 if (!net->ipv4.fib_has_custom_rules) {
233 res->r = NULL;
234 if (net->ipv4.fib_local &&
235 !fib_table_lookup(net->ipv4.fib_local, flp, res,
236 FIB_LOOKUP_NOREF))
237 return 0;
238 if (net->ipv4.fib_main &&
239 !fib_table_lookup(net->ipv4.fib_main, flp, res,
240 FIB_LOOKUP_NOREF))
241 return 0;
242 if (net->ipv4.fib_default &&
243 !fib_table_lookup(net->ipv4.fib_default, flp, res,
244 FIB_LOOKUP_NOREF))
245 return 0;
246 return -ENETUNREACH;
247 }
248 return __fib_lookup(net, flp, res);
249}
250
226#endif /* CONFIG_IP_MULTIPLE_TABLES */ 251#endif /* CONFIG_IP_MULTIPLE_TABLES */
227 252
228/* Exported by fib_frontend.c */ 253/* Exported by fib_frontend.c */
229extern const struct nla_policy rtm_ipv4_policy[]; 254extern const struct nla_policy rtm_ipv4_policy[];
230extern void ip_fib_init(void); 255extern void ip_fib_init(void);
256extern __be32 fib_compute_spec_dst(struct sk_buff *skb);
231extern int fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst, 257extern int fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
232 u8 tos, int oif, struct net_device *dev, 258 u8 tos, int oif, struct net_device *dev,
233 __be32 *spec_dst, u32 *itag); 259 struct in_device *idev, u32 *itag);
234extern void fib_select_default(struct fib_result *res); 260extern void fib_select_default(struct fib_result *res);
261#ifdef CONFIG_IP_ROUTE_CLASSID
262static inline int fib_num_tclassid_users(struct net *net)
263{
264 return net->ipv4.fib_num_tclassid_users;
265}
266#else
267static inline int fib_num_tclassid_users(struct net *net)
268{
269 return 0;
270}
271#endif
235 272
236/* Exported by fib_semantics.c */ 273/* Exported by fib_semantics.c */
237extern int ip_fib_check_default(__be32 gw, struct net_device *dev); 274extern int ip_fib_check_default(__be32 gw, struct net_device *dev);
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index aecf88436ab..d4261d4d6c4 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -298,14 +298,23 @@ static inline int ipv6_addr_cmp(const struct in6_addr *a1, const struct in6_addr
298 return memcmp(a1, a2, sizeof(struct in6_addr)); 298 return memcmp(a1, a2, sizeof(struct in6_addr));
299} 299}
300 300
301static inline int 301static inline bool
302ipv6_masked_addr_cmp(const struct in6_addr *a1, const struct in6_addr *m, 302ipv6_masked_addr_cmp(const struct in6_addr *a1, const struct in6_addr *m,
303 const struct in6_addr *a2) 303 const struct in6_addr *a2)
304{ 304{
305#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
306 const unsigned long *ul1 = (const unsigned long *)a1;
307 const unsigned long *ulm = (const unsigned long *)m;
308 const unsigned long *ul2 = (const unsigned long *)a2;
309
310 return !!(((ul1[0] ^ ul2[0]) & ulm[0]) |
311 ((ul1[1] ^ ul2[1]) & ulm[1]));
312#else
305 return !!(((a1->s6_addr32[0] ^ a2->s6_addr32[0]) & m->s6_addr32[0]) | 313 return !!(((a1->s6_addr32[0] ^ a2->s6_addr32[0]) & m->s6_addr32[0]) |
306 ((a1->s6_addr32[1] ^ a2->s6_addr32[1]) & m->s6_addr32[1]) | 314 ((a1->s6_addr32[1] ^ a2->s6_addr32[1]) & m->s6_addr32[1]) |
307 ((a1->s6_addr32[2] ^ a2->s6_addr32[2]) & m->s6_addr32[2]) | 315 ((a1->s6_addr32[2] ^ a2->s6_addr32[2]) & m->s6_addr32[2]) |
308 ((a1->s6_addr32[3] ^ a2->s6_addr32[3]) & m->s6_addr32[3])); 316 ((a1->s6_addr32[3] ^ a2->s6_addr32[3]) & m->s6_addr32[3]));
317#endif
309} 318}
310 319
311static inline void ipv6_addr_prefix(struct in6_addr *pfx, 320static inline void ipv6_addr_prefix(struct in6_addr *pfx,
@@ -335,10 +344,17 @@ static inline void ipv6_addr_set(struct in6_addr *addr,
335static inline bool ipv6_addr_equal(const struct in6_addr *a1, 344static inline bool ipv6_addr_equal(const struct in6_addr *a1,
336 const struct in6_addr *a2) 345 const struct in6_addr *a2)
337{ 346{
347#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
348 const unsigned long *ul1 = (const unsigned long *)a1;
349 const unsigned long *ul2 = (const unsigned long *)a2;
350
351 return ((ul1[0] ^ ul2[0]) | (ul1[1] ^ ul2[1])) == 0UL;
352#else
338 return ((a1->s6_addr32[0] ^ a2->s6_addr32[0]) | 353 return ((a1->s6_addr32[0] ^ a2->s6_addr32[0]) |
339 (a1->s6_addr32[1] ^ a2->s6_addr32[1]) | 354 (a1->s6_addr32[1] ^ a2->s6_addr32[1]) |
340 (a1->s6_addr32[2] ^ a2->s6_addr32[2]) | 355 (a1->s6_addr32[2] ^ a2->s6_addr32[2]) |
341 (a1->s6_addr32[3] ^ a2->s6_addr32[3])) == 0; 356 (a1->s6_addr32[3] ^ a2->s6_addr32[3])) == 0;
357#endif
342} 358}
343 359
344static inline bool __ipv6_prefix_equal(const __be32 *a1, const __be32 *a2, 360static inline bool __ipv6_prefix_equal(const __be32 *a1, const __be32 *a2,
@@ -391,8 +407,14 @@ bool ip6_frag_match(struct inet_frag_queue *q, void *a);
391 407
392static inline bool ipv6_addr_any(const struct in6_addr *a) 408static inline bool ipv6_addr_any(const struct in6_addr *a)
393{ 409{
410#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
411 const unsigned long *ul = (const unsigned long *)a;
412
413 return (ul[0] | ul[1]) == 0UL;
414#else
394 return (a->s6_addr32[0] | a->s6_addr32[1] | 415 return (a->s6_addr32[0] | a->s6_addr32[1] |
395 a->s6_addr32[2] | a->s6_addr32[3]) == 0; 416 a->s6_addr32[2] | a->s6_addr32[3]) == 0;
417#endif
396} 418}
397 419
398static inline bool ipv6_addr_loopback(const struct in6_addr *a) 420static inline bool ipv6_addr_loopback(const struct in6_addr *a)
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index 95e39b6a02e..670a58ba8a4 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -1297,6 +1297,10 @@ enum ieee80211_hw_flags {
1297 * reports, by default it is set to _MCS, _GI and _BW but doesn't 1297 * reports, by default it is set to _MCS, _GI and _BW but doesn't
1298 * include _FMT. Use %IEEE80211_RADIOTAP_MCS_HAVE_* values, only 1298 * include _FMT. Use %IEEE80211_RADIOTAP_MCS_HAVE_* values, only
1299 * adding _BW is supported today. 1299 * adding _BW is supported today.
1300 *
1301 * @netdev_features: netdev features to be set in each netdev created
1302 * from this HW. Note only HW checksum features are currently
1303 * compatible with mac80211. Other feature bits will be rejected.
1300 */ 1304 */
1301struct ieee80211_hw { 1305struct ieee80211_hw {
1302 struct ieee80211_conf conf; 1306 struct ieee80211_conf conf;
@@ -1319,6 +1323,7 @@ struct ieee80211_hw {
1319 u8 max_tx_aggregation_subframes; 1323 u8 max_tx_aggregation_subframes;
1320 u8 offchannel_tx_hw_queue; 1324 u8 offchannel_tx_hw_queue;
1321 u8 radiotap_mcs_details; 1325 u8 radiotap_mcs_details;
1326 netdev_features_t netdev_features;
1322}; 1327};
1323 1328
1324/** 1329/**
@@ -2183,7 +2188,10 @@ enum ieee80211_rate_control_changed {
2183 * offload. Frames to transmit on the off-channel channel are transmitted 2188 * offload. Frames to transmit on the off-channel channel are transmitted
2184 * normally except for the %IEEE80211_TX_CTL_TX_OFFCHAN flag. When the 2189 * normally except for the %IEEE80211_TX_CTL_TX_OFFCHAN flag. When the
2185 * duration (which will always be non-zero) expires, the driver must call 2190 * duration (which will always be non-zero) expires, the driver must call
2186 * ieee80211_remain_on_channel_expired(). This callback may sleep. 2191 * ieee80211_remain_on_channel_expired().
2192 * Note that this callback may be called while the device is in IDLE and
2193 * must be accepted in this case.
2194 * This callback may sleep.
2187 * @cancel_remain_on_channel: Requests that an ongoing off-channel period is 2195 * @cancel_remain_on_channel: Requests that an ongoing off-channel period is
2188 * aborted before it expires. This callback may sleep. 2196 * aborted before it expires. This callback may sleep.
2189 * 2197 *
@@ -2246,6 +2254,9 @@ enum ieee80211_rate_control_changed {
2246 * @get_et_strings: Ethtool API to get a set of strings to describe stats 2254 * @get_et_strings: Ethtool API to get a set of strings to describe stats
2247 * and perhaps other supported types of ethtool data-sets. 2255 * and perhaps other supported types of ethtool data-sets.
2248 * 2256 *
2257 * @get_rssi: Get current signal strength in dBm, the function is optional
2258 * and can sleep.
2259 *
2249 */ 2260 */
2250struct ieee80211_ops { 2261struct ieee80211_ops {
2251 void (*tx)(struct ieee80211_hw *hw, struct sk_buff *skb); 2262 void (*tx)(struct ieee80211_hw *hw, struct sk_buff *skb);
@@ -2385,6 +2396,8 @@ struct ieee80211_ops {
2385 void (*get_et_strings)(struct ieee80211_hw *hw, 2396 void (*get_et_strings)(struct ieee80211_hw *hw,
2386 struct ieee80211_vif *vif, 2397 struct ieee80211_vif *vif,
2387 u32 sset, u8 *data); 2398 u32 sset, u8 *data);
2399 int (*get_rssi)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
2400 struct ieee80211_sta *sta, s8 *rssi_dbm);
2388}; 2401};
2389 2402
2390/** 2403/**
@@ -3557,16 +3570,6 @@ void ieee80211_cqm_rssi_notify(struct ieee80211_vif *vif,
3557 gfp_t gfp); 3570 gfp_t gfp);
3558 3571
3559/** 3572/**
3560 * ieee80211_get_operstate - get the operstate of the vif
3561 *
3562 * @vif: &struct ieee80211_vif pointer from the add_interface callback.
3563 *
3564 * The driver might need to know the operstate of the net_device
3565 * (specifically, whether the link is IF_OPER_UP after resume)
3566 */
3567unsigned char ieee80211_get_operstate(struct ieee80211_vif *vif);
3568
3569/**
3570 * ieee80211_chswitch_done - Complete channel switch process 3573 * ieee80211_chswitch_done - Complete channel switch process
3571 * @vif: &struct ieee80211_vif pointer from the add_interface callback. 3574 * @vif: &struct ieee80211_vif pointer from the add_interface callback.
3572 * @success: make the channel switch successful or not 3575 * @success: make the channel switch successful or not
diff --git a/include/net/mac802154.h b/include/net/mac802154.h
index c9f8ab5cc68..d0d11df9cba 100644
--- a/include/net/mac802154.h
+++ b/include/net/mac802154.h
@@ -21,6 +21,14 @@
21 21
22#include <net/af_ieee802154.h> 22#include <net/af_ieee802154.h>
23 23
24/* General MAC frame format:
25 * 2 bytes: Frame Control
26 * 1 byte: Sequence Number
27 * 20 bytes: Addressing fields
28 * 14 bytes: Auxiliary Security Header
29 */
30#define MAC802154_FRAME_HARD_HEADER_LEN (2 + 1 + 20 + 14)
31
24/* The following flags are used to indicate changed address settings from 32/* The following flags are used to indicate changed address settings from
25 * the stack to the hardware. 33 * the stack to the hardware.
26 */ 34 */
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index 6cdfeedb650..344d8988842 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -202,9 +202,16 @@ extern struct neighbour * neigh_lookup(struct neigh_table *tbl,
202extern struct neighbour * neigh_lookup_nodev(struct neigh_table *tbl, 202extern struct neighbour * neigh_lookup_nodev(struct neigh_table *tbl,
203 struct net *net, 203 struct net *net,
204 const void *pkey); 204 const void *pkey);
205extern struct neighbour * neigh_create(struct neigh_table *tbl, 205extern struct neighbour * __neigh_create(struct neigh_table *tbl,
206 const void *pkey,
207 struct net_device *dev,
208 bool want_ref);
209static inline struct neighbour *neigh_create(struct neigh_table *tbl,
206 const void *pkey, 210 const void *pkey,
207 struct net_device *dev); 211 struct net_device *dev)
212{
213 return __neigh_create(tbl, pkey, dev, true);
214}
208extern void neigh_destroy(struct neighbour *neigh); 215extern void neigh_destroy(struct neighbour *neigh);
209extern int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb); 216extern int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb);
210extern int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new, 217extern int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
@@ -302,12 +309,6 @@ static inline struct neighbour * neigh_clone(struct neighbour *neigh)
302 309
303#define neigh_hold(n) atomic_inc(&(n)->refcnt) 310#define neigh_hold(n) atomic_inc(&(n)->refcnt)
304 311
305static inline void neigh_confirm(struct neighbour *neigh)
306{
307 if (neigh)
308 neigh->confirmed = jiffies;
309}
310
311static inline int neigh_event_send(struct neighbour *neigh, struct sk_buff *skb) 312static inline int neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
312{ 313{
313 unsigned long now = jiffies; 314 unsigned long now = jiffies;
@@ -351,15 +352,6 @@ static inline int neigh_hh_output(struct hh_cache *hh, struct sk_buff *skb)
351 return dev_queue_xmit(skb); 352 return dev_queue_xmit(skb);
352} 353}
353 354
354static inline int neigh_output(struct neighbour *n, struct sk_buff *skb)
355{
356 struct hh_cache *hh = &n->hh;
357 if ((n->nud_state & NUD_CONNECTED) && hh->hh_len)
358 return neigh_hh_output(hh, skb);
359 else
360 return n->output(n, skb);
361}
362
363static inline struct neighbour * 355static inline struct neighbour *
364__neigh_lookup(struct neigh_table *tbl, const void *pkey, struct net_device *dev, int creat) 356__neigh_lookup(struct neigh_table *tbl, const void *pkey, struct net_device *dev, int creat)
365{ 357{
diff --git a/include/net/netevent.h b/include/net/netevent.h
index 086f8a5b59d..3ce4988c9c0 100644
--- a/include/net/netevent.h
+++ b/include/net/netevent.h
@@ -12,10 +12,14 @@
12 */ 12 */
13 13
14struct dst_entry; 14struct dst_entry;
15struct neighbour;
15 16
16struct netevent_redirect { 17struct netevent_redirect {
17 struct dst_entry *old; 18 struct dst_entry *old;
19 struct neighbour *old_neigh;
18 struct dst_entry *new; 20 struct dst_entry *new;
21 struct neighbour *new_neigh;
22 const void *daddr;
19}; 23};
20 24
21enum netevent_notif_type { 25enum netevent_notif_type {
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index cce7f6a798b..f1494feba79 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -39,36 +39,6 @@ union nf_conntrack_expect_proto {
39 /* insert expect proto private data here */ 39 /* insert expect proto private data here */
40}; 40};
41 41
42/* Add protocol helper include file here */
43#include <linux/netfilter/nf_conntrack_ftp.h>
44#include <linux/netfilter/nf_conntrack_pptp.h>
45#include <linux/netfilter/nf_conntrack_h323.h>
46#include <linux/netfilter/nf_conntrack_sane.h>
47#include <linux/netfilter/nf_conntrack_sip.h>
48
49/* per conntrack: application helper private data */
50union nf_conntrack_help {
51 /* insert conntrack helper private data (master) here */
52#if defined(CONFIG_NF_CONNTRACK_FTP) || defined(CONFIG_NF_CONNTRACK_FTP_MODULE)
53 struct nf_ct_ftp_master ct_ftp_info;
54#endif
55#if defined(CONFIG_NF_CONNTRACK_PPTP) || \
56 defined(CONFIG_NF_CONNTRACK_PPTP_MODULE)
57 struct nf_ct_pptp_master ct_pptp_info;
58#endif
59#if defined(CONFIG_NF_CONNTRACK_H323) || \
60 defined(CONFIG_NF_CONNTRACK_H323_MODULE)
61 struct nf_ct_h323_master ct_h323_info;
62#endif
63#if defined(CONFIG_NF_CONNTRACK_SANE) || \
64 defined(CONFIG_NF_CONNTRACK_SANE_MODULE)
65 struct nf_ct_sane_master ct_sane_info;
66#endif
67#if defined(CONFIG_NF_CONNTRACK_SIP) || defined(CONFIG_NF_CONNTRACK_SIP_MODULE)
68 struct nf_ct_sip_master ct_sip_info;
69#endif
70};
71
72#include <linux/types.h> 42#include <linux/types.h>
73#include <linux/skbuff.h> 43#include <linux/skbuff.h>
74#include <linux/timer.h> 44#include <linux/timer.h>
@@ -89,12 +59,13 @@ struct nf_conn_help {
89 /* Helper. if any */ 59 /* Helper. if any */
90 struct nf_conntrack_helper __rcu *helper; 60 struct nf_conntrack_helper __rcu *helper;
91 61
92 union nf_conntrack_help help;
93
94 struct hlist_head expectations; 62 struct hlist_head expectations;
95 63
96 /* Current number of expected connections */ 64 /* Current number of expected connections */
97 u8 expecting[NF_CT_MAX_EXPECT_CLASSES]; 65 u8 expecting[NF_CT_MAX_EXPECT_CLASSES];
66
67 /* private helper information. */
68 char data[];
98}; 69};
99 70
100#include <net/netfilter/ipv4/nf_conntrack_ipv4.h> 71#include <net/netfilter/ipv4/nf_conntrack_ipv4.h>
diff --git a/include/net/netfilter/nf_conntrack_core.h b/include/net/netfilter/nf_conntrack_core.h
index aced085132e..d8f5b9f5216 100644
--- a/include/net/netfilter/nf_conntrack_core.h
+++ b/include/net/netfilter/nf_conntrack_core.h
@@ -28,8 +28,8 @@ extern unsigned int nf_conntrack_in(struct net *net,
28extern int nf_conntrack_init(struct net *net); 28extern int nf_conntrack_init(struct net *net);
29extern void nf_conntrack_cleanup(struct net *net); 29extern void nf_conntrack_cleanup(struct net *net);
30 30
31extern int nf_conntrack_proto_init(void); 31extern int nf_conntrack_proto_init(struct net *net);
32extern void nf_conntrack_proto_fini(void); 32extern void nf_conntrack_proto_fini(struct net *net);
33 33
34extern bool 34extern bool
35nf_ct_get_tuple(const struct sk_buff *skb, 35nf_ct_get_tuple(const struct sk_buff *skb,
diff --git a/include/net/netfilter/nf_conntrack_expect.h b/include/net/netfilter/nf_conntrack_expect.h
index 4619caadd9d..983f0026324 100644
--- a/include/net/netfilter/nf_conntrack_expect.h
+++ b/include/net/netfilter/nf_conntrack_expect.h
@@ -59,10 +59,12 @@ static inline struct net *nf_ct_exp_net(struct nf_conntrack_expect *exp)
59 return nf_ct_net(exp->master); 59 return nf_ct_net(exp->master);
60} 60}
61 61
62#define NF_CT_EXP_POLICY_NAME_LEN 16
63
62struct nf_conntrack_expect_policy { 64struct nf_conntrack_expect_policy {
63 unsigned int max_expected; 65 unsigned int max_expected;
64 unsigned int timeout; 66 unsigned int timeout;
65 const char *name; 67 char name[NF_CT_EXP_POLICY_NAME_LEN];
66}; 68};
67 69
68#define NF_CT_EXPECT_CLASS_DEFAULT 0 70#define NF_CT_EXPECT_CLASS_DEFAULT 0
diff --git a/include/net/netfilter/nf_conntrack_extend.h b/include/net/netfilter/nf_conntrack_extend.h
index 96755c3798a..8b4d1fc2909 100644
--- a/include/net/netfilter/nf_conntrack_extend.h
+++ b/include/net/netfilter/nf_conntrack_extend.h
@@ -80,10 +80,13 @@ static inline void nf_ct_ext_free(struct nf_conn *ct)
80} 80}
81 81
82/* Add this type, returns pointer to data or NULL. */ 82/* Add this type, returns pointer to data or NULL. */
83void * 83void *__nf_ct_ext_add_length(struct nf_conn *ct, enum nf_ct_ext_id id,
84__nf_ct_ext_add(struct nf_conn *ct, enum nf_ct_ext_id id, gfp_t gfp); 84 size_t var_alloc_len, gfp_t gfp);
85
85#define nf_ct_ext_add(ct, id, gfp) \ 86#define nf_ct_ext_add(ct, id, gfp) \
86 ((id##_TYPE *)__nf_ct_ext_add((ct), (id), (gfp))) 87 ((id##_TYPE *)__nf_ct_ext_add_length((ct), (id), 0, (gfp)))
88#define nf_ct_ext_add_length(ct, id, len, gfp) \
89 ((id##_TYPE *)__nf_ct_ext_add_length((ct), (id), (len), (gfp)))
87 90
88#define NF_CT_EXT_F_PREALLOC 0x0001 91#define NF_CT_EXT_F_PREALLOC 0x0001
89 92
diff --git a/include/net/netfilter/nf_conntrack_helper.h b/include/net/netfilter/nf_conntrack_helper.h
index 1d1889409b9..9aad956d100 100644
--- a/include/net/netfilter/nf_conntrack_helper.h
+++ b/include/net/netfilter/nf_conntrack_helper.h
@@ -11,18 +11,27 @@
11#define _NF_CONNTRACK_HELPER_H 11#define _NF_CONNTRACK_HELPER_H
12#include <net/netfilter/nf_conntrack.h> 12#include <net/netfilter/nf_conntrack.h>
13#include <net/netfilter/nf_conntrack_extend.h> 13#include <net/netfilter/nf_conntrack_extend.h>
14#include <net/netfilter/nf_conntrack_expect.h>
14 15
15struct module; 16struct module;
16 17
18enum nf_ct_helper_flags {
19 NF_CT_HELPER_F_USERSPACE = (1 << 0),
20 NF_CT_HELPER_F_CONFIGURED = (1 << 1),
21};
22
17#define NF_CT_HELPER_NAME_LEN 16 23#define NF_CT_HELPER_NAME_LEN 16
18 24
19struct nf_conntrack_helper { 25struct nf_conntrack_helper {
20 struct hlist_node hnode; /* Internal use. */ 26 struct hlist_node hnode; /* Internal use. */
21 27
22 const char *name; /* name of the module */ 28 char name[NF_CT_HELPER_NAME_LEN]; /* name of the module */
23 struct module *me; /* pointer to self */ 29 struct module *me; /* pointer to self */
24 const struct nf_conntrack_expect_policy *expect_policy; 30 const struct nf_conntrack_expect_policy *expect_policy;
25 31
32 /* length of internal data, ie. sizeof(struct nf_ct_*_master) */
33 size_t data_len;
34
26 /* Tuple of things we will help (compared against server response) */ 35 /* Tuple of things we will help (compared against server response) */
27 struct nf_conntrack_tuple tuple; 36 struct nf_conntrack_tuple tuple;
28 37
@@ -35,8 +44,12 @@ struct nf_conntrack_helper {
35 44
36 void (*destroy)(struct nf_conn *ct); 45 void (*destroy)(struct nf_conn *ct);
37 46
47 int (*from_nlattr)(struct nlattr *attr, struct nf_conn *ct);
38 int (*to_nlattr)(struct sk_buff *skb, const struct nf_conn *ct); 48 int (*to_nlattr)(struct sk_buff *skb, const struct nf_conn *ct);
39 unsigned int expect_class_max; 49 unsigned int expect_class_max;
50
51 unsigned int flags;
52 unsigned int queue_num; /* For user-space helpers. */
40}; 53};
41 54
42extern struct nf_conntrack_helper * 55extern struct nf_conntrack_helper *
@@ -48,7 +61,7 @@ nf_conntrack_helper_try_module_get(const char *name, u16 l3num, u8 protonum);
48extern int nf_conntrack_helper_register(struct nf_conntrack_helper *); 61extern int nf_conntrack_helper_register(struct nf_conntrack_helper *);
49extern void nf_conntrack_helper_unregister(struct nf_conntrack_helper *); 62extern void nf_conntrack_helper_unregister(struct nf_conntrack_helper *);
50 63
51extern struct nf_conn_help *nf_ct_helper_ext_add(struct nf_conn *ct, gfp_t gfp); 64extern struct nf_conn_help *nf_ct_helper_ext_add(struct nf_conn *ct, struct nf_conntrack_helper *helper, gfp_t gfp);
52 65
53extern int __nf_ct_try_assign_helper(struct nf_conn *ct, struct nf_conn *tmpl, 66extern int __nf_ct_try_assign_helper(struct nf_conn *ct, struct nf_conn *tmpl,
54 gfp_t flags); 67 gfp_t flags);
@@ -60,6 +73,15 @@ static inline struct nf_conn_help *nfct_help(const struct nf_conn *ct)
60 return nf_ct_ext_find(ct, NF_CT_EXT_HELPER); 73 return nf_ct_ext_find(ct, NF_CT_EXT_HELPER);
61} 74}
62 75
76static inline void *nfct_help_data(const struct nf_conn *ct)
77{
78 struct nf_conn_help *help;
79
80 help = nf_ct_ext_find(ct, NF_CT_EXT_HELPER);
81
82 return (void *)help->data;
83}
84
63extern int nf_conntrack_helper_init(struct net *net); 85extern int nf_conntrack_helper_init(struct net *net);
64extern void nf_conntrack_helper_fini(struct net *net); 86extern void nf_conntrack_helper_fini(struct net *net);
65 87
@@ -82,4 +104,7 @@ nf_ct_helper_expectfn_find_by_name(const char *name);
82struct nf_ct_helper_expectfn * 104struct nf_ct_helper_expectfn *
83nf_ct_helper_expectfn_find_by_symbol(const void *symbol); 105nf_ct_helper_expectfn_find_by_symbol(const void *symbol);
84 106
107extern struct hlist_head *nf_ct_helper_hash;
108extern unsigned int nf_ct_helper_hsize;
109
85#endif /*_NF_CONNTRACK_HELPER_H*/ 110#endif /*_NF_CONNTRACK_HELPER_H*/
diff --git a/include/net/netfilter/nf_conntrack_l3proto.h b/include/net/netfilter/nf_conntrack_l3proto.h
index 9699c028b74..6f7c13f4ac0 100644
--- a/include/net/netfilter/nf_conntrack_l3proto.h
+++ b/include/net/netfilter/nf_conntrack_l3proto.h
@@ -64,11 +64,12 @@ struct nf_conntrack_l3proto {
64 size_t nla_size; 64 size_t nla_size;
65 65
66#ifdef CONFIG_SYSCTL 66#ifdef CONFIG_SYSCTL
67 struct ctl_table_header *ctl_table_header;
68 const char *ctl_table_path; 67 const char *ctl_table_path;
69 struct ctl_table *ctl_table;
70#endif /* CONFIG_SYSCTL */ 68#endif /* CONFIG_SYSCTL */
71 69
70 /* Init l3proto pernet data */
71 int (*init_net)(struct net *net);
72
72 /* Module (if any) which this is connected to. */ 73 /* Module (if any) which this is connected to. */
73 struct module *me; 74 struct module *me;
74}; 75};
@@ -76,8 +77,10 @@ struct nf_conntrack_l3proto {
76extern struct nf_conntrack_l3proto __rcu *nf_ct_l3protos[AF_MAX]; 77extern struct nf_conntrack_l3proto __rcu *nf_ct_l3protos[AF_MAX];
77 78
78/* Protocol registration. */ 79/* Protocol registration. */
79extern int nf_conntrack_l3proto_register(struct nf_conntrack_l3proto *proto); 80extern int nf_conntrack_l3proto_register(struct net *net,
80extern void nf_conntrack_l3proto_unregister(struct nf_conntrack_l3proto *proto); 81 struct nf_conntrack_l3proto *proto);
82extern void nf_conntrack_l3proto_unregister(struct net *net,
83 struct nf_conntrack_l3proto *proto);
81extern struct nf_conntrack_l3proto *nf_ct_l3proto_find_get(u_int16_t l3proto); 84extern struct nf_conntrack_l3proto *nf_ct_l3proto_find_get(u_int16_t l3proto);
82extern void nf_ct_l3proto_put(struct nf_conntrack_l3proto *p); 85extern void nf_ct_l3proto_put(struct nf_conntrack_l3proto *p);
83 86
diff --git a/include/net/netfilter/nf_conntrack_l4proto.h b/include/net/netfilter/nf_conntrack_l4proto.h
index 3b572bb20aa..c3be4aef6bf 100644
--- a/include/net/netfilter/nf_conntrack_l4proto.h
+++ b/include/net/netfilter/nf_conntrack_l4proto.h
@@ -12,6 +12,7 @@
12#include <linux/netlink.h> 12#include <linux/netlink.h>
13#include <net/netlink.h> 13#include <net/netlink.h>
14#include <net/netfilter/nf_conntrack.h> 14#include <net/netfilter/nf_conntrack.h>
15#include <net/netns/generic.h>
15 16
16struct seq_file; 17struct seq_file;
17 18
@@ -86,23 +87,21 @@ struct nf_conntrack_l4proto {
86#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) 87#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
87 struct { 88 struct {
88 size_t obj_size; 89 size_t obj_size;
89 int (*nlattr_to_obj)(struct nlattr *tb[], void *data); 90 int (*nlattr_to_obj)(struct nlattr *tb[],
91 struct net *net, void *data);
90 int (*obj_to_nlattr)(struct sk_buff *skb, const void *data); 92 int (*obj_to_nlattr)(struct sk_buff *skb, const void *data);
91 93
92 unsigned int nlattr_max; 94 unsigned int nlattr_max;
93 const struct nla_policy *nla_policy; 95 const struct nla_policy *nla_policy;
94 } ctnl_timeout; 96 } ctnl_timeout;
95#endif 97#endif
98 int *net_id;
99 /* Init l4proto pernet data */
100 int (*init_net)(struct net *net, u_int16_t proto);
101
102 /* Return the per-net protocol part. */
103 struct nf_proto_net *(*get_net_proto)(struct net *net);
96 104
97#ifdef CONFIG_SYSCTL
98 struct ctl_table_header **ctl_table_header;
99 struct ctl_table *ctl_table;
100 unsigned int *ctl_table_users;
101#ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
102 struct ctl_table_header *ctl_compat_table_header;
103 struct ctl_table *ctl_compat_table;
104#endif
105#endif
106 /* Protocol name */ 105 /* Protocol name */
107 const char *name; 106 const char *name;
108 107
@@ -123,8 +122,18 @@ nf_ct_l4proto_find_get(u_int16_t l3proto, u_int8_t l4proto);
123extern void nf_ct_l4proto_put(struct nf_conntrack_l4proto *p); 122extern void nf_ct_l4proto_put(struct nf_conntrack_l4proto *p);
124 123
125/* Protocol registration. */ 124/* Protocol registration. */
126extern int nf_conntrack_l4proto_register(struct nf_conntrack_l4proto *proto); 125extern int nf_conntrack_l4proto_register(struct net *net,
127extern void nf_conntrack_l4proto_unregister(struct nf_conntrack_l4proto *proto); 126 struct nf_conntrack_l4proto *proto);
127extern void nf_conntrack_l4proto_unregister(struct net *net,
128 struct nf_conntrack_l4proto *proto);
129
130static inline void nf_ct_kfree_compat_sysctl_table(struct nf_proto_net *pn)
131{
132#if defined(CONFIG_SYSCTL) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT)
133 kfree(pn->ctl_compat_table);
134 pn->ctl_compat_table = NULL;
135#endif
136}
128 137
129/* Generic netlink helpers */ 138/* Generic netlink helpers */
130extern int nf_ct_port_tuple_to_nlattr(struct sk_buff *skb, 139extern int nf_ct_port_tuple_to_nlattr(struct sk_buff *skb,
diff --git a/include/net/netfilter/nf_nat_helper.h b/include/net/netfilter/nf_nat_helper.h
index 02bb6c29dc3..7d8fb7b46c4 100644
--- a/include/net/netfilter/nf_nat_helper.h
+++ b/include/net/netfilter/nf_nat_helper.h
@@ -54,4 +54,8 @@ extern void nf_nat_follow_master(struct nf_conn *ct,
54extern s16 nf_nat_get_offset(const struct nf_conn *ct, 54extern s16 nf_nat_get_offset(const struct nf_conn *ct,
55 enum ip_conntrack_dir dir, 55 enum ip_conntrack_dir dir,
56 u32 seq); 56 u32 seq);
57
58extern void nf_nat_tcp_seq_adjust(struct sk_buff *skb, struct nf_conn *ct,
59 u32 dir, int off);
60
57#endif 61#endif
diff --git a/include/net/netfilter/nfnetlink_queue.h b/include/net/netfilter/nfnetlink_queue.h
new file mode 100644
index 00000000000..86267a52951
--- /dev/null
+++ b/include/net/netfilter/nfnetlink_queue.h
@@ -0,0 +1,43 @@
1#ifndef _NET_NFNL_QUEUE_H_
2#define _NET_NFNL_QUEUE_H_
3
4#include <linux/netfilter/nf_conntrack_common.h>
5
6struct nf_conn;
7
8#ifdef CONFIG_NETFILTER_NETLINK_QUEUE_CT
9struct nf_conn *nfqnl_ct_get(struct sk_buff *entskb, size_t *size,
10 enum ip_conntrack_info *ctinfo);
11struct nf_conn *nfqnl_ct_parse(const struct sk_buff *skb,
12 const struct nlattr *attr,
13 enum ip_conntrack_info *ctinfo);
14int nfqnl_ct_put(struct sk_buff *skb, struct nf_conn *ct,
15 enum ip_conntrack_info ctinfo);
16void nfqnl_ct_seq_adjust(struct sk_buff *skb, struct nf_conn *ct,
17 enum ip_conntrack_info ctinfo, int diff);
18#else
19inline struct nf_conn *
20nfqnl_ct_get(struct sk_buff *entskb, size_t *size, enum ip_conntrack_info *ctinfo)
21{
22 return NULL;
23}
24
25inline struct nf_conn *nfqnl_ct_parse(const struct sk_buff *skb,
26 const struct nlattr *attr,
27 enum ip_conntrack_info *ctinfo)
28{
29 return NULL;
30}
31
32inline int
33nfqnl_ct_put(struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info ctinfo)
34{
35 return 0;
36}
37
38inline void nfqnl_ct_seq_adjust(struct sk_buff *skb, struct nf_conn *ct,
39 enum ip_conntrack_info ctinfo, int diff)
40{
41}
42#endif /* NF_CONNTRACK */
43#endif
diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h
index a053a19870c..3aecdc7a84f 100644
--- a/include/net/netns/conntrack.h
+++ b/include/net/netns/conntrack.h
@@ -4,10 +4,64 @@
4#include <linux/list.h> 4#include <linux/list.h>
5#include <linux/list_nulls.h> 5#include <linux/list_nulls.h>
6#include <linux/atomic.h> 6#include <linux/atomic.h>
7#include <linux/netfilter/nf_conntrack_tcp.h>
7 8
8struct ctl_table_header; 9struct ctl_table_header;
9struct nf_conntrack_ecache; 10struct nf_conntrack_ecache;
10 11
12struct nf_proto_net {
13#ifdef CONFIG_SYSCTL
14 struct ctl_table_header *ctl_table_header;
15 struct ctl_table *ctl_table;
16#ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
17 struct ctl_table_header *ctl_compat_header;
18 struct ctl_table *ctl_compat_table;
19#endif
20#endif
21 unsigned int users;
22};
23
24struct nf_generic_net {
25 struct nf_proto_net pn;
26 unsigned int timeout;
27};
28
29struct nf_tcp_net {
30 struct nf_proto_net pn;
31 unsigned int timeouts[TCP_CONNTRACK_TIMEOUT_MAX];
32 unsigned int tcp_loose;
33 unsigned int tcp_be_liberal;
34 unsigned int tcp_max_retrans;
35};
36
37enum udp_conntrack {
38 UDP_CT_UNREPLIED,
39 UDP_CT_REPLIED,
40 UDP_CT_MAX
41};
42
43struct nf_udp_net {
44 struct nf_proto_net pn;
45 unsigned int timeouts[UDP_CT_MAX];
46};
47
48struct nf_icmp_net {
49 struct nf_proto_net pn;
50 unsigned int timeout;
51};
52
53struct nf_ip_net {
54 struct nf_generic_net generic;
55 struct nf_tcp_net tcp;
56 struct nf_udp_net udp;
57 struct nf_icmp_net icmp;
58 struct nf_icmp_net icmpv6;
59#if defined(CONFIG_SYSCTL) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT)
60 struct ctl_table_header *ctl_table_header;
61 struct ctl_table *ctl_table;
62#endif
63};
64
11struct netns_ct { 65struct netns_ct {
12 atomic_t count; 66 atomic_t count;
13 unsigned int expect_count; 67 unsigned int expect_count;
@@ -28,6 +82,7 @@ struct netns_ct {
28 unsigned int sysctl_log_invalid; /* Log invalid packets */ 82 unsigned int sysctl_log_invalid; /* Log invalid packets */
29 int sysctl_auto_assign_helper; 83 int sysctl_auto_assign_helper;
30 bool auto_assign_helper_warned; 84 bool auto_assign_helper_warned;
85 struct nf_ip_net nf_ct_proto;
31#ifdef CONFIG_SYSCTL 86#ifdef CONFIG_SYSCTL
32 struct ctl_table_header *sysctl_header; 87 struct ctl_table_header *sysctl_header;
33 struct ctl_table_header *acct_sysctl_header; 88 struct ctl_table_header *acct_sysctl_header;
diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
index bbd023a1c9b..2e089a99d60 100644
--- a/include/net/netns/ipv4.h
+++ b/include/net/netns/ipv4.h
@@ -7,10 +7,12 @@
7 7
8#include <net/inet_frag.h> 8#include <net/inet_frag.h>
9 9
10struct tcpm_hash_bucket;
10struct ctl_table_header; 11struct ctl_table_header;
11struct ipv4_devconf; 12struct ipv4_devconf;
12struct fib_rules_ops; 13struct fib_rules_ops;
13struct hlist_head; 14struct hlist_head;
15struct fib_table;
14struct sock; 16struct sock;
15 17
16struct netns_ipv4 { 18struct netns_ipv4 {
@@ -24,13 +26,22 @@ struct netns_ipv4 {
24 struct ipv4_devconf *devconf_dflt; 26 struct ipv4_devconf *devconf_dflt;
25#ifdef CONFIG_IP_MULTIPLE_TABLES 27#ifdef CONFIG_IP_MULTIPLE_TABLES
26 struct fib_rules_ops *rules_ops; 28 struct fib_rules_ops *rules_ops;
29 bool fib_has_custom_rules;
30 struct fib_table *fib_local;
31 struct fib_table *fib_main;
32 struct fib_table *fib_default;
33#endif
34#ifdef CONFIG_IP_ROUTE_CLASSID
35 int fib_num_tclassid_users;
27#endif 36#endif
28 struct hlist_head *fib_table_hash; 37 struct hlist_head *fib_table_hash;
29 struct sock *fibnl; 38 struct sock *fibnl;
30 39
31 struct sock **icmp_sk; 40 struct sock **icmp_sk;
32 struct sock *tcp_sock; 41 struct sock *tcp_sock;
33 42 struct inet_peer_base *peers;
43 struct tcpm_hash_bucket *tcp_metrics_hash;
44 unsigned int tcp_metrics_hash_mask;
34 struct netns_frags frags; 45 struct netns_frags frags;
35#ifdef CONFIG_NETFILTER 46#ifdef CONFIG_NETFILTER
36 struct xt_table *iptable_filter; 47 struct xt_table *iptable_filter;
diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h
index b42be53587b..df0a5456a3f 100644
--- a/include/net/netns/ipv6.h
+++ b/include/net/netns/ipv6.h
@@ -33,6 +33,7 @@ struct netns_ipv6 {
33 struct netns_sysctl_ipv6 sysctl; 33 struct netns_sysctl_ipv6 sysctl;
34 struct ipv6_devconf *devconf_all; 34 struct ipv6_devconf *devconf_all;
35 struct ipv6_devconf *devconf_dflt; 35 struct ipv6_devconf *devconf_dflt;
36 struct inet_peer_base *peers;
36 struct netns_frags frags; 37 struct netns_frags frags;
37#ifdef CONFIG_NETFILTER 38#ifdef CONFIG_NETFILTER
38 struct xt_table *ip6table_filter; 39 struct xt_table *ip6table_filter;
diff --git a/include/net/nfc/hci.h b/include/net/nfc/hci.h
index 4467c946085..e30e6a86971 100644
--- a/include/net/nfc/hci.h
+++ b/include/net/nfc/hci.h
@@ -31,7 +31,8 @@ struct nfc_hci_ops {
31 void (*close) (struct nfc_hci_dev *hdev); 31 void (*close) (struct nfc_hci_dev *hdev);
32 int (*hci_ready) (struct nfc_hci_dev *hdev); 32 int (*hci_ready) (struct nfc_hci_dev *hdev);
33 int (*xmit) (struct nfc_hci_dev *hdev, struct sk_buff *skb); 33 int (*xmit) (struct nfc_hci_dev *hdev, struct sk_buff *skb);
34 int (*start_poll) (struct nfc_hci_dev *hdev, u32 protocols); 34 int (*start_poll) (struct nfc_hci_dev *hdev,
35 u32 im_protocols, u32 tm_protocols);
35 int (*target_from_gate) (struct nfc_hci_dev *hdev, u8 gate, 36 int (*target_from_gate) (struct nfc_hci_dev *hdev, u8 gate,
36 struct nfc_target *target); 37 struct nfc_target *target);
37 int (*complete_target_discovered) (struct nfc_hci_dev *hdev, u8 gate, 38 int (*complete_target_discovered) (struct nfc_hci_dev *hdev, u8 gate,
diff --git a/include/net/nfc/nfc.h b/include/net/nfc/nfc.h
index b7ca4a2a1d7..180964b954a 100644
--- a/include/net/nfc/nfc.h
+++ b/include/net/nfc/nfc.h
@@ -53,7 +53,8 @@ struct nfc_target;
53struct nfc_ops { 53struct nfc_ops {
54 int (*dev_up)(struct nfc_dev *dev); 54 int (*dev_up)(struct nfc_dev *dev);
55 int (*dev_down)(struct nfc_dev *dev); 55 int (*dev_down)(struct nfc_dev *dev);
56 int (*start_poll)(struct nfc_dev *dev, u32 protocols); 56 int (*start_poll)(struct nfc_dev *dev,
57 u32 im_protocols, u32 tm_protocols);
57 void (*stop_poll)(struct nfc_dev *dev); 58 void (*stop_poll)(struct nfc_dev *dev);
58 int (*dep_link_up)(struct nfc_dev *dev, struct nfc_target *target, 59 int (*dep_link_up)(struct nfc_dev *dev, struct nfc_target *target,
59 u8 comm_mode, u8 *gb, size_t gb_len); 60 u8 comm_mode, u8 *gb, size_t gb_len);
@@ -62,9 +63,10 @@ struct nfc_ops {
62 u32 protocol); 63 u32 protocol);
63 void (*deactivate_target)(struct nfc_dev *dev, 64 void (*deactivate_target)(struct nfc_dev *dev,
64 struct nfc_target *target); 65 struct nfc_target *target);
65 int (*data_exchange)(struct nfc_dev *dev, struct nfc_target *target, 66 int (*im_transceive)(struct nfc_dev *dev, struct nfc_target *target,
66 struct sk_buff *skb, data_exchange_cb_t cb, 67 struct sk_buff *skb, data_exchange_cb_t cb,
67 void *cb_context); 68 void *cb_context);
69 int (*tm_send)(struct nfc_dev *dev, struct sk_buff *skb);
68 int (*check_presence)(struct nfc_dev *dev, struct nfc_target *target); 70 int (*check_presence)(struct nfc_dev *dev, struct nfc_target *target);
69}; 71};
70 72
@@ -99,10 +101,10 @@ struct nfc_dev {
99 int targets_generation; 101 int targets_generation;
100 struct device dev; 102 struct device dev;
101 bool dev_up; 103 bool dev_up;
104 u8 rf_mode;
102 bool polling; 105 bool polling;
103 struct nfc_target *active_target; 106 struct nfc_target *active_target;
104 bool dep_link_up; 107 bool dep_link_up;
105 u32 dep_rf_mode;
106 struct nfc_genl_data genl_data; 108 struct nfc_genl_data genl_data;
107 u32 supported_protocols; 109 u32 supported_protocols;
108 110
@@ -188,6 +190,7 @@ struct sk_buff *nfc_alloc_recv_skb(unsigned int size, gfp_t gfp);
188 190
189int nfc_set_remote_general_bytes(struct nfc_dev *dev, 191int nfc_set_remote_general_bytes(struct nfc_dev *dev,
190 u8 *gt, u8 gt_len); 192 u8 *gt, u8 gt_len);
193u8 *nfc_get_local_general_bytes(struct nfc_dev *dev, size_t *gb_len);
191 194
192int nfc_targets_found(struct nfc_dev *dev, 195int nfc_targets_found(struct nfc_dev *dev,
193 struct nfc_target *targets, int ntargets); 196 struct nfc_target *targets, int ntargets);
@@ -196,4 +199,9 @@ int nfc_target_lost(struct nfc_dev *dev, u32 target_idx);
196int nfc_dep_link_is_up(struct nfc_dev *dev, u32 target_idx, 199int nfc_dep_link_is_up(struct nfc_dev *dev, u32 target_idx,
197 u8 comm_mode, u8 rf_mode); 200 u8 comm_mode, u8 rf_mode);
198 201
202int nfc_tm_activated(struct nfc_dev *dev, u32 protocol, u8 comm_mode,
203 u8 *gb, size_t gb_len);
204int nfc_tm_deactivated(struct nfc_dev *dev);
205int nfc_tm_data_received(struct nfc_dev *dev, struct sk_buff *skb);
206
199#endif /* __NET_NFC_H */ 207#endif /* __NET_NFC_H */
diff --git a/include/net/nfc/shdlc.h b/include/net/nfc/shdlc.h
index ab06afd462d..35e930d2f63 100644
--- a/include/net/nfc/shdlc.h
+++ b/include/net/nfc/shdlc.h
@@ -27,7 +27,8 @@ struct nfc_shdlc_ops {
27 void (*close) (struct nfc_shdlc *shdlc); 27 void (*close) (struct nfc_shdlc *shdlc);
28 int (*hci_ready) (struct nfc_shdlc *shdlc); 28 int (*hci_ready) (struct nfc_shdlc *shdlc);
29 int (*xmit) (struct nfc_shdlc *shdlc, struct sk_buff *skb); 29 int (*xmit) (struct nfc_shdlc *shdlc, struct sk_buff *skb);
30 int (*start_poll) (struct nfc_shdlc *shdlc, u32 protocols); 30 int (*start_poll) (struct nfc_shdlc *shdlc,
31 u32 im_protocols, u32 tm_protocols);
31 int (*target_from_gate) (struct nfc_shdlc *shdlc, u8 gate, 32 int (*target_from_gate) (struct nfc_shdlc *shdlc, u8 gate,
32 struct nfc_target *target); 33 struct nfc_target *target);
33 int (*complete_target_discovered) (struct nfc_shdlc *shdlc, u8 gate, 34 int (*complete_target_discovered) (struct nfc_shdlc *shdlc, u8 gate,
diff --git a/include/net/protocol.h b/include/net/protocol.h
index 875f4895b03..057f2d31556 100644
--- a/include/net/protocol.h
+++ b/include/net/protocol.h
@@ -29,11 +29,15 @@
29#include <linux/ipv6.h> 29#include <linux/ipv6.h>
30#endif 30#endif
31 31
32#define MAX_INET_PROTOS 256 /* Must be a power of 2 */ 32/* This is one larger than the largest protocol value that can be
33 33 * found in an ipv4 or ipv6 header. Since in both cases the protocol
34 * value is presented in a __u8, this is defined to be 256.
35 */
36#define MAX_INET_PROTOS 256
34 37
35/* This is used to register protocols. */ 38/* This is used to register protocols. */
36struct net_protocol { 39struct net_protocol {
40 void (*early_demux)(struct sk_buff *skb);
37 int (*handler)(struct sk_buff *skb); 41 int (*handler)(struct sk_buff *skb);
38 void (*err_handler)(struct sk_buff *skb, u32 info); 42 void (*err_handler)(struct sk_buff *skb, u32 info);
39 int (*gso_send_check)(struct sk_buff *skb); 43 int (*gso_send_check)(struct sk_buff *skb);
diff --git a/include/net/route.h b/include/net/route.h
index 98705468ac0..52362368af0 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -40,7 +40,6 @@
40#define RT_CONN_FLAGS(sk) (RT_TOS(inet_sk(sk)->tos) | sock_flag(sk, SOCK_LOCALROUTE)) 40#define RT_CONN_FLAGS(sk) (RT_TOS(inet_sk(sk)->tos) | sock_flag(sk, SOCK_LOCALROUTE))
41 41
42struct fib_nh; 42struct fib_nh;
43struct inet_peer;
44struct fib_info; 43struct fib_info;
45struct rtable { 44struct rtable {
46 struct dst_entry dst; 45 struct dst_entry dst;
@@ -65,9 +64,7 @@ struct rtable {
65 __be32 rt_gateway; 64 __be32 rt_gateway;
66 65
67 /* Miscellaneous cached information */ 66 /* Miscellaneous cached information */
68 __be32 rt_spec_dst; /* RFC1122 specific destination */ 67 u32 rt_pmtu;
69 u32 rt_peer_genid;
70 struct inet_peer *peer; /* long-living peer info */
71 struct fib_info *fi; /* for client ref to shared metrics */ 68 struct fib_info *fi; /* for client ref to shared metrics */
72}; 69};
73 70
@@ -181,9 +178,10 @@ static inline int ip_route_input_noref(struct sk_buff *skb, __be32 dst, __be32 s
181 return ip_route_input_common(skb, dst, src, tos, devin, true); 178 return ip_route_input_common(skb, dst, src, tos, devin, true);
182} 179}
183 180
184extern unsigned short ip_rt_frag_needed(struct net *net, const struct iphdr *iph, 181extern void ipv4_update_pmtu(struct sk_buff *skb, struct net *net, u32 mtu,
185 unsigned short new_mtu, struct net_device *dev); 182 int oif, u32 mark, u8 protocol, int flow_flags);
186extern void ip_rt_send_redirect(struct sk_buff *skb); 183extern void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu);
184extern void ip_rt_send_redirect(struct sk_buff *skb);
187 185
188extern unsigned int inet_addr_type(struct net *net, __be32 addr); 186extern unsigned int inet_addr_type(struct net *net, __be32 addr);
189extern unsigned int inet_dev_addr_type(struct net *net, const struct net_device *dev, __be32 addr); 187extern unsigned int inet_dev_addr_type(struct net *net, const struct net_device *dev, __be32 addr);
@@ -244,8 +242,6 @@ static inline void ip_route_connect_init(struct flowi4 *fl4, __be32 dst, __be32
244 242
245 if (inet_sk(sk)->transparent) 243 if (inet_sk(sk)->transparent)
246 flow_flags |= FLOWI_FLAG_ANYSRC; 244 flow_flags |= FLOWI_FLAG_ANYSRC;
247 if (protocol == IPPROTO_TCP)
248 flow_flags |= FLOWI_FLAG_PRECOW_METRICS;
249 if (can_sleep) 245 if (can_sleep)
250 flow_flags |= FLOWI_FLAG_CAN_SLEEP; 246 flow_flags |= FLOWI_FLAG_CAN_SLEEP;
251 247
@@ -294,17 +290,6 @@ static inline struct rtable *ip_route_newports(struct flowi4 *fl4, struct rtable
294 return rt; 290 return rt;
295} 291}
296 292
297extern void rt_bind_peer(struct rtable *rt, __be32 daddr, int create);
298
299static inline struct inet_peer *rt_get_peer(struct rtable *rt, __be32 daddr)
300{
301 if (rt->peer)
302 return rt->peer;
303
304 rt_bind_peer(rt, daddr, 0);
305 return rt->peer;
306}
307
308static inline int inet_iif(const struct sk_buff *skb) 293static inline int inet_iif(const struct sk_buff *skb)
309{ 294{
310 return skb_rtable(skb)->rt_iif; 295 return skb_rtable(skb)->rt_iif;
diff --git a/include/net/sock.h b/include/net/sock.h
index 4a452169956..dcb54a0793e 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -198,6 +198,7 @@ struct cg_proto;
198 * @sk_lock: synchronizer 198 * @sk_lock: synchronizer
199 * @sk_rcvbuf: size of receive buffer in bytes 199 * @sk_rcvbuf: size of receive buffer in bytes
200 * @sk_wq: sock wait queue and async head 200 * @sk_wq: sock wait queue and async head
201 * @sk_rx_dst: receive input route used by early tcp demux
201 * @sk_dst_cache: destination cache 202 * @sk_dst_cache: destination cache
202 * @sk_dst_lock: destination cache lock 203 * @sk_dst_lock: destination cache lock
203 * @sk_policy: flow policy 204 * @sk_policy: flow policy
@@ -317,6 +318,7 @@ struct sock {
317 struct xfrm_policy *sk_policy[2]; 318 struct xfrm_policy *sk_policy[2];
318#endif 319#endif
319 unsigned long sk_flags; 320 unsigned long sk_flags;
321 struct dst_entry *sk_rx_dst;
320 struct dst_entry *sk_dst_cache; 322 struct dst_entry *sk_dst_cache;
321 spinlock_t sk_dst_lock; 323 spinlock_t sk_dst_lock;
322 atomic_t sk_wmem_alloc; 324 atomic_t sk_wmem_alloc;
@@ -1426,6 +1428,7 @@ extern struct sk_buff *sock_rmalloc(struct sock *sk,
1426 gfp_t priority); 1428 gfp_t priority);
1427extern void sock_wfree(struct sk_buff *skb); 1429extern void sock_wfree(struct sk_buff *skb);
1428extern void sock_rfree(struct sk_buff *skb); 1430extern void sock_rfree(struct sk_buff *skb);
1431extern void sock_edemux(struct sk_buff *skb);
1429 1432
1430extern int sock_setsockopt(struct socket *sock, int level, 1433extern int sock_setsockopt(struct socket *sock, int level,
1431 int op, char __user *optval, 1434 int op, char __user *optval,
@@ -2152,7 +2155,7 @@ static inline void sk_change_net(struct sock *sk, struct net *net)
2152 2155
2153static inline struct sock *skb_steal_sock(struct sk_buff *skb) 2156static inline struct sock *skb_steal_sock(struct sk_buff *skb)
2154{ 2157{
2155 if (unlikely(skb->sk)) { 2158 if (skb->sk) {
2156 struct sock *sk = skb->sk; 2159 struct sock *sk = skb->sk;
2157 2160
2158 skb->destructor = NULL; 2161 skb->destructor = NULL;
diff --git a/include/net/tcp.h b/include/net/tcp.h
index e79aa48d9fc..3618fefae04 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -325,10 +325,10 @@ extern void tcp_v4_err(struct sk_buff *skb, u32);
325 325
326extern void tcp_shutdown (struct sock *sk, int how); 326extern void tcp_shutdown (struct sock *sk, int how);
327 327
328extern void tcp_v4_early_demux(struct sk_buff *skb);
328extern int tcp_v4_rcv(struct sk_buff *skb); 329extern int tcp_v4_rcv(struct sk_buff *skb);
329 330
330extern struct inet_peer *tcp_v4_get_peer(struct sock *sk, bool *release_it); 331extern struct inet_peer *tcp_v4_get_peer(struct sock *sk);
331extern void *tcp_v4_tw_get_peer(struct sock *sk);
332extern int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw); 332extern int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw);
333extern int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, 333extern int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
334 size_t size); 334 size_t size);
@@ -388,6 +388,13 @@ extern void tcp_enter_frto(struct sock *sk);
388extern void tcp_enter_loss(struct sock *sk, int how); 388extern void tcp_enter_loss(struct sock *sk, int how);
389extern void tcp_clear_retrans(struct tcp_sock *tp); 389extern void tcp_clear_retrans(struct tcp_sock *tp);
390extern void tcp_update_metrics(struct sock *sk); 390extern void tcp_update_metrics(struct sock *sk);
391extern void tcp_init_metrics(struct sock *sk);
392extern void tcp_metrics_init(void);
393extern bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst, bool paws_check);
394extern bool tcp_remember_stamp(struct sock *sk);
395extern bool tcp_tw_remember_stamp(struct inet_timewait_sock *tw);
396extern void tcp_fetch_timewait_stamp(struct sock *sk, struct dst_entry *dst);
397extern void tcp_disable_fack(struct tcp_sock *tp);
391extern void tcp_close(struct sock *sk, long timeout); 398extern void tcp_close(struct sock *sk, long timeout);
392extern void tcp_init_sock(struct sock *sk); 399extern void tcp_init_sock(struct sock *sk);
393extern unsigned int tcp_poll(struct file * file, struct socket *sock, 400extern unsigned int tcp_poll(struct file * file, struct socket *sock,
@@ -556,6 +563,8 @@ static inline u32 __tcp_set_rto(const struct tcp_sock *tp)
556 return (tp->srtt >> 3) + tp->rttvar; 563 return (tp->srtt >> 3) + tp->rttvar;
557} 564}
558 565
566extern void tcp_set_rto(struct sock *sk);
567
559static inline void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd) 568static inline void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd)
560{ 569{
561 tp->pred_flags = htonl((tp->tcp_header_len << 26) | 570 tp->pred_flags = htonl((tp->tcp_header_len << 26) |
diff --git a/include/net/timewait_sock.h b/include/net/timewait_sock.h
index 8d6689cb2c6..68f0ecad6c6 100644
--- a/include/net/timewait_sock.h
+++ b/include/net/timewait_sock.h
@@ -22,7 +22,6 @@ struct timewait_sock_ops {
22 int (*twsk_unique)(struct sock *sk, 22 int (*twsk_unique)(struct sock *sk,
23 struct sock *sktw, void *twp); 23 struct sock *sktw, void *twp);
24 void (*twsk_destructor)(struct sock *sk); 24 void (*twsk_destructor)(struct sock *sk);
25 void *(*twsk_getpeer)(struct sock *sk);
26}; 25};
27 26
28static inline int twsk_unique(struct sock *sk, struct sock *sktw, void *twp) 27static inline int twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
@@ -41,11 +40,4 @@ static inline void twsk_destructor(struct sock *sk)
41 sk->sk_prot->twsk_prot->twsk_destructor(sk); 40 sk->sk_prot->twsk_prot->twsk_destructor(sk);
42} 41}
43 42
44static inline void *twsk_getpeer(struct sock *sk)
45{
46 if (sk->sk_prot->twsk_prot->twsk_getpeer)
47 return sk->sk_prot->twsk_prot->twsk_getpeer(sk);
48 return NULL;
49}
50
51#endif /* _TIMEWAIT_SOCK_H */ 43#endif /* _TIMEWAIT_SOCK_H */
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index e0a55df5bde..17acbc92476 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -1682,13 +1682,11 @@ static inline int xfrm_mark_get(struct nlattr **attrs, struct xfrm_mark *m)
1682 1682
1683static inline int xfrm_mark_put(struct sk_buff *skb, const struct xfrm_mark *m) 1683static inline int xfrm_mark_put(struct sk_buff *skb, const struct xfrm_mark *m)
1684{ 1684{
1685 if ((m->m | m->v) && 1685 int ret = 0;
1686 nla_put(skb, XFRMA_MARK, sizeof(struct xfrm_mark), m))
1687 goto nla_put_failure;
1688 return 0;
1689 1686
1690nla_put_failure: 1687 if (m->m | m->v)
1691 return -1; 1688 ret = nla_put(skb, XFRMA_MARK, sizeof(struct xfrm_mark), m);
1689 return ret;
1692} 1690}
1693 1691
1694#endif /* _NET_XFRM_H */ 1692#endif /* _NET_XFRM_H */
diff --git a/kernel/audit.c b/kernel/audit.c
index 1c7f2c61416..4a3f28d2ca6 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -384,7 +384,7 @@ static void audit_hold_skb(struct sk_buff *skb)
384static void audit_printk_skb(struct sk_buff *skb) 384static void audit_printk_skb(struct sk_buff *skb)
385{ 385{
386 struct nlmsghdr *nlh = nlmsg_hdr(skb); 386 struct nlmsghdr *nlh = nlmsg_hdr(skb);
387 char *data = NLMSG_DATA(nlh); 387 char *data = nlmsg_data(nlh);
388 388
389 if (nlh->nlmsg_type != AUDIT_EOE) { 389 if (nlh->nlmsg_type != AUDIT_EOE) {
390 if (printk_ratelimit()) 390 if (printk_ratelimit())
@@ -516,14 +516,15 @@ struct sk_buff *audit_make_reply(int pid, int seq, int type, int done,
516 if (!skb) 516 if (!skb)
517 return NULL; 517 return NULL;
518 518
519 nlh = NLMSG_NEW(skb, pid, seq, t, size, flags); 519 nlh = nlmsg_put(skb, pid, seq, t, size, flags);
520 data = NLMSG_DATA(nlh); 520 if (!nlh)
521 goto out_kfree_skb;
522 data = nlmsg_data(nlh);
521 memcpy(data, payload, size); 523 memcpy(data, payload, size);
522 return skb; 524 return skb;
523 525
524nlmsg_failure: /* Used by NLMSG_NEW */ 526out_kfree_skb:
525 if (skb) 527 kfree_skb(skb);
526 kfree_skb(skb);
527 return NULL; 528 return NULL;
528} 529}
529 530
@@ -680,7 +681,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
680 sessionid = audit_get_sessionid(current); 681 sessionid = audit_get_sessionid(current);
681 security_task_getsecid(current, &sid); 682 security_task_getsecid(current, &sid);
682 seq = nlh->nlmsg_seq; 683 seq = nlh->nlmsg_seq;
683 data = NLMSG_DATA(nlh); 684 data = nlmsg_data(nlh);
684 685
685 switch (msg_type) { 686 switch (msg_type) {
686 case AUDIT_GET: 687 case AUDIT_GET:
@@ -961,14 +962,17 @@ static void audit_receive(struct sk_buff *skb)
961static int __init audit_init(void) 962static int __init audit_init(void)
962{ 963{
963 int i; 964 int i;
965 struct netlink_kernel_cfg cfg = {
966 .input = audit_receive,
967 };
964 968
965 if (audit_initialized == AUDIT_DISABLED) 969 if (audit_initialized == AUDIT_DISABLED)
966 return 0; 970 return 0;
967 971
968 printk(KERN_INFO "audit: initializing netlink socket (%s)\n", 972 printk(KERN_INFO "audit: initializing netlink socket (%s)\n",
969 audit_default ? "enabled" : "disabled"); 973 audit_default ? "enabled" : "disabled");
970 audit_sock = netlink_kernel_create(&init_net, NETLINK_AUDIT, 0, 974 audit_sock = netlink_kernel_create(&init_net, NETLINK_AUDIT,
971 audit_receive, NULL, THIS_MODULE); 975 THIS_MODULE, &cfg);
972 if (!audit_sock) 976 if (!audit_sock)
973 audit_panic("cannot initialize netlink socket"); 977 audit_panic("cannot initialize netlink socket");
974 else 978 else
@@ -1060,13 +1064,15 @@ static struct audit_buffer * audit_buffer_alloc(struct audit_context *ctx,
1060 1064
1061 ab->skb = nlmsg_new(AUDIT_BUFSIZ, gfp_mask); 1065 ab->skb = nlmsg_new(AUDIT_BUFSIZ, gfp_mask);
1062 if (!ab->skb) 1066 if (!ab->skb)
1063 goto nlmsg_failure; 1067 goto err;
1064 1068
1065 nlh = NLMSG_NEW(ab->skb, 0, 0, type, 0, 0); 1069 nlh = nlmsg_put(ab->skb, 0, 0, type, 0, 0);
1070 if (!nlh)
1071 goto out_kfree_skb;
1066 1072
1067 return ab; 1073 return ab;
1068 1074
1069nlmsg_failure: /* Used by NLMSG_NEW */ 1075out_kfree_skb:
1070 kfree_skb(ab->skb); 1076 kfree_skb(ab->skb);
1071 ab->skb = NULL; 1077 ab->skb = NULL;
1072err: 1078err:
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index 1a91efa6d12..0401d2916d9 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -373,13 +373,16 @@ EXPORT_SYMBOL_GPL(add_uevent_var);
373static int uevent_net_init(struct net *net) 373static int uevent_net_init(struct net *net)
374{ 374{
375 struct uevent_sock *ue_sk; 375 struct uevent_sock *ue_sk;
376 struct netlink_kernel_cfg cfg = {
377 .groups = 1,
378 };
376 379
377 ue_sk = kzalloc(sizeof(*ue_sk), GFP_KERNEL); 380 ue_sk = kzalloc(sizeof(*ue_sk), GFP_KERNEL);
378 if (!ue_sk) 381 if (!ue_sk)
379 return -ENOMEM; 382 return -ENOMEM;
380 383
381 ue_sk->sk = netlink_kernel_create(net, NETLINK_KOBJECT_UEVENT, 384 ue_sk->sk = netlink_kernel_create(net, NETLINK_KOBJECT_UEVENT,
382 1, NULL, NULL, THIS_MODULE); 385 THIS_MODULE, &cfg);
383 if (!ue_sk->sk) { 386 if (!ue_sk->sk) {
384 printk(KERN_ERR 387 printk(KERN_ERR
385 "kobject_uevent: unable to create netlink socket!\n"); 388 "kobject_uevent: unable to create netlink socket!\n");
diff --git a/net/9p/client.c b/net/9p/client.c
index a170893d70e..8260f132b32 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -1548,7 +1548,7 @@ p9_client_read(struct p9_fid *fid, char *data, char __user *udata, u64 offset,
1548 kernel_buf = 1; 1548 kernel_buf = 1;
1549 indata = data; 1549 indata = data;
1550 } else 1550 } else
1551 indata = (char *)udata; 1551 indata = (__force char *)udata;
1552 /* 1552 /*
1553 * response header len is 11 1553 * response header len is 11
1554 * PDU Header(7) + IO Size (4) 1554 * PDU Header(7) + IO Size (4)
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index 2a167658bb9..35b8911b1c8 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -212,7 +212,7 @@ static int p9_virtio_cancel(struct p9_client *client, struct p9_req_t *req)
212 * this takes a list of pages. 212 * this takes a list of pages.
213 * @sg: scatter/gather list to pack into 213 * @sg: scatter/gather list to pack into
214 * @start: which segment of the sg_list to start at 214 * @start: which segment of the sg_list to start at
215 * @**pdata: a list of pages to add into sg. 215 * @pdata: a list of pages to add into sg.
216 * @nr_pages: number of pages to pack into the scatter/gather list 216 * @nr_pages: number of pages to pack into the scatter/gather list
217 * @data: data to pack into scatter/gather list 217 * @data: data to pack into scatter/gather list
218 * @count: amount of data to pack into the scatter/gather list 218 * @count: amount of data to pack into the scatter/gather list
diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
index 86852963b7f..33475291c9c 100644
--- a/net/appletalk/ddp.c
+++ b/net/appletalk/ddp.c
@@ -129,8 +129,8 @@ found:
129 129
130/** 130/**
131 * atalk_find_or_insert_socket - Try to find a socket matching ADDR 131 * atalk_find_or_insert_socket - Try to find a socket matching ADDR
132 * @sk - socket to insert in the list if it is not there already 132 * @sk: socket to insert in the list if it is not there already
133 * @sat - address to search for 133 * @sat: address to search for
134 * 134 *
135 * Try to find a socket matching ADDR in the socket list, if found then return 135 * Try to find a socket matching ADDR in the socket list, if found then return
136 * it. If not, insert SK into the socket list. 136 * it. If not, insert SK into the socket list.
@@ -1066,8 +1066,8 @@ static int atalk_release(struct socket *sock)
1066 1066
1067/** 1067/**
1068 * atalk_pick_and_bind_port - Pick a source port when one is not given 1068 * atalk_pick_and_bind_port - Pick a source port when one is not given
1069 * @sk - socket to insert into the tables 1069 * @sk: socket to insert into the tables
1070 * @sat - address to search for 1070 * @sat: address to search for
1071 * 1071 *
1072 * Pick a source port when one is not given. If we can find a suitable free 1072 * Pick a source port when one is not given. If we can find a suitable free
1073 * one, we insert the socket into the tables using it. 1073 * one, we insert the socket into the tables using it.
diff --git a/net/atm/lec.c b/net/atm/lec.c
index a7d172105c9..2e3d942e77f 100644
--- a/net/atm/lec.c
+++ b/net/atm/lec.c
@@ -231,9 +231,11 @@ static netdev_tx_t lec_start_xmit(struct sk_buff *skb,
231 if (skb_headroom(skb) < 2) { 231 if (skb_headroom(skb) < 2) {
232 pr_debug("reallocating skb\n"); 232 pr_debug("reallocating skb\n");
233 skb2 = skb_realloc_headroom(skb, LEC_HEADER_LEN); 233 skb2 = skb_realloc_headroom(skb, LEC_HEADER_LEN);
234 kfree_skb(skb); 234 if (unlikely(!skb2)) {
235 if (skb2 == NULL) 235 kfree_skb(skb);
236 return NETDEV_TX_OK; 236 return NETDEV_TX_OK;
237 }
238 consume_skb(skb);
237 skb = skb2; 239 skb = skb2;
238 } 240 }
239 skb_push(skb, 2); 241 skb_push(skb, 2);
@@ -1602,7 +1604,7 @@ static void lec_arp_expire_vcc(unsigned long data)
1602{ 1604{
1603 unsigned long flags; 1605 unsigned long flags;
1604 struct lec_arp_table *to_remove = (struct lec_arp_table *)data; 1606 struct lec_arp_table *to_remove = (struct lec_arp_table *)data;
1605 struct lec_priv *priv = (struct lec_priv *)to_remove->priv; 1607 struct lec_priv *priv = to_remove->priv;
1606 1608
1607 del_timer(&to_remove->timer); 1609 del_timer(&to_remove->timer);
1608 1610
diff --git a/net/atm/pppoatm.c b/net/atm/pppoatm.c
index ce1e59fdae7..226dca98944 100644
--- a/net/atm/pppoatm.c
+++ b/net/atm/pppoatm.c
@@ -283,7 +283,7 @@ static int pppoatm_send(struct ppp_channel *chan, struct sk_buff *skb)
283 kfree_skb(n); 283 kfree_skb(n);
284 goto nospace; 284 goto nospace;
285 } 285 }
286 kfree_skb(skb); 286 consume_skb(skb);
287 skb = n; 287 skb = n;
288 if (skb == NULL) 288 if (skb == NULL)
289 return DROP_PACKET; 289 return DROP_PACKET;
diff --git a/net/ax25/ax25_addr.c b/net/ax25/ax25_addr.c
index 9162409559c..e7c9b0ea17a 100644
--- a/net/ax25/ax25_addr.c
+++ b/net/ax25/ax25_addr.c
@@ -189,8 +189,10 @@ const unsigned char *ax25_addr_parse(const unsigned char *buf, int len,
189 digi->ndigi = 0; 189 digi->ndigi = 0;
190 190
191 while (!(buf[-1] & AX25_EBIT)) { 191 while (!(buf[-1] & AX25_EBIT)) {
192 if (d >= AX25_MAX_DIGIS) return NULL; /* Max of 6 digis */ 192 if (d >= AX25_MAX_DIGIS)
193 if (len < 7) return NULL; /* Short packet */ 193 return NULL;
194 if (len < AX25_ADDR_LEN)
195 return NULL;
194 196
195 memcpy(&digi->calls[d], buf, AX25_ADDR_LEN); 197 memcpy(&digi->calls[d], buf, AX25_ADDR_LEN);
196 digi->ndigi = d + 1; 198 digi->ndigi = d + 1;
diff --git a/net/ax25/ax25_out.c b/net/ax25/ax25_out.c
index be8a25e0db6..be2acab9be9 100644
--- a/net/ax25/ax25_out.c
+++ b/net/ax25/ax25_out.c
@@ -350,7 +350,7 @@ void ax25_transmit_buffer(ax25_cb *ax25, struct sk_buff *skb, int type)
350 if (skb->sk != NULL) 350 if (skb->sk != NULL)
351 skb_set_owner_w(skbn, skb->sk); 351 skb_set_owner_w(skbn, skb->sk);
352 352
353 kfree_skb(skb); 353 consume_skb(skb);
354 skb = skbn; 354 skb = skbn;
355 } 355 }
356 356
diff --git a/net/ax25/ax25_route.c b/net/ax25/ax25_route.c
index a65588040b9..d39097737e3 100644
--- a/net/ax25/ax25_route.c
+++ b/net/ax25/ax25_route.c
@@ -474,7 +474,7 @@ struct sk_buff *ax25_rt_build_path(struct sk_buff *skb, ax25_address *src,
474 if (skb->sk != NULL) 474 if (skb->sk != NULL)
475 skb_set_owner_w(skbn, skb->sk); 475 skb_set_owner_w(skbn, skb->sk);
476 476
477 kfree_skb(skb); 477 consume_skb(skb);
478 478
479 skb = skbn; 479 skb = skbn;
480 } 480 }
diff --git a/net/batman-adv/Makefile b/net/batman-adv/Makefile
index 6d5c1940667..8676d2b1d57 100644
--- a/net/batman-adv/Makefile
+++ b/net/batman-adv/Makefile
@@ -19,11 +19,10 @@
19# 19#
20 20
21obj-$(CONFIG_BATMAN_ADV) += batman-adv.o 21obj-$(CONFIG_BATMAN_ADV) += batman-adv.o
22batman-adv-y += bat_debugfs.o
23batman-adv-y += bat_iv_ogm.o 22batman-adv-y += bat_iv_ogm.o
24batman-adv-y += bat_sysfs.o
25batman-adv-y += bitarray.o 23batman-adv-y += bitarray.o
26batman-adv-$(CONFIG_BATMAN_ADV_BLA) += bridge_loop_avoidance.o 24batman-adv-$(CONFIG_BATMAN_ADV_BLA) += bridge_loop_avoidance.o
25batman-adv-y += debugfs.o
27batman-adv-y += gateway_client.o 26batman-adv-y += gateway_client.o
28batman-adv-y += gateway_common.o 27batman-adv-y += gateway_common.o
29batman-adv-y += hard-interface.o 28batman-adv-y += hard-interface.o
@@ -35,6 +34,7 @@ batman-adv-y += ring_buffer.o
35batman-adv-y += routing.o 34batman-adv-y += routing.o
36batman-adv-y += send.o 35batman-adv-y += send.o
37batman-adv-y += soft-interface.o 36batman-adv-y += soft-interface.o
37batman-adv-y += sysfs.o
38batman-adv-y += translation-table.o 38batman-adv-y += translation-table.o
39batman-adv-y += unicast.o 39batman-adv-y += unicast.o
40batman-adv-y += vis.o 40batman-adv-y += vis.o
diff --git a/net/batman-adv/bat_algo.h b/net/batman-adv/bat_algo.h
index 9852a688ba4..a0ba3bff9b3 100644
--- a/net/batman-adv/bat_algo.h
+++ b/net/batman-adv/bat_algo.h
@@ -1,5 +1,4 @@
1/* 1/* Copyright (C) 2011-2012 B.A.T.M.A.N. contributors:
2 * Copyright (C) 2011-2012 B.A.T.M.A.N. contributors:
3 * 2 *
4 * Marek Lindner 3 * Marek Lindner
5 * 4 *
@@ -16,12 +15,11 @@
16 * along with this program; if not, write to the Free Software 15 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA 17 * 02110-1301, USA
19 *
20 */ 18 */
21 19
22#ifndef _NET_BATMAN_ADV_BAT_ALGO_H_ 20#ifndef _NET_BATMAN_ADV_BAT_ALGO_H_
23#define _NET_BATMAN_ADV_BAT_ALGO_H_ 21#define _NET_BATMAN_ADV_BAT_ALGO_H_
24 22
25int bat_iv_init(void); 23int batadv_iv_init(void);
26 24
27#endif /* _NET_BATMAN_ADV_BAT_ALGO_H_ */ 25#endif /* _NET_BATMAN_ADV_BAT_ALGO_H_ */
diff --git a/net/batman-adv/bat_debugfs.c b/net/batman-adv/bat_debugfs.c
deleted file mode 100644
index 3b588f86d77..00000000000
--- a/net/batman-adv/bat_debugfs.c
+++ /dev/null
@@ -1,388 +0,0 @@
1/*
2 * Copyright (C) 2010-2012 B.A.T.M.A.N. contributors:
3 *
4 * Marek Lindner
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of version 2 of the GNU General Public
8 * License as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA
19 *
20 */
21
22#include "main.h"
23
24#include <linux/debugfs.h>
25
26#include "bat_debugfs.h"
27#include "translation-table.h"
28#include "originator.h"
29#include "hard-interface.h"
30#include "gateway_common.h"
31#include "gateway_client.h"
32#include "soft-interface.h"
33#include "vis.h"
34#include "icmp_socket.h"
35#include "bridge_loop_avoidance.h"
36
37static struct dentry *bat_debugfs;
38
39#ifdef CONFIG_BATMAN_ADV_DEBUG
40#define LOG_BUFF_MASK (log_buff_len-1)
41#define LOG_BUFF(idx) (debug_log->log_buff[(idx) & LOG_BUFF_MASK])
42
43static int log_buff_len = LOG_BUF_LEN;
44
45static void emit_log_char(struct debug_log *debug_log, char c)
46{
47 LOG_BUFF(debug_log->log_end) = c;
48 debug_log->log_end++;
49
50 if (debug_log->log_end - debug_log->log_start > log_buff_len)
51 debug_log->log_start = debug_log->log_end - log_buff_len;
52}
53
54__printf(2, 3)
55static int fdebug_log(struct debug_log *debug_log, const char *fmt, ...)
56{
57 va_list args;
58 static char debug_log_buf[256];
59 char *p;
60
61 if (!debug_log)
62 return 0;
63
64 spin_lock_bh(&debug_log->lock);
65 va_start(args, fmt);
66 vscnprintf(debug_log_buf, sizeof(debug_log_buf), fmt, args);
67 va_end(args);
68
69 for (p = debug_log_buf; *p != 0; p++)
70 emit_log_char(debug_log, *p);
71
72 spin_unlock_bh(&debug_log->lock);
73
74 wake_up(&debug_log->queue_wait);
75
76 return 0;
77}
78
79int debug_log(struct bat_priv *bat_priv, const char *fmt, ...)
80{
81 va_list args;
82 char tmp_log_buf[256];
83
84 va_start(args, fmt);
85 vscnprintf(tmp_log_buf, sizeof(tmp_log_buf), fmt, args);
86 fdebug_log(bat_priv->debug_log, "[%10u] %s",
87 jiffies_to_msecs(jiffies), tmp_log_buf);
88 va_end(args);
89
90 return 0;
91}
92
93static int log_open(struct inode *inode, struct file *file)
94{
95 nonseekable_open(inode, file);
96 file->private_data = inode->i_private;
97 inc_module_count();
98 return 0;
99}
100
101static int log_release(struct inode *inode, struct file *file)
102{
103 dec_module_count();
104 return 0;
105}
106
107static ssize_t log_read(struct file *file, char __user *buf,
108 size_t count, loff_t *ppos)
109{
110 struct bat_priv *bat_priv = file->private_data;
111 struct debug_log *debug_log = bat_priv->debug_log;
112 int error, i = 0;
113 char c;
114
115 if ((file->f_flags & O_NONBLOCK) &&
116 !(debug_log->log_end - debug_log->log_start))
117 return -EAGAIN;
118
119 if (!buf)
120 return -EINVAL;
121
122 if (count == 0)
123 return 0;
124
125 if (!access_ok(VERIFY_WRITE, buf, count))
126 return -EFAULT;
127
128 error = wait_event_interruptible(debug_log->queue_wait,
129 (debug_log->log_start - debug_log->log_end));
130
131 if (error)
132 return error;
133
134 spin_lock_bh(&debug_log->lock);
135
136 while ((!error) && (i < count) &&
137 (debug_log->log_start != debug_log->log_end)) {
138 c = LOG_BUFF(debug_log->log_start);
139
140 debug_log->log_start++;
141
142 spin_unlock_bh(&debug_log->lock);
143
144 error = __put_user(c, buf);
145
146 spin_lock_bh(&debug_log->lock);
147
148 buf++;
149 i++;
150
151 }
152
153 spin_unlock_bh(&debug_log->lock);
154
155 if (!error)
156 return i;
157
158 return error;
159}
160
161static unsigned int log_poll(struct file *file, poll_table *wait)
162{
163 struct bat_priv *bat_priv = file->private_data;
164 struct debug_log *debug_log = bat_priv->debug_log;
165
166 poll_wait(file, &debug_log->queue_wait, wait);
167
168 if (debug_log->log_end - debug_log->log_start)
169 return POLLIN | POLLRDNORM;
170
171 return 0;
172}
173
174static const struct file_operations log_fops = {
175 .open = log_open,
176 .release = log_release,
177 .read = log_read,
178 .poll = log_poll,
179 .llseek = no_llseek,
180};
181
182static int debug_log_setup(struct bat_priv *bat_priv)
183{
184 struct dentry *d;
185
186 if (!bat_priv->debug_dir)
187 goto err;
188
189 bat_priv->debug_log = kzalloc(sizeof(*bat_priv->debug_log), GFP_ATOMIC);
190 if (!bat_priv->debug_log)
191 goto err;
192
193 spin_lock_init(&bat_priv->debug_log->lock);
194 init_waitqueue_head(&bat_priv->debug_log->queue_wait);
195
196 d = debugfs_create_file("log", S_IFREG | S_IRUSR,
197 bat_priv->debug_dir, bat_priv, &log_fops);
198 if (d)
199 goto err;
200
201 return 0;
202
203err:
204 return 1;
205}
206
207static void debug_log_cleanup(struct bat_priv *bat_priv)
208{
209 kfree(bat_priv->debug_log);
210 bat_priv->debug_log = NULL;
211}
212#else /* CONFIG_BATMAN_ADV_DEBUG */
213static int debug_log_setup(struct bat_priv *bat_priv)
214{
215 bat_priv->debug_log = NULL;
216 return 0;
217}
218
219static void debug_log_cleanup(struct bat_priv *bat_priv)
220{
221 return;
222}
223#endif
224
225static int bat_algorithms_open(struct inode *inode, struct file *file)
226{
227 return single_open(file, bat_algo_seq_print_text, NULL);
228}
229
230static int originators_open(struct inode *inode, struct file *file)
231{
232 struct net_device *net_dev = (struct net_device *)inode->i_private;
233 return single_open(file, orig_seq_print_text, net_dev);
234}
235
236static int gateways_open(struct inode *inode, struct file *file)
237{
238 struct net_device *net_dev = (struct net_device *)inode->i_private;
239 return single_open(file, gw_client_seq_print_text, net_dev);
240}
241
242static int transtable_global_open(struct inode *inode, struct file *file)
243{
244 struct net_device *net_dev = (struct net_device *)inode->i_private;
245 return single_open(file, tt_global_seq_print_text, net_dev);
246}
247
248#ifdef CONFIG_BATMAN_ADV_BLA
249static int bla_claim_table_open(struct inode *inode, struct file *file)
250{
251 struct net_device *net_dev = (struct net_device *)inode->i_private;
252 return single_open(file, bla_claim_table_seq_print_text, net_dev);
253}
254#endif
255
256static int transtable_local_open(struct inode *inode, struct file *file)
257{
258 struct net_device *net_dev = (struct net_device *)inode->i_private;
259 return single_open(file, tt_local_seq_print_text, net_dev);
260}
261
262static int vis_data_open(struct inode *inode, struct file *file)
263{
264 struct net_device *net_dev = (struct net_device *)inode->i_private;
265 return single_open(file, vis_seq_print_text, net_dev);
266}
267
268struct bat_debuginfo {
269 struct attribute attr;
270 const struct file_operations fops;
271};
272
273#define BAT_DEBUGINFO(_name, _mode, _open) \
274struct bat_debuginfo bat_debuginfo_##_name = { \
275 .attr = { .name = __stringify(_name), \
276 .mode = _mode, }, \
277 .fops = { .owner = THIS_MODULE, \
278 .open = _open, \
279 .read = seq_read, \
280 .llseek = seq_lseek, \
281 .release = single_release, \
282 } \
283};
284
285static BAT_DEBUGINFO(routing_algos, S_IRUGO, bat_algorithms_open);
286static BAT_DEBUGINFO(originators, S_IRUGO, originators_open);
287static BAT_DEBUGINFO(gateways, S_IRUGO, gateways_open);
288static BAT_DEBUGINFO(transtable_global, S_IRUGO, transtable_global_open);
289#ifdef CONFIG_BATMAN_ADV_BLA
290static BAT_DEBUGINFO(bla_claim_table, S_IRUGO, bla_claim_table_open);
291#endif
292static BAT_DEBUGINFO(transtable_local, S_IRUGO, transtable_local_open);
293static BAT_DEBUGINFO(vis_data, S_IRUGO, vis_data_open);
294
295static struct bat_debuginfo *mesh_debuginfos[] = {
296 &bat_debuginfo_originators,
297 &bat_debuginfo_gateways,
298 &bat_debuginfo_transtable_global,
299#ifdef CONFIG_BATMAN_ADV_BLA
300 &bat_debuginfo_bla_claim_table,
301#endif
302 &bat_debuginfo_transtable_local,
303 &bat_debuginfo_vis_data,
304 NULL,
305};
306
307void debugfs_init(void)
308{
309 struct bat_debuginfo *bat_debug;
310 struct dentry *file;
311
312 bat_debugfs = debugfs_create_dir(DEBUGFS_BAT_SUBDIR, NULL);
313 if (bat_debugfs == ERR_PTR(-ENODEV))
314 bat_debugfs = NULL;
315
316 if (!bat_debugfs)
317 goto out;
318
319 bat_debug = &bat_debuginfo_routing_algos;
320 file = debugfs_create_file(bat_debug->attr.name,
321 S_IFREG | bat_debug->attr.mode,
322 bat_debugfs, NULL, &bat_debug->fops);
323 if (!file)
324 pr_err("Can't add debugfs file: %s\n", bat_debug->attr.name);
325
326out:
327 return;
328}
329
330void debugfs_destroy(void)
331{
332 if (bat_debugfs) {
333 debugfs_remove_recursive(bat_debugfs);
334 bat_debugfs = NULL;
335 }
336}
337
338int debugfs_add_meshif(struct net_device *dev)
339{
340 struct bat_priv *bat_priv = netdev_priv(dev);
341 struct bat_debuginfo **bat_debug;
342 struct dentry *file;
343
344 if (!bat_debugfs)
345 goto out;
346
347 bat_priv->debug_dir = debugfs_create_dir(dev->name, bat_debugfs);
348 if (!bat_priv->debug_dir)
349 goto out;
350
351 bat_socket_setup(bat_priv);
352 debug_log_setup(bat_priv);
353
354 for (bat_debug = mesh_debuginfos; *bat_debug; ++bat_debug) {
355 file = debugfs_create_file(((*bat_debug)->attr).name,
356 S_IFREG | ((*bat_debug)->attr).mode,
357 bat_priv->debug_dir,
358 dev, &(*bat_debug)->fops);
359 if (!file) {
360 bat_err(dev, "Can't add debugfs file: %s/%s\n",
361 dev->name, ((*bat_debug)->attr).name);
362 goto rem_attr;
363 }
364 }
365
366 return 0;
367rem_attr:
368 debugfs_remove_recursive(bat_priv->debug_dir);
369 bat_priv->debug_dir = NULL;
370out:
371#ifdef CONFIG_DEBUG_FS
372 return -ENOMEM;
373#else
374 return 0;
375#endif /* CONFIG_DEBUG_FS */
376}
377
378void debugfs_del_meshif(struct net_device *dev)
379{
380 struct bat_priv *bat_priv = netdev_priv(dev);
381
382 debug_log_cleanup(bat_priv);
383
384 if (bat_debugfs) {
385 debugfs_remove_recursive(bat_priv->debug_dir);
386 bat_priv->debug_dir = NULL;
387 }
388}
diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
index dc53798ebb4..e877af8bdd1 100644
--- a/net/batman-adv/bat_iv_ogm.c
+++ b/net/batman-adv/bat_iv_ogm.c
@@ -1,5 +1,4 @@
1/* 1/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
2 * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
3 * 2 *
4 * Marek Lindner, Simon Wunderlich 3 * Marek Lindner, Simon Wunderlich
5 * 4 *
@@ -16,7 +15,6 @@
16 * along with this program; if not, write to the Free Software 15 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA 17 * 02110-1301, USA
19 *
20 */ 18 */
21 19
22#include "main.h" 20#include "main.h"
@@ -30,15 +28,16 @@
30#include "send.h" 28#include "send.h"
31#include "bat_algo.h" 29#include "bat_algo.h"
32 30
33static struct neigh_node *bat_iv_ogm_neigh_new(struct hard_iface *hard_iface, 31static struct batadv_neigh_node *
34 const uint8_t *neigh_addr, 32batadv_iv_ogm_neigh_new(struct batadv_hard_iface *hard_iface,
35 struct orig_node *orig_node, 33 const uint8_t *neigh_addr,
36 struct orig_node *orig_neigh, 34 struct batadv_orig_node *orig_node,
37 uint32_t seqno) 35 struct batadv_orig_node *orig_neigh, __be32 seqno)
38{ 36{
39 struct neigh_node *neigh_node; 37 struct batadv_neigh_node *neigh_node;
40 38
41 neigh_node = batadv_neigh_node_new(hard_iface, neigh_addr, seqno); 39 neigh_node = batadv_neigh_node_new(hard_iface, neigh_addr,
40 ntohl(seqno));
42 if (!neigh_node) 41 if (!neigh_node)
43 goto out; 42 goto out;
44 43
@@ -55,30 +54,30 @@ out:
55 return neigh_node; 54 return neigh_node;
56} 55}
57 56
58static int bat_iv_ogm_iface_enable(struct hard_iface *hard_iface) 57static int batadv_iv_ogm_iface_enable(struct batadv_hard_iface *hard_iface)
59{ 58{
60 struct batman_ogm_packet *batman_ogm_packet; 59 struct batadv_ogm_packet *batadv_ogm_packet;
61 uint32_t random_seqno; 60 uint32_t random_seqno;
62 int res = -1; 61 int res = -ENOMEM;
63 62
64 /* randomize initial seqno to avoid collision */ 63 /* randomize initial seqno to avoid collision */
65 get_random_bytes(&random_seqno, sizeof(random_seqno)); 64 get_random_bytes(&random_seqno, sizeof(random_seqno));
66 atomic_set(&hard_iface->seqno, random_seqno); 65 atomic_set(&hard_iface->seqno, random_seqno);
67 66
68 hard_iface->packet_len = BATMAN_OGM_HLEN; 67 hard_iface->packet_len = BATADV_OGM_HLEN;
69 hard_iface->packet_buff = kmalloc(hard_iface->packet_len, GFP_ATOMIC); 68 hard_iface->packet_buff = kmalloc(hard_iface->packet_len, GFP_ATOMIC);
70 69
71 if (!hard_iface->packet_buff) 70 if (!hard_iface->packet_buff)
72 goto out; 71 goto out;
73 72
74 batman_ogm_packet = (struct batman_ogm_packet *)hard_iface->packet_buff; 73 batadv_ogm_packet = (struct batadv_ogm_packet *)hard_iface->packet_buff;
75 batman_ogm_packet->header.packet_type = BAT_IV_OGM; 74 batadv_ogm_packet->header.packet_type = BATADV_IV_OGM;
76 batman_ogm_packet->header.version = COMPAT_VERSION; 75 batadv_ogm_packet->header.version = BATADV_COMPAT_VERSION;
77 batman_ogm_packet->header.ttl = 2; 76 batadv_ogm_packet->header.ttl = 2;
78 batman_ogm_packet->flags = NO_FLAGS; 77 batadv_ogm_packet->flags = BATADV_NO_FLAGS;
79 batman_ogm_packet->tq = TQ_MAX_VALUE; 78 batadv_ogm_packet->tq = BATADV_TQ_MAX_VALUE;
80 batman_ogm_packet->tt_num_changes = 0; 79 batadv_ogm_packet->tt_num_changes = 0;
81 batman_ogm_packet->ttvn = 0; 80 batadv_ogm_packet->ttvn = 0;
82 81
83 res = 0; 82 res = 0;
84 83
@@ -86,133 +85,152 @@ out:
86 return res; 85 return res;
87} 86}
88 87
89static void bat_iv_ogm_iface_disable(struct hard_iface *hard_iface) 88static void batadv_iv_ogm_iface_disable(struct batadv_hard_iface *hard_iface)
90{ 89{
91 kfree(hard_iface->packet_buff); 90 kfree(hard_iface->packet_buff);
92 hard_iface->packet_buff = NULL; 91 hard_iface->packet_buff = NULL;
93} 92}
94 93
95static void bat_iv_ogm_iface_update_mac(struct hard_iface *hard_iface) 94static void batadv_iv_ogm_iface_update_mac(struct batadv_hard_iface *hard_iface)
96{ 95{
97 struct batman_ogm_packet *batman_ogm_packet; 96 struct batadv_ogm_packet *batadv_ogm_packet;
98 97
99 batman_ogm_packet = (struct batman_ogm_packet *)hard_iface->packet_buff; 98 batadv_ogm_packet = (struct batadv_ogm_packet *)hard_iface->packet_buff;
100 memcpy(batman_ogm_packet->orig, 99 memcpy(batadv_ogm_packet->orig,
101 hard_iface->net_dev->dev_addr, ETH_ALEN); 100 hard_iface->net_dev->dev_addr, ETH_ALEN);
102 memcpy(batman_ogm_packet->prev_sender, 101 memcpy(batadv_ogm_packet->prev_sender,
103 hard_iface->net_dev->dev_addr, ETH_ALEN); 102 hard_iface->net_dev->dev_addr, ETH_ALEN);
104} 103}
105 104
106static void bat_iv_ogm_primary_iface_set(struct hard_iface *hard_iface) 105static void
106batadv_iv_ogm_primary_iface_set(struct batadv_hard_iface *hard_iface)
107{ 107{
108 struct batman_ogm_packet *batman_ogm_packet; 108 struct batadv_ogm_packet *batadv_ogm_packet;
109 109
110 batman_ogm_packet = (struct batman_ogm_packet *)hard_iface->packet_buff; 110 batadv_ogm_packet = (struct batadv_ogm_packet *)hard_iface->packet_buff;
111 batman_ogm_packet->flags = PRIMARIES_FIRST_HOP; 111 batadv_ogm_packet->flags = BATADV_PRIMARIES_FIRST_HOP;
112 batman_ogm_packet->header.ttl = TTL; 112 batadv_ogm_packet->header.ttl = BATADV_TTL;
113} 113}
114 114
115/* when do we schedule our own ogm to be sent */ 115/* when do we schedule our own ogm to be sent */
116static unsigned long bat_iv_ogm_emit_send_time(const struct bat_priv *bat_priv) 116static unsigned long
117batadv_iv_ogm_emit_send_time(const struct batadv_priv *bat_priv)
117{ 118{
118 return jiffies + msecs_to_jiffies( 119 unsigned int msecs;
119 atomic_read(&bat_priv->orig_interval) - 120
120 JITTER + (random32() % 2*JITTER)); 121 msecs = atomic_read(&bat_priv->orig_interval) - BATADV_JITTER;
122 msecs += (random32() % 2 * BATADV_JITTER);
123
124 return jiffies + msecs_to_jiffies(msecs);
121} 125}
122 126
123/* when do we schedule a ogm packet to be sent */ 127/* when do we schedule a ogm packet to be sent */
124static unsigned long bat_iv_ogm_fwd_send_time(void) 128static unsigned long batadv_iv_ogm_fwd_send_time(void)
125{ 129{
126 return jiffies + msecs_to_jiffies(random32() % (JITTER/2)); 130 return jiffies + msecs_to_jiffies(random32() % (BATADV_JITTER / 2));
127} 131}
128 132
129/* apply hop penalty for a normal link */ 133/* apply hop penalty for a normal link */
130static uint8_t hop_penalty(uint8_t tq, const struct bat_priv *bat_priv) 134static uint8_t batadv_hop_penalty(uint8_t tq,
135 const struct batadv_priv *bat_priv)
131{ 136{
132 int hop_penalty = atomic_read(&bat_priv->hop_penalty); 137 int hop_penalty = atomic_read(&bat_priv->hop_penalty);
133 return (tq * (TQ_MAX_VALUE - hop_penalty)) / (TQ_MAX_VALUE); 138 int new_tq;
139
140 new_tq = tq * (BATADV_TQ_MAX_VALUE - hop_penalty);
141 new_tq /= BATADV_TQ_MAX_VALUE;
142
143 return new_tq;
134} 144}
135 145
136/* is there another aggregated packet here? */ 146/* is there another aggregated packet here? */
137static int bat_iv_ogm_aggr_packet(int buff_pos, int packet_len, 147static int batadv_iv_ogm_aggr_packet(int buff_pos, int packet_len,
138 int tt_num_changes) 148 int tt_num_changes)
139{ 149{
140 int next_buff_pos = buff_pos + BATMAN_OGM_HLEN + tt_len(tt_num_changes); 150 int next_buff_pos = 0;
151
152 next_buff_pos += buff_pos + BATADV_OGM_HLEN;
153 next_buff_pos += batadv_tt_len(tt_num_changes);
141 154
142 return (next_buff_pos <= packet_len) && 155 return (next_buff_pos <= packet_len) &&
143 (next_buff_pos <= MAX_AGGREGATION_BYTES); 156 (next_buff_pos <= BATADV_MAX_AGGREGATION_BYTES);
144} 157}
145 158
146/* send a batman ogm to a given interface */ 159/* send a batman ogm to a given interface */
147static void bat_iv_ogm_send_to_if(struct forw_packet *forw_packet, 160static void batadv_iv_ogm_send_to_if(struct batadv_forw_packet *forw_packet,
148 struct hard_iface *hard_iface) 161 struct batadv_hard_iface *hard_iface)
149{ 162{
150 struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface); 163 struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
151 char *fwd_str; 164 char *fwd_str;
152 uint8_t packet_num; 165 uint8_t packet_num;
153 int16_t buff_pos; 166 int16_t buff_pos;
154 struct batman_ogm_packet *batman_ogm_packet; 167 struct batadv_ogm_packet *batadv_ogm_packet;
155 struct sk_buff *skb; 168 struct sk_buff *skb;
156 169
157 if (hard_iface->if_status != IF_ACTIVE) 170 if (hard_iface->if_status != BATADV_IF_ACTIVE)
158 return; 171 return;
159 172
160 packet_num = 0; 173 packet_num = 0;
161 buff_pos = 0; 174 buff_pos = 0;
162 batman_ogm_packet = (struct batman_ogm_packet *)forw_packet->skb->data; 175 batadv_ogm_packet = (struct batadv_ogm_packet *)forw_packet->skb->data;
163 176
164 /* adjust all flags and log packets */ 177 /* adjust all flags and log packets */
165 while (bat_iv_ogm_aggr_packet(buff_pos, forw_packet->packet_len, 178 while (batadv_iv_ogm_aggr_packet(buff_pos, forw_packet->packet_len,
166 batman_ogm_packet->tt_num_changes)) { 179 batadv_ogm_packet->tt_num_changes)) {
167 180
168 /* we might have aggregated direct link packets with an 181 /* we might have aggregated direct link packets with an
169 * ordinary base packet */ 182 * ordinary base packet
183 */
170 if ((forw_packet->direct_link_flags & (1 << packet_num)) && 184 if ((forw_packet->direct_link_flags & (1 << packet_num)) &&
171 (forw_packet->if_incoming == hard_iface)) 185 (forw_packet->if_incoming == hard_iface))
172 batman_ogm_packet->flags |= DIRECTLINK; 186 batadv_ogm_packet->flags |= BATADV_DIRECTLINK;
173 else 187 else
174 batman_ogm_packet->flags &= ~DIRECTLINK; 188 batadv_ogm_packet->flags &= ~BATADV_DIRECTLINK;
175 189
176 fwd_str = (packet_num > 0 ? "Forwarding" : (forw_packet->own ? 190 fwd_str = (packet_num > 0 ? "Forwarding" : (forw_packet->own ?
177 "Sending own" : 191 "Sending own" :
178 "Forwarding")); 192 "Forwarding"));
179 bat_dbg(DBG_BATMAN, bat_priv, 193 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
180 "%s %spacket (originator %pM, seqno %u, TQ %d, TTL %d, IDF %s, ttvn %d) on interface %s [%pM]\n", 194 "%s %spacket (originator %pM, seqno %u, TQ %d, TTL %d, IDF %s, ttvn %d) on interface %s [%pM]\n",
181 fwd_str, (packet_num > 0 ? "aggregated " : ""), 195 fwd_str, (packet_num > 0 ? "aggregated " : ""),
182 batman_ogm_packet->orig, 196 batadv_ogm_packet->orig,
183 ntohl(batman_ogm_packet->seqno), 197 ntohl(batadv_ogm_packet->seqno),
184 batman_ogm_packet->tq, batman_ogm_packet->header.ttl, 198 batadv_ogm_packet->tq, batadv_ogm_packet->header.ttl,
185 (batman_ogm_packet->flags & DIRECTLINK ? 199 (batadv_ogm_packet->flags & BATADV_DIRECTLINK ?
186 "on" : "off"), 200 "on" : "off"),
187 batman_ogm_packet->ttvn, hard_iface->net_dev->name, 201 batadv_ogm_packet->ttvn, hard_iface->net_dev->name,
188 hard_iface->net_dev->dev_addr); 202 hard_iface->net_dev->dev_addr);
189 203
190 buff_pos += BATMAN_OGM_HLEN + 204 buff_pos += BATADV_OGM_HLEN;
191 tt_len(batman_ogm_packet->tt_num_changes); 205 buff_pos += batadv_tt_len(batadv_ogm_packet->tt_num_changes);
192 packet_num++; 206 packet_num++;
193 batman_ogm_packet = (struct batman_ogm_packet *) 207 batadv_ogm_packet = (struct batadv_ogm_packet *)
194 (forw_packet->skb->data + buff_pos); 208 (forw_packet->skb->data + buff_pos);
195 } 209 }
196 210
197 /* create clone because function is called more than once */ 211 /* create clone because function is called more than once */
198 skb = skb_clone(forw_packet->skb, GFP_ATOMIC); 212 skb = skb_clone(forw_packet->skb, GFP_ATOMIC);
199 if (skb) 213 if (skb) {
200 send_skb_packet(skb, hard_iface, broadcast_addr); 214 batadv_inc_counter(bat_priv, BATADV_CNT_MGMT_TX);
215 batadv_add_counter(bat_priv, BATADV_CNT_MGMT_TX_BYTES,
216 skb->len + ETH_HLEN);
217 batadv_send_skb_packet(skb, hard_iface, batadv_broadcast_addr);
218 }
201} 219}
202 220
203/* send a batman ogm packet */ 221/* send a batman ogm packet */
204static void bat_iv_ogm_emit(struct forw_packet *forw_packet) 222static void batadv_iv_ogm_emit(struct batadv_forw_packet *forw_packet)
205{ 223{
206 struct hard_iface *hard_iface; 224 struct batadv_hard_iface *hard_iface;
207 struct net_device *soft_iface; 225 struct net_device *soft_iface;
208 struct bat_priv *bat_priv; 226 struct batadv_priv *bat_priv;
209 struct hard_iface *primary_if = NULL; 227 struct batadv_hard_iface *primary_if = NULL;
210 struct batman_ogm_packet *batman_ogm_packet; 228 struct batadv_ogm_packet *batadv_ogm_packet;
211 unsigned char directlink; 229 unsigned char directlink;
212 230
213 batman_ogm_packet = (struct batman_ogm_packet *) 231 batadv_ogm_packet = (struct batadv_ogm_packet *)
214 (forw_packet->skb->data); 232 (forw_packet->skb->data);
215 directlink = (batman_ogm_packet->flags & DIRECTLINK ? 1 : 0); 233 directlink = (batadv_ogm_packet->flags & BATADV_DIRECTLINK ? 1 : 0);
216 234
217 if (!forw_packet->if_incoming) { 235 if (!forw_packet->if_incoming) {
218 pr_err("Error - can't forward packet: incoming iface not specified\n"); 236 pr_err("Error - can't forward packet: incoming iface not specified\n");
@@ -222,31 +240,33 @@ static void bat_iv_ogm_emit(struct forw_packet *forw_packet)
222 soft_iface = forw_packet->if_incoming->soft_iface; 240 soft_iface = forw_packet->if_incoming->soft_iface;
223 bat_priv = netdev_priv(soft_iface); 241 bat_priv = netdev_priv(soft_iface);
224 242
225 if (forw_packet->if_incoming->if_status != IF_ACTIVE) 243 if (forw_packet->if_incoming->if_status != BATADV_IF_ACTIVE)
226 goto out; 244 goto out;
227 245
228 primary_if = primary_if_get_selected(bat_priv); 246 primary_if = batadv_primary_if_get_selected(bat_priv);
229 if (!primary_if) 247 if (!primary_if)
230 goto out; 248 goto out;
231 249
232 /* multihomed peer assumed */ 250 /* multihomed peer assumed
233 /* non-primary OGMs are only broadcasted on their interface */ 251 * non-primary OGMs are only broadcasted on their interface
234 if ((directlink && (batman_ogm_packet->header.ttl == 1)) || 252 */
253 if ((directlink && (batadv_ogm_packet->header.ttl == 1)) ||
235 (forw_packet->own && (forw_packet->if_incoming != primary_if))) { 254 (forw_packet->own && (forw_packet->if_incoming != primary_if))) {
236 255
237 /* FIXME: what about aggregated packets ? */ 256 /* FIXME: what about aggregated packets ? */
238 bat_dbg(DBG_BATMAN, bat_priv, 257 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
239 "%s packet (originator %pM, seqno %u, TTL %d) on interface %s [%pM]\n", 258 "%s packet (originator %pM, seqno %u, TTL %d) on interface %s [%pM]\n",
240 (forw_packet->own ? "Sending own" : "Forwarding"), 259 (forw_packet->own ? "Sending own" : "Forwarding"),
241 batman_ogm_packet->orig, 260 batadv_ogm_packet->orig,
242 ntohl(batman_ogm_packet->seqno), 261 ntohl(batadv_ogm_packet->seqno),
243 batman_ogm_packet->header.ttl, 262 batadv_ogm_packet->header.ttl,
244 forw_packet->if_incoming->net_dev->name, 263 forw_packet->if_incoming->net_dev->name,
245 forw_packet->if_incoming->net_dev->dev_addr); 264 forw_packet->if_incoming->net_dev->dev_addr);
246 265
247 /* skb is only used once and than forw_packet is free'd */ 266 /* skb is only used once and than forw_packet is free'd */
248 send_skb_packet(forw_packet->skb, forw_packet->if_incoming, 267 batadv_send_skb_packet(forw_packet->skb,
249 broadcast_addr); 268 forw_packet->if_incoming,
269 batadv_broadcast_addr);
250 forw_packet->skb = NULL; 270 forw_packet->skb = NULL;
251 271
252 goto out; 272 goto out;
@@ -254,70 +274,70 @@ static void bat_iv_ogm_emit(struct forw_packet *forw_packet)
254 274
255 /* broadcast on every interface */ 275 /* broadcast on every interface */
256 rcu_read_lock(); 276 rcu_read_lock();
257 list_for_each_entry_rcu(hard_iface, &hardif_list, list) { 277 list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
258 if (hard_iface->soft_iface != soft_iface) 278 if (hard_iface->soft_iface != soft_iface)
259 continue; 279 continue;
260 280
261 bat_iv_ogm_send_to_if(forw_packet, hard_iface); 281 batadv_iv_ogm_send_to_if(forw_packet, hard_iface);
262 } 282 }
263 rcu_read_unlock(); 283 rcu_read_unlock();
264 284
265out: 285out:
266 if (primary_if) 286 if (primary_if)
267 hardif_free_ref(primary_if); 287 batadv_hardif_free_ref(primary_if);
268} 288}
269 289
270/* return true if new_packet can be aggregated with forw_packet */ 290/* return true if new_packet can be aggregated with forw_packet */
271static bool bat_iv_ogm_can_aggregate(const struct batman_ogm_packet 291static bool
272 *new_batman_ogm_packet, 292batadv_iv_ogm_can_aggregate(const struct batadv_ogm_packet *new_bat_ogm_packet,
273 struct bat_priv *bat_priv, 293 struct batadv_priv *bat_priv,
274 int packet_len, unsigned long send_time, 294 int packet_len, unsigned long send_time,
275 bool directlink, 295 bool directlink,
276 const struct hard_iface *if_incoming, 296 const struct batadv_hard_iface *if_incoming,
277 const struct forw_packet *forw_packet) 297 const struct batadv_forw_packet *forw_packet)
278{ 298{
279 struct batman_ogm_packet *batman_ogm_packet; 299 struct batadv_ogm_packet *batadv_ogm_packet;
280 int aggregated_bytes = forw_packet->packet_len + packet_len; 300 int aggregated_bytes = forw_packet->packet_len + packet_len;
281 struct hard_iface *primary_if = NULL; 301 struct batadv_hard_iface *primary_if = NULL;
282 bool res = false; 302 bool res = false;
303 unsigned long aggregation_end_time;
283 304
284 batman_ogm_packet = (struct batman_ogm_packet *)forw_packet->skb->data; 305 batadv_ogm_packet = (struct batadv_ogm_packet *)forw_packet->skb->data;
306 aggregation_end_time = send_time;
307 aggregation_end_time += msecs_to_jiffies(BATADV_MAX_AGGREGATION_MS);
285 308
286 /** 309 /* we can aggregate the current packet to this aggregated packet
287 * we can aggregate the current packet to this aggregated packet
288 * if: 310 * if:
289 * 311 *
290 * - the send time is within our MAX_AGGREGATION_MS time 312 * - the send time is within our MAX_AGGREGATION_MS time
291 * - the resulting packet wont be bigger than 313 * - the resulting packet wont be bigger than
292 * MAX_AGGREGATION_BYTES 314 * MAX_AGGREGATION_BYTES
293 */ 315 */
294
295 if (time_before(send_time, forw_packet->send_time) && 316 if (time_before(send_time, forw_packet->send_time) &&
296 time_after_eq(send_time + msecs_to_jiffies(MAX_AGGREGATION_MS), 317 time_after_eq(aggregation_end_time, forw_packet->send_time) &&
297 forw_packet->send_time) && 318 (aggregated_bytes <= BATADV_MAX_AGGREGATION_BYTES)) {
298 (aggregated_bytes <= MAX_AGGREGATION_BYTES)) {
299 319
300 /** 320 /* check aggregation compatibility
301 * check aggregation compatibility
302 * -> direct link packets are broadcasted on 321 * -> direct link packets are broadcasted on
303 * their interface only 322 * their interface only
304 * -> aggregate packet if the current packet is 323 * -> aggregate packet if the current packet is
305 * a "global" packet as well as the base 324 * a "global" packet as well as the base
306 * packet 325 * packet
307 */ 326 */
308 327 primary_if = batadv_primary_if_get_selected(bat_priv);
309 primary_if = primary_if_get_selected(bat_priv);
310 if (!primary_if) 328 if (!primary_if)
311 goto out; 329 goto out;
312 330
313 /* packets without direct link flag and high TTL 331 /* packets without direct link flag and high TTL
314 * are flooded through the net */ 332 * are flooded through the net
333 */
315 if ((!directlink) && 334 if ((!directlink) &&
316 (!(batman_ogm_packet->flags & DIRECTLINK)) && 335 (!(batadv_ogm_packet->flags & BATADV_DIRECTLINK)) &&
317 (batman_ogm_packet->header.ttl != 1) && 336 (batadv_ogm_packet->header.ttl != 1) &&
318 337
319 /* own packets originating non-primary 338 /* own packets originating non-primary
320 * interfaces leave only that interface */ 339 * interfaces leave only that interface
340 */
321 ((!forw_packet->own) || 341 ((!forw_packet->own) ||
322 (forw_packet->if_incoming == primary_if))) { 342 (forw_packet->if_incoming == primary_if))) {
323 res = true; 343 res = true;
@@ -325,15 +345,17 @@ static bool bat_iv_ogm_can_aggregate(const struct batman_ogm_packet
325 } 345 }
326 346
327 /* if the incoming packet is sent via this one 347 /* if the incoming packet is sent via this one
328 * interface only - we still can aggregate */ 348 * interface only - we still can aggregate
349 */
329 if ((directlink) && 350 if ((directlink) &&
330 (new_batman_ogm_packet->header.ttl == 1) && 351 (new_bat_ogm_packet->header.ttl == 1) &&
331 (forw_packet->if_incoming == if_incoming) && 352 (forw_packet->if_incoming == if_incoming) &&
332 353
333 /* packets from direct neighbors or 354 /* packets from direct neighbors or
334 * own secondary interface packets 355 * own secondary interface packets
335 * (= secondary interface packets in general) */ 356 * (= secondary interface packets in general)
336 (batman_ogm_packet->flags & DIRECTLINK || 357 */
358 (batadv_ogm_packet->flags & BATADV_DIRECTLINK ||
337 (forw_packet->own && 359 (forw_packet->own &&
338 forw_packet->if_incoming != primary_if))) { 360 forw_packet->if_incoming != primary_if))) {
339 res = true; 361 res = true;
@@ -343,29 +365,30 @@ static bool bat_iv_ogm_can_aggregate(const struct batman_ogm_packet
343 365
344out: 366out:
345 if (primary_if) 367 if (primary_if)
346 hardif_free_ref(primary_if); 368 batadv_hardif_free_ref(primary_if);
347 return res; 369 return res;
348} 370}
349 371
350/* create a new aggregated packet and add this packet to it */ 372/* create a new aggregated packet and add this packet to it */
351static void bat_iv_ogm_aggregate_new(const unsigned char *packet_buff, 373static void batadv_iv_ogm_aggregate_new(const unsigned char *packet_buff,
352 int packet_len, unsigned long send_time, 374 int packet_len, unsigned long send_time,
353 bool direct_link, 375 bool direct_link,
354 struct hard_iface *if_incoming, 376 struct batadv_hard_iface *if_incoming,
355 int own_packet) 377 int own_packet)
356{ 378{
357 struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface); 379 struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
358 struct forw_packet *forw_packet_aggr; 380 struct batadv_forw_packet *forw_packet_aggr;
359 unsigned char *skb_buff; 381 unsigned char *skb_buff;
382 unsigned int skb_size;
360 383
361 if (!atomic_inc_not_zero(&if_incoming->refcount)) 384 if (!atomic_inc_not_zero(&if_incoming->refcount))
362 return; 385 return;
363 386
364 /* own packet should always be scheduled */ 387 /* own packet should always be scheduled */
365 if (!own_packet) { 388 if (!own_packet) {
366 if (!atomic_dec_not_zero(&bat_priv->batman_queue_left)) { 389 if (!batadv_atomic_dec_not_zero(&bat_priv->batman_queue_left)) {
367 bat_dbg(DBG_BATMAN, bat_priv, 390 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
368 "batman packet queue full\n"); 391 "batman packet queue full\n");
369 goto out; 392 goto out;
370 } 393 }
371 } 394 }
@@ -378,12 +401,12 @@ static void bat_iv_ogm_aggregate_new(const unsigned char *packet_buff,
378 } 401 }
379 402
380 if ((atomic_read(&bat_priv->aggregated_ogms)) && 403 if ((atomic_read(&bat_priv->aggregated_ogms)) &&
381 (packet_len < MAX_AGGREGATION_BYTES)) 404 (packet_len < BATADV_MAX_AGGREGATION_BYTES))
382 forw_packet_aggr->skb = dev_alloc_skb(MAX_AGGREGATION_BYTES + 405 skb_size = BATADV_MAX_AGGREGATION_BYTES + ETH_HLEN;
383 ETH_HLEN);
384 else 406 else
385 forw_packet_aggr->skb = dev_alloc_skb(packet_len + ETH_HLEN); 407 skb_size = packet_len + ETH_HLEN;
386 408
409 forw_packet_aggr->skb = dev_alloc_skb(skb_size);
387 if (!forw_packet_aggr->skb) { 410 if (!forw_packet_aggr->skb) {
388 if (!own_packet) 411 if (!own_packet)
389 atomic_inc(&bat_priv->batman_queue_left); 412 atomic_inc(&bat_priv->batman_queue_left);
@@ -401,7 +424,7 @@ static void bat_iv_ogm_aggregate_new(const unsigned char *packet_buff,
401 forw_packet_aggr->own = own_packet; 424 forw_packet_aggr->own = own_packet;
402 forw_packet_aggr->if_incoming = if_incoming; 425 forw_packet_aggr->if_incoming = if_incoming;
403 forw_packet_aggr->num_packets = 0; 426 forw_packet_aggr->num_packets = 0;
404 forw_packet_aggr->direct_link_flags = NO_FLAGS; 427 forw_packet_aggr->direct_link_flags = BATADV_NO_FLAGS;
405 forw_packet_aggr->send_time = send_time; 428 forw_packet_aggr->send_time = send_time;
406 429
407 /* save packet direct link flag status */ 430 /* save packet direct link flag status */
@@ -415,20 +438,20 @@ static void bat_iv_ogm_aggregate_new(const unsigned char *packet_buff,
415 438
416 /* start timer for this packet */ 439 /* start timer for this packet */
417 INIT_DELAYED_WORK(&forw_packet_aggr->delayed_work, 440 INIT_DELAYED_WORK(&forw_packet_aggr->delayed_work,
418 send_outstanding_bat_ogm_packet); 441 batadv_send_outstanding_bat_ogm_packet);
419 queue_delayed_work(bat_event_workqueue, 442 queue_delayed_work(batadv_event_workqueue,
420 &forw_packet_aggr->delayed_work, 443 &forw_packet_aggr->delayed_work,
421 send_time - jiffies); 444 send_time - jiffies);
422 445
423 return; 446 return;
424out: 447out:
425 hardif_free_ref(if_incoming); 448 batadv_hardif_free_ref(if_incoming);
426} 449}
427 450
428/* aggregate a new packet into the existing ogm packet */ 451/* aggregate a new packet into the existing ogm packet */
429static void bat_iv_ogm_aggregate(struct forw_packet *forw_packet_aggr, 452static void batadv_iv_ogm_aggregate(struct batadv_forw_packet *forw_packet_aggr,
430 const unsigned char *packet_buff, 453 const unsigned char *packet_buff,
431 int packet_len, bool direct_link) 454 int packet_len, bool direct_link)
432{ 455{
433 unsigned char *skb_buff; 456 unsigned char *skb_buff;
434 457
@@ -443,22 +466,25 @@ static void bat_iv_ogm_aggregate(struct forw_packet *forw_packet_aggr,
443 (1 << forw_packet_aggr->num_packets); 466 (1 << forw_packet_aggr->num_packets);
444} 467}
445 468
446static void bat_iv_ogm_queue_add(struct bat_priv *bat_priv, 469static void batadv_iv_ogm_queue_add(struct batadv_priv *bat_priv,
447 unsigned char *packet_buff, 470 unsigned char *packet_buff,
448 int packet_len, struct hard_iface *if_incoming, 471 int packet_len,
449 int own_packet, unsigned long send_time) 472 struct batadv_hard_iface *if_incoming,
473 int own_packet, unsigned long send_time)
450{ 474{
451 /** 475 /* _aggr -> pointer to the packet we want to aggregate with
452 * _aggr -> pointer to the packet we want to aggregate with
453 * _pos -> pointer to the position in the queue 476 * _pos -> pointer to the position in the queue
454 */ 477 */
455 struct forw_packet *forw_packet_aggr = NULL, *forw_packet_pos = NULL; 478 struct batadv_forw_packet *forw_packet_aggr = NULL;
479 struct batadv_forw_packet *forw_packet_pos = NULL;
456 struct hlist_node *tmp_node; 480 struct hlist_node *tmp_node;
457 struct batman_ogm_packet *batman_ogm_packet; 481 struct batadv_ogm_packet *batadv_ogm_packet;
458 bool direct_link; 482 bool direct_link;
483 unsigned long max_aggregation_jiffies;
459 484
460 batman_ogm_packet = (struct batman_ogm_packet *)packet_buff; 485 batadv_ogm_packet = (struct batadv_ogm_packet *)packet_buff;
461 direct_link = batman_ogm_packet->flags & DIRECTLINK ? 1 : 0; 486 direct_link = batadv_ogm_packet->flags & BATADV_DIRECTLINK ? 1 : 0;
487 max_aggregation_jiffies = msecs_to_jiffies(BATADV_MAX_AGGREGATION_MS);
462 488
463 /* find position for the packet in the forward queue */ 489 /* find position for the packet in the forward queue */
464 spin_lock_bh(&bat_priv->forw_bat_list_lock); 490 spin_lock_bh(&bat_priv->forw_bat_list_lock);
@@ -466,11 +492,11 @@ static void bat_iv_ogm_queue_add(struct bat_priv *bat_priv,
466 if ((atomic_read(&bat_priv->aggregated_ogms)) && (!own_packet)) { 492 if ((atomic_read(&bat_priv->aggregated_ogms)) && (!own_packet)) {
467 hlist_for_each_entry(forw_packet_pos, tmp_node, 493 hlist_for_each_entry(forw_packet_pos, tmp_node,
468 &bat_priv->forw_bat_list, list) { 494 &bat_priv->forw_bat_list, list) {
469 if (bat_iv_ogm_can_aggregate(batman_ogm_packet, 495 if (batadv_iv_ogm_can_aggregate(batadv_ogm_packet,
470 bat_priv, packet_len, 496 bat_priv, packet_len,
471 send_time, direct_link, 497 send_time, direct_link,
472 if_incoming, 498 if_incoming,
473 forw_packet_pos)) { 499 forw_packet_pos)) {
474 forw_packet_aggr = forw_packet_pos; 500 forw_packet_aggr = forw_packet_pos;
475 break; 501 break;
476 } 502 }
@@ -478,42 +504,41 @@ static void bat_iv_ogm_queue_add(struct bat_priv *bat_priv,
478 } 504 }
479 505
480 /* nothing to aggregate with - either aggregation disabled or no 506 /* nothing to aggregate with - either aggregation disabled or no
481 * suitable aggregation packet found */ 507 * suitable aggregation packet found
508 */
482 if (!forw_packet_aggr) { 509 if (!forw_packet_aggr) {
483 /* the following section can run without the lock */ 510 /* the following section can run without the lock */
484 spin_unlock_bh(&bat_priv->forw_bat_list_lock); 511 spin_unlock_bh(&bat_priv->forw_bat_list_lock);
485 512
486 /** 513 /* if we could not aggregate this packet with one of the others
487 * if we could not aggregate this packet with one of the others
488 * we hold it back for a while, so that it might be aggregated 514 * we hold it back for a while, so that it might be aggregated
489 * later on 515 * later on
490 */ 516 */
491 if ((!own_packet) && 517 if (!own_packet && atomic_read(&bat_priv->aggregated_ogms))
492 (atomic_read(&bat_priv->aggregated_ogms))) 518 send_time += max_aggregation_jiffies;
493 send_time += msecs_to_jiffies(MAX_AGGREGATION_MS);
494 519
495 bat_iv_ogm_aggregate_new(packet_buff, packet_len, 520 batadv_iv_ogm_aggregate_new(packet_buff, packet_len,
496 send_time, direct_link, 521 send_time, direct_link,
497 if_incoming, own_packet); 522 if_incoming, own_packet);
498 } else { 523 } else {
499 bat_iv_ogm_aggregate(forw_packet_aggr, packet_buff, 524 batadv_iv_ogm_aggregate(forw_packet_aggr, packet_buff,
500 packet_len, direct_link); 525 packet_len, direct_link);
501 spin_unlock_bh(&bat_priv->forw_bat_list_lock); 526 spin_unlock_bh(&bat_priv->forw_bat_list_lock);
502 } 527 }
503} 528}
504 529
505static void bat_iv_ogm_forward(struct orig_node *orig_node, 530static void batadv_iv_ogm_forward(struct batadv_orig_node *orig_node,
506 const struct ethhdr *ethhdr, 531 const struct ethhdr *ethhdr,
507 struct batman_ogm_packet *batman_ogm_packet, 532 struct batadv_ogm_packet *batadv_ogm_packet,
508 bool is_single_hop_neigh, 533 bool is_single_hop_neigh,
509 bool is_from_best_next_hop, 534 bool is_from_best_next_hop,
510 struct hard_iface *if_incoming) 535 struct batadv_hard_iface *if_incoming)
511{ 536{
512 struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface); 537 struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
513 uint8_t tt_num_changes; 538 uint8_t tt_num_changes;
514 539
515 if (batman_ogm_packet->header.ttl <= 1) { 540 if (batadv_ogm_packet->header.ttl <= 1) {
516 bat_dbg(DBG_BATMAN, bat_priv, "ttl exceeded\n"); 541 batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "ttl exceeded\n");
517 return; 542 return;
518 } 543 }
519 544
@@ -525,110 +550,113 @@ static void bat_iv_ogm_forward(struct orig_node *orig_node,
525 * simply drop the ogm. 550 * simply drop the ogm.
526 */ 551 */
527 if (is_single_hop_neigh) 552 if (is_single_hop_neigh)
528 batman_ogm_packet->flags |= NOT_BEST_NEXT_HOP; 553 batadv_ogm_packet->flags |= BATADV_NOT_BEST_NEXT_HOP;
529 else 554 else
530 return; 555 return;
531 } 556 }
532 557
533 tt_num_changes = batman_ogm_packet->tt_num_changes; 558 tt_num_changes = batadv_ogm_packet->tt_num_changes;
534 559
535 batman_ogm_packet->header.ttl--; 560 batadv_ogm_packet->header.ttl--;
536 memcpy(batman_ogm_packet->prev_sender, ethhdr->h_source, ETH_ALEN); 561 memcpy(batadv_ogm_packet->prev_sender, ethhdr->h_source, ETH_ALEN);
537 562
538 /* apply hop penalty */ 563 /* apply hop penalty */
539 batman_ogm_packet->tq = hop_penalty(batman_ogm_packet->tq, bat_priv); 564 batadv_ogm_packet->tq = batadv_hop_penalty(batadv_ogm_packet->tq,
565 bat_priv);
540 566
541 bat_dbg(DBG_BATMAN, bat_priv, 567 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
542 "Forwarding packet: tq: %i, ttl: %i\n", 568 "Forwarding packet: tq: %i, ttl: %i\n",
543 batman_ogm_packet->tq, batman_ogm_packet->header.ttl); 569 batadv_ogm_packet->tq, batadv_ogm_packet->header.ttl);
544
545 batman_ogm_packet->seqno = htonl(batman_ogm_packet->seqno);
546 batman_ogm_packet->tt_crc = htons(batman_ogm_packet->tt_crc);
547 570
548 /* switch of primaries first hop flag when forwarding */ 571 /* switch of primaries first hop flag when forwarding */
549 batman_ogm_packet->flags &= ~PRIMARIES_FIRST_HOP; 572 batadv_ogm_packet->flags &= ~BATADV_PRIMARIES_FIRST_HOP;
550 if (is_single_hop_neigh) 573 if (is_single_hop_neigh)
551 batman_ogm_packet->flags |= DIRECTLINK; 574 batadv_ogm_packet->flags |= BATADV_DIRECTLINK;
552 else 575 else
553 batman_ogm_packet->flags &= ~DIRECTLINK; 576 batadv_ogm_packet->flags &= ~BATADV_DIRECTLINK;
554 577
555 bat_iv_ogm_queue_add(bat_priv, (unsigned char *)batman_ogm_packet, 578 batadv_iv_ogm_queue_add(bat_priv, (unsigned char *)batadv_ogm_packet,
556 BATMAN_OGM_HLEN + tt_len(tt_num_changes), 579 BATADV_OGM_HLEN + batadv_tt_len(tt_num_changes),
557 if_incoming, 0, bat_iv_ogm_fwd_send_time()); 580 if_incoming, 0, batadv_iv_ogm_fwd_send_time());
558} 581}
559 582
560static void bat_iv_ogm_schedule(struct hard_iface *hard_iface, 583static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
561 int tt_num_changes)
562{ 584{
563 struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface); 585 struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
564 struct batman_ogm_packet *batman_ogm_packet; 586 struct batadv_ogm_packet *batadv_ogm_packet;
565 struct hard_iface *primary_if; 587 struct batadv_hard_iface *primary_if;
566 int vis_server; 588 int vis_server, tt_num_changes = 0;
567 589
568 vis_server = atomic_read(&bat_priv->vis_mode); 590 vis_server = atomic_read(&bat_priv->vis_mode);
569 primary_if = primary_if_get_selected(bat_priv); 591 primary_if = batadv_primary_if_get_selected(bat_priv);
592
593 if (hard_iface == primary_if)
594 tt_num_changes = batadv_tt_append_diff(bat_priv,
595 &hard_iface->packet_buff,
596 &hard_iface->packet_len,
597 BATADV_OGM_HLEN);
570 598
571 batman_ogm_packet = (struct batman_ogm_packet *)hard_iface->packet_buff; 599 batadv_ogm_packet = (struct batadv_ogm_packet *)hard_iface->packet_buff;
572 600
573 /* change sequence number to network order */ 601 /* change sequence number to network order */
574 batman_ogm_packet->seqno = 602 batadv_ogm_packet->seqno =
575 htonl((uint32_t)atomic_read(&hard_iface->seqno)); 603 htonl((uint32_t)atomic_read(&hard_iface->seqno));
604 atomic_inc(&hard_iface->seqno);
576 605
577 batman_ogm_packet->ttvn = atomic_read(&bat_priv->ttvn); 606 batadv_ogm_packet->ttvn = atomic_read(&bat_priv->ttvn);
578 batman_ogm_packet->tt_crc = htons((uint16_t) 607 batadv_ogm_packet->tt_crc = htons(bat_priv->tt_crc);
579 atomic_read(&bat_priv->tt_crc));
580 if (tt_num_changes >= 0) 608 if (tt_num_changes >= 0)
581 batman_ogm_packet->tt_num_changes = tt_num_changes; 609 batadv_ogm_packet->tt_num_changes = tt_num_changes;
582 610
583 if (vis_server == VIS_TYPE_SERVER_SYNC) 611 if (vis_server == BATADV_VIS_TYPE_SERVER_SYNC)
584 batman_ogm_packet->flags |= VIS_SERVER; 612 batadv_ogm_packet->flags |= BATADV_VIS_SERVER;
585 else 613 else
586 batman_ogm_packet->flags &= ~VIS_SERVER; 614 batadv_ogm_packet->flags &= ~BATADV_VIS_SERVER;
587 615
588 if ((hard_iface == primary_if) && 616 if ((hard_iface == primary_if) &&
589 (atomic_read(&bat_priv->gw_mode) == GW_MODE_SERVER)) 617 (atomic_read(&bat_priv->gw_mode) == BATADV_GW_MODE_SERVER))
590 batman_ogm_packet->gw_flags = 618 batadv_ogm_packet->gw_flags =
591 (uint8_t)atomic_read(&bat_priv->gw_bandwidth); 619 (uint8_t)atomic_read(&bat_priv->gw_bandwidth);
592 else 620 else
593 batman_ogm_packet->gw_flags = NO_FLAGS; 621 batadv_ogm_packet->gw_flags = BATADV_NO_FLAGS;
594
595 atomic_inc(&hard_iface->seqno);
596 622
597 slide_own_bcast_window(hard_iface); 623 batadv_slide_own_bcast_window(hard_iface);
598 bat_iv_ogm_queue_add(bat_priv, hard_iface->packet_buff, 624 batadv_iv_ogm_queue_add(bat_priv, hard_iface->packet_buff,
599 hard_iface->packet_len, hard_iface, 1, 625 hard_iface->packet_len, hard_iface, 1,
600 bat_iv_ogm_emit_send_time(bat_priv)); 626 batadv_iv_ogm_emit_send_time(bat_priv));
601 627
602 if (primary_if) 628 if (primary_if)
603 hardif_free_ref(primary_if); 629 batadv_hardif_free_ref(primary_if);
604} 630}
605 631
606static void bat_iv_ogm_orig_update(struct bat_priv *bat_priv, 632static void
607 struct orig_node *orig_node, 633batadv_iv_ogm_orig_update(struct batadv_priv *bat_priv,
608 const struct ethhdr *ethhdr, 634 struct batadv_orig_node *orig_node,
609 const struct batman_ogm_packet 635 const struct ethhdr *ethhdr,
610 *batman_ogm_packet, 636 const struct batadv_ogm_packet *batadv_ogm_packet,
611 struct hard_iface *if_incoming, 637 struct batadv_hard_iface *if_incoming,
612 const unsigned char *tt_buff, 638 const unsigned char *tt_buff,
613 int is_duplicate) 639 int is_duplicate)
614{ 640{
615 struct neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL; 641 struct batadv_neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL;
616 struct neigh_node *router = NULL; 642 struct batadv_neigh_node *router = NULL;
617 struct orig_node *orig_node_tmp; 643 struct batadv_orig_node *orig_node_tmp;
618 struct hlist_node *node; 644 struct hlist_node *node;
619 uint8_t bcast_own_sum_orig, bcast_own_sum_neigh; 645 uint8_t bcast_own_sum_orig, bcast_own_sum_neigh;
646 uint8_t *neigh_addr;
620 647
621 bat_dbg(DBG_BATMAN, bat_priv, 648 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
622 "update_originator(): Searching and updating originator entry of received packet\n"); 649 "update_originator(): Searching and updating originator entry of received packet\n");
623 650
624 rcu_read_lock(); 651 rcu_read_lock();
625 hlist_for_each_entry_rcu(tmp_neigh_node, node, 652 hlist_for_each_entry_rcu(tmp_neigh_node, node,
626 &orig_node->neigh_list, list) { 653 &orig_node->neigh_list, list) {
627 if (compare_eth(tmp_neigh_node->addr, ethhdr->h_source) && 654 neigh_addr = tmp_neigh_node->addr;
628 (tmp_neigh_node->if_incoming == if_incoming) && 655 if (batadv_compare_eth(neigh_addr, ethhdr->h_source) &&
629 atomic_inc_not_zero(&tmp_neigh_node->refcount)) { 656 tmp_neigh_node->if_incoming == if_incoming &&
657 atomic_inc_not_zero(&tmp_neigh_node->refcount)) {
630 if (neigh_node) 658 if (neigh_node)
631 neigh_node_free_ref(neigh_node); 659 batadv_neigh_node_free_ref(neigh_node);
632 neigh_node = tmp_neigh_node; 660 neigh_node = tmp_neigh_node;
633 continue; 661 continue;
634 } 662 }
@@ -637,53 +665,55 @@ static void bat_iv_ogm_orig_update(struct bat_priv *bat_priv,
637 continue; 665 continue;
638 666
639 spin_lock_bh(&tmp_neigh_node->lq_update_lock); 667 spin_lock_bh(&tmp_neigh_node->lq_update_lock);
640 ring_buffer_set(tmp_neigh_node->tq_recv, 668 batadv_ring_buffer_set(tmp_neigh_node->tq_recv,
641 &tmp_neigh_node->tq_index, 0); 669 &tmp_neigh_node->tq_index, 0);
642 tmp_neigh_node->tq_avg = 670 tmp_neigh_node->tq_avg =
643 ring_buffer_avg(tmp_neigh_node->tq_recv); 671 batadv_ring_buffer_avg(tmp_neigh_node->tq_recv);
644 spin_unlock_bh(&tmp_neigh_node->lq_update_lock); 672 spin_unlock_bh(&tmp_neigh_node->lq_update_lock);
645 } 673 }
646 674
647 if (!neigh_node) { 675 if (!neigh_node) {
648 struct orig_node *orig_tmp; 676 struct batadv_orig_node *orig_tmp;
649 677
650 orig_tmp = get_orig_node(bat_priv, ethhdr->h_source); 678 orig_tmp = batadv_get_orig_node(bat_priv, ethhdr->h_source);
651 if (!orig_tmp) 679 if (!orig_tmp)
652 goto unlock; 680 goto unlock;
653 681
654 neigh_node = bat_iv_ogm_neigh_new(if_incoming, ethhdr->h_source, 682 neigh_node = batadv_iv_ogm_neigh_new(if_incoming,
655 orig_node, orig_tmp, 683 ethhdr->h_source,
656 batman_ogm_packet->seqno); 684 orig_node, orig_tmp,
685 batadv_ogm_packet->seqno);
657 686
658 orig_node_free_ref(orig_tmp); 687 batadv_orig_node_free_ref(orig_tmp);
659 if (!neigh_node) 688 if (!neigh_node)
660 goto unlock; 689 goto unlock;
661 } else 690 } else
662 bat_dbg(DBG_BATMAN, bat_priv, 691 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
663 "Updating existing last-hop neighbor of originator\n"); 692 "Updating existing last-hop neighbor of originator\n");
664 693
665 rcu_read_unlock(); 694 rcu_read_unlock();
666 695
667 orig_node->flags = batman_ogm_packet->flags; 696 orig_node->flags = batadv_ogm_packet->flags;
668 neigh_node->last_seen = jiffies; 697 neigh_node->last_seen = jiffies;
669 698
670 spin_lock_bh(&neigh_node->lq_update_lock); 699 spin_lock_bh(&neigh_node->lq_update_lock);
671 ring_buffer_set(neigh_node->tq_recv, 700 batadv_ring_buffer_set(neigh_node->tq_recv,
672 &neigh_node->tq_index, 701 &neigh_node->tq_index,
673 batman_ogm_packet->tq); 702 batadv_ogm_packet->tq);
674 neigh_node->tq_avg = ring_buffer_avg(neigh_node->tq_recv); 703 neigh_node->tq_avg = batadv_ring_buffer_avg(neigh_node->tq_recv);
675 spin_unlock_bh(&neigh_node->lq_update_lock); 704 spin_unlock_bh(&neigh_node->lq_update_lock);
676 705
677 if (!is_duplicate) { 706 if (!is_duplicate) {
678 orig_node->last_ttl = batman_ogm_packet->header.ttl; 707 orig_node->last_ttl = batadv_ogm_packet->header.ttl;
679 neigh_node->last_ttl = batman_ogm_packet->header.ttl; 708 neigh_node->last_ttl = batadv_ogm_packet->header.ttl;
680 } 709 }
681 710
682 bonding_candidate_add(orig_node, neigh_node); 711 batadv_bonding_candidate_add(orig_node, neigh_node);
683 712
684 /* if this neighbor already is our next hop there is nothing 713 /* if this neighbor already is our next hop there is nothing
685 * to change */ 714 * to change
686 router = orig_node_get_router(orig_node); 715 */
716 router = batadv_orig_node_get_router(orig_node);
687 if (router == neigh_node) 717 if (router == neigh_node)
688 goto update_tt; 718 goto update_tt;
689 719
@@ -692,7 +722,8 @@ static void bat_iv_ogm_orig_update(struct bat_priv *bat_priv,
692 goto update_tt; 722 goto update_tt;
693 723
694 /* if the TQ is the same and the link not more symmetric we 724 /* if the TQ is the same and the link not more symmetric we
695 * won't consider it either */ 725 * won't consider it either
726 */
696 if (router && (neigh_node->tq_avg == router->tq_avg)) { 727 if (router && (neigh_node->tq_avg == router->tq_avg)) {
697 orig_node_tmp = router->orig_node; 728 orig_node_tmp = router->orig_node;
698 spin_lock_bh(&orig_node_tmp->ogm_cnt_lock); 729 spin_lock_bh(&orig_node_tmp->ogm_cnt_lock);
@@ -710,30 +741,31 @@ static void bat_iv_ogm_orig_update(struct bat_priv *bat_priv,
710 goto update_tt; 741 goto update_tt;
711 } 742 }
712 743
713 update_route(bat_priv, orig_node, neigh_node); 744 batadv_update_route(bat_priv, orig_node, neigh_node);
714 745
715update_tt: 746update_tt:
716 /* I have to check for transtable changes only if the OGM has been 747 /* I have to check for transtable changes only if the OGM has been
717 * sent through a primary interface */ 748 * sent through a primary interface
718 if (((batman_ogm_packet->orig != ethhdr->h_source) && 749 */
719 (batman_ogm_packet->header.ttl > 2)) || 750 if (((batadv_ogm_packet->orig != ethhdr->h_source) &&
720 (batman_ogm_packet->flags & PRIMARIES_FIRST_HOP)) 751 (batadv_ogm_packet->header.ttl > 2)) ||
721 tt_update_orig(bat_priv, orig_node, tt_buff, 752 (batadv_ogm_packet->flags & BATADV_PRIMARIES_FIRST_HOP))
722 batman_ogm_packet->tt_num_changes, 753 batadv_tt_update_orig(bat_priv, orig_node, tt_buff,
723 batman_ogm_packet->ttvn, 754 batadv_ogm_packet->tt_num_changes,
724 batman_ogm_packet->tt_crc); 755 batadv_ogm_packet->ttvn,
756 ntohs(batadv_ogm_packet->tt_crc));
725 757
726 if (orig_node->gw_flags != batman_ogm_packet->gw_flags) 758 if (orig_node->gw_flags != batadv_ogm_packet->gw_flags)
727 gw_node_update(bat_priv, orig_node, 759 batadv_gw_node_update(bat_priv, orig_node,
728 batman_ogm_packet->gw_flags); 760 batadv_ogm_packet->gw_flags);
729 761
730 orig_node->gw_flags = batman_ogm_packet->gw_flags; 762 orig_node->gw_flags = batadv_ogm_packet->gw_flags;
731 763
732 /* restart gateway selection if fast or late switching was enabled */ 764 /* restart gateway selection if fast or late switching was enabled */
733 if ((orig_node->gw_flags) && 765 if ((orig_node->gw_flags) &&
734 (atomic_read(&bat_priv->gw_mode) == GW_MODE_CLIENT) && 766 (atomic_read(&bat_priv->gw_mode) == BATADV_GW_MODE_CLIENT) &&
735 (atomic_read(&bat_priv->gw_sel_class) > 2)) 767 (atomic_read(&bat_priv->gw_sel_class) > 2))
736 gw_check_election(bat_priv, orig_node); 768 batadv_gw_check_election(bat_priv, orig_node);
737 769
738 goto out; 770 goto out;
739 771
@@ -741,29 +773,32 @@ unlock:
741 rcu_read_unlock(); 773 rcu_read_unlock();
742out: 774out:
743 if (neigh_node) 775 if (neigh_node)
744 neigh_node_free_ref(neigh_node); 776 batadv_neigh_node_free_ref(neigh_node);
745 if (router) 777 if (router)
746 neigh_node_free_ref(router); 778 batadv_neigh_node_free_ref(router);
747} 779}
748 780
749static int bat_iv_ogm_calc_tq(struct orig_node *orig_node, 781static int batadv_iv_ogm_calc_tq(struct batadv_orig_node *orig_node,
750 struct orig_node *orig_neigh_node, 782 struct batadv_orig_node *orig_neigh_node,
751 struct batman_ogm_packet *batman_ogm_packet, 783 struct batadv_ogm_packet *batadv_ogm_packet,
752 struct hard_iface *if_incoming) 784 struct batadv_hard_iface *if_incoming)
753{ 785{
754 struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface); 786 struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
755 struct neigh_node *neigh_node = NULL, *tmp_neigh_node; 787 struct batadv_neigh_node *neigh_node = NULL, *tmp_neigh_node;
756 struct hlist_node *node; 788 struct hlist_node *node;
757 uint8_t total_count; 789 uint8_t total_count;
758 uint8_t orig_eq_count, neigh_rq_count, tq_own; 790 uint8_t orig_eq_count, neigh_rq_count, neigh_rq_inv, tq_own;
759 int tq_asym_penalty, ret = 0; 791 unsigned int neigh_rq_inv_cube, neigh_rq_max_cube;
792 int tq_asym_penalty, inv_asym_penalty, ret = 0;
793 unsigned int combined_tq;
760 794
761 /* find corresponding one hop neighbor */ 795 /* find corresponding one hop neighbor */
762 rcu_read_lock(); 796 rcu_read_lock();
763 hlist_for_each_entry_rcu(tmp_neigh_node, node, 797 hlist_for_each_entry_rcu(tmp_neigh_node, node,
764 &orig_neigh_node->neigh_list, list) { 798 &orig_neigh_node->neigh_list, list) {
765 799
766 if (!compare_eth(tmp_neigh_node->addr, orig_neigh_node->orig)) 800 if (!batadv_compare_eth(tmp_neigh_node->addr,
801 orig_neigh_node->orig))
767 continue; 802 continue;
768 803
769 if (tmp_neigh_node->if_incoming != if_incoming) 804 if (tmp_neigh_node->if_incoming != if_incoming)
@@ -778,11 +813,11 @@ static int bat_iv_ogm_calc_tq(struct orig_node *orig_node,
778 rcu_read_unlock(); 813 rcu_read_unlock();
779 814
780 if (!neigh_node) 815 if (!neigh_node)
781 neigh_node = bat_iv_ogm_neigh_new(if_incoming, 816 neigh_node = batadv_iv_ogm_neigh_new(if_incoming,
782 orig_neigh_node->orig, 817 orig_neigh_node->orig,
783 orig_neigh_node, 818 orig_neigh_node,
784 orig_neigh_node, 819 orig_neigh_node,
785 batman_ogm_packet->seqno); 820 batadv_ogm_packet->seqno);
786 821
787 if (!neigh_node) 822 if (!neigh_node)
788 goto out; 823 goto out;
@@ -803,47 +838,52 @@ static int bat_iv_ogm_calc_tq(struct orig_node *orig_node,
803 total_count = (orig_eq_count > neigh_rq_count ? 838 total_count = (orig_eq_count > neigh_rq_count ?
804 neigh_rq_count : orig_eq_count); 839 neigh_rq_count : orig_eq_count);
805 840
806 /* if we have too few packets (too less data) we set tq_own to zero */ 841 /* if we have too few packets (too less data) we set tq_own to zero
807 /* if we receive too few packets it is not considered bidirectional */ 842 * if we receive too few packets it is not considered bidirectional
808 if ((total_count < TQ_LOCAL_BIDRECT_SEND_MINIMUM) || 843 */
809 (neigh_rq_count < TQ_LOCAL_BIDRECT_RECV_MINIMUM)) 844 if (total_count < BATADV_TQ_LOCAL_BIDRECT_SEND_MINIMUM ||
845 neigh_rq_count < BATADV_TQ_LOCAL_BIDRECT_RECV_MINIMUM)
810 tq_own = 0; 846 tq_own = 0;
811 else 847 else
812 /* neigh_node->real_packet_count is never zero as we 848 /* neigh_node->real_packet_count is never zero as we
813 * only purge old information when getting new 849 * only purge old information when getting new
814 * information */ 850 * information
815 tq_own = (TQ_MAX_VALUE * total_count) / neigh_rq_count; 851 */
852 tq_own = (BATADV_TQ_MAX_VALUE * total_count) / neigh_rq_count;
816 853
817 /* 1 - ((1-x) ** 3), normalized to TQ_MAX_VALUE this does 854 /* 1 - ((1-x) ** 3), normalized to TQ_MAX_VALUE this does
818 * affect the nearly-symmetric links only a little, but 855 * affect the nearly-symmetric links only a little, but
819 * punishes asymmetric links more. This will give a value 856 * punishes asymmetric links more. This will give a value
820 * between 0 and TQ_MAX_VALUE 857 * between 0 and TQ_MAX_VALUE
821 */ 858 */
822 tq_asym_penalty = TQ_MAX_VALUE - (TQ_MAX_VALUE * 859 neigh_rq_inv = BATADV_TQ_LOCAL_WINDOW_SIZE - neigh_rq_count;
823 (TQ_LOCAL_WINDOW_SIZE - neigh_rq_count) * 860 neigh_rq_inv_cube = neigh_rq_inv * neigh_rq_inv * neigh_rq_inv;
824 (TQ_LOCAL_WINDOW_SIZE - neigh_rq_count) * 861 neigh_rq_max_cube = BATADV_TQ_LOCAL_WINDOW_SIZE *
825 (TQ_LOCAL_WINDOW_SIZE - neigh_rq_count)) / 862 BATADV_TQ_LOCAL_WINDOW_SIZE *
826 (TQ_LOCAL_WINDOW_SIZE * 863 BATADV_TQ_LOCAL_WINDOW_SIZE;
827 TQ_LOCAL_WINDOW_SIZE * 864 inv_asym_penalty = BATADV_TQ_MAX_VALUE * neigh_rq_inv_cube;
828 TQ_LOCAL_WINDOW_SIZE); 865 inv_asym_penalty /= neigh_rq_max_cube;
829 866 tq_asym_penalty = BATADV_TQ_MAX_VALUE - inv_asym_penalty;
830 batman_ogm_packet->tq = ((batman_ogm_packet->tq * tq_own 867
831 * tq_asym_penalty) / 868 combined_tq = batadv_ogm_packet->tq * tq_own * tq_asym_penalty;
832 (TQ_MAX_VALUE * TQ_MAX_VALUE)); 869 combined_tq /= BATADV_TQ_MAX_VALUE * BATADV_TQ_MAX_VALUE;
833 870 batadv_ogm_packet->tq = combined_tq;
834 bat_dbg(DBG_BATMAN, bat_priv, 871
835 "bidirectional: orig = %-15pM neigh = %-15pM => own_bcast = %2i, real recv = %2i, local tq: %3i, asym_penalty: %3i, total tq: %3i\n", 872 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
836 orig_node->orig, orig_neigh_node->orig, total_count, 873 "bidirectional: orig = %-15pM neigh = %-15pM => own_bcast = %2i, real recv = %2i, local tq: %3i, asym_penalty: %3i, total tq: %3i\n",
837 neigh_rq_count, tq_own, tq_asym_penalty, batman_ogm_packet->tq); 874 orig_node->orig, orig_neigh_node->orig, total_count,
875 neigh_rq_count, tq_own,
876 tq_asym_penalty, batadv_ogm_packet->tq);
838 877
839 /* if link has the minimum required transmission quality 878 /* if link has the minimum required transmission quality
840 * consider it bidirectional */ 879 * consider it bidirectional
841 if (batman_ogm_packet->tq >= TQ_TOTAL_BIDRECT_LIMIT) 880 */
881 if (batadv_ogm_packet->tq >= BATADV_TQ_TOTAL_BIDRECT_LIMIT)
842 ret = 1; 882 ret = 1;
843 883
844out: 884out:
845 if (neigh_node) 885 if (neigh_node)
846 neigh_node_free_ref(neigh_node); 886 batadv_neigh_node_free_ref(neigh_node);
847 return ret; 887 return ret;
848} 888}
849 889
@@ -855,90 +895,94 @@ out:
855 * -1 the packet is old and has been received while the seqno window 895 * -1 the packet is old and has been received while the seqno window
856 * was protected. Caller should drop it. 896 * was protected. Caller should drop it.
857 */ 897 */
858static int bat_iv_ogm_update_seqnos(const struct ethhdr *ethhdr, 898static int
859 const struct batman_ogm_packet 899batadv_iv_ogm_update_seqnos(const struct ethhdr *ethhdr,
860 *batman_ogm_packet, 900 const struct batadv_ogm_packet *batadv_ogm_packet,
861 const struct hard_iface *if_incoming) 901 const struct batadv_hard_iface *if_incoming)
862{ 902{
863 struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface); 903 struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
864 struct orig_node *orig_node; 904 struct batadv_orig_node *orig_node;
865 struct neigh_node *tmp_neigh_node; 905 struct batadv_neigh_node *tmp_neigh_node;
866 struct hlist_node *node; 906 struct hlist_node *node;
867 int is_duplicate = 0; 907 int is_duplicate = 0;
868 int32_t seq_diff; 908 int32_t seq_diff;
869 int need_update = 0; 909 int need_update = 0;
870 int set_mark, ret = -1; 910 int set_mark, ret = -1;
911 uint32_t seqno = ntohl(batadv_ogm_packet->seqno);
912 uint8_t *neigh_addr;
871 913
872 orig_node = get_orig_node(bat_priv, batman_ogm_packet->orig); 914 orig_node = batadv_get_orig_node(bat_priv, batadv_ogm_packet->orig);
873 if (!orig_node) 915 if (!orig_node)
874 return 0; 916 return 0;
875 917
876 spin_lock_bh(&orig_node->ogm_cnt_lock); 918 spin_lock_bh(&orig_node->ogm_cnt_lock);
877 seq_diff = batman_ogm_packet->seqno - orig_node->last_real_seqno; 919 seq_diff = seqno - orig_node->last_real_seqno;
878 920
879 /* signalize caller that the packet is to be dropped. */ 921 /* signalize caller that the packet is to be dropped. */
880 if (!hlist_empty(&orig_node->neigh_list) && 922 if (!hlist_empty(&orig_node->neigh_list) &&
881 window_protected(bat_priv, seq_diff, 923 batadv_window_protected(bat_priv, seq_diff,
882 &orig_node->batman_seqno_reset)) 924 &orig_node->batman_seqno_reset))
883 goto out; 925 goto out;
884 926
885 rcu_read_lock(); 927 rcu_read_lock();
886 hlist_for_each_entry_rcu(tmp_neigh_node, node, 928 hlist_for_each_entry_rcu(tmp_neigh_node, node,
887 &orig_node->neigh_list, list) { 929 &orig_node->neigh_list, list) {
888 930
889 is_duplicate |= bat_test_bit(tmp_neigh_node->real_bits, 931 is_duplicate |= batadv_test_bit(tmp_neigh_node->real_bits,
890 orig_node->last_real_seqno, 932 orig_node->last_real_seqno,
891 batman_ogm_packet->seqno); 933 seqno);
892 934
893 if (compare_eth(tmp_neigh_node->addr, ethhdr->h_source) && 935 neigh_addr = tmp_neigh_node->addr;
894 (tmp_neigh_node->if_incoming == if_incoming)) 936 if (batadv_compare_eth(neigh_addr, ethhdr->h_source) &&
937 tmp_neigh_node->if_incoming == if_incoming)
895 set_mark = 1; 938 set_mark = 1;
896 else 939 else
897 set_mark = 0; 940 set_mark = 0;
898 941
899 /* if the window moved, set the update flag. */ 942 /* if the window moved, set the update flag. */
900 need_update |= bit_get_packet(bat_priv, 943 need_update |= batadv_bit_get_packet(bat_priv,
901 tmp_neigh_node->real_bits, 944 tmp_neigh_node->real_bits,
902 seq_diff, set_mark); 945 seq_diff, set_mark);
903 946
904 tmp_neigh_node->real_packet_count = 947 tmp_neigh_node->real_packet_count =
905 bitmap_weight(tmp_neigh_node->real_bits, 948 bitmap_weight(tmp_neigh_node->real_bits,
906 TQ_LOCAL_WINDOW_SIZE); 949 BATADV_TQ_LOCAL_WINDOW_SIZE);
907 } 950 }
908 rcu_read_unlock(); 951 rcu_read_unlock();
909 952
910 if (need_update) { 953 if (need_update) {
911 bat_dbg(DBG_BATMAN, bat_priv, 954 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
912 "updating last_seqno: old %u, new %u\n", 955 "updating last_seqno: old %u, new %u\n",
913 orig_node->last_real_seqno, batman_ogm_packet->seqno); 956 orig_node->last_real_seqno, seqno);
914 orig_node->last_real_seqno = batman_ogm_packet->seqno; 957 orig_node->last_real_seqno = seqno;
915 } 958 }
916 959
917 ret = is_duplicate; 960 ret = is_duplicate;
918 961
919out: 962out:
920 spin_unlock_bh(&orig_node->ogm_cnt_lock); 963 spin_unlock_bh(&orig_node->ogm_cnt_lock);
921 orig_node_free_ref(orig_node); 964 batadv_orig_node_free_ref(orig_node);
922 return ret; 965 return ret;
923} 966}
924 967
925static void bat_iv_ogm_process(const struct ethhdr *ethhdr, 968static void batadv_iv_ogm_process(const struct ethhdr *ethhdr,
926 struct batman_ogm_packet *batman_ogm_packet, 969 struct batadv_ogm_packet *batadv_ogm_packet,
927 const unsigned char *tt_buff, 970 const unsigned char *tt_buff,
928 struct hard_iface *if_incoming) 971 struct batadv_hard_iface *if_incoming)
929{ 972{
930 struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface); 973 struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
931 struct hard_iface *hard_iface; 974 struct batadv_hard_iface *hard_iface;
932 struct orig_node *orig_neigh_node, *orig_node; 975 struct batadv_orig_node *orig_neigh_node, *orig_node;
933 struct neigh_node *router = NULL, *router_router = NULL; 976 struct batadv_neigh_node *router = NULL, *router_router = NULL;
934 struct neigh_node *orig_neigh_router = NULL; 977 struct batadv_neigh_node *orig_neigh_router = NULL;
935 int has_directlink_flag; 978 int has_directlink_flag;
936 int is_my_addr = 0, is_my_orig = 0, is_my_oldorig = 0; 979 int is_my_addr = 0, is_my_orig = 0, is_my_oldorig = 0;
937 int is_broadcast = 0, is_bidirectional; 980 int is_broadcast = 0, is_bidirect;
938 bool is_single_hop_neigh = false; 981 bool is_single_hop_neigh = false;
939 bool is_from_best_next_hop = false; 982 bool is_from_best_next_hop = false;
940 int is_duplicate; 983 int is_duplicate, sameseq, simlar_ttl;
941 uint32_t if_incoming_seqno; 984 uint32_t if_incoming_seqno;
985 uint8_t *prev_sender;
942 986
943 /* Silently drop when the batman packet is actually not a 987 /* Silently drop when the batman packet is actually not a
944 * correct packet. 988 * correct packet.
@@ -948,49 +992,53 @@ static void bat_iv_ogm_process(const struct ethhdr *ethhdr,
948 * it as an additional length. 992 * it as an additional length.
949 * 993 *
950 * TODO: A more sane solution would be to have a bit in the 994 * TODO: A more sane solution would be to have a bit in the
951 * batman_ogm_packet to detect whether the packet is the last 995 * batadv_ogm_packet to detect whether the packet is the last
952 * packet in an aggregation. Here we expect that the padding 996 * packet in an aggregation. Here we expect that the padding
953 * is always zero (or not 0x01) 997 * is always zero (or not 0x01)
954 */ 998 */
955 if (batman_ogm_packet->header.packet_type != BAT_IV_OGM) 999 if (batadv_ogm_packet->header.packet_type != BATADV_IV_OGM)
956 return; 1000 return;
957 1001
958 /* could be changed by schedule_own_packet() */ 1002 /* could be changed by schedule_own_packet() */
959 if_incoming_seqno = atomic_read(&if_incoming->seqno); 1003 if_incoming_seqno = atomic_read(&if_incoming->seqno);
960 1004
961 has_directlink_flag = (batman_ogm_packet->flags & DIRECTLINK ? 1 : 0); 1005 if (batadv_ogm_packet->flags & BATADV_DIRECTLINK)
1006 has_directlink_flag = 1;
1007 else
1008 has_directlink_flag = 0;
962 1009
963 if (compare_eth(ethhdr->h_source, batman_ogm_packet->orig)) 1010 if (batadv_compare_eth(ethhdr->h_source, batadv_ogm_packet->orig))
964 is_single_hop_neigh = true; 1011 is_single_hop_neigh = true;
965 1012
966 bat_dbg(DBG_BATMAN, bat_priv, 1013 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
967 "Received BATMAN packet via NB: %pM, IF: %s [%pM] (from OG: %pM, via prev OG: %pM, seqno %u, ttvn %u, crc %u, changes %u, td %d, TTL %d, V %d, IDF %d)\n", 1014 "Received BATMAN packet via NB: %pM, IF: %s [%pM] (from OG: %pM, via prev OG: %pM, seqno %u, ttvn %u, crc %u, changes %u, td %d, TTL %d, V %d, IDF %d)\n",
968 ethhdr->h_source, if_incoming->net_dev->name, 1015 ethhdr->h_source, if_incoming->net_dev->name,
969 if_incoming->net_dev->dev_addr, batman_ogm_packet->orig, 1016 if_incoming->net_dev->dev_addr, batadv_ogm_packet->orig,
970 batman_ogm_packet->prev_sender, batman_ogm_packet->seqno, 1017 batadv_ogm_packet->prev_sender,
971 batman_ogm_packet->ttvn, batman_ogm_packet->tt_crc, 1018 ntohl(batadv_ogm_packet->seqno), batadv_ogm_packet->ttvn,
972 batman_ogm_packet->tt_num_changes, batman_ogm_packet->tq, 1019 ntohs(batadv_ogm_packet->tt_crc),
973 batman_ogm_packet->header.ttl, 1020 batadv_ogm_packet->tt_num_changes, batadv_ogm_packet->tq,
974 batman_ogm_packet->header.version, has_directlink_flag); 1021 batadv_ogm_packet->header.ttl,
1022 batadv_ogm_packet->header.version, has_directlink_flag);
975 1023
976 rcu_read_lock(); 1024 rcu_read_lock();
977 list_for_each_entry_rcu(hard_iface, &hardif_list, list) { 1025 list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
978 if (hard_iface->if_status != IF_ACTIVE) 1026 if (hard_iface->if_status != BATADV_IF_ACTIVE)
979 continue; 1027 continue;
980 1028
981 if (hard_iface->soft_iface != if_incoming->soft_iface) 1029 if (hard_iface->soft_iface != if_incoming->soft_iface)
982 continue; 1030 continue;
983 1031
984 if (compare_eth(ethhdr->h_source, 1032 if (batadv_compare_eth(ethhdr->h_source,
985 hard_iface->net_dev->dev_addr)) 1033 hard_iface->net_dev->dev_addr))
986 is_my_addr = 1; 1034 is_my_addr = 1;
987 1035
988 if (compare_eth(batman_ogm_packet->orig, 1036 if (batadv_compare_eth(batadv_ogm_packet->orig,
989 hard_iface->net_dev->dev_addr)) 1037 hard_iface->net_dev->dev_addr))
990 is_my_orig = 1; 1038 is_my_orig = 1;
991 1039
992 if (compare_eth(batman_ogm_packet->prev_sender, 1040 if (batadv_compare_eth(batadv_ogm_packet->prev_sender,
993 hard_iface->net_dev->dev_addr)) 1041 hard_iface->net_dev->dev_addr))
994 is_my_oldorig = 1; 1042 is_my_oldorig = 1;
995 1043
996 if (is_broadcast_ether_addr(ethhdr->h_source)) 1044 if (is_broadcast_ether_addr(ethhdr->h_source))
@@ -998,268 +1046,278 @@ static void bat_iv_ogm_process(const struct ethhdr *ethhdr,
998 } 1046 }
999 rcu_read_unlock(); 1047 rcu_read_unlock();
1000 1048
1001 if (batman_ogm_packet->header.version != COMPAT_VERSION) { 1049 if (batadv_ogm_packet->header.version != BATADV_COMPAT_VERSION) {
1002 bat_dbg(DBG_BATMAN, bat_priv, 1050 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
1003 "Drop packet: incompatible batman version (%i)\n", 1051 "Drop packet: incompatible batman version (%i)\n",
1004 batman_ogm_packet->header.version); 1052 batadv_ogm_packet->header.version);
1005 return; 1053 return;
1006 } 1054 }
1007 1055
1008 if (is_my_addr) { 1056 if (is_my_addr) {
1009 bat_dbg(DBG_BATMAN, bat_priv, 1057 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
1010 "Drop packet: received my own broadcast (sender: %pM)\n", 1058 "Drop packet: received my own broadcast (sender: %pM)\n",
1011 ethhdr->h_source); 1059 ethhdr->h_source);
1012 return; 1060 return;
1013 } 1061 }
1014 1062
1015 if (is_broadcast) { 1063 if (is_broadcast) {
1016 bat_dbg(DBG_BATMAN, bat_priv, 1064 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
1017 "Drop packet: ignoring all packets with broadcast source addr (sender: %pM)\n", 1065 "Drop packet: ignoring all packets with broadcast source addr (sender: %pM)\n",
1018 ethhdr->h_source); 1066 ethhdr->h_source);
1019 return; 1067 return;
1020 } 1068 }
1021 1069
1022 if (is_my_orig) { 1070 if (is_my_orig) {
1023 unsigned long *word; 1071 unsigned long *word;
1024 int offset; 1072 int offset;
1073 int32_t bit_pos;
1074 int16_t if_num;
1075 uint8_t *weight;
1025 1076
1026 orig_neigh_node = get_orig_node(bat_priv, ethhdr->h_source); 1077 orig_neigh_node = batadv_get_orig_node(bat_priv,
1078 ethhdr->h_source);
1027 if (!orig_neigh_node) 1079 if (!orig_neigh_node)
1028 return; 1080 return;
1029 1081
1030 /* neighbor has to indicate direct link and it has to 1082 /* neighbor has to indicate direct link and it has to
1031 * come via the corresponding interface */ 1083 * come via the corresponding interface
1032 /* save packet seqno for bidirectional check */ 1084 * save packet seqno for bidirectional check
1085 */
1033 if (has_directlink_flag && 1086 if (has_directlink_flag &&
1034 compare_eth(if_incoming->net_dev->dev_addr, 1087 batadv_compare_eth(if_incoming->net_dev->dev_addr,
1035 batman_ogm_packet->orig)) { 1088 batadv_ogm_packet->orig)) {
1036 offset = if_incoming->if_num * NUM_WORDS; 1089 if_num = if_incoming->if_num;
1090 offset = if_num * BATADV_NUM_WORDS;
1037 1091
1038 spin_lock_bh(&orig_neigh_node->ogm_cnt_lock); 1092 spin_lock_bh(&orig_neigh_node->ogm_cnt_lock);
1039 word = &(orig_neigh_node->bcast_own[offset]); 1093 word = &(orig_neigh_node->bcast_own[offset]);
1040 bat_set_bit(word, 1094 bit_pos = if_incoming_seqno - 2;
1041 if_incoming_seqno - 1095 bit_pos -= ntohl(batadv_ogm_packet->seqno);
1042 batman_ogm_packet->seqno - 2); 1096 batadv_set_bit(word, bit_pos);
1043 orig_neigh_node->bcast_own_sum[if_incoming->if_num] = 1097 weight = &orig_neigh_node->bcast_own_sum[if_num];
1044 bitmap_weight(word, TQ_LOCAL_WINDOW_SIZE); 1098 *weight = bitmap_weight(word,
1099 BATADV_TQ_LOCAL_WINDOW_SIZE);
1045 spin_unlock_bh(&orig_neigh_node->ogm_cnt_lock); 1100 spin_unlock_bh(&orig_neigh_node->ogm_cnt_lock);
1046 } 1101 }
1047 1102
1048 bat_dbg(DBG_BATMAN, bat_priv, 1103 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
1049 "Drop packet: originator packet from myself (via neighbor)\n"); 1104 "Drop packet: originator packet from myself (via neighbor)\n");
1050 orig_node_free_ref(orig_neigh_node); 1105 batadv_orig_node_free_ref(orig_neigh_node);
1051 return; 1106 return;
1052 } 1107 }
1053 1108
1054 if (is_my_oldorig) { 1109 if (is_my_oldorig) {
1055 bat_dbg(DBG_BATMAN, bat_priv, 1110 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
1056 "Drop packet: ignoring all rebroadcast echos (sender: %pM)\n", 1111 "Drop packet: ignoring all rebroadcast echos (sender: %pM)\n",
1057 ethhdr->h_source); 1112 ethhdr->h_source);
1058 return; 1113 return;
1059 } 1114 }
1060 1115
1061 if (batman_ogm_packet->flags & NOT_BEST_NEXT_HOP) { 1116 if (batadv_ogm_packet->flags & BATADV_NOT_BEST_NEXT_HOP) {
1062 bat_dbg(DBG_BATMAN, bat_priv, 1117 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
1063 "Drop packet: ignoring all packets not forwarded from the best next hop (sender: %pM)\n", 1118 "Drop packet: ignoring all packets not forwarded from the best next hop (sender: %pM)\n",
1064 ethhdr->h_source); 1119 ethhdr->h_source);
1065 return; 1120 return;
1066 } 1121 }
1067 1122
1068 orig_node = get_orig_node(bat_priv, batman_ogm_packet->orig); 1123 orig_node = batadv_get_orig_node(bat_priv, batadv_ogm_packet->orig);
1069 if (!orig_node) 1124 if (!orig_node)
1070 return; 1125 return;
1071 1126
1072 is_duplicate = bat_iv_ogm_update_seqnos(ethhdr, batman_ogm_packet, 1127 is_duplicate = batadv_iv_ogm_update_seqnos(ethhdr, batadv_ogm_packet,
1073 if_incoming); 1128 if_incoming);
1074 1129
1075 if (is_duplicate == -1) { 1130 if (is_duplicate == -1) {
1076 bat_dbg(DBG_BATMAN, bat_priv, 1131 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
1077 "Drop packet: packet within seqno protection time (sender: %pM)\n", 1132 "Drop packet: packet within seqno protection time (sender: %pM)\n",
1078 ethhdr->h_source); 1133 ethhdr->h_source);
1079 goto out; 1134 goto out;
1080 } 1135 }
1081 1136
1082 if (batman_ogm_packet->tq == 0) { 1137 if (batadv_ogm_packet->tq == 0) {
1083 bat_dbg(DBG_BATMAN, bat_priv, 1138 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
1084 "Drop packet: originator packet with tq equal 0\n"); 1139 "Drop packet: originator packet with tq equal 0\n");
1085 goto out; 1140 goto out;
1086 } 1141 }
1087 1142
1088 router = orig_node_get_router(orig_node); 1143 router = batadv_orig_node_get_router(orig_node);
1089 if (router) 1144 if (router)
1090 router_router = orig_node_get_router(router->orig_node); 1145 router_router = batadv_orig_node_get_router(router->orig_node);
1091 1146
1092 if ((router && router->tq_avg != 0) && 1147 if ((router && router->tq_avg != 0) &&
1093 (compare_eth(router->addr, ethhdr->h_source))) 1148 (batadv_compare_eth(router->addr, ethhdr->h_source)))
1094 is_from_best_next_hop = true; 1149 is_from_best_next_hop = true;
1095 1150
1151 prev_sender = batadv_ogm_packet->prev_sender;
1096 /* avoid temporary routing loops */ 1152 /* avoid temporary routing loops */
1097 if (router && router_router && 1153 if (router && router_router &&
1098 (compare_eth(router->addr, batman_ogm_packet->prev_sender)) && 1154 (batadv_compare_eth(router->addr, prev_sender)) &&
1099 !(compare_eth(batman_ogm_packet->orig, 1155 !(batadv_compare_eth(batadv_ogm_packet->orig, prev_sender)) &&
1100 batman_ogm_packet->prev_sender)) && 1156 (batadv_compare_eth(router->addr, router_router->addr))) {
1101 (compare_eth(router->addr, router_router->addr))) { 1157 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
1102 bat_dbg(DBG_BATMAN, bat_priv, 1158 "Drop packet: ignoring all rebroadcast packets that may make me loop (sender: %pM)\n",
1103 "Drop packet: ignoring all rebroadcast packets that may make me loop (sender: %pM)\n", 1159 ethhdr->h_source);
1104 ethhdr->h_source);
1105 goto out; 1160 goto out;
1106 } 1161 }
1107 1162
1108 /* if sender is a direct neighbor the sender mac equals 1163 /* if sender is a direct neighbor the sender mac equals
1109 * originator mac */ 1164 * originator mac
1165 */
1110 orig_neigh_node = (is_single_hop_neigh ? 1166 orig_neigh_node = (is_single_hop_neigh ?
1111 orig_node : 1167 orig_node :
1112 get_orig_node(bat_priv, ethhdr->h_source)); 1168 batadv_get_orig_node(bat_priv, ethhdr->h_source));
1113 if (!orig_neigh_node) 1169 if (!orig_neigh_node)
1114 goto out; 1170 goto out;
1115 1171
1116 orig_neigh_router = orig_node_get_router(orig_neigh_node); 1172 orig_neigh_router = batadv_orig_node_get_router(orig_neigh_node);
1117 1173
1118 /* drop packet if sender is not a direct neighbor and if we 1174 /* drop packet if sender is not a direct neighbor and if we
1119 * don't route towards it */ 1175 * don't route towards it
1176 */
1120 if (!is_single_hop_neigh && (!orig_neigh_router)) { 1177 if (!is_single_hop_neigh && (!orig_neigh_router)) {
1121 bat_dbg(DBG_BATMAN, bat_priv, 1178 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
1122 "Drop packet: OGM via unknown neighbor!\n"); 1179 "Drop packet: OGM via unknown neighbor!\n");
1123 goto out_neigh; 1180 goto out_neigh;
1124 } 1181 }
1125 1182
1126 is_bidirectional = bat_iv_ogm_calc_tq(orig_node, orig_neigh_node, 1183 is_bidirect = batadv_iv_ogm_calc_tq(orig_node, orig_neigh_node,
1127 batman_ogm_packet, if_incoming); 1184 batadv_ogm_packet, if_incoming);
1128 1185
1129 bonding_save_primary(orig_node, orig_neigh_node, batman_ogm_packet); 1186 batadv_bonding_save_primary(orig_node, orig_neigh_node,
1187 batadv_ogm_packet);
1130 1188
1131 /* update ranking if it is not a duplicate or has the same 1189 /* update ranking if it is not a duplicate or has the same
1132 * seqno and similar ttl as the non-duplicate */ 1190 * seqno and similar ttl as the non-duplicate
1133 if (is_bidirectional && 1191 */
1134 (!is_duplicate || 1192 sameseq = orig_node->last_real_seqno == ntohl(batadv_ogm_packet->seqno);
1135 ((orig_node->last_real_seqno == batman_ogm_packet->seqno) && 1193 simlar_ttl = orig_node->last_ttl - 3 <= batadv_ogm_packet->header.ttl;
1136 (orig_node->last_ttl - 3 <= batman_ogm_packet->header.ttl)))) 1194 if (is_bidirect && (!is_duplicate || (sameseq && simlar_ttl)))
1137 bat_iv_ogm_orig_update(bat_priv, orig_node, ethhdr, 1195 batadv_iv_ogm_orig_update(bat_priv, orig_node, ethhdr,
1138 batman_ogm_packet, if_incoming, 1196 batadv_ogm_packet, if_incoming,
1139 tt_buff, is_duplicate); 1197 tt_buff, is_duplicate);
1140 1198
1141 /* is single hop (direct) neighbor */ 1199 /* is single hop (direct) neighbor */
1142 if (is_single_hop_neigh) { 1200 if (is_single_hop_neigh) {
1143 1201
1144 /* mark direct link on incoming interface */ 1202 /* mark direct link on incoming interface */
1145 bat_iv_ogm_forward(orig_node, ethhdr, batman_ogm_packet, 1203 batadv_iv_ogm_forward(orig_node, ethhdr, batadv_ogm_packet,
1146 is_single_hop_neigh, is_from_best_next_hop, 1204 is_single_hop_neigh,
1147 if_incoming); 1205 is_from_best_next_hop, if_incoming);
1148 1206
1149 bat_dbg(DBG_BATMAN, bat_priv, 1207 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
1150 "Forwarding packet: rebroadcast neighbor packet with direct link flag\n"); 1208 "Forwarding packet: rebroadcast neighbor packet with direct link flag\n");
1151 goto out_neigh; 1209 goto out_neigh;
1152 } 1210 }
1153 1211
1154 /* multihop originator */ 1212 /* multihop originator */
1155 if (!is_bidirectional) { 1213 if (!is_bidirect) {
1156 bat_dbg(DBG_BATMAN, bat_priv, 1214 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
1157 "Drop packet: not received via bidirectional link\n"); 1215 "Drop packet: not received via bidirectional link\n");
1158 goto out_neigh; 1216 goto out_neigh;
1159 } 1217 }
1160 1218
1161 if (is_duplicate) { 1219 if (is_duplicate) {
1162 bat_dbg(DBG_BATMAN, bat_priv, 1220 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
1163 "Drop packet: duplicate packet received\n"); 1221 "Drop packet: duplicate packet received\n");
1164 goto out_neigh; 1222 goto out_neigh;
1165 } 1223 }
1166 1224
1167 bat_dbg(DBG_BATMAN, bat_priv, 1225 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
1168 "Forwarding packet: rebroadcast originator packet\n"); 1226 "Forwarding packet: rebroadcast originator packet\n");
1169 bat_iv_ogm_forward(orig_node, ethhdr, batman_ogm_packet, 1227 batadv_iv_ogm_forward(orig_node, ethhdr, batadv_ogm_packet,
1170 is_single_hop_neigh, is_from_best_next_hop, 1228 is_single_hop_neigh, is_from_best_next_hop,
1171 if_incoming); 1229 if_incoming);
1172 1230
1173out_neigh: 1231out_neigh:
1174 if ((orig_neigh_node) && (!is_single_hop_neigh)) 1232 if ((orig_neigh_node) && (!is_single_hop_neigh))
1175 orig_node_free_ref(orig_neigh_node); 1233 batadv_orig_node_free_ref(orig_neigh_node);
1176out: 1234out:
1177 if (router) 1235 if (router)
1178 neigh_node_free_ref(router); 1236 batadv_neigh_node_free_ref(router);
1179 if (router_router) 1237 if (router_router)
1180 neigh_node_free_ref(router_router); 1238 batadv_neigh_node_free_ref(router_router);
1181 if (orig_neigh_router) 1239 if (orig_neigh_router)
1182 neigh_node_free_ref(orig_neigh_router); 1240 batadv_neigh_node_free_ref(orig_neigh_router);
1183 1241
1184 orig_node_free_ref(orig_node); 1242 batadv_orig_node_free_ref(orig_node);
1185} 1243}
1186 1244
1187static int bat_iv_ogm_receive(struct sk_buff *skb, 1245static int batadv_iv_ogm_receive(struct sk_buff *skb,
1188 struct hard_iface *if_incoming) 1246 struct batadv_hard_iface *if_incoming)
1189{ 1247{
1190 struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface); 1248 struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
1191 struct batman_ogm_packet *batman_ogm_packet; 1249 struct batadv_ogm_packet *batadv_ogm_packet;
1192 struct ethhdr *ethhdr; 1250 struct ethhdr *ethhdr;
1193 int buff_pos = 0, packet_len; 1251 int buff_pos = 0, packet_len;
1194 unsigned char *tt_buff, *packet_buff; 1252 unsigned char *tt_buff, *packet_buff;
1195 bool ret; 1253 bool ret;
1196 1254
1197 ret = check_management_packet(skb, if_incoming, BATMAN_OGM_HLEN); 1255 ret = batadv_check_management_packet(skb, if_incoming, BATADV_OGM_HLEN);
1198 if (!ret) 1256 if (!ret)
1199 return NET_RX_DROP; 1257 return NET_RX_DROP;
1200 1258
1201 /* did we receive a B.A.T.M.A.N. IV OGM packet on an interface 1259 /* did we receive a B.A.T.M.A.N. IV OGM packet on an interface
1202 * that does not have B.A.T.M.A.N. IV enabled ? 1260 * that does not have B.A.T.M.A.N. IV enabled ?
1203 */ 1261 */
1204 if (bat_priv->bat_algo_ops->bat_ogm_emit != bat_iv_ogm_emit) 1262 if (bat_priv->bat_algo_ops->bat_ogm_emit != batadv_iv_ogm_emit)
1205 return NET_RX_DROP; 1263 return NET_RX_DROP;
1206 1264
1265 batadv_inc_counter(bat_priv, BATADV_CNT_MGMT_RX);
1266 batadv_add_counter(bat_priv, BATADV_CNT_MGMT_RX_BYTES,
1267 skb->len + ETH_HLEN);
1268
1207 packet_len = skb_headlen(skb); 1269 packet_len = skb_headlen(skb);
1208 ethhdr = (struct ethhdr *)skb_mac_header(skb); 1270 ethhdr = (struct ethhdr *)skb_mac_header(skb);
1209 packet_buff = skb->data; 1271 packet_buff = skb->data;
1210 batman_ogm_packet = (struct batman_ogm_packet *)packet_buff; 1272 batadv_ogm_packet = (struct batadv_ogm_packet *)packet_buff;
1211 1273
1212 /* unpack the aggregated packets and process them one by one */ 1274 /* unpack the aggregated packets and process them one by one */
1213 do { 1275 do {
1214 /* network to host order for our 32bit seqno and the 1276 tt_buff = packet_buff + buff_pos + BATADV_OGM_HLEN;
1215 orig_interval */
1216 batman_ogm_packet->seqno = ntohl(batman_ogm_packet->seqno);
1217 batman_ogm_packet->tt_crc = ntohs(batman_ogm_packet->tt_crc);
1218
1219 tt_buff = packet_buff + buff_pos + BATMAN_OGM_HLEN;
1220 1277
1221 bat_iv_ogm_process(ethhdr, batman_ogm_packet, 1278 batadv_iv_ogm_process(ethhdr, batadv_ogm_packet, tt_buff,
1222 tt_buff, if_incoming); 1279 if_incoming);
1223 1280
1224 buff_pos += BATMAN_OGM_HLEN + 1281 buff_pos += BATADV_OGM_HLEN;
1225 tt_len(batman_ogm_packet->tt_num_changes); 1282 buff_pos += batadv_tt_len(batadv_ogm_packet->tt_num_changes);
1226 1283
1227 batman_ogm_packet = (struct batman_ogm_packet *) 1284 batadv_ogm_packet = (struct batadv_ogm_packet *)
1228 (packet_buff + buff_pos); 1285 (packet_buff + buff_pos);
1229 } while (bat_iv_ogm_aggr_packet(buff_pos, packet_len, 1286 } while (batadv_iv_ogm_aggr_packet(buff_pos, packet_len,
1230 batman_ogm_packet->tt_num_changes)); 1287 batadv_ogm_packet->tt_num_changes));
1231 1288
1232 kfree_skb(skb); 1289 kfree_skb(skb);
1233 return NET_RX_SUCCESS; 1290 return NET_RX_SUCCESS;
1234} 1291}
1235 1292
1236static struct bat_algo_ops batman_iv __read_mostly = { 1293static struct batadv_algo_ops batadv_batman_iv __read_mostly = {
1237 .name = "BATMAN IV", 1294 .name = "BATMAN_IV",
1238 .bat_iface_enable = bat_iv_ogm_iface_enable, 1295 .bat_iface_enable = batadv_iv_ogm_iface_enable,
1239 .bat_iface_disable = bat_iv_ogm_iface_disable, 1296 .bat_iface_disable = batadv_iv_ogm_iface_disable,
1240 .bat_iface_update_mac = bat_iv_ogm_iface_update_mac, 1297 .bat_iface_update_mac = batadv_iv_ogm_iface_update_mac,
1241 .bat_primary_iface_set = bat_iv_ogm_primary_iface_set, 1298 .bat_primary_iface_set = batadv_iv_ogm_primary_iface_set,
1242 .bat_ogm_schedule = bat_iv_ogm_schedule, 1299 .bat_ogm_schedule = batadv_iv_ogm_schedule,
1243 .bat_ogm_emit = bat_iv_ogm_emit, 1300 .bat_ogm_emit = batadv_iv_ogm_emit,
1244}; 1301};
1245 1302
1246int __init bat_iv_init(void) 1303int __init batadv_iv_init(void)
1247{ 1304{
1248 int ret; 1305 int ret;
1249 1306
1250 /* batman originator packet */ 1307 /* batman originator packet */
1251 ret = recv_handler_register(BAT_IV_OGM, bat_iv_ogm_receive); 1308 ret = batadv_recv_handler_register(BATADV_IV_OGM,
1309 batadv_iv_ogm_receive);
1252 if (ret < 0) 1310 if (ret < 0)
1253 goto out; 1311 goto out;
1254 1312
1255 ret = bat_algo_register(&batman_iv); 1313 ret = batadv_algo_register(&batadv_batman_iv);
1256 if (ret < 0) 1314 if (ret < 0)
1257 goto handler_unregister; 1315 goto handler_unregister;
1258 1316
1259 goto out; 1317 goto out;
1260 1318
1261handler_unregister: 1319handler_unregister:
1262 recv_handler_unregister(BAT_IV_OGM); 1320 batadv_recv_handler_unregister(BATADV_IV_OGM);
1263out: 1321out:
1264 return ret; 1322 return ret;
1265} 1323}
diff --git a/net/batman-adv/bat_sysfs.c b/net/batman-adv/bat_sysfs.c
deleted file mode 100644
index 5bc7b66d32d..00000000000
--- a/net/batman-adv/bat_sysfs.c
+++ /dev/null
@@ -1,735 +0,0 @@
1/*
2 * Copyright (C) 2010-2012 B.A.T.M.A.N. contributors:
3 *
4 * Marek Lindner
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of version 2 of the GNU General Public
8 * License as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA
19 *
20 */
21
22#include "main.h"
23#include "bat_sysfs.h"
24#include "translation-table.h"
25#include "originator.h"
26#include "hard-interface.h"
27#include "gateway_common.h"
28#include "gateway_client.h"
29#include "vis.h"
30
31static struct net_device *kobj_to_netdev(struct kobject *obj)
32{
33 struct device *dev = container_of(obj->parent, struct device, kobj);
34 return to_net_dev(dev);
35}
36
37static struct bat_priv *kobj_to_batpriv(struct kobject *obj)
38{
39 struct net_device *net_dev = kobj_to_netdev(obj);
40 return netdev_priv(net_dev);
41}
42
43#define UEV_TYPE_VAR "BATTYPE="
44#define UEV_ACTION_VAR "BATACTION="
45#define UEV_DATA_VAR "BATDATA="
46
47static char *uev_action_str[] = {
48 "add",
49 "del",
50 "change"
51};
52
53static char *uev_type_str[] = {
54 "gw"
55};
56
57/* Use this, if you have customized show and store functions */
58#define BAT_ATTR(_name, _mode, _show, _store) \
59struct bat_attribute bat_attr_##_name = { \
60 .attr = {.name = __stringify(_name), \
61 .mode = _mode }, \
62 .show = _show, \
63 .store = _store, \
64};
65
66#define BAT_ATTR_SIF_STORE_BOOL(_name, _post_func) \
67ssize_t store_##_name(struct kobject *kobj, struct attribute *attr, \
68 char *buff, size_t count) \
69{ \
70 struct net_device *net_dev = kobj_to_netdev(kobj); \
71 struct bat_priv *bat_priv = netdev_priv(net_dev); \
72 return __store_bool_attr(buff, count, _post_func, attr, \
73 &bat_priv->_name, net_dev); \
74}
75
76#define BAT_ATTR_SIF_SHOW_BOOL(_name) \
77ssize_t show_##_name(struct kobject *kobj, \
78 struct attribute *attr, char *buff) \
79{ \
80 struct bat_priv *bat_priv = kobj_to_batpriv(kobj); \
81 return sprintf(buff, "%s\n", \
82 atomic_read(&bat_priv->_name) == 0 ? \
83 "disabled" : "enabled"); \
84} \
85
86/* Use this, if you are going to turn a [name] in the soft-interface
87 * (bat_priv) on or off */
88#define BAT_ATTR_SIF_BOOL(_name, _mode, _post_func) \
89 static BAT_ATTR_SIF_STORE_BOOL(_name, _post_func) \
90 static BAT_ATTR_SIF_SHOW_BOOL(_name) \
91 static BAT_ATTR(_name, _mode, show_##_name, store_##_name)
92
93
94#define BAT_ATTR_SIF_STORE_UINT(_name, _min, _max, _post_func) \
95ssize_t store_##_name(struct kobject *kobj, struct attribute *attr, \
96 char *buff, size_t count) \
97{ \
98 struct net_device *net_dev = kobj_to_netdev(kobj); \
99 struct bat_priv *bat_priv = netdev_priv(net_dev); \
100 return __store_uint_attr(buff, count, _min, _max, _post_func, \
101 attr, &bat_priv->_name, net_dev); \
102}
103
104#define BAT_ATTR_SIF_SHOW_UINT(_name) \
105ssize_t show_##_name(struct kobject *kobj, \
106 struct attribute *attr, char *buff) \
107{ \
108 struct bat_priv *bat_priv = kobj_to_batpriv(kobj); \
109 return sprintf(buff, "%i\n", atomic_read(&bat_priv->_name)); \
110} \
111
112/* Use this, if you are going to set [name] in the soft-interface
113 * (bat_priv) to an unsigned integer value */
114#define BAT_ATTR_SIF_UINT(_name, _mode, _min, _max, _post_func) \
115 static BAT_ATTR_SIF_STORE_UINT(_name, _min, _max, _post_func) \
116 static BAT_ATTR_SIF_SHOW_UINT(_name) \
117 static BAT_ATTR(_name, _mode, show_##_name, store_##_name)
118
119
120#define BAT_ATTR_HIF_STORE_UINT(_name, _min, _max, _post_func) \
121ssize_t store_##_name(struct kobject *kobj, struct attribute *attr, \
122 char *buff, size_t count) \
123{ \
124 struct net_device *net_dev = kobj_to_netdev(kobj); \
125 struct hard_iface *hard_iface = hardif_get_by_netdev(net_dev); \
126 ssize_t length; \
127 \
128 if (!hard_iface) \
129 return 0; \
130 \
131 length = __store_uint_attr(buff, count, _min, _max, _post_func, \
132 attr, &hard_iface->_name, net_dev); \
133 \
134 hardif_free_ref(hard_iface); \
135 return length; \
136}
137
138#define BAT_ATTR_HIF_SHOW_UINT(_name) \
139ssize_t show_##_name(struct kobject *kobj, \
140 struct attribute *attr, char *buff) \
141{ \
142 struct net_device *net_dev = kobj_to_netdev(kobj); \
143 struct hard_iface *hard_iface = hardif_get_by_netdev(net_dev); \
144 ssize_t length; \
145 \
146 if (!hard_iface) \
147 return 0; \
148 \
149 length = sprintf(buff, "%i\n", atomic_read(&hard_iface->_name));\
150 \
151 hardif_free_ref(hard_iface); \
152 return length; \
153}
154
155/* Use this, if you are going to set [name] in hard_iface to an
156 * unsigned integer value*/
157#define BAT_ATTR_HIF_UINT(_name, _mode, _min, _max, _post_func) \
158 static BAT_ATTR_HIF_STORE_UINT(_name, _min, _max, _post_func) \
159 static BAT_ATTR_HIF_SHOW_UINT(_name) \
160 static BAT_ATTR(_name, _mode, show_##_name, store_##_name)
161
162
163static int store_bool_attr(char *buff, size_t count,
164 struct net_device *net_dev,
165 const char *attr_name, atomic_t *attr)
166{
167 int enabled = -1;
168
169 if (buff[count - 1] == '\n')
170 buff[count - 1] = '\0';
171
172 if ((strncmp(buff, "1", 2) == 0) ||
173 (strncmp(buff, "enable", 7) == 0) ||
174 (strncmp(buff, "enabled", 8) == 0))
175 enabled = 1;
176
177 if ((strncmp(buff, "0", 2) == 0) ||
178 (strncmp(buff, "disable", 8) == 0) ||
179 (strncmp(buff, "disabled", 9) == 0))
180 enabled = 0;
181
182 if (enabled < 0) {
183 bat_info(net_dev,
184 "%s: Invalid parameter received: %s\n",
185 attr_name, buff);
186 return -EINVAL;
187 }
188
189 if (atomic_read(attr) == enabled)
190 return count;
191
192 bat_info(net_dev, "%s: Changing from: %s to: %s\n", attr_name,
193 atomic_read(attr) == 1 ? "enabled" : "disabled",
194 enabled == 1 ? "enabled" : "disabled");
195
196 atomic_set(attr, (unsigned int)enabled);
197 return count;
198}
199
200static inline ssize_t __store_bool_attr(char *buff, size_t count,
201 void (*post_func)(struct net_device *),
202 struct attribute *attr,
203 atomic_t *attr_store, struct net_device *net_dev)
204{
205 int ret;
206
207 ret = store_bool_attr(buff, count, net_dev, attr->name, attr_store);
208 if (post_func && ret)
209 post_func(net_dev);
210
211 return ret;
212}
213
214static int store_uint_attr(const char *buff, size_t count,
215 struct net_device *net_dev, const char *attr_name,
216 unsigned int min, unsigned int max, atomic_t *attr)
217{
218 unsigned long uint_val;
219 int ret;
220
221 ret = kstrtoul(buff, 10, &uint_val);
222 if (ret) {
223 bat_info(net_dev,
224 "%s: Invalid parameter received: %s\n",
225 attr_name, buff);
226 return -EINVAL;
227 }
228
229 if (uint_val < min) {
230 bat_info(net_dev, "%s: Value is too small: %lu min: %u\n",
231 attr_name, uint_val, min);
232 return -EINVAL;
233 }
234
235 if (uint_val > max) {
236 bat_info(net_dev, "%s: Value is too big: %lu max: %u\n",
237 attr_name, uint_val, max);
238 return -EINVAL;
239 }
240
241 if (atomic_read(attr) == uint_val)
242 return count;
243
244 bat_info(net_dev, "%s: Changing from: %i to: %lu\n",
245 attr_name, atomic_read(attr), uint_val);
246
247 atomic_set(attr, uint_val);
248 return count;
249}
250
251static inline ssize_t __store_uint_attr(const char *buff, size_t count,
252 int min, int max,
253 void (*post_func)(struct net_device *),
254 const struct attribute *attr,
255 atomic_t *attr_store, struct net_device *net_dev)
256{
257 int ret;
258
259 ret = store_uint_attr(buff, count, net_dev, attr->name,
260 min, max, attr_store);
261 if (post_func && ret)
262 post_func(net_dev);
263
264 return ret;
265}
266
267static ssize_t show_vis_mode(struct kobject *kobj, struct attribute *attr,
268 char *buff)
269{
270 struct bat_priv *bat_priv = kobj_to_batpriv(kobj);
271 int vis_mode = atomic_read(&bat_priv->vis_mode);
272
273 return sprintf(buff, "%s\n",
274 vis_mode == VIS_TYPE_CLIENT_UPDATE ?
275 "client" : "server");
276}
277
278static ssize_t store_vis_mode(struct kobject *kobj, struct attribute *attr,
279 char *buff, size_t count)
280{
281 struct net_device *net_dev = kobj_to_netdev(kobj);
282 struct bat_priv *bat_priv = netdev_priv(net_dev);
283 unsigned long val;
284 int ret, vis_mode_tmp = -1;
285
286 ret = kstrtoul(buff, 10, &val);
287
288 if (((count == 2) && (!ret) && (val == VIS_TYPE_CLIENT_UPDATE)) ||
289 (strncmp(buff, "client", 6) == 0) ||
290 (strncmp(buff, "off", 3) == 0))
291 vis_mode_tmp = VIS_TYPE_CLIENT_UPDATE;
292
293 if (((count == 2) && (!ret) && (val == VIS_TYPE_SERVER_SYNC)) ||
294 (strncmp(buff, "server", 6) == 0))
295 vis_mode_tmp = VIS_TYPE_SERVER_SYNC;
296
297 if (vis_mode_tmp < 0) {
298 if (buff[count - 1] == '\n')
299 buff[count - 1] = '\0';
300
301 bat_info(net_dev,
302 "Invalid parameter for 'vis mode' setting received: %s\n",
303 buff);
304 return -EINVAL;
305 }
306
307 if (atomic_read(&bat_priv->vis_mode) == vis_mode_tmp)
308 return count;
309
310 bat_info(net_dev, "Changing vis mode from: %s to: %s\n",
311 atomic_read(&bat_priv->vis_mode) == VIS_TYPE_CLIENT_UPDATE ?
312 "client" : "server", vis_mode_tmp == VIS_TYPE_CLIENT_UPDATE ?
313 "client" : "server");
314
315 atomic_set(&bat_priv->vis_mode, (unsigned int)vis_mode_tmp);
316 return count;
317}
318
319static ssize_t show_bat_algo(struct kobject *kobj, struct attribute *attr,
320 char *buff)
321{
322 struct bat_priv *bat_priv = kobj_to_batpriv(kobj);
323 return sprintf(buff, "%s\n", bat_priv->bat_algo_ops->name);
324}
325
326static void post_gw_deselect(struct net_device *net_dev)
327{
328 struct bat_priv *bat_priv = netdev_priv(net_dev);
329 gw_deselect(bat_priv);
330}
331
332static ssize_t show_gw_mode(struct kobject *kobj, struct attribute *attr,
333 char *buff)
334{
335 struct bat_priv *bat_priv = kobj_to_batpriv(kobj);
336 int bytes_written;
337
338 switch (atomic_read(&bat_priv->gw_mode)) {
339 case GW_MODE_CLIENT:
340 bytes_written = sprintf(buff, "%s\n", GW_MODE_CLIENT_NAME);
341 break;
342 case GW_MODE_SERVER:
343 bytes_written = sprintf(buff, "%s\n", GW_MODE_SERVER_NAME);
344 break;
345 default:
346 bytes_written = sprintf(buff, "%s\n", GW_MODE_OFF_NAME);
347 break;
348 }
349
350 return bytes_written;
351}
352
353static ssize_t store_gw_mode(struct kobject *kobj, struct attribute *attr,
354 char *buff, size_t count)
355{
356 struct net_device *net_dev = kobj_to_netdev(kobj);
357 struct bat_priv *bat_priv = netdev_priv(net_dev);
358 char *curr_gw_mode_str;
359 int gw_mode_tmp = -1;
360
361 if (buff[count - 1] == '\n')
362 buff[count - 1] = '\0';
363
364 if (strncmp(buff, GW_MODE_OFF_NAME, strlen(GW_MODE_OFF_NAME)) == 0)
365 gw_mode_tmp = GW_MODE_OFF;
366
367 if (strncmp(buff, GW_MODE_CLIENT_NAME,
368 strlen(GW_MODE_CLIENT_NAME)) == 0)
369 gw_mode_tmp = GW_MODE_CLIENT;
370
371 if (strncmp(buff, GW_MODE_SERVER_NAME,
372 strlen(GW_MODE_SERVER_NAME)) == 0)
373 gw_mode_tmp = GW_MODE_SERVER;
374
375 if (gw_mode_tmp < 0) {
376 bat_info(net_dev,
377 "Invalid parameter for 'gw mode' setting received: %s\n",
378 buff);
379 return -EINVAL;
380 }
381
382 if (atomic_read(&bat_priv->gw_mode) == gw_mode_tmp)
383 return count;
384
385 switch (atomic_read(&bat_priv->gw_mode)) {
386 case GW_MODE_CLIENT:
387 curr_gw_mode_str = GW_MODE_CLIENT_NAME;
388 break;
389 case GW_MODE_SERVER:
390 curr_gw_mode_str = GW_MODE_SERVER_NAME;
391 break;
392 default:
393 curr_gw_mode_str = GW_MODE_OFF_NAME;
394 break;
395 }
396
397 bat_info(net_dev, "Changing gw mode from: %s to: %s\n",
398 curr_gw_mode_str, buff);
399
400 gw_deselect(bat_priv);
401 atomic_set(&bat_priv->gw_mode, (unsigned int)gw_mode_tmp);
402 return count;
403}
404
405static ssize_t show_gw_bwidth(struct kobject *kobj, struct attribute *attr,
406 char *buff)
407{
408 struct bat_priv *bat_priv = kobj_to_batpriv(kobj);
409 int down, up;
410 int gw_bandwidth = atomic_read(&bat_priv->gw_bandwidth);
411
412 gw_bandwidth_to_kbit(gw_bandwidth, &down, &up);
413 return sprintf(buff, "%i%s/%i%s\n",
414 (down > 2048 ? down / 1024 : down),
415 (down > 2048 ? "MBit" : "KBit"),
416 (up > 2048 ? up / 1024 : up),
417 (up > 2048 ? "MBit" : "KBit"));
418}
419
420static ssize_t store_gw_bwidth(struct kobject *kobj, struct attribute *attr,
421 char *buff, size_t count)
422{
423 struct net_device *net_dev = kobj_to_netdev(kobj);
424
425 if (buff[count - 1] == '\n')
426 buff[count - 1] = '\0';
427
428 return gw_bandwidth_set(net_dev, buff, count);
429}
430
431BAT_ATTR_SIF_BOOL(aggregated_ogms, S_IRUGO | S_IWUSR, NULL);
432BAT_ATTR_SIF_BOOL(bonding, S_IRUGO | S_IWUSR, NULL);
433#ifdef CONFIG_BATMAN_ADV_BLA
434BAT_ATTR_SIF_BOOL(bridge_loop_avoidance, S_IRUGO | S_IWUSR, NULL);
435#endif
436BAT_ATTR_SIF_BOOL(fragmentation, S_IRUGO | S_IWUSR, update_min_mtu);
437BAT_ATTR_SIF_BOOL(ap_isolation, S_IRUGO | S_IWUSR, NULL);
438static BAT_ATTR(vis_mode, S_IRUGO | S_IWUSR, show_vis_mode, store_vis_mode);
439static BAT_ATTR(routing_algo, S_IRUGO, show_bat_algo, NULL);
440static BAT_ATTR(gw_mode, S_IRUGO | S_IWUSR, show_gw_mode, store_gw_mode);
441BAT_ATTR_SIF_UINT(orig_interval, S_IRUGO | S_IWUSR, 2 * JITTER, INT_MAX, NULL);
442BAT_ATTR_SIF_UINT(hop_penalty, S_IRUGO | S_IWUSR, 0, TQ_MAX_VALUE, NULL);
443BAT_ATTR_SIF_UINT(gw_sel_class, S_IRUGO | S_IWUSR, 1, TQ_MAX_VALUE,
444 post_gw_deselect);
445static BAT_ATTR(gw_bandwidth, S_IRUGO | S_IWUSR, show_gw_bwidth,
446 store_gw_bwidth);
447#ifdef CONFIG_BATMAN_ADV_DEBUG
448BAT_ATTR_SIF_UINT(log_level, S_IRUGO | S_IWUSR, 0, 15, NULL);
449#endif
450
451static struct bat_attribute *mesh_attrs[] = {
452 &bat_attr_aggregated_ogms,
453 &bat_attr_bonding,
454#ifdef CONFIG_BATMAN_ADV_BLA
455 &bat_attr_bridge_loop_avoidance,
456#endif
457 &bat_attr_fragmentation,
458 &bat_attr_ap_isolation,
459 &bat_attr_vis_mode,
460 &bat_attr_routing_algo,
461 &bat_attr_gw_mode,
462 &bat_attr_orig_interval,
463 &bat_attr_hop_penalty,
464 &bat_attr_gw_sel_class,
465 &bat_attr_gw_bandwidth,
466#ifdef CONFIG_BATMAN_ADV_DEBUG
467 &bat_attr_log_level,
468#endif
469 NULL,
470};
471
472int sysfs_add_meshif(struct net_device *dev)
473{
474 struct kobject *batif_kobject = &dev->dev.kobj;
475 struct bat_priv *bat_priv = netdev_priv(dev);
476 struct bat_attribute **bat_attr;
477 int err;
478
479 bat_priv->mesh_obj = kobject_create_and_add(SYSFS_IF_MESH_SUBDIR,
480 batif_kobject);
481 if (!bat_priv->mesh_obj) {
482 bat_err(dev, "Can't add sysfs directory: %s/%s\n", dev->name,
483 SYSFS_IF_MESH_SUBDIR);
484 goto out;
485 }
486
487 for (bat_attr = mesh_attrs; *bat_attr; ++bat_attr) {
488 err = sysfs_create_file(bat_priv->mesh_obj,
489 &((*bat_attr)->attr));
490 if (err) {
491 bat_err(dev, "Can't add sysfs file: %s/%s/%s\n",
492 dev->name, SYSFS_IF_MESH_SUBDIR,
493 ((*bat_attr)->attr).name);
494 goto rem_attr;
495 }
496 }
497
498 return 0;
499
500rem_attr:
501 for (bat_attr = mesh_attrs; *bat_attr; ++bat_attr)
502 sysfs_remove_file(bat_priv->mesh_obj, &((*bat_attr)->attr));
503
504 kobject_put(bat_priv->mesh_obj);
505 bat_priv->mesh_obj = NULL;
506out:
507 return -ENOMEM;
508}
509
510void sysfs_del_meshif(struct net_device *dev)
511{
512 struct bat_priv *bat_priv = netdev_priv(dev);
513 struct bat_attribute **bat_attr;
514
515 for (bat_attr = mesh_attrs; *bat_attr; ++bat_attr)
516 sysfs_remove_file(bat_priv->mesh_obj, &((*bat_attr)->attr));
517
518 kobject_put(bat_priv->mesh_obj);
519 bat_priv->mesh_obj = NULL;
520}
521
522static ssize_t show_mesh_iface(struct kobject *kobj, struct attribute *attr,
523 char *buff)
524{
525 struct net_device *net_dev = kobj_to_netdev(kobj);
526 struct hard_iface *hard_iface = hardif_get_by_netdev(net_dev);
527 ssize_t length;
528
529 if (!hard_iface)
530 return 0;
531
532 length = sprintf(buff, "%s\n", hard_iface->if_status == IF_NOT_IN_USE ?
533 "none" : hard_iface->soft_iface->name);
534
535 hardif_free_ref(hard_iface);
536
537 return length;
538}
539
540static ssize_t store_mesh_iface(struct kobject *kobj, struct attribute *attr,
541 char *buff, size_t count)
542{
543 struct net_device *net_dev = kobj_to_netdev(kobj);
544 struct hard_iface *hard_iface = hardif_get_by_netdev(net_dev);
545 int status_tmp = -1;
546 int ret = count;
547
548 if (!hard_iface)
549 return count;
550
551 if (buff[count - 1] == '\n')
552 buff[count - 1] = '\0';
553
554 if (strlen(buff) >= IFNAMSIZ) {
555 pr_err("Invalid parameter for 'mesh_iface' setting received: interface name too long '%s'\n",
556 buff);
557 hardif_free_ref(hard_iface);
558 return -EINVAL;
559 }
560
561 if (strncmp(buff, "none", 4) == 0)
562 status_tmp = IF_NOT_IN_USE;
563 else
564 status_tmp = IF_I_WANT_YOU;
565
566 if (hard_iface->if_status == status_tmp)
567 goto out;
568
569 if ((hard_iface->soft_iface) &&
570 (strncmp(hard_iface->soft_iface->name, buff, IFNAMSIZ) == 0))
571 goto out;
572
573 if (!rtnl_trylock()) {
574 ret = -ERESTARTSYS;
575 goto out;
576 }
577
578 if (status_tmp == IF_NOT_IN_USE) {
579 hardif_disable_interface(hard_iface);
580 goto unlock;
581 }
582
583 /* if the interface already is in use */
584 if (hard_iface->if_status != IF_NOT_IN_USE)
585 hardif_disable_interface(hard_iface);
586
587 ret = hardif_enable_interface(hard_iface, buff);
588
589unlock:
590 rtnl_unlock();
591out:
592 hardif_free_ref(hard_iface);
593 return ret;
594}
595
596static ssize_t show_iface_status(struct kobject *kobj, struct attribute *attr,
597 char *buff)
598{
599 struct net_device *net_dev = kobj_to_netdev(kobj);
600 struct hard_iface *hard_iface = hardif_get_by_netdev(net_dev);
601 ssize_t length;
602
603 if (!hard_iface)
604 return 0;
605
606 switch (hard_iface->if_status) {
607 case IF_TO_BE_REMOVED:
608 length = sprintf(buff, "disabling\n");
609 break;
610 case IF_INACTIVE:
611 length = sprintf(buff, "inactive\n");
612 break;
613 case IF_ACTIVE:
614 length = sprintf(buff, "active\n");
615 break;
616 case IF_TO_BE_ACTIVATED:
617 length = sprintf(buff, "enabling\n");
618 break;
619 case IF_NOT_IN_USE:
620 default:
621 length = sprintf(buff, "not in use\n");
622 break;
623 }
624
625 hardif_free_ref(hard_iface);
626
627 return length;
628}
629
630static BAT_ATTR(mesh_iface, S_IRUGO | S_IWUSR,
631 show_mesh_iface, store_mesh_iface);
632static BAT_ATTR(iface_status, S_IRUGO, show_iface_status, NULL);
633
634static struct bat_attribute *batman_attrs[] = {
635 &bat_attr_mesh_iface,
636 &bat_attr_iface_status,
637 NULL,
638};
639
640int sysfs_add_hardif(struct kobject **hardif_obj, struct net_device *dev)
641{
642 struct kobject *hardif_kobject = &dev->dev.kobj;
643 struct bat_attribute **bat_attr;
644 int err;
645
646 *hardif_obj = kobject_create_and_add(SYSFS_IF_BAT_SUBDIR,
647 hardif_kobject);
648
649 if (!*hardif_obj) {
650 bat_err(dev, "Can't add sysfs directory: %s/%s\n", dev->name,
651 SYSFS_IF_BAT_SUBDIR);
652 goto out;
653 }
654
655 for (bat_attr = batman_attrs; *bat_attr; ++bat_attr) {
656 err = sysfs_create_file(*hardif_obj, &((*bat_attr)->attr));
657 if (err) {
658 bat_err(dev, "Can't add sysfs file: %s/%s/%s\n",
659 dev->name, SYSFS_IF_BAT_SUBDIR,
660 ((*bat_attr)->attr).name);
661 goto rem_attr;
662 }
663 }
664
665 return 0;
666
667rem_attr:
668 for (bat_attr = batman_attrs; *bat_attr; ++bat_attr)
669 sysfs_remove_file(*hardif_obj, &((*bat_attr)->attr));
670out:
671 return -ENOMEM;
672}
673
674void sysfs_del_hardif(struct kobject **hardif_obj)
675{
676 kobject_put(*hardif_obj);
677 *hardif_obj = NULL;
678}
679
680int throw_uevent(struct bat_priv *bat_priv, enum uev_type type,
681 enum uev_action action, const char *data)
682{
683 int ret = -1;
684 struct hard_iface *primary_if = NULL;
685 struct kobject *bat_kobj;
686 char *uevent_env[4] = { NULL, NULL, NULL, NULL };
687
688 primary_if = primary_if_get_selected(bat_priv);
689 if (!primary_if)
690 goto out;
691
692 bat_kobj = &primary_if->soft_iface->dev.kobj;
693
694 uevent_env[0] = kmalloc(strlen(UEV_TYPE_VAR) +
695 strlen(uev_type_str[type]) + 1,
696 GFP_ATOMIC);
697 if (!uevent_env[0])
698 goto out;
699
700 sprintf(uevent_env[0], "%s%s", UEV_TYPE_VAR, uev_type_str[type]);
701
702 uevent_env[1] = kmalloc(strlen(UEV_ACTION_VAR) +
703 strlen(uev_action_str[action]) + 1,
704 GFP_ATOMIC);
705 if (!uevent_env[1])
706 goto out;
707
708 sprintf(uevent_env[1], "%s%s", UEV_ACTION_VAR, uev_action_str[action]);
709
710 /* If the event is DEL, ignore the data field */
711 if (action != UEV_DEL) {
712 uevent_env[2] = kmalloc(strlen(UEV_DATA_VAR) +
713 strlen(data) + 1, GFP_ATOMIC);
714 if (!uevent_env[2])
715 goto out;
716
717 sprintf(uevent_env[2], "%s%s", UEV_DATA_VAR, data);
718 }
719
720 ret = kobject_uevent_env(bat_kobj, KOBJ_CHANGE, uevent_env);
721out:
722 kfree(uevent_env[0]);
723 kfree(uevent_env[1]);
724 kfree(uevent_env[2]);
725
726 if (primary_if)
727 hardif_free_ref(primary_if);
728
729 if (ret)
730 bat_dbg(DBG_BATMAN, bat_priv,
731 "Impossible to send uevent for (%s,%s,%s) event (err: %d)\n",
732 uev_type_str[type], uev_action_str[action],
733 (action == UEV_DEL ? "NULL" : data), ret);
734 return ret;
735}
diff --git a/net/batman-adv/bitarray.c b/net/batman-adv/bitarray.c
index 07ae6e1b8ac..aea174cdbfb 100644
--- a/net/batman-adv/bitarray.c
+++ b/net/batman-adv/bitarray.c
@@ -1,5 +1,4 @@
1/* 1/* Copyright (C) 2006-2012 B.A.T.M.A.N. contributors:
2 * Copyright (C) 2006-2012 B.A.T.M.A.N. contributors:
3 * 2 *
4 * Simon Wunderlich, Marek Lindner 3 * Simon Wunderlich, Marek Lindner
5 * 4 *
@@ -16,7 +15,6 @@
16 * along with this program; if not, write to the Free Software 15 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA 17 * 02110-1301, USA
19 *
20 */ 18 */
21 19
22#include "main.h" 20#include "main.h"
@@ -25,12 +23,12 @@
25#include <linux/bitops.h> 23#include <linux/bitops.h>
26 24
27/* shift the packet array by n places. */ 25/* shift the packet array by n places. */
28static void bat_bitmap_shift_left(unsigned long *seq_bits, int32_t n) 26static void batadv_bitmap_shift_left(unsigned long *seq_bits, int32_t n)
29{ 27{
30 if (n <= 0 || n >= TQ_LOCAL_WINDOW_SIZE) 28 if (n <= 0 || n >= BATADV_TQ_LOCAL_WINDOW_SIZE)
31 return; 29 return;
32 30
33 bitmap_shift_left(seq_bits, seq_bits, n, TQ_LOCAL_WINDOW_SIZE); 31 bitmap_shift_left(seq_bits, seq_bits, n, BATADV_TQ_LOCAL_WINDOW_SIZE);
34} 32}
35 33
36 34
@@ -40,58 +38,57 @@ static void bat_bitmap_shift_left(unsigned long *seq_bits, int32_t n)
40 * 1 if the window was moved (either new or very old) 38 * 1 if the window was moved (either new or very old)
41 * 0 if the window was not moved/shifted. 39 * 0 if the window was not moved/shifted.
42 */ 40 */
43int bit_get_packet(void *priv, unsigned long *seq_bits, 41int batadv_bit_get_packet(void *priv, unsigned long *seq_bits,
44 int32_t seq_num_diff, int set_mark) 42 int32_t seq_num_diff, int set_mark)
45{ 43{
46 struct bat_priv *bat_priv = priv; 44 struct batadv_priv *bat_priv = priv;
47 45
48 /* sequence number is slightly older. We already got a sequence number 46 /* sequence number is slightly older. We already got a sequence number
49 * higher than this one, so we just mark it. */ 47 * higher than this one, so we just mark it.
50 48 */
51 if ((seq_num_diff <= 0) && (seq_num_diff > -TQ_LOCAL_WINDOW_SIZE)) { 49 if (seq_num_diff <= 0 && seq_num_diff > -BATADV_TQ_LOCAL_WINDOW_SIZE) {
52 if (set_mark) 50 if (set_mark)
53 bat_set_bit(seq_bits, -seq_num_diff); 51 batadv_set_bit(seq_bits, -seq_num_diff);
54 return 0; 52 return 0;
55 } 53 }
56 54
57 /* sequence number is slightly newer, so we shift the window and 55 /* sequence number is slightly newer, so we shift the window and
58 * set the mark if required */ 56 * set the mark if required
59 57 */
60 if ((seq_num_diff > 0) && (seq_num_diff < TQ_LOCAL_WINDOW_SIZE)) { 58 if (seq_num_diff > 0 && seq_num_diff < BATADV_TQ_LOCAL_WINDOW_SIZE) {
61 bat_bitmap_shift_left(seq_bits, seq_num_diff); 59 batadv_bitmap_shift_left(seq_bits, seq_num_diff);
62 60
63 if (set_mark) 61 if (set_mark)
64 bat_set_bit(seq_bits, 0); 62 batadv_set_bit(seq_bits, 0);
65 return 1; 63 return 1;
66 } 64 }
67 65
68 /* sequence number is much newer, probably missed a lot of packets */ 66 /* sequence number is much newer, probably missed a lot of packets */
69 67 if (seq_num_diff >= BATADV_TQ_LOCAL_WINDOW_SIZE &&
70 if ((seq_num_diff >= TQ_LOCAL_WINDOW_SIZE) && 68 seq_num_diff < BATADV_EXPECTED_SEQNO_RANGE) {
71 (seq_num_diff < EXPECTED_SEQNO_RANGE)) { 69 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
72 bat_dbg(DBG_BATMAN, bat_priv, 70 "We missed a lot of packets (%i) !\n",
73 "We missed a lot of packets (%i) !\n", 71 seq_num_diff - 1);
74 seq_num_diff - 1); 72 bitmap_zero(seq_bits, BATADV_TQ_LOCAL_WINDOW_SIZE);
75 bitmap_zero(seq_bits, TQ_LOCAL_WINDOW_SIZE);
76 if (set_mark) 73 if (set_mark)
77 bat_set_bit(seq_bits, 0); 74 batadv_set_bit(seq_bits, 0);
78 return 1; 75 return 1;
79 } 76 }
80 77
81 /* received a much older packet. The other host either restarted 78 /* received a much older packet. The other host either restarted
82 * or the old packet got delayed somewhere in the network. The 79 * or the old packet got delayed somewhere in the network. The
83 * packet should be dropped without calling this function if the 80 * packet should be dropped without calling this function if the
84 * seqno window is protected. */ 81 * seqno window is protected.
85 82 */
86 if ((seq_num_diff <= -TQ_LOCAL_WINDOW_SIZE) || 83 if (seq_num_diff <= -BATADV_TQ_LOCAL_WINDOW_SIZE ||
87 (seq_num_diff >= EXPECTED_SEQNO_RANGE)) { 84 seq_num_diff >= BATADV_EXPECTED_SEQNO_RANGE) {
88 85
89 bat_dbg(DBG_BATMAN, bat_priv, 86 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
90 "Other host probably restarted!\n"); 87 "Other host probably restarted!\n");
91 88
92 bitmap_zero(seq_bits, TQ_LOCAL_WINDOW_SIZE); 89 bitmap_zero(seq_bits, BATADV_TQ_LOCAL_WINDOW_SIZE);
93 if (set_mark) 90 if (set_mark)
94 bat_set_bit(seq_bits, 0); 91 batadv_set_bit(seq_bits, 0);
95 92
96 return 1; 93 return 1;
97 } 94 }
diff --git a/net/batman-adv/bitarray.h b/net/batman-adv/bitarray.h
index 1835c15cda4..a081ce1c051 100644
--- a/net/batman-adv/bitarray.h
+++ b/net/batman-adv/bitarray.h
@@ -1,5 +1,4 @@
1/* 1/* Copyright (C) 2006-2012 B.A.T.M.A.N. contributors:
2 * Copyright (C) 2006-2012 B.A.T.M.A.N. contributors:
3 * 2 *
4 * Simon Wunderlich, Marek Lindner 3 * Simon Wunderlich, Marek Lindner
5 * 4 *
@@ -16,39 +15,40 @@
16 * along with this program; if not, write to the Free Software 15 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA 17 * 02110-1301, USA
19 *
20 */ 18 */
21 19
22#ifndef _NET_BATMAN_ADV_BITARRAY_H_ 20#ifndef _NET_BATMAN_ADV_BITARRAY_H_
23#define _NET_BATMAN_ADV_BITARRAY_H_ 21#define _NET_BATMAN_ADV_BITARRAY_H_
24 22
25/* returns true if the corresponding bit in the given seq_bits indicates true 23/* returns true if the corresponding bit in the given seq_bits indicates true
26 * and curr_seqno is within range of last_seqno */ 24 * and curr_seqno is within range of last_seqno
27static inline int bat_test_bit(const unsigned long *seq_bits, 25 */
28 uint32_t last_seqno, uint32_t curr_seqno) 26static inline int batadv_test_bit(const unsigned long *seq_bits,
27 uint32_t last_seqno, uint32_t curr_seqno)
29{ 28{
30 int32_t diff; 29 int32_t diff;
31 30
32 diff = last_seqno - curr_seqno; 31 diff = last_seqno - curr_seqno;
33 if (diff < 0 || diff >= TQ_LOCAL_WINDOW_SIZE) 32 if (diff < 0 || diff >= BATADV_TQ_LOCAL_WINDOW_SIZE)
34 return 0; 33 return 0;
35 else 34 else
36 return test_bit(diff, seq_bits); 35 return test_bit(diff, seq_bits);
37} 36}
38 37
39/* turn corresponding bit on, so we can remember that we got the packet */ 38/* turn corresponding bit on, so we can remember that we got the packet */
40static inline void bat_set_bit(unsigned long *seq_bits, int32_t n) 39static inline void batadv_set_bit(unsigned long *seq_bits, int32_t n)
41{ 40{
42 /* if too old, just drop it */ 41 /* if too old, just drop it */
43 if (n < 0 || n >= TQ_LOCAL_WINDOW_SIZE) 42 if (n < 0 || n >= BATADV_TQ_LOCAL_WINDOW_SIZE)
44 return; 43 return;
45 44
46 set_bit(n, seq_bits); /* turn the position on */ 45 set_bit(n, seq_bits); /* turn the position on */
47} 46}
48 47
49/* receive and process one packet, returns 1 if received seq_num is considered 48/* receive and process one packet, returns 1 if received seq_num is considered
50 * new, 0 if old */ 49 * new, 0 if old
51int bit_get_packet(void *priv, unsigned long *seq_bits, 50 */
52 int32_t seq_num_diff, int set_mark); 51int batadv_bit_get_packet(void *priv, unsigned long *seq_bits,
52 int32_t seq_num_diff, int set_mark);
53 53
54#endif /* _NET_BATMAN_ADV_BITARRAY_H_ */ 54#endif /* _NET_BATMAN_ADV_BITARRAY_H_ */
diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
index c5863f49913..6705d35b17c 100644
--- a/net/batman-adv/bridge_loop_avoidance.c
+++ b/net/batman-adv/bridge_loop_avoidance.c
@@ -1,5 +1,4 @@
1/* 1/* Copyright (C) 2011-2012 B.A.T.M.A.N. contributors:
2 * Copyright (C) 2011-2012 B.A.T.M.A.N. contributors:
3 * 2 *
4 * Simon Wunderlich 3 * Simon Wunderlich
5 * 4 *
@@ -16,7 +15,6 @@
16 * along with this program; if not, write to the Free Software 15 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA 17 * 02110-1301, USA
19 *
20 */ 18 */
21 19
22#include "main.h" 20#include "main.h"
@@ -33,14 +31,14 @@
33#include <net/arp.h> 31#include <net/arp.h>
34#include <linux/if_vlan.h> 32#include <linux/if_vlan.h>
35 33
36static const uint8_t announce_mac[4] = {0x43, 0x05, 0x43, 0x05}; 34static const uint8_t batadv_announce_mac[4] = {0x43, 0x05, 0x43, 0x05};
37 35
38static void bla_periodic_work(struct work_struct *work); 36static void batadv_bla_periodic_work(struct work_struct *work);
39static void bla_send_announce(struct bat_priv *bat_priv, 37static void batadv_bla_send_announce(struct batadv_priv *bat_priv,
40 struct backbone_gw *backbone_gw); 38 struct batadv_backbone_gw *backbone_gw);
41 39
42/* return the index of the claim */ 40/* return the index of the claim */
43static inline uint32_t choose_claim(const void *data, uint32_t size) 41static inline uint32_t batadv_choose_claim(const void *data, uint32_t size)
44{ 42{
45 const unsigned char *key = data; 43 const unsigned char *key = data;
46 uint32_t hash = 0; 44 uint32_t hash = 0;
@@ -60,7 +58,8 @@ static inline uint32_t choose_claim(const void *data, uint32_t size)
60} 58}
61 59
62/* return the index of the backbone gateway */ 60/* return the index of the backbone gateway */
63static inline uint32_t choose_backbone_gw(const void *data, uint32_t size) 61static inline uint32_t batadv_choose_backbone_gw(const void *data,
62 uint32_t size)
64{ 63{
65 const unsigned char *key = data; 64 const unsigned char *key = data;
66 uint32_t hash = 0; 65 uint32_t hash = 0;
@@ -81,74 +80,75 @@ static inline uint32_t choose_backbone_gw(const void *data, uint32_t size)
81 80
82 81
83/* compares address and vid of two backbone gws */ 82/* compares address and vid of two backbone gws */
84static int compare_backbone_gw(const struct hlist_node *node, const void *data2) 83static int batadv_compare_backbone_gw(const struct hlist_node *node,
84 const void *data2)
85{ 85{
86 const void *data1 = container_of(node, struct backbone_gw, 86 const void *data1 = container_of(node, struct batadv_backbone_gw,
87 hash_entry); 87 hash_entry);
88 88
89 return (memcmp(data1, data2, ETH_ALEN + sizeof(short)) == 0 ? 1 : 0); 89 return (memcmp(data1, data2, ETH_ALEN + sizeof(short)) == 0 ? 1 : 0);
90} 90}
91 91
92/* compares address and vid of two claims */ 92/* compares address and vid of two claims */
93static int compare_claim(const struct hlist_node *node, const void *data2) 93static int batadv_compare_claim(const struct hlist_node *node,
94 const void *data2)
94{ 95{
95 const void *data1 = container_of(node, struct claim, 96 const void *data1 = container_of(node, struct batadv_claim,
96 hash_entry); 97 hash_entry);
97 98
98 return (memcmp(data1, data2, ETH_ALEN + sizeof(short)) == 0 ? 1 : 0); 99 return (memcmp(data1, data2, ETH_ALEN + sizeof(short)) == 0 ? 1 : 0);
99} 100}
100 101
101/* free a backbone gw */ 102/* free a backbone gw */
102static void backbone_gw_free_ref(struct backbone_gw *backbone_gw) 103static void batadv_backbone_gw_free_ref(struct batadv_backbone_gw *backbone_gw)
103{ 104{
104 if (atomic_dec_and_test(&backbone_gw->refcount)) 105 if (atomic_dec_and_test(&backbone_gw->refcount))
105 kfree_rcu(backbone_gw, rcu); 106 kfree_rcu(backbone_gw, rcu);
106} 107}
107 108
108/* finally deinitialize the claim */ 109/* finally deinitialize the claim */
109static void claim_free_rcu(struct rcu_head *rcu) 110static void batadv_claim_free_rcu(struct rcu_head *rcu)
110{ 111{
111 struct claim *claim; 112 struct batadv_claim *claim;
112 113
113 claim = container_of(rcu, struct claim, rcu); 114 claim = container_of(rcu, struct batadv_claim, rcu);
114 115
115 backbone_gw_free_ref(claim->backbone_gw); 116 batadv_backbone_gw_free_ref(claim->backbone_gw);
116 kfree(claim); 117 kfree(claim);
117} 118}
118 119
119/* free a claim, call claim_free_rcu if its the last reference */ 120/* free a claim, call claim_free_rcu if its the last reference */
120static void claim_free_ref(struct claim *claim) 121static void batadv_claim_free_ref(struct batadv_claim *claim)
121{ 122{
122 if (atomic_dec_and_test(&claim->refcount)) 123 if (atomic_dec_and_test(&claim->refcount))
123 call_rcu(&claim->rcu, claim_free_rcu); 124 call_rcu(&claim->rcu, batadv_claim_free_rcu);
124} 125}
125 126
126/** 127/* @bat_priv: the bat priv with all the soft interface information
127 * @bat_priv: the bat priv with all the soft interface information
128 * @data: search data (may be local/static data) 128 * @data: search data (may be local/static data)
129 * 129 *
130 * looks for a claim in the hash, and returns it if found 130 * looks for a claim in the hash, and returns it if found
131 * or NULL otherwise. 131 * or NULL otherwise.
132 */ 132 */
133static struct claim *claim_hash_find(struct bat_priv *bat_priv, 133static struct batadv_claim *batadv_claim_hash_find(struct batadv_priv *bat_priv,
134 struct claim *data) 134 struct batadv_claim *data)
135{ 135{
136 struct hashtable_t *hash = bat_priv->claim_hash; 136 struct batadv_hashtable *hash = bat_priv->claim_hash;
137 struct hlist_head *head; 137 struct hlist_head *head;
138 struct hlist_node *node; 138 struct hlist_node *node;
139 struct claim *claim; 139 struct batadv_claim *claim;
140 struct claim *claim_tmp = NULL; 140 struct batadv_claim *claim_tmp = NULL;
141 int index; 141 int index;
142 142
143 if (!hash) 143 if (!hash)
144 return NULL; 144 return NULL;
145 145
146 index = choose_claim(data, hash->size); 146 index = batadv_choose_claim(data, hash->size);
147 head = &hash->table[index]; 147 head = &hash->table[index];
148 148
149 rcu_read_lock(); 149 rcu_read_lock();
150 hlist_for_each_entry_rcu(claim, node, head, hash_entry) { 150 hlist_for_each_entry_rcu(claim, node, head, hash_entry) {
151 if (!compare_claim(&claim->hash_entry, data)) 151 if (!batadv_compare_claim(&claim->hash_entry, data))
152 continue; 152 continue;
153 153
154 if (!atomic_inc_not_zero(&claim->refcount)) 154 if (!atomic_inc_not_zero(&claim->refcount))
@@ -163,21 +163,22 @@ static struct claim *claim_hash_find(struct bat_priv *bat_priv,
163} 163}
164 164
165/** 165/**
166 * batadv_backbone_hash_find - looks for a claim in the hash
166 * @bat_priv: the bat priv with all the soft interface information 167 * @bat_priv: the bat priv with all the soft interface information
167 * @addr: the address of the originator 168 * @addr: the address of the originator
168 * @vid: the VLAN ID 169 * @vid: the VLAN ID
169 * 170 *
170 * looks for a claim in the hash, and returns it if found 171 * Returns claim if found or NULL otherwise.
171 * or NULL otherwise.
172 */ 172 */
173static struct backbone_gw *backbone_hash_find(struct bat_priv *bat_priv, 173static struct batadv_backbone_gw *
174 uint8_t *addr, short vid) 174batadv_backbone_hash_find(struct batadv_priv *bat_priv,
175 uint8_t *addr, short vid)
175{ 176{
176 struct hashtable_t *hash = bat_priv->backbone_hash; 177 struct batadv_hashtable *hash = bat_priv->backbone_hash;
177 struct hlist_head *head; 178 struct hlist_head *head;
178 struct hlist_node *node; 179 struct hlist_node *node;
179 struct backbone_gw search_entry, *backbone_gw; 180 struct batadv_backbone_gw search_entry, *backbone_gw;
180 struct backbone_gw *backbone_gw_tmp = NULL; 181 struct batadv_backbone_gw *backbone_gw_tmp = NULL;
181 int index; 182 int index;
182 183
183 if (!hash) 184 if (!hash)
@@ -186,13 +187,13 @@ static struct backbone_gw *backbone_hash_find(struct bat_priv *bat_priv,
186 memcpy(search_entry.orig, addr, ETH_ALEN); 187 memcpy(search_entry.orig, addr, ETH_ALEN);
187 search_entry.vid = vid; 188 search_entry.vid = vid;
188 189
189 index = choose_backbone_gw(&search_entry, hash->size); 190 index = batadv_choose_backbone_gw(&search_entry, hash->size);
190 head = &hash->table[index]; 191 head = &hash->table[index];
191 192
192 rcu_read_lock(); 193 rcu_read_lock();
193 hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) { 194 hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) {
194 if (!compare_backbone_gw(&backbone_gw->hash_entry, 195 if (!batadv_compare_backbone_gw(&backbone_gw->hash_entry,
195 &search_entry)) 196 &search_entry))
196 continue; 197 continue;
197 198
198 if (!atomic_inc_not_zero(&backbone_gw->refcount)) 199 if (!atomic_inc_not_zero(&backbone_gw->refcount))
@@ -207,12 +208,13 @@ static struct backbone_gw *backbone_hash_find(struct bat_priv *bat_priv,
207} 208}
208 209
209/* delete all claims for a backbone */ 210/* delete all claims for a backbone */
210static void bla_del_backbone_claims(struct backbone_gw *backbone_gw) 211static void
212batadv_bla_del_backbone_claims(struct batadv_backbone_gw *backbone_gw)
211{ 213{
212 struct hashtable_t *hash; 214 struct batadv_hashtable *hash;
213 struct hlist_node *node, *node_tmp; 215 struct hlist_node *node, *node_tmp;
214 struct hlist_head *head; 216 struct hlist_head *head;
215 struct claim *claim; 217 struct batadv_claim *claim;
216 int i; 218 int i;
217 spinlock_t *list_lock; /* protects write access to the hash lists */ 219 spinlock_t *list_lock; /* protects write access to the hash lists */
218 220
@@ -231,36 +233,35 @@ static void bla_del_backbone_claims(struct backbone_gw *backbone_gw)
231 if (claim->backbone_gw != backbone_gw) 233 if (claim->backbone_gw != backbone_gw)
232 continue; 234 continue;
233 235
234 claim_free_ref(claim); 236 batadv_claim_free_ref(claim);
235 hlist_del_rcu(node); 237 hlist_del_rcu(node);
236 } 238 }
237 spin_unlock_bh(list_lock); 239 spin_unlock_bh(list_lock);
238 } 240 }
239 241
240 /* all claims gone, intialize CRC */ 242 /* all claims gone, intialize CRC */
241 backbone_gw->crc = BLA_CRC_INIT; 243 backbone_gw->crc = BATADV_BLA_CRC_INIT;
242} 244}
243 245
244/** 246/**
247 * batadv_bla_send_claim - sends a claim frame according to the provided info
245 * @bat_priv: the bat priv with all the soft interface information 248 * @bat_priv: the bat priv with all the soft interface information
246 * @orig: the mac address to be announced within the claim 249 * @orig: the mac address to be announced within the claim
247 * @vid: the VLAN ID 250 * @vid: the VLAN ID
248 * @claimtype: the type of the claim (CLAIM, UNCLAIM, ANNOUNCE, ...) 251 * @claimtype: the type of the claim (CLAIM, UNCLAIM, ANNOUNCE, ...)
249 *
250 * sends a claim frame according to the provided info.
251 */ 252 */
252static void bla_send_claim(struct bat_priv *bat_priv, uint8_t *mac, 253static void batadv_bla_send_claim(struct batadv_priv *bat_priv, uint8_t *mac,
253 short vid, int claimtype) 254 short vid, int claimtype)
254{ 255{
255 struct sk_buff *skb; 256 struct sk_buff *skb;
256 struct ethhdr *ethhdr; 257 struct ethhdr *ethhdr;
257 struct hard_iface *primary_if; 258 struct batadv_hard_iface *primary_if;
258 struct net_device *soft_iface; 259 struct net_device *soft_iface;
259 uint8_t *hw_src; 260 uint8_t *hw_src;
260 struct bla_claim_dst local_claim_dest; 261 struct batadv_bla_claim_dst local_claim_dest;
261 uint32_t zeroip = 0; 262 __be32 zeroip = 0;
262 263
263 primary_if = primary_if_get_selected(bat_priv); 264 primary_if = batadv_primary_if_get_selected(bat_priv);
264 if (!primary_if) 265 if (!primary_if)
265 return; 266 return;
266 267
@@ -294,40 +295,41 @@ static void bla_send_claim(struct bat_priv *bat_priv, uint8_t *mac,
294 295
295 /* now we pretend that the client would have sent this ... */ 296 /* now we pretend that the client would have sent this ... */
296 switch (claimtype) { 297 switch (claimtype) {
297 case CLAIM_TYPE_ADD: 298 case BATADV_CLAIM_TYPE_ADD:
298 /* normal claim frame 299 /* normal claim frame
299 * set Ethernet SRC to the clients mac 300 * set Ethernet SRC to the clients mac
300 */ 301 */
301 memcpy(ethhdr->h_source, mac, ETH_ALEN); 302 memcpy(ethhdr->h_source, mac, ETH_ALEN);
302 bat_dbg(DBG_BLA, bat_priv, 303 batadv_dbg(BATADV_DBG_BLA, bat_priv,
303 "bla_send_claim(): CLAIM %pM on vid %d\n", mac, vid); 304 "bla_send_claim(): CLAIM %pM on vid %d\n", mac, vid);
304 break; 305 break;
305 case CLAIM_TYPE_DEL: 306 case BATADV_CLAIM_TYPE_DEL:
306 /* unclaim frame 307 /* unclaim frame
307 * set HW SRC to the clients mac 308 * set HW SRC to the clients mac
308 */ 309 */
309 memcpy(hw_src, mac, ETH_ALEN); 310 memcpy(hw_src, mac, ETH_ALEN);
310 bat_dbg(DBG_BLA, bat_priv, 311 batadv_dbg(BATADV_DBG_BLA, bat_priv,
311 "bla_send_claim(): UNCLAIM %pM on vid %d\n", mac, vid); 312 "bla_send_claim(): UNCLAIM %pM on vid %d\n", mac,
313 vid);
312 break; 314 break;
313 case CLAIM_TYPE_ANNOUNCE: 315 case BATADV_CLAIM_TYPE_ANNOUNCE:
314 /* announcement frame 316 /* announcement frame
315 * set HW SRC to the special mac containg the crc 317 * set HW SRC to the special mac containg the crc
316 */ 318 */
317 memcpy(hw_src, mac, ETH_ALEN); 319 memcpy(hw_src, mac, ETH_ALEN);
318 bat_dbg(DBG_BLA, bat_priv, 320 batadv_dbg(BATADV_DBG_BLA, bat_priv,
319 "bla_send_claim(): ANNOUNCE of %pM on vid %d\n", 321 "bla_send_claim(): ANNOUNCE of %pM on vid %d\n",
320 ethhdr->h_source, vid); 322 ethhdr->h_source, vid);
321 break; 323 break;
322 case CLAIM_TYPE_REQUEST: 324 case BATADV_CLAIM_TYPE_REQUEST:
323 /* request frame 325 /* request frame
324 * set HW SRC to the special mac containg the crc 326 * set HW SRC to the special mac containg the crc
325 */ 327 */
326 memcpy(hw_src, mac, ETH_ALEN); 328 memcpy(hw_src, mac, ETH_ALEN);
327 memcpy(ethhdr->h_dest, mac, ETH_ALEN); 329 memcpy(ethhdr->h_dest, mac, ETH_ALEN);
328 bat_dbg(DBG_BLA, bat_priv, 330 batadv_dbg(BATADV_DBG_BLA, bat_priv,
329 "bla_send_claim(): REQUEST of %pM to %pMon vid %d\n", 331 "bla_send_claim(): REQUEST of %pM to %pMon vid %d\n",
330 ethhdr->h_source, ethhdr->h_dest, vid); 332 ethhdr->h_source, ethhdr->h_dest, vid);
331 break; 333 break;
332 334
333 } 335 }
@@ -344,10 +346,11 @@ static void bla_send_claim(struct bat_priv *bat_priv, uint8_t *mac,
344 netif_rx(skb); 346 netif_rx(skb);
345out: 347out:
346 if (primary_if) 348 if (primary_if)
347 hardif_free_ref(primary_if); 349 batadv_hardif_free_ref(primary_if);
348} 350}
349 351
350/** 352/**
353 * batadv_bla_get_backbone_gw
351 * @bat_priv: the bat priv with all the soft interface information 354 * @bat_priv: the bat priv with all the soft interface information
352 * @orig: the mac address of the originator 355 * @orig: the mac address of the originator
353 * @vid: the VLAN ID 356 * @vid: the VLAN ID
@@ -355,21 +358,22 @@ out:
355 * searches for the backbone gw or creates a new one if it could not 358 * searches for the backbone gw or creates a new one if it could not
356 * be found. 359 * be found.
357 */ 360 */
358static struct backbone_gw *bla_get_backbone_gw(struct bat_priv *bat_priv, 361static struct batadv_backbone_gw *
359 uint8_t *orig, short vid) 362batadv_bla_get_backbone_gw(struct batadv_priv *bat_priv, uint8_t *orig,
363 short vid)
360{ 364{
361 struct backbone_gw *entry; 365 struct batadv_backbone_gw *entry;
362 struct orig_node *orig_node; 366 struct batadv_orig_node *orig_node;
363 int hash_added; 367 int hash_added;
364 368
365 entry = backbone_hash_find(bat_priv, orig, vid); 369 entry = batadv_backbone_hash_find(bat_priv, orig, vid);
366 370
367 if (entry) 371 if (entry)
368 return entry; 372 return entry;
369 373
370 bat_dbg(DBG_BLA, bat_priv, 374 batadv_dbg(BATADV_DBG_BLA, bat_priv,
371 "bla_get_backbone_gw(): not found (%pM, %d), creating new entry\n", 375 "bla_get_backbone_gw(): not found (%pM, %d), creating new entry\n",
372 orig, vid); 376 orig, vid);
373 377
374 entry = kzalloc(sizeof(*entry), GFP_ATOMIC); 378 entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
375 if (!entry) 379 if (!entry)
@@ -377,7 +381,7 @@ static struct backbone_gw *bla_get_backbone_gw(struct bat_priv *bat_priv,
377 381
378 entry->vid = vid; 382 entry->vid = vid;
379 entry->lasttime = jiffies; 383 entry->lasttime = jiffies;
380 entry->crc = BLA_CRC_INIT; 384 entry->crc = BATADV_BLA_CRC_INIT;
381 entry->bat_priv = bat_priv; 385 entry->bat_priv = bat_priv;
382 atomic_set(&entry->request_sent, 0); 386 atomic_set(&entry->request_sent, 0);
383 memcpy(entry->orig, orig, ETH_ALEN); 387 memcpy(entry->orig, orig, ETH_ALEN);
@@ -385,8 +389,10 @@ static struct backbone_gw *bla_get_backbone_gw(struct bat_priv *bat_priv,
385 /* one for the hash, one for returning */ 389 /* one for the hash, one for returning */
386 atomic_set(&entry->refcount, 2); 390 atomic_set(&entry->refcount, 2);
387 391
388 hash_added = hash_add(bat_priv->backbone_hash, compare_backbone_gw, 392 hash_added = batadv_hash_add(bat_priv->backbone_hash,
389 choose_backbone_gw, entry, &entry->hash_entry); 393 batadv_compare_backbone_gw,
394 batadv_choose_backbone_gw, entry,
395 &entry->hash_entry);
390 396
391 if (unlikely(hash_added != 0)) { 397 if (unlikely(hash_added != 0)) {
392 /* hash failed, free the structure */ 398 /* hash failed, free the structure */
@@ -395,11 +401,11 @@ static struct backbone_gw *bla_get_backbone_gw(struct bat_priv *bat_priv,
395 } 401 }
396 402
397 /* this is a gateway now, remove any tt entries */ 403 /* this is a gateway now, remove any tt entries */
398 orig_node = orig_hash_find(bat_priv, orig); 404 orig_node = batadv_orig_hash_find(bat_priv, orig);
399 if (orig_node) { 405 if (orig_node) {
400 tt_global_del_orig(bat_priv, orig_node, 406 batadv_tt_global_del_orig(bat_priv, orig_node,
401 "became a backbone gateway"); 407 "became a backbone gateway");
402 orig_node_free_ref(orig_node); 408 batadv_orig_node_free_ref(orig_node);
403 } 409 }
404 return entry; 410 return entry;
405} 411}
@@ -407,43 +413,46 @@ static struct backbone_gw *bla_get_backbone_gw(struct bat_priv *bat_priv,
407/* update or add the own backbone gw to make sure we announce 413/* update or add the own backbone gw to make sure we announce
408 * where we receive other backbone gws 414 * where we receive other backbone gws
409 */ 415 */
410static void bla_update_own_backbone_gw(struct bat_priv *bat_priv, 416static void
411 struct hard_iface *primary_if, 417batadv_bla_update_own_backbone_gw(struct batadv_priv *bat_priv,
412 short vid) 418 struct batadv_hard_iface *primary_if,
419 short vid)
413{ 420{
414 struct backbone_gw *backbone_gw; 421 struct batadv_backbone_gw *backbone_gw;
415 422
416 backbone_gw = bla_get_backbone_gw(bat_priv, 423 backbone_gw = batadv_bla_get_backbone_gw(bat_priv,
417 primary_if->net_dev->dev_addr, vid); 424 primary_if->net_dev->dev_addr,
425 vid);
418 if (unlikely(!backbone_gw)) 426 if (unlikely(!backbone_gw))
419 return; 427 return;
420 428
421 backbone_gw->lasttime = jiffies; 429 backbone_gw->lasttime = jiffies;
422 backbone_gw_free_ref(backbone_gw); 430 batadv_backbone_gw_free_ref(backbone_gw);
423} 431}
424 432
425/** 433/* @bat_priv: the bat priv with all the soft interface information
426 * @bat_priv: the bat priv with all the soft interface information
427 * @vid: the vid where the request came on 434 * @vid: the vid where the request came on
428 * 435 *
429 * Repeat all of our own claims, and finally send an ANNOUNCE frame 436 * Repeat all of our own claims, and finally send an ANNOUNCE frame
430 * to allow the requester another check if the CRC is correct now. 437 * to allow the requester another check if the CRC is correct now.
431 */ 438 */
432static void bla_answer_request(struct bat_priv *bat_priv, 439static void batadv_bla_answer_request(struct batadv_priv *bat_priv,
433 struct hard_iface *primary_if, short vid) 440 struct batadv_hard_iface *primary_if,
441 short vid)
434{ 442{
435 struct hlist_node *node; 443 struct hlist_node *node;
436 struct hlist_head *head; 444 struct hlist_head *head;
437 struct hashtable_t *hash; 445 struct batadv_hashtable *hash;
438 struct claim *claim; 446 struct batadv_claim *claim;
439 struct backbone_gw *backbone_gw; 447 struct batadv_backbone_gw *backbone_gw;
440 int i; 448 int i;
441 449
442 bat_dbg(DBG_BLA, bat_priv, 450 batadv_dbg(BATADV_DBG_BLA, bat_priv,
443 "bla_answer_request(): received a claim request, send all of our own claims again\n"); 451 "bla_answer_request(): received a claim request, send all of our own claims again\n");
444 452
445 backbone_gw = backbone_hash_find(bat_priv, 453 backbone_gw = batadv_backbone_hash_find(bat_priv,
446 primary_if->net_dev->dev_addr, vid); 454 primary_if->net_dev->dev_addr,
455 vid);
447 if (!backbone_gw) 456 if (!backbone_gw)
448 return; 457 return;
449 458
@@ -457,36 +466,34 @@ static void bla_answer_request(struct bat_priv *bat_priv,
457 if (claim->backbone_gw != backbone_gw) 466 if (claim->backbone_gw != backbone_gw)
458 continue; 467 continue;
459 468
460 bla_send_claim(bat_priv, claim->addr, claim->vid, 469 batadv_bla_send_claim(bat_priv, claim->addr, claim->vid,
461 CLAIM_TYPE_ADD); 470 BATADV_CLAIM_TYPE_ADD);
462 } 471 }
463 rcu_read_unlock(); 472 rcu_read_unlock();
464 } 473 }
465 474
466 /* finally, send an announcement frame */ 475 /* finally, send an announcement frame */
467 bla_send_announce(bat_priv, backbone_gw); 476 batadv_bla_send_announce(bat_priv, backbone_gw);
468 backbone_gw_free_ref(backbone_gw); 477 batadv_backbone_gw_free_ref(backbone_gw);
469} 478}
470 479
471/** 480/* @backbone_gw: the backbone gateway from whom we are out of sync
472 * @backbone_gw: the backbone gateway from whom we are out of sync
473 * 481 *
474 * When the crc is wrong, ask the backbone gateway for a full table update. 482 * When the crc is wrong, ask the backbone gateway for a full table update.
475 * After the request, it will repeat all of his own claims and finally 483 * After the request, it will repeat all of his own claims and finally
476 * send an announcement claim with which we can check again. 484 * send an announcement claim with which we can check again.
477 */ 485 */
478static void bla_send_request(struct backbone_gw *backbone_gw) 486static void batadv_bla_send_request(struct batadv_backbone_gw *backbone_gw)
479{ 487{
480 /* first, remove all old entries */ 488 /* first, remove all old entries */
481 bla_del_backbone_claims(backbone_gw); 489 batadv_bla_del_backbone_claims(backbone_gw);
482 490
483 bat_dbg(DBG_BLA, backbone_gw->bat_priv, 491 batadv_dbg(BATADV_DBG_BLA, backbone_gw->bat_priv,
484 "Sending REQUEST to %pM\n", 492 "Sending REQUEST to %pM\n", backbone_gw->orig);
485 backbone_gw->orig);
486 493
487 /* send request */ 494 /* send request */
488 bla_send_claim(backbone_gw->bat_priv, backbone_gw->orig, 495 batadv_bla_send_claim(backbone_gw->bat_priv, backbone_gw->orig,
489 backbone_gw->vid, CLAIM_TYPE_REQUEST); 496 backbone_gw->vid, BATADV_CLAIM_TYPE_REQUEST);
490 497
491 /* no local broadcasts should be sent or received, for now. */ 498 /* no local broadcasts should be sent or received, for now. */
492 if (!atomic_read(&backbone_gw->request_sent)) { 499 if (!atomic_read(&backbone_gw->request_sent)) {
@@ -495,45 +502,45 @@ static void bla_send_request(struct backbone_gw *backbone_gw)
495 } 502 }
496} 503}
497 504
498/** 505/* @bat_priv: the bat priv with all the soft interface information
499 * @bat_priv: the bat priv with all the soft interface information
500 * @backbone_gw: our backbone gateway which should be announced 506 * @backbone_gw: our backbone gateway which should be announced
501 * 507 *
502 * This function sends an announcement. It is called from multiple 508 * This function sends an announcement. It is called from multiple
503 * places. 509 * places.
504 */ 510 */
505static void bla_send_announce(struct bat_priv *bat_priv, 511static void batadv_bla_send_announce(struct batadv_priv *bat_priv,
506 struct backbone_gw *backbone_gw) 512 struct batadv_backbone_gw *backbone_gw)
507{ 513{
508 uint8_t mac[ETH_ALEN]; 514 uint8_t mac[ETH_ALEN];
509 uint16_t crc; 515 __be16 crc;
510 516
511 memcpy(mac, announce_mac, 4); 517 memcpy(mac, batadv_announce_mac, 4);
512 crc = htons(backbone_gw->crc); 518 crc = htons(backbone_gw->crc);
513 memcpy(&mac[4], (uint8_t *)&crc, 2); 519 memcpy(&mac[4], &crc, 2);
514 520
515 bla_send_claim(bat_priv, mac, backbone_gw->vid, CLAIM_TYPE_ANNOUNCE); 521 batadv_bla_send_claim(bat_priv, mac, backbone_gw->vid,
522 BATADV_CLAIM_TYPE_ANNOUNCE);
516 523
517} 524}
518 525
519/** 526/**
527 * batadv_bla_add_claim - Adds a claim in the claim hash
520 * @bat_priv: the bat priv with all the soft interface information 528 * @bat_priv: the bat priv with all the soft interface information
521 * @mac: the mac address of the claim 529 * @mac: the mac address of the claim
522 * @vid: the VLAN ID of the frame 530 * @vid: the VLAN ID of the frame
523 * @backbone_gw: the backbone gateway which claims it 531 * @backbone_gw: the backbone gateway which claims it
524 *
525 * Adds a claim in the claim hash.
526 */ 532 */
527static void bla_add_claim(struct bat_priv *bat_priv, const uint8_t *mac, 533static void batadv_bla_add_claim(struct batadv_priv *bat_priv,
528 const short vid, struct backbone_gw *backbone_gw) 534 const uint8_t *mac, const short vid,
535 struct batadv_backbone_gw *backbone_gw)
529{ 536{
530 struct claim *claim; 537 struct batadv_claim *claim;
531 struct claim search_claim; 538 struct batadv_claim search_claim;
532 int hash_added; 539 int hash_added;
533 540
534 memcpy(search_claim.addr, mac, ETH_ALEN); 541 memcpy(search_claim.addr, mac, ETH_ALEN);
535 search_claim.vid = vid; 542 search_claim.vid = vid;
536 claim = claim_hash_find(bat_priv, &search_claim); 543 claim = batadv_claim_hash_find(bat_priv, &search_claim);
537 544
538 /* create a new claim entry if it does not exist yet. */ 545 /* create a new claim entry if it does not exist yet. */
539 if (!claim) { 546 if (!claim) {
@@ -547,11 +554,13 @@ static void bla_add_claim(struct bat_priv *bat_priv, const uint8_t *mac,
547 claim->backbone_gw = backbone_gw; 554 claim->backbone_gw = backbone_gw;
548 555
549 atomic_set(&claim->refcount, 2); 556 atomic_set(&claim->refcount, 2);
550 bat_dbg(DBG_BLA, bat_priv, 557 batadv_dbg(BATADV_DBG_BLA, bat_priv,
551 "bla_add_claim(): adding new entry %pM, vid %d to hash ...\n", 558 "bla_add_claim(): adding new entry %pM, vid %d to hash ...\n",
552 mac, vid); 559 mac, vid);
553 hash_added = hash_add(bat_priv->claim_hash, compare_claim, 560 hash_added = batadv_hash_add(bat_priv->claim_hash,
554 choose_claim, claim, &claim->hash_entry); 561 batadv_compare_claim,
562 batadv_choose_claim, claim,
563 &claim->hash_entry);
555 564
556 if (unlikely(hash_added != 0)) { 565 if (unlikely(hash_added != 0)) {
557 /* only local changes happened. */ 566 /* only local changes happened. */
@@ -564,13 +573,13 @@ static void bla_add_claim(struct bat_priv *bat_priv, const uint8_t *mac,
564 /* no need to register a new backbone */ 573 /* no need to register a new backbone */
565 goto claim_free_ref; 574 goto claim_free_ref;
566 575
567 bat_dbg(DBG_BLA, bat_priv, 576 batadv_dbg(BATADV_DBG_BLA, bat_priv,
568 "bla_add_claim(): changing ownership for %pM, vid %d\n", 577 "bla_add_claim(): changing ownership for %pM, vid %d\n",
569 mac, vid); 578 mac, vid);
570 579
571 claim->backbone_gw->crc ^= 580 claim->backbone_gw->crc ^=
572 crc16(0, claim->addr, ETH_ALEN); 581 crc16(0, claim->addr, ETH_ALEN);
573 backbone_gw_free_ref(claim->backbone_gw); 582 batadv_backbone_gw_free_ref(claim->backbone_gw);
574 583
575 } 584 }
576 /* set (new) backbone gw */ 585 /* set (new) backbone gw */
@@ -581,45 +590,48 @@ static void bla_add_claim(struct bat_priv *bat_priv, const uint8_t *mac,
581 backbone_gw->lasttime = jiffies; 590 backbone_gw->lasttime = jiffies;
582 591
583claim_free_ref: 592claim_free_ref:
584 claim_free_ref(claim); 593 batadv_claim_free_ref(claim);
585} 594}
586 595
587/* Delete a claim from the claim hash which has the 596/* Delete a claim from the claim hash which has the
588 * given mac address and vid. 597 * given mac address and vid.
589 */ 598 */
590static void bla_del_claim(struct bat_priv *bat_priv, const uint8_t *mac, 599static void batadv_bla_del_claim(struct batadv_priv *bat_priv,
591 const short vid) 600 const uint8_t *mac, const short vid)
592{ 601{
593 struct claim search_claim, *claim; 602 struct batadv_claim search_claim, *claim;
594 603
595 memcpy(search_claim.addr, mac, ETH_ALEN); 604 memcpy(search_claim.addr, mac, ETH_ALEN);
596 search_claim.vid = vid; 605 search_claim.vid = vid;
597 claim = claim_hash_find(bat_priv, &search_claim); 606 claim = batadv_claim_hash_find(bat_priv, &search_claim);
598 if (!claim) 607 if (!claim)
599 return; 608 return;
600 609
601 bat_dbg(DBG_BLA, bat_priv, "bla_del_claim(): %pM, vid %d\n", mac, vid); 610 batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla_del_claim(): %pM, vid %d\n",
611 mac, vid);
602 612
603 hash_remove(bat_priv->claim_hash, compare_claim, choose_claim, claim); 613 batadv_hash_remove(bat_priv->claim_hash, batadv_compare_claim,
604 claim_free_ref(claim); /* reference from the hash is gone */ 614 batadv_choose_claim, claim);
615 batadv_claim_free_ref(claim); /* reference from the hash is gone */
605 616
606 claim->backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN); 617 claim->backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
607 618
608 /* don't need the reference from hash_find() anymore */ 619 /* don't need the reference from hash_find() anymore */
609 claim_free_ref(claim); 620 batadv_claim_free_ref(claim);
610} 621}
611 622
612/* check for ANNOUNCE frame, return 1 if handled */ 623/* check for ANNOUNCE frame, return 1 if handled */
613static int handle_announce(struct bat_priv *bat_priv, 624static int batadv_handle_announce(struct batadv_priv *bat_priv,
614 uint8_t *an_addr, uint8_t *backbone_addr, short vid) 625 uint8_t *an_addr, uint8_t *backbone_addr,
626 short vid)
615{ 627{
616 struct backbone_gw *backbone_gw; 628 struct batadv_backbone_gw *backbone_gw;
617 uint16_t crc; 629 uint16_t crc;
618 630
619 if (memcmp(an_addr, announce_mac, 4) != 0) 631 if (memcmp(an_addr, batadv_announce_mac, 4) != 0)
620 return 0; 632 return 0;
621 633
622 backbone_gw = bla_get_backbone_gw(bat_priv, backbone_addr, vid); 634 backbone_gw = batadv_bla_get_backbone_gw(bat_priv, backbone_addr, vid);
623 635
624 if (unlikely(!backbone_gw)) 636 if (unlikely(!backbone_gw))
625 return 1; 637 return 1;
@@ -627,19 +639,19 @@ static int handle_announce(struct bat_priv *bat_priv,
627 639
628 /* handle as ANNOUNCE frame */ 640 /* handle as ANNOUNCE frame */
629 backbone_gw->lasttime = jiffies; 641 backbone_gw->lasttime = jiffies;
630 crc = ntohs(*((uint16_t *)(&an_addr[4]))); 642 crc = ntohs(*((__be16 *)(&an_addr[4])));
631 643
632 bat_dbg(DBG_BLA, bat_priv, 644 batadv_dbg(BATADV_DBG_BLA, bat_priv,
633 "handle_announce(): ANNOUNCE vid %d (sent by %pM)... CRC = %04x\n", 645 "handle_announce(): ANNOUNCE vid %d (sent by %pM)... CRC = %04x\n",
634 vid, backbone_gw->orig, crc); 646 vid, backbone_gw->orig, crc);
635 647
636 if (backbone_gw->crc != crc) { 648 if (backbone_gw->crc != crc) {
637 bat_dbg(DBG_BLA, backbone_gw->bat_priv, 649 batadv_dbg(BATADV_DBG_BLA, backbone_gw->bat_priv,
638 "handle_announce(): CRC FAILED for %pM/%d (my = %04x, sent = %04x)\n", 650 "handle_announce(): CRC FAILED for %pM/%d (my = %04x, sent = %04x)\n",
639 backbone_gw->orig, backbone_gw->vid, backbone_gw->crc, 651 backbone_gw->orig, backbone_gw->vid,
640 crc); 652 backbone_gw->crc, crc);
641 653
642 bla_send_request(backbone_gw); 654 batadv_bla_send_request(backbone_gw);
643 } else { 655 } else {
644 /* if we have sent a request and the crc was OK, 656 /* if we have sent a request and the crc was OK,
645 * we can allow traffic again. 657 * we can allow traffic again.
@@ -650,88 +662,92 @@ static int handle_announce(struct bat_priv *bat_priv,
650 } 662 }
651 } 663 }
652 664
653 backbone_gw_free_ref(backbone_gw); 665 batadv_backbone_gw_free_ref(backbone_gw);
654 return 1; 666 return 1;
655} 667}
656 668
657/* check for REQUEST frame, return 1 if handled */ 669/* check for REQUEST frame, return 1 if handled */
658static int handle_request(struct bat_priv *bat_priv, 670static int batadv_handle_request(struct batadv_priv *bat_priv,
659 struct hard_iface *primary_if, 671 struct batadv_hard_iface *primary_if,
660 uint8_t *backbone_addr, 672 uint8_t *backbone_addr,
661 struct ethhdr *ethhdr, short vid) 673 struct ethhdr *ethhdr, short vid)
662{ 674{
663 /* check for REQUEST frame */ 675 /* check for REQUEST frame */
664 if (!compare_eth(backbone_addr, ethhdr->h_dest)) 676 if (!batadv_compare_eth(backbone_addr, ethhdr->h_dest))
665 return 0; 677 return 0;
666 678
667 /* sanity check, this should not happen on a normal switch, 679 /* sanity check, this should not happen on a normal switch,
668 * we ignore it in this case. 680 * we ignore it in this case.
669 */ 681 */
670 if (!compare_eth(ethhdr->h_dest, primary_if->net_dev->dev_addr)) 682 if (!batadv_compare_eth(ethhdr->h_dest, primary_if->net_dev->dev_addr))
671 return 1; 683 return 1;
672 684
673 bat_dbg(DBG_BLA, bat_priv, 685 batadv_dbg(BATADV_DBG_BLA, bat_priv,
674 "handle_request(): REQUEST vid %d (sent by %pM)...\n", 686 "handle_request(): REQUEST vid %d (sent by %pM)...\n",
675 vid, ethhdr->h_source); 687 vid, ethhdr->h_source);
676 688
677 bla_answer_request(bat_priv, primary_if, vid); 689 batadv_bla_answer_request(bat_priv, primary_if, vid);
678 return 1; 690 return 1;
679} 691}
680 692
681/* check for UNCLAIM frame, return 1 if handled */ 693/* check for UNCLAIM frame, return 1 if handled */
682static int handle_unclaim(struct bat_priv *bat_priv, 694static int batadv_handle_unclaim(struct batadv_priv *bat_priv,
683 struct hard_iface *primary_if, 695 struct batadv_hard_iface *primary_if,
684 uint8_t *backbone_addr, 696 uint8_t *backbone_addr,
685 uint8_t *claim_addr, short vid) 697 uint8_t *claim_addr, short vid)
686{ 698{
687 struct backbone_gw *backbone_gw; 699 struct batadv_backbone_gw *backbone_gw;
688 700
689 /* unclaim in any case if it is our own */ 701 /* unclaim in any case if it is our own */
690 if (primary_if && compare_eth(backbone_addr, 702 if (primary_if && batadv_compare_eth(backbone_addr,
691 primary_if->net_dev->dev_addr)) 703 primary_if->net_dev->dev_addr))
692 bla_send_claim(bat_priv, claim_addr, vid, CLAIM_TYPE_DEL); 704 batadv_bla_send_claim(bat_priv, claim_addr, vid,
705 BATADV_CLAIM_TYPE_DEL);
693 706
694 backbone_gw = backbone_hash_find(bat_priv, backbone_addr, vid); 707 backbone_gw = batadv_backbone_hash_find(bat_priv, backbone_addr, vid);
695 708
696 if (!backbone_gw) 709 if (!backbone_gw)
697 return 1; 710 return 1;
698 711
699 /* this must be an UNCLAIM frame */ 712 /* this must be an UNCLAIM frame */
700 bat_dbg(DBG_BLA, bat_priv, 713 batadv_dbg(BATADV_DBG_BLA, bat_priv,
701 "handle_unclaim(): UNCLAIM %pM on vid %d (sent by %pM)...\n", 714 "handle_unclaim(): UNCLAIM %pM on vid %d (sent by %pM)...\n",
702 claim_addr, vid, backbone_gw->orig); 715 claim_addr, vid, backbone_gw->orig);
703 716
704 bla_del_claim(bat_priv, claim_addr, vid); 717 batadv_bla_del_claim(bat_priv, claim_addr, vid);
705 backbone_gw_free_ref(backbone_gw); 718 batadv_backbone_gw_free_ref(backbone_gw);
706 return 1; 719 return 1;
707} 720}
708 721
709/* check for CLAIM frame, return 1 if handled */ 722/* check for CLAIM frame, return 1 if handled */
710static int handle_claim(struct bat_priv *bat_priv, 723static int batadv_handle_claim(struct batadv_priv *bat_priv,
711 struct hard_iface *primary_if, uint8_t *backbone_addr, 724 struct batadv_hard_iface *primary_if,
712 uint8_t *claim_addr, short vid) 725 uint8_t *backbone_addr, uint8_t *claim_addr,
726 short vid)
713{ 727{
714 struct backbone_gw *backbone_gw; 728 struct batadv_backbone_gw *backbone_gw;
715 729
716 /* register the gateway if not yet available, and add the claim. */ 730 /* register the gateway if not yet available, and add the claim. */
717 731
718 backbone_gw = bla_get_backbone_gw(bat_priv, backbone_addr, vid); 732 backbone_gw = batadv_bla_get_backbone_gw(bat_priv, backbone_addr, vid);
719 733
720 if (unlikely(!backbone_gw)) 734 if (unlikely(!backbone_gw))
721 return 1; 735 return 1;
722 736
723 /* this must be a CLAIM frame */ 737 /* this must be a CLAIM frame */
724 bla_add_claim(bat_priv, claim_addr, vid, backbone_gw); 738 batadv_bla_add_claim(bat_priv, claim_addr, vid, backbone_gw);
725 if (compare_eth(backbone_addr, primary_if->net_dev->dev_addr)) 739 if (batadv_compare_eth(backbone_addr, primary_if->net_dev->dev_addr))
726 bla_send_claim(bat_priv, claim_addr, vid, CLAIM_TYPE_ADD); 740 batadv_bla_send_claim(bat_priv, claim_addr, vid,
741 BATADV_CLAIM_TYPE_ADD);
727 742
728 /* TODO: we could call something like tt_local_del() here. */ 743 /* TODO: we could call something like tt_local_del() here. */
729 744
730 backbone_gw_free_ref(backbone_gw); 745 batadv_backbone_gw_free_ref(backbone_gw);
731 return 1; 746 return 1;
732} 747}
733 748
734/** 749/**
750 * batadv_check_claim_group
735 * @bat_priv: the bat priv with all the soft interface information 751 * @bat_priv: the bat priv with all the soft interface information
736 * @hw_src: the Hardware source in the ARP Header 752 * @hw_src: the Hardware source in the ARP Header
737 * @hw_dst: the Hardware destination in the ARP Header 753 * @hw_dst: the Hardware destination in the ARP Header
@@ -746,16 +762,16 @@ static int handle_claim(struct bat_priv *bat_priv,
746 * 1 - if is a claim packet from another group 762 * 1 - if is a claim packet from another group
747 * 0 - if it is not a claim packet 763 * 0 - if it is not a claim packet
748 */ 764 */
749static int check_claim_group(struct bat_priv *bat_priv, 765static int batadv_check_claim_group(struct batadv_priv *bat_priv,
750 struct hard_iface *primary_if, 766 struct batadv_hard_iface *primary_if,
751 uint8_t *hw_src, uint8_t *hw_dst, 767 uint8_t *hw_src, uint8_t *hw_dst,
752 struct ethhdr *ethhdr) 768 struct ethhdr *ethhdr)
753{ 769{
754 uint8_t *backbone_addr; 770 uint8_t *backbone_addr;
755 struct orig_node *orig_node; 771 struct batadv_orig_node *orig_node;
756 struct bla_claim_dst *bla_dst, *bla_dst_own; 772 struct batadv_bla_claim_dst *bla_dst, *bla_dst_own;
757 773
758 bla_dst = (struct bla_claim_dst *)hw_dst; 774 bla_dst = (struct batadv_bla_claim_dst *)hw_dst;
759 bla_dst_own = &bat_priv->claim_dest; 775 bla_dst_own = &bat_priv->claim_dest;
760 776
761 /* check if it is a claim packet in general */ 777 /* check if it is a claim packet in general */
@@ -767,12 +783,12 @@ static int check_claim_group(struct bat_priv *bat_priv,
767 * otherwise assume it is in the hw_src 783 * otherwise assume it is in the hw_src
768 */ 784 */
769 switch (bla_dst->type) { 785 switch (bla_dst->type) {
770 case CLAIM_TYPE_ADD: 786 case BATADV_CLAIM_TYPE_ADD:
771 backbone_addr = hw_src; 787 backbone_addr = hw_src;
772 break; 788 break;
773 case CLAIM_TYPE_REQUEST: 789 case BATADV_CLAIM_TYPE_REQUEST:
774 case CLAIM_TYPE_ANNOUNCE: 790 case BATADV_CLAIM_TYPE_ANNOUNCE:
775 case CLAIM_TYPE_DEL: 791 case BATADV_CLAIM_TYPE_DEL:
776 backbone_addr = ethhdr->h_source; 792 backbone_addr = ethhdr->h_source;
777 break; 793 break;
778 default: 794 default:
@@ -780,7 +796,7 @@ static int check_claim_group(struct bat_priv *bat_priv,
780 } 796 }
781 797
782 /* don't accept claim frames from ourselves */ 798 /* don't accept claim frames from ourselves */
783 if (compare_eth(backbone_addr, primary_if->net_dev->dev_addr)) 799 if (batadv_compare_eth(backbone_addr, primary_if->net_dev->dev_addr))
784 return 0; 800 return 0;
785 801
786 /* if its already the same group, it is fine. */ 802 /* if its already the same group, it is fine. */
@@ -788,7 +804,7 @@ static int check_claim_group(struct bat_priv *bat_priv,
788 return 2; 804 return 2;
789 805
790 /* lets see if this originator is in our mesh */ 806 /* lets see if this originator is in our mesh */
791 orig_node = orig_hash_find(bat_priv, backbone_addr); 807 orig_node = batadv_orig_hash_find(bat_priv, backbone_addr);
792 808
793 /* dont accept claims from gateways which are not in 809 /* dont accept claims from gateways which are not in
794 * the same mesh or group. 810 * the same mesh or group.
@@ -798,20 +814,19 @@ static int check_claim_group(struct bat_priv *bat_priv,
798 814
799 /* if our mesh friends mac is bigger, use it for ourselves. */ 815 /* if our mesh friends mac is bigger, use it for ourselves. */
800 if (ntohs(bla_dst->group) > ntohs(bla_dst_own->group)) { 816 if (ntohs(bla_dst->group) > ntohs(bla_dst_own->group)) {
801 bat_dbg(DBG_BLA, bat_priv, 817 batadv_dbg(BATADV_DBG_BLA, bat_priv,
802 "taking other backbones claim group: %04x\n", 818 "taking other backbones claim group: %04x\n",
803 ntohs(bla_dst->group)); 819 ntohs(bla_dst->group));
804 bla_dst_own->group = bla_dst->group; 820 bla_dst_own->group = bla_dst->group;
805 } 821 }
806 822
807 orig_node_free_ref(orig_node); 823 batadv_orig_node_free_ref(orig_node);
808 824
809 return 2; 825 return 2;
810} 826}
811 827
812 828
813/** 829/* @bat_priv: the bat priv with all the soft interface information
814 * @bat_priv: the bat priv with all the soft interface information
815 * @skb: the frame to be checked 830 * @skb: the frame to be checked
816 * 831 *
817 * Check if this is a claim frame, and process it accordingly. 832 * Check if this is a claim frame, and process it accordingly.
@@ -819,15 +834,15 @@ static int check_claim_group(struct bat_priv *bat_priv,
819 * returns 1 if it was a claim frame, otherwise return 0 to 834 * returns 1 if it was a claim frame, otherwise return 0 to
820 * tell the callee that it can use the frame on its own. 835 * tell the callee that it can use the frame on its own.
821 */ 836 */
822static int bla_process_claim(struct bat_priv *bat_priv, 837static int batadv_bla_process_claim(struct batadv_priv *bat_priv,
823 struct hard_iface *primary_if, 838 struct batadv_hard_iface *primary_if,
824 struct sk_buff *skb) 839 struct sk_buff *skb)
825{ 840{
826 struct ethhdr *ethhdr; 841 struct ethhdr *ethhdr;
827 struct vlan_ethhdr *vhdr; 842 struct vlan_ethhdr *vhdr;
828 struct arphdr *arphdr; 843 struct arphdr *arphdr;
829 uint8_t *hw_src, *hw_dst; 844 uint8_t *hw_src, *hw_dst;
830 struct bla_claim_dst *bla_dst; 845 struct batadv_bla_claim_dst *bla_dst;
831 uint16_t proto; 846 uint16_t proto;
832 int headlen; 847 int headlen;
833 short vid = -1; 848 short vid = -1;
@@ -860,7 +875,6 @@ static int bla_process_claim(struct bat_priv *bat_priv,
860 /* Check whether the ARP frame carries a valid 875 /* Check whether the ARP frame carries a valid
861 * IP information 876 * IP information
862 */ 877 */
863
864 if (arphdr->ar_hrd != htons(ARPHRD_ETHER)) 878 if (arphdr->ar_hrd != htons(ARPHRD_ETHER))
865 return 0; 879 return 0;
866 if (arphdr->ar_pro != htons(ETH_P_IP)) 880 if (arphdr->ar_pro != htons(ETH_P_IP))
@@ -872,59 +886,62 @@ static int bla_process_claim(struct bat_priv *bat_priv,
872 886
873 hw_src = (uint8_t *)arphdr + sizeof(struct arphdr); 887 hw_src = (uint8_t *)arphdr + sizeof(struct arphdr);
874 hw_dst = hw_src + ETH_ALEN + 4; 888 hw_dst = hw_src + ETH_ALEN + 4;
875 bla_dst = (struct bla_claim_dst *)hw_dst; 889 bla_dst = (struct batadv_bla_claim_dst *)hw_dst;
876 890
877 /* check if it is a claim frame. */ 891 /* check if it is a claim frame. */
878 ret = check_claim_group(bat_priv, primary_if, hw_src, hw_dst, ethhdr); 892 ret = batadv_check_claim_group(bat_priv, primary_if, hw_src, hw_dst,
893 ethhdr);
879 if (ret == 1) 894 if (ret == 1)
880 bat_dbg(DBG_BLA, bat_priv, 895 batadv_dbg(BATADV_DBG_BLA, bat_priv,
881 "bla_process_claim(): received a claim frame from another group. From: %pM on vid %d ...(hw_src %pM, hw_dst %pM)\n", 896 "bla_process_claim(): received a claim frame from another group. From: %pM on vid %d ...(hw_src %pM, hw_dst %pM)\n",
882 ethhdr->h_source, vid, hw_src, hw_dst); 897 ethhdr->h_source, vid, hw_src, hw_dst);
883 898
884 if (ret < 2) 899 if (ret < 2)
885 return ret; 900 return ret;
886 901
887 /* become a backbone gw ourselves on this vlan if not happened yet */ 902 /* become a backbone gw ourselves on this vlan if not happened yet */
888 bla_update_own_backbone_gw(bat_priv, primary_if, vid); 903 batadv_bla_update_own_backbone_gw(bat_priv, primary_if, vid);
889 904
890 /* check for the different types of claim frames ... */ 905 /* check for the different types of claim frames ... */
891 switch (bla_dst->type) { 906 switch (bla_dst->type) {
892 case CLAIM_TYPE_ADD: 907 case BATADV_CLAIM_TYPE_ADD:
893 if (handle_claim(bat_priv, primary_if, hw_src, 908 if (batadv_handle_claim(bat_priv, primary_if, hw_src,
894 ethhdr->h_source, vid)) 909 ethhdr->h_source, vid))
895 return 1; 910 return 1;
896 break; 911 break;
897 case CLAIM_TYPE_DEL: 912 case BATADV_CLAIM_TYPE_DEL:
898 if (handle_unclaim(bat_priv, primary_if, 913 if (batadv_handle_unclaim(bat_priv, primary_if,
899 ethhdr->h_source, hw_src, vid)) 914 ethhdr->h_source, hw_src, vid))
900 return 1; 915 return 1;
901 break; 916 break;
902 917
903 case CLAIM_TYPE_ANNOUNCE: 918 case BATADV_CLAIM_TYPE_ANNOUNCE:
904 if (handle_announce(bat_priv, hw_src, ethhdr->h_source, vid)) 919 if (batadv_handle_announce(bat_priv, hw_src, ethhdr->h_source,
920 vid))
905 return 1; 921 return 1;
906 break; 922 break;
907 case CLAIM_TYPE_REQUEST: 923 case BATADV_CLAIM_TYPE_REQUEST:
908 if (handle_request(bat_priv, primary_if, hw_src, ethhdr, vid)) 924 if (batadv_handle_request(bat_priv, primary_if, hw_src, ethhdr,
925 vid))
909 return 1; 926 return 1;
910 break; 927 break;
911 } 928 }
912 929
913 bat_dbg(DBG_BLA, bat_priv, 930 batadv_dbg(BATADV_DBG_BLA, bat_priv,
914 "bla_process_claim(): ERROR - this looks like a claim frame, but is useless. eth src %pM on vid %d ...(hw_src %pM, hw_dst %pM)\n", 931 "bla_process_claim(): ERROR - this looks like a claim frame, but is useless. eth src %pM on vid %d ...(hw_src %pM, hw_dst %pM)\n",
915 ethhdr->h_source, vid, hw_src, hw_dst); 932 ethhdr->h_source, vid, hw_src, hw_dst);
916 return 1; 933 return 1;
917} 934}
918 935
919/* Check when we last heard from other nodes, and remove them in case of 936/* Check when we last heard from other nodes, and remove them in case of
920 * a time out, or clean all backbone gws if now is set. 937 * a time out, or clean all backbone gws if now is set.
921 */ 938 */
922static void bla_purge_backbone_gw(struct bat_priv *bat_priv, int now) 939static void batadv_bla_purge_backbone_gw(struct batadv_priv *bat_priv, int now)
923{ 940{
924 struct backbone_gw *backbone_gw; 941 struct batadv_backbone_gw *backbone_gw;
925 struct hlist_node *node, *node_tmp; 942 struct hlist_node *node, *node_tmp;
926 struct hlist_head *head; 943 struct hlist_head *head;
927 struct hashtable_t *hash; 944 struct batadv_hashtable *hash;
928 spinlock_t *list_lock; /* protects write access to the hash lists */ 945 spinlock_t *list_lock; /* protects write access to the hash lists */
929 int i; 946 int i;
930 947
@@ -941,29 +958,30 @@ static void bla_purge_backbone_gw(struct bat_priv *bat_priv, int now)
941 head, hash_entry) { 958 head, hash_entry) {
942 if (now) 959 if (now)
943 goto purge_now; 960 goto purge_now;
944 if (!has_timed_out(backbone_gw->lasttime, 961 if (!batadv_has_timed_out(backbone_gw->lasttime,
945 BLA_BACKBONE_TIMEOUT)) 962 BATADV_BLA_BACKBONE_TIMEOUT))
946 continue; 963 continue;
947 964
948 bat_dbg(DBG_BLA, backbone_gw->bat_priv, 965 batadv_dbg(BATADV_DBG_BLA, backbone_gw->bat_priv,
949 "bla_purge_backbone_gw(): backbone gw %pM timed out\n", 966 "bla_purge_backbone_gw(): backbone gw %pM timed out\n",
950 backbone_gw->orig); 967 backbone_gw->orig);
951 968
952purge_now: 969purge_now:
953 /* don't wait for the pending request anymore */ 970 /* don't wait for the pending request anymore */
954 if (atomic_read(&backbone_gw->request_sent)) 971 if (atomic_read(&backbone_gw->request_sent))
955 atomic_dec(&bat_priv->bla_num_requests); 972 atomic_dec(&bat_priv->bla_num_requests);
956 973
957 bla_del_backbone_claims(backbone_gw); 974 batadv_bla_del_backbone_claims(backbone_gw);
958 975
959 hlist_del_rcu(node); 976 hlist_del_rcu(node);
960 backbone_gw_free_ref(backbone_gw); 977 batadv_backbone_gw_free_ref(backbone_gw);
961 } 978 }
962 spin_unlock_bh(list_lock); 979 spin_unlock_bh(list_lock);
963 } 980 }
964} 981}
965 982
966/** 983/**
984 * batadv_bla_purge_claims
967 * @bat_priv: the bat priv with all the soft interface information 985 * @bat_priv: the bat priv with all the soft interface information
968 * @primary_if: the selected primary interface, may be NULL if now is set 986 * @primary_if: the selected primary interface, may be NULL if now is set
969 * @now: whether the whole hash shall be wiped now 987 * @now: whether the whole hash shall be wiped now
@@ -971,13 +989,14 @@ purge_now:
971 * Check when we heard last time from our own claims, and remove them in case of 989 * Check when we heard last time from our own claims, and remove them in case of
972 * a time out, or clean all claims if now is set 990 * a time out, or clean all claims if now is set
973 */ 991 */
974static void bla_purge_claims(struct bat_priv *bat_priv, 992static void batadv_bla_purge_claims(struct batadv_priv *bat_priv,
975 struct hard_iface *primary_if, int now) 993 struct batadv_hard_iface *primary_if,
994 int now)
976{ 995{
977 struct claim *claim; 996 struct batadv_claim *claim;
978 struct hlist_node *node; 997 struct hlist_node *node;
979 struct hlist_head *head; 998 struct hlist_head *head;
980 struct hashtable_t *hash; 999 struct batadv_hashtable *hash;
981 int i; 1000 int i;
982 1001
983 hash = bat_priv->claim_hash; 1002 hash = bat_priv->claim_hash;
@@ -991,42 +1010,42 @@ static void bla_purge_claims(struct bat_priv *bat_priv,
991 hlist_for_each_entry_rcu(claim, node, head, hash_entry) { 1010 hlist_for_each_entry_rcu(claim, node, head, hash_entry) {
992 if (now) 1011 if (now)
993 goto purge_now; 1012 goto purge_now;
994 if (!compare_eth(claim->backbone_gw->orig, 1013 if (!batadv_compare_eth(claim->backbone_gw->orig,
995 primary_if->net_dev->dev_addr)) 1014 primary_if->net_dev->dev_addr))
996 continue; 1015 continue;
997 if (!has_timed_out(claim->lasttime, 1016 if (!batadv_has_timed_out(claim->lasttime,
998 BLA_CLAIM_TIMEOUT)) 1017 BATADV_BLA_CLAIM_TIMEOUT))
999 continue; 1018 continue;
1000 1019
1001 bat_dbg(DBG_BLA, bat_priv, 1020 batadv_dbg(BATADV_DBG_BLA, bat_priv,
1002 "bla_purge_claims(): %pM, vid %d, time out\n", 1021 "bla_purge_claims(): %pM, vid %d, time out\n",
1003 claim->addr, claim->vid); 1022 claim->addr, claim->vid);
1004 1023
1005purge_now: 1024purge_now:
1006 handle_unclaim(bat_priv, primary_if, 1025 batadv_handle_unclaim(bat_priv, primary_if,
1007 claim->backbone_gw->orig, 1026 claim->backbone_gw->orig,
1008 claim->addr, claim->vid); 1027 claim->addr, claim->vid);
1009 } 1028 }
1010 rcu_read_unlock(); 1029 rcu_read_unlock();
1011 } 1030 }
1012} 1031}
1013 1032
1014/** 1033/**
1034 * batadv_bla_update_orig_address
1015 * @bat_priv: the bat priv with all the soft interface information 1035 * @bat_priv: the bat priv with all the soft interface information
1016 * @primary_if: the new selected primary_if 1036 * @primary_if: the new selected primary_if
1017 * @oldif: the old primary interface, may be NULL 1037 * @oldif: the old primary interface, may be NULL
1018 * 1038 *
1019 * Update the backbone gateways when the own orig address changes. 1039 * Update the backbone gateways when the own orig address changes.
1020 *
1021 */ 1040 */
1022void bla_update_orig_address(struct bat_priv *bat_priv, 1041void batadv_bla_update_orig_address(struct batadv_priv *bat_priv,
1023 struct hard_iface *primary_if, 1042 struct batadv_hard_iface *primary_if,
1024 struct hard_iface *oldif) 1043 struct batadv_hard_iface *oldif)
1025{ 1044{
1026 struct backbone_gw *backbone_gw; 1045 struct batadv_backbone_gw *backbone_gw;
1027 struct hlist_node *node; 1046 struct hlist_node *node;
1028 struct hlist_head *head; 1047 struct hlist_head *head;
1029 struct hashtable_t *hash; 1048 struct batadv_hashtable *hash;
1030 int i; 1049 int i;
1031 1050
1032 /* reset bridge loop avoidance group id */ 1051 /* reset bridge loop avoidance group id */
@@ -1034,8 +1053,8 @@ void bla_update_orig_address(struct bat_priv *bat_priv,
1034 htons(crc16(0, primary_if->net_dev->dev_addr, ETH_ALEN)); 1053 htons(crc16(0, primary_if->net_dev->dev_addr, ETH_ALEN));
1035 1054
1036 if (!oldif) { 1055 if (!oldif) {
1037 bla_purge_claims(bat_priv, NULL, 1); 1056 batadv_bla_purge_claims(bat_priv, NULL, 1);
1038 bla_purge_backbone_gw(bat_priv, 1); 1057 batadv_bla_purge_backbone_gw(bat_priv, 1);
1039 return; 1058 return;
1040 } 1059 }
1041 1060
@@ -1049,8 +1068,8 @@ void bla_update_orig_address(struct bat_priv *bat_priv,
1049 rcu_read_lock(); 1068 rcu_read_lock();
1050 hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) { 1069 hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) {
1051 /* own orig still holds the old value. */ 1070 /* own orig still holds the old value. */
1052 if (!compare_eth(backbone_gw->orig, 1071 if (!batadv_compare_eth(backbone_gw->orig,
1053 oldif->net_dev->dev_addr)) 1072 oldif->net_dev->dev_addr))
1054 continue; 1073 continue;
1055 1074
1056 memcpy(backbone_gw->orig, 1075 memcpy(backbone_gw->orig,
@@ -1058,7 +1077,7 @@ void bla_update_orig_address(struct bat_priv *bat_priv,
1058 /* send an announce frame so others will ask for our 1077 /* send an announce frame so others will ask for our
1059 * claims and update their tables. 1078 * claims and update their tables.
1060 */ 1079 */
1061 bla_send_announce(bat_priv, backbone_gw); 1080 batadv_bla_send_announce(bat_priv, backbone_gw);
1062 } 1081 }
1063 rcu_read_unlock(); 1082 rcu_read_unlock();
1064 } 1083 }
@@ -1067,36 +1086,36 @@ void bla_update_orig_address(struct bat_priv *bat_priv,
1067 1086
1068 1087
1069/* (re)start the timer */ 1088/* (re)start the timer */
1070static void bla_start_timer(struct bat_priv *bat_priv) 1089static void batadv_bla_start_timer(struct batadv_priv *bat_priv)
1071{ 1090{
1072 INIT_DELAYED_WORK(&bat_priv->bla_work, bla_periodic_work); 1091 INIT_DELAYED_WORK(&bat_priv->bla_work, batadv_bla_periodic_work);
1073 queue_delayed_work(bat_event_workqueue, &bat_priv->bla_work, 1092 queue_delayed_work(batadv_event_workqueue, &bat_priv->bla_work,
1074 msecs_to_jiffies(BLA_PERIOD_LENGTH)); 1093 msecs_to_jiffies(BATADV_BLA_PERIOD_LENGTH));
1075} 1094}
1076 1095
1077/* periodic work to do: 1096/* periodic work to do:
1078 * * purge structures when they are too old 1097 * * purge structures when they are too old
1079 * * send announcements 1098 * * send announcements
1080 */ 1099 */
1081static void bla_periodic_work(struct work_struct *work) 1100static void batadv_bla_periodic_work(struct work_struct *work)
1082{ 1101{
1083 struct delayed_work *delayed_work = 1102 struct delayed_work *delayed_work =
1084 container_of(work, struct delayed_work, work); 1103 container_of(work, struct delayed_work, work);
1085 struct bat_priv *bat_priv = 1104 struct batadv_priv *bat_priv;
1086 container_of(delayed_work, struct bat_priv, bla_work);
1087 struct hlist_node *node; 1105 struct hlist_node *node;
1088 struct hlist_head *head; 1106 struct hlist_head *head;
1089 struct backbone_gw *backbone_gw; 1107 struct batadv_backbone_gw *backbone_gw;
1090 struct hashtable_t *hash; 1108 struct batadv_hashtable *hash;
1091 struct hard_iface *primary_if; 1109 struct batadv_hard_iface *primary_if;
1092 int i; 1110 int i;
1093 1111
1094 primary_if = primary_if_get_selected(bat_priv); 1112 bat_priv = container_of(delayed_work, struct batadv_priv, bla_work);
1113 primary_if = batadv_primary_if_get_selected(bat_priv);
1095 if (!primary_if) 1114 if (!primary_if)
1096 goto out; 1115 goto out;
1097 1116
1098 bla_purge_claims(bat_priv, primary_if, 0); 1117 batadv_bla_purge_claims(bat_priv, primary_if, 0);
1099 bla_purge_backbone_gw(bat_priv, 0); 1118 batadv_bla_purge_backbone_gw(bat_priv, 0);
1100 1119
1101 if (!atomic_read(&bat_priv->bridge_loop_avoidance)) 1120 if (!atomic_read(&bat_priv->bridge_loop_avoidance))
1102 goto out; 1121 goto out;
@@ -1110,67 +1129,81 @@ static void bla_periodic_work(struct work_struct *work)
1110 1129
1111 rcu_read_lock(); 1130 rcu_read_lock();
1112 hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) { 1131 hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) {
1113 if (!compare_eth(backbone_gw->orig, 1132 if (!batadv_compare_eth(backbone_gw->orig,
1114 primary_if->net_dev->dev_addr)) 1133 primary_if->net_dev->dev_addr))
1115 continue; 1134 continue;
1116 1135
1117 backbone_gw->lasttime = jiffies; 1136 backbone_gw->lasttime = jiffies;
1118 1137
1119 bla_send_announce(bat_priv, backbone_gw); 1138 batadv_bla_send_announce(bat_priv, backbone_gw);
1120 } 1139 }
1121 rcu_read_unlock(); 1140 rcu_read_unlock();
1122 } 1141 }
1123out: 1142out:
1124 if (primary_if) 1143 if (primary_if)
1125 hardif_free_ref(primary_if); 1144 batadv_hardif_free_ref(primary_if);
1126 1145
1127 bla_start_timer(bat_priv); 1146 batadv_bla_start_timer(bat_priv);
1128} 1147}
1129 1148
1149/* The hash for claim and backbone hash receive the same key because they
1150 * are getting initialized by hash_new with the same key. Reinitializing
1151 * them with to different keys to allow nested locking without generating
1152 * lockdep warnings
1153 */
1154static struct lock_class_key batadv_claim_hash_lock_class_key;
1155static struct lock_class_key batadv_backbone_hash_lock_class_key;
1156
1130/* initialize all bla structures */ 1157/* initialize all bla structures */
1131int bla_init(struct bat_priv *bat_priv) 1158int batadv_bla_init(struct batadv_priv *bat_priv)
1132{ 1159{
1133 int i; 1160 int i;
1134 uint8_t claim_dest[ETH_ALEN] = {0xff, 0x43, 0x05, 0x00, 0x00, 0x00}; 1161 uint8_t claim_dest[ETH_ALEN] = {0xff, 0x43, 0x05, 0x00, 0x00, 0x00};
1135 struct hard_iface *primary_if; 1162 struct batadv_hard_iface *primary_if;
1136 1163
1137 bat_dbg(DBG_BLA, bat_priv, "bla hash registering\n"); 1164 batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla hash registering\n");
1138 1165
1139 /* setting claim destination address */ 1166 /* setting claim destination address */
1140 memcpy(&bat_priv->claim_dest.magic, claim_dest, 3); 1167 memcpy(&bat_priv->claim_dest.magic, claim_dest, 3);
1141 bat_priv->claim_dest.type = 0; 1168 bat_priv->claim_dest.type = 0;
1142 primary_if = primary_if_get_selected(bat_priv); 1169 primary_if = batadv_primary_if_get_selected(bat_priv);
1143 if (primary_if) { 1170 if (primary_if) {
1144 bat_priv->claim_dest.group = 1171 bat_priv->claim_dest.group =
1145 htons(crc16(0, primary_if->net_dev->dev_addr, 1172 htons(crc16(0, primary_if->net_dev->dev_addr,
1146 ETH_ALEN)); 1173 ETH_ALEN));
1147 hardif_free_ref(primary_if); 1174 batadv_hardif_free_ref(primary_if);
1148 } else { 1175 } else {
1149 bat_priv->claim_dest.group = 0; /* will be set later */ 1176 bat_priv->claim_dest.group = 0; /* will be set later */
1150 } 1177 }
1151 1178
1152 /* initialize the duplicate list */ 1179 /* initialize the duplicate list */
1153 for (i = 0; i < DUPLIST_SIZE; i++) 1180 for (i = 0; i < BATADV_DUPLIST_SIZE; i++)
1154 bat_priv->bcast_duplist[i].entrytime = 1181 bat_priv->bcast_duplist[i].entrytime =
1155 jiffies - msecs_to_jiffies(DUPLIST_TIMEOUT); 1182 jiffies - msecs_to_jiffies(BATADV_DUPLIST_TIMEOUT);
1156 bat_priv->bcast_duplist_curr = 0; 1183 bat_priv->bcast_duplist_curr = 0;
1157 1184
1158 if (bat_priv->claim_hash) 1185 if (bat_priv->claim_hash)
1159 return 1; 1186 return 0;
1160 1187
1161 bat_priv->claim_hash = hash_new(128); 1188 bat_priv->claim_hash = batadv_hash_new(128);
1162 bat_priv->backbone_hash = hash_new(32); 1189 bat_priv->backbone_hash = batadv_hash_new(32);
1163 1190
1164 if (!bat_priv->claim_hash || !bat_priv->backbone_hash) 1191 if (!bat_priv->claim_hash || !bat_priv->backbone_hash)
1165 return -1; 1192 return -ENOMEM;
1166 1193
1167 bat_dbg(DBG_BLA, bat_priv, "bla hashes initialized\n"); 1194 batadv_hash_set_lock_class(bat_priv->claim_hash,
1195 &batadv_claim_hash_lock_class_key);
1196 batadv_hash_set_lock_class(bat_priv->backbone_hash,
1197 &batadv_backbone_hash_lock_class_key);
1168 1198
1169 bla_start_timer(bat_priv); 1199 batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla hashes initialized\n");
1170 return 1; 1200
1201 batadv_bla_start_timer(bat_priv);
1202 return 0;
1171} 1203}
1172 1204
1173/** 1205/**
1206 * batadv_bla_check_bcast_duplist
1174 * @bat_priv: the bat priv with all the soft interface information 1207 * @bat_priv: the bat priv with all the soft interface information
1175 * @bcast_packet: originator mac address 1208 * @bcast_packet: originator mac address
1176 * @hdr_size: maximum length of the frame 1209 * @hdr_size: maximum length of the frame
@@ -1183,17 +1216,15 @@ int bla_init(struct bat_priv *bat_priv)
1183 * with a good chance that it is the same packet. If it is furthermore 1216 * with a good chance that it is the same packet. If it is furthermore
1184 * sent by another host, drop it. We allow equal packets from 1217 * sent by another host, drop it. We allow equal packets from
1185 * the same host however as this might be intended. 1218 * the same host however as this might be intended.
1186 * 1219 */
1187 **/ 1220int batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
1188 1221 struct batadv_bcast_packet *bcast_packet,
1189int bla_check_bcast_duplist(struct bat_priv *bat_priv, 1222 int hdr_size)
1190 struct bcast_packet *bcast_packet,
1191 int hdr_size)
1192{ 1223{
1193 int i, length, curr; 1224 int i, length, curr;
1194 uint8_t *content; 1225 uint8_t *content;
1195 uint16_t crc; 1226 uint16_t crc;
1196 struct bcast_duplist_entry *entry; 1227 struct batadv_bcast_duplist_entry *entry;
1197 1228
1198 length = hdr_size - sizeof(*bcast_packet); 1229 length = hdr_size - sizeof(*bcast_packet);
1199 content = (uint8_t *)bcast_packet; 1230 content = (uint8_t *)bcast_packet;
@@ -1202,20 +1233,21 @@ int bla_check_bcast_duplist(struct bat_priv *bat_priv,
1202 /* calculate the crc ... */ 1233 /* calculate the crc ... */
1203 crc = crc16(0, content, length); 1234 crc = crc16(0, content, length);
1204 1235
1205 for (i = 0 ; i < DUPLIST_SIZE; i++) { 1236 for (i = 0; i < BATADV_DUPLIST_SIZE; i++) {
1206 curr = (bat_priv->bcast_duplist_curr + i) % DUPLIST_SIZE; 1237 curr = (bat_priv->bcast_duplist_curr + i) % BATADV_DUPLIST_SIZE;
1207 entry = &bat_priv->bcast_duplist[curr]; 1238 entry = &bat_priv->bcast_duplist[curr];
1208 1239
1209 /* we can stop searching if the entry is too old ; 1240 /* we can stop searching if the entry is too old ;
1210 * later entries will be even older 1241 * later entries will be even older
1211 */ 1242 */
1212 if (has_timed_out(entry->entrytime, DUPLIST_TIMEOUT)) 1243 if (batadv_has_timed_out(entry->entrytime,
1244 BATADV_DUPLIST_TIMEOUT))
1213 break; 1245 break;
1214 1246
1215 if (entry->crc != crc) 1247 if (entry->crc != crc)
1216 continue; 1248 continue;
1217 1249
1218 if (compare_eth(entry->orig, bcast_packet->orig)) 1250 if (batadv_compare_eth(entry->orig, bcast_packet->orig))
1219 continue; 1251 continue;
1220 1252
1221 /* this entry seems to match: same crc, not too old, 1253 /* this entry seems to match: same crc, not too old,
@@ -1224,7 +1256,8 @@ int bla_check_bcast_duplist(struct bat_priv *bat_priv,
1224 return 1; 1256 return 1;
1225 } 1257 }
1226 /* not found, add a new entry (overwrite the oldest entry) */ 1258 /* not found, add a new entry (overwrite the oldest entry) */
1227 curr = (bat_priv->bcast_duplist_curr + DUPLIST_SIZE - 1) % DUPLIST_SIZE; 1259 curr = (bat_priv->bcast_duplist_curr + BATADV_DUPLIST_SIZE - 1);
1260 curr %= BATADV_DUPLIST_SIZE;
1228 entry = &bat_priv->bcast_duplist[curr]; 1261 entry = &bat_priv->bcast_duplist[curr];
1229 entry->crc = crc; 1262 entry->crc = crc;
1230 entry->entrytime = jiffies; 1263 entry->entrytime = jiffies;
@@ -1237,22 +1270,19 @@ int bla_check_bcast_duplist(struct bat_priv *bat_priv,
1237 1270
1238 1271
1239 1272
1240/** 1273/* @bat_priv: the bat priv with all the soft interface information
1241 * @bat_priv: the bat priv with all the soft interface information
1242 * @orig: originator mac address 1274 * @orig: originator mac address
1243 * 1275 *
1244 * check if the originator is a gateway for any VLAN ID. 1276 * check if the originator is a gateway for any VLAN ID.
1245 * 1277 *
1246 * returns 1 if it is found, 0 otherwise 1278 * returns 1 if it is found, 0 otherwise
1247 *
1248 */ 1279 */
1249 1280int batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, uint8_t *orig)
1250int bla_is_backbone_gw_orig(struct bat_priv *bat_priv, uint8_t *orig)
1251{ 1281{
1252 struct hashtable_t *hash = bat_priv->backbone_hash; 1282 struct batadv_hashtable *hash = bat_priv->backbone_hash;
1253 struct hlist_head *head; 1283 struct hlist_head *head;
1254 struct hlist_node *node; 1284 struct hlist_node *node;
1255 struct backbone_gw *backbone_gw; 1285 struct batadv_backbone_gw *backbone_gw;
1256 int i; 1286 int i;
1257 1287
1258 if (!atomic_read(&bat_priv->bridge_loop_avoidance)) 1288 if (!atomic_read(&bat_priv->bridge_loop_avoidance))
@@ -1266,7 +1296,7 @@ int bla_is_backbone_gw_orig(struct bat_priv *bat_priv, uint8_t *orig)
1266 1296
1267 rcu_read_lock(); 1297 rcu_read_lock();
1268 hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) { 1298 hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) {
1269 if (compare_eth(backbone_gw->orig, orig)) { 1299 if (batadv_compare_eth(backbone_gw->orig, orig)) {
1270 rcu_read_unlock(); 1300 rcu_read_unlock();
1271 return 1; 1301 return 1;
1272 } 1302 }
@@ -1279,6 +1309,7 @@ int bla_is_backbone_gw_orig(struct bat_priv *bat_priv, uint8_t *orig)
1279 1309
1280 1310
1281/** 1311/**
1312 * batadv_bla_is_backbone_gw
1282 * @skb: the frame to be checked 1313 * @skb: the frame to be checked
1283 * @orig_node: the orig_node of the frame 1314 * @orig_node: the orig_node of the frame
1284 * @hdr_size: maximum length of the frame 1315 * @hdr_size: maximum length of the frame
@@ -1286,14 +1317,13 @@ int bla_is_backbone_gw_orig(struct bat_priv *bat_priv, uint8_t *orig)
1286 * bla_is_backbone_gw inspects the skb for the VLAN ID and returns 1 1317 * bla_is_backbone_gw inspects the skb for the VLAN ID and returns 1
1287 * if the orig_node is also a gateway on the soft interface, otherwise it 1318 * if the orig_node is also a gateway on the soft interface, otherwise it
1288 * returns 0. 1319 * returns 0.
1289 *
1290 */ 1320 */
1291int bla_is_backbone_gw(struct sk_buff *skb, 1321int batadv_bla_is_backbone_gw(struct sk_buff *skb,
1292 struct orig_node *orig_node, int hdr_size) 1322 struct batadv_orig_node *orig_node, int hdr_size)
1293{ 1323{
1294 struct ethhdr *ethhdr; 1324 struct ethhdr *ethhdr;
1295 struct vlan_ethhdr *vhdr; 1325 struct vlan_ethhdr *vhdr;
1296 struct backbone_gw *backbone_gw; 1326 struct batadv_backbone_gw *backbone_gw;
1297 short vid = -1; 1327 short vid = -1;
1298 1328
1299 if (!atomic_read(&orig_node->bat_priv->bridge_loop_avoidance)) 1329 if (!atomic_read(&orig_node->bat_priv->bridge_loop_avoidance))
@@ -1315,39 +1345,39 @@ int bla_is_backbone_gw(struct sk_buff *skb,
1315 } 1345 }
1316 1346
1317 /* see if this originator is a backbone gw for this VLAN */ 1347 /* see if this originator is a backbone gw for this VLAN */
1318 1348 backbone_gw = batadv_backbone_hash_find(orig_node->bat_priv,
1319 backbone_gw = backbone_hash_find(orig_node->bat_priv, 1349 orig_node->orig, vid);
1320 orig_node->orig, vid);
1321 if (!backbone_gw) 1350 if (!backbone_gw)
1322 return 0; 1351 return 0;
1323 1352
1324 backbone_gw_free_ref(backbone_gw); 1353 batadv_backbone_gw_free_ref(backbone_gw);
1325 return 1; 1354 return 1;
1326} 1355}
1327 1356
1328/* free all bla structures (for softinterface free or module unload) */ 1357/* free all bla structures (for softinterface free or module unload) */
1329void bla_free(struct bat_priv *bat_priv) 1358void batadv_bla_free(struct batadv_priv *bat_priv)
1330{ 1359{
1331 struct hard_iface *primary_if; 1360 struct batadv_hard_iface *primary_if;
1332 1361
1333 cancel_delayed_work_sync(&bat_priv->bla_work); 1362 cancel_delayed_work_sync(&bat_priv->bla_work);
1334 primary_if = primary_if_get_selected(bat_priv); 1363 primary_if = batadv_primary_if_get_selected(bat_priv);
1335 1364
1336 if (bat_priv->claim_hash) { 1365 if (bat_priv->claim_hash) {
1337 bla_purge_claims(bat_priv, primary_if, 1); 1366 batadv_bla_purge_claims(bat_priv, primary_if, 1);
1338 hash_destroy(bat_priv->claim_hash); 1367 batadv_hash_destroy(bat_priv->claim_hash);
1339 bat_priv->claim_hash = NULL; 1368 bat_priv->claim_hash = NULL;
1340 } 1369 }
1341 if (bat_priv->backbone_hash) { 1370 if (bat_priv->backbone_hash) {
1342 bla_purge_backbone_gw(bat_priv, 1); 1371 batadv_bla_purge_backbone_gw(bat_priv, 1);
1343 hash_destroy(bat_priv->backbone_hash); 1372 batadv_hash_destroy(bat_priv->backbone_hash);
1344 bat_priv->backbone_hash = NULL; 1373 bat_priv->backbone_hash = NULL;
1345 } 1374 }
1346 if (primary_if) 1375 if (primary_if)
1347 hardif_free_ref(primary_if); 1376 batadv_hardif_free_ref(primary_if);
1348} 1377}
1349 1378
1350/** 1379/**
1380 * batadv_bla_rx
1351 * @bat_priv: the bat priv with all the soft interface information 1381 * @bat_priv: the bat priv with all the soft interface information
1352 * @skb: the frame to be checked 1382 * @skb: the frame to be checked
1353 * @vid: the VLAN ID of the frame 1383 * @vid: the VLAN ID of the frame
@@ -1360,19 +1390,18 @@ void bla_free(struct bat_priv *bat_priv)
1360 * in these cases, the skb is further handled by this function and 1390 * in these cases, the skb is further handled by this function and
1361 * returns 1, otherwise it returns 0 and the caller shall further 1391 * returns 1, otherwise it returns 0 and the caller shall further
1362 * process the skb. 1392 * process the skb.
1363 *
1364 */ 1393 */
1365int bla_rx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid, 1394int batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb, short vid,
1366 bool is_bcast) 1395 bool is_bcast)
1367{ 1396{
1368 struct ethhdr *ethhdr; 1397 struct ethhdr *ethhdr;
1369 struct claim search_claim, *claim = NULL; 1398 struct batadv_claim search_claim, *claim = NULL;
1370 struct hard_iface *primary_if; 1399 struct batadv_hard_iface *primary_if;
1371 int ret; 1400 int ret;
1372 1401
1373 ethhdr = (struct ethhdr *)skb_mac_header(skb); 1402 ethhdr = (struct ethhdr *)skb_mac_header(skb);
1374 1403
1375 primary_if = primary_if_get_selected(bat_priv); 1404 primary_if = batadv_primary_if_get_selected(bat_priv);
1376 if (!primary_if) 1405 if (!primary_if)
1377 goto handled; 1406 goto handled;
1378 1407
@@ -1387,21 +1416,21 @@ int bla_rx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid,
1387 1416
1388 memcpy(search_claim.addr, ethhdr->h_source, ETH_ALEN); 1417 memcpy(search_claim.addr, ethhdr->h_source, ETH_ALEN);
1389 search_claim.vid = vid; 1418 search_claim.vid = vid;
1390 claim = claim_hash_find(bat_priv, &search_claim); 1419 claim = batadv_claim_hash_find(bat_priv, &search_claim);
1391 1420
1392 if (!claim) { 1421 if (!claim) {
1393 /* possible optimization: race for a claim */ 1422 /* possible optimization: race for a claim */
1394 /* No claim exists yet, claim it for us! 1423 /* No claim exists yet, claim it for us!
1395 */ 1424 */
1396 handle_claim(bat_priv, primary_if, 1425 batadv_handle_claim(bat_priv, primary_if,
1397 primary_if->net_dev->dev_addr, 1426 primary_if->net_dev->dev_addr,
1398 ethhdr->h_source, vid); 1427 ethhdr->h_source, vid);
1399 goto allow; 1428 goto allow;
1400 } 1429 }
1401 1430
1402 /* if it is our own claim ... */ 1431 /* if it is our own claim ... */
1403 if (compare_eth(claim->backbone_gw->orig, 1432 if (batadv_compare_eth(claim->backbone_gw->orig,
1404 primary_if->net_dev->dev_addr)) { 1433 primary_if->net_dev->dev_addr)) {
1405 /* ... allow it in any case */ 1434 /* ... allow it in any case */
1406 claim->lasttime = jiffies; 1435 claim->lasttime = jiffies;
1407 goto allow; 1436 goto allow;
@@ -1421,13 +1450,13 @@ int bla_rx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid,
1421 * send a claim and update the claim table 1450 * send a claim and update the claim table
1422 * immediately. 1451 * immediately.
1423 */ 1452 */
1424 handle_claim(bat_priv, primary_if, 1453 batadv_handle_claim(bat_priv, primary_if,
1425 primary_if->net_dev->dev_addr, 1454 primary_if->net_dev->dev_addr,
1426 ethhdr->h_source, vid); 1455 ethhdr->h_source, vid);
1427 goto allow; 1456 goto allow;
1428 } 1457 }
1429allow: 1458allow:
1430 bla_update_own_backbone_gw(bat_priv, primary_if, vid); 1459 batadv_bla_update_own_backbone_gw(bat_priv, primary_if, vid);
1431 ret = 0; 1460 ret = 0;
1432 goto out; 1461 goto out;
1433 1462
@@ -1437,13 +1466,14 @@ handled:
1437 1466
1438out: 1467out:
1439 if (primary_if) 1468 if (primary_if)
1440 hardif_free_ref(primary_if); 1469 batadv_hardif_free_ref(primary_if);
1441 if (claim) 1470 if (claim)
1442 claim_free_ref(claim); 1471 batadv_claim_free_ref(claim);
1443 return ret; 1472 return ret;
1444} 1473}
1445 1474
1446/** 1475/**
1476 * batadv_bla_tx
1447 * @bat_priv: the bat priv with all the soft interface information 1477 * @bat_priv: the bat priv with all the soft interface information
1448 * @skb: the frame to be checked 1478 * @skb: the frame to be checked
1449 * @vid: the VLAN ID of the frame 1479 * @vid: the VLAN ID of the frame
@@ -1455,16 +1485,15 @@ out:
1455 * in these cases, the skb is further handled by this function and 1485 * in these cases, the skb is further handled by this function and
1456 * returns 1, otherwise it returns 0 and the caller shall further 1486 * returns 1, otherwise it returns 0 and the caller shall further
1457 * process the skb. 1487 * process the skb.
1458 *
1459 */ 1488 */
1460int bla_tx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid) 1489int batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb, short vid)
1461{ 1490{
1462 struct ethhdr *ethhdr; 1491 struct ethhdr *ethhdr;
1463 struct claim search_claim, *claim = NULL; 1492 struct batadv_claim search_claim, *claim = NULL;
1464 struct hard_iface *primary_if; 1493 struct batadv_hard_iface *primary_if;
1465 int ret = 0; 1494 int ret = 0;
1466 1495
1467 primary_if = primary_if_get_selected(bat_priv); 1496 primary_if = batadv_primary_if_get_selected(bat_priv);
1468 if (!primary_if) 1497 if (!primary_if)
1469 goto out; 1498 goto out;
1470 1499
@@ -1474,7 +1503,7 @@ int bla_tx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid)
1474 /* in VLAN case, the mac header might not be set. */ 1503 /* in VLAN case, the mac header might not be set. */
1475 skb_reset_mac_header(skb); 1504 skb_reset_mac_header(skb);
1476 1505
1477 if (bla_process_claim(bat_priv, primary_if, skb)) 1506 if (batadv_bla_process_claim(bat_priv, primary_if, skb))
1478 goto handled; 1507 goto handled;
1479 1508
1480 ethhdr = (struct ethhdr *)skb_mac_header(skb); 1509 ethhdr = (struct ethhdr *)skb_mac_header(skb);
@@ -1487,21 +1516,21 @@ int bla_tx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid)
1487 memcpy(search_claim.addr, ethhdr->h_source, ETH_ALEN); 1516 memcpy(search_claim.addr, ethhdr->h_source, ETH_ALEN);
1488 search_claim.vid = vid; 1517 search_claim.vid = vid;
1489 1518
1490 claim = claim_hash_find(bat_priv, &search_claim); 1519 claim = batadv_claim_hash_find(bat_priv, &search_claim);
1491 1520
1492 /* if no claim exists, allow it. */ 1521 /* if no claim exists, allow it. */
1493 if (!claim) 1522 if (!claim)
1494 goto allow; 1523 goto allow;
1495 1524
1496 /* check if we are responsible. */ 1525 /* check if we are responsible. */
1497 if (compare_eth(claim->backbone_gw->orig, 1526 if (batadv_compare_eth(claim->backbone_gw->orig,
1498 primary_if->net_dev->dev_addr)) { 1527 primary_if->net_dev->dev_addr)) {
1499 /* if yes, the client has roamed and we have 1528 /* if yes, the client has roamed and we have
1500 * to unclaim it. 1529 * to unclaim it.
1501 */ 1530 */
1502 handle_unclaim(bat_priv, primary_if, 1531 batadv_handle_unclaim(bat_priv, primary_if,
1503 primary_if->net_dev->dev_addr, 1532 primary_if->net_dev->dev_addr,
1504 ethhdr->h_source, vid); 1533 ethhdr->h_source, vid);
1505 goto allow; 1534 goto allow;
1506 } 1535 }
1507 1536
@@ -1518,33 +1547,34 @@ int bla_tx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid)
1518 goto allow; 1547 goto allow;
1519 } 1548 }
1520allow: 1549allow:
1521 bla_update_own_backbone_gw(bat_priv, primary_if, vid); 1550 batadv_bla_update_own_backbone_gw(bat_priv, primary_if, vid);
1522 ret = 0; 1551 ret = 0;
1523 goto out; 1552 goto out;
1524handled: 1553handled:
1525 ret = 1; 1554 ret = 1;
1526out: 1555out:
1527 if (primary_if) 1556 if (primary_if)
1528 hardif_free_ref(primary_if); 1557 batadv_hardif_free_ref(primary_if);
1529 if (claim) 1558 if (claim)
1530 claim_free_ref(claim); 1559 batadv_claim_free_ref(claim);
1531 return ret; 1560 return ret;
1532} 1561}
1533 1562
1534int bla_claim_table_seq_print_text(struct seq_file *seq, void *offset) 1563int batadv_bla_claim_table_seq_print_text(struct seq_file *seq, void *offset)
1535{ 1564{
1536 struct net_device *net_dev = (struct net_device *)seq->private; 1565 struct net_device *net_dev = (struct net_device *)seq->private;
1537 struct bat_priv *bat_priv = netdev_priv(net_dev); 1566 struct batadv_priv *bat_priv = netdev_priv(net_dev);
1538 struct hashtable_t *hash = bat_priv->claim_hash; 1567 struct batadv_hashtable *hash = bat_priv->claim_hash;
1539 struct claim *claim; 1568 struct batadv_claim *claim;
1540 struct hard_iface *primary_if; 1569 struct batadv_hard_iface *primary_if;
1541 struct hlist_node *node; 1570 struct hlist_node *node;
1542 struct hlist_head *head; 1571 struct hlist_head *head;
1543 uint32_t i; 1572 uint32_t i;
1544 bool is_own; 1573 bool is_own;
1545 int ret = 0; 1574 int ret = 0;
1575 uint8_t *primary_addr;
1546 1576
1547 primary_if = primary_if_get_selected(bat_priv); 1577 primary_if = batadv_primary_if_get_selected(bat_priv);
1548 if (!primary_if) { 1578 if (!primary_if) {
1549 ret = seq_printf(seq, 1579 ret = seq_printf(seq,
1550 "BATMAN mesh %s disabled - please specify interfaces to enable it\n", 1580 "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
@@ -1552,16 +1582,17 @@ int bla_claim_table_seq_print_text(struct seq_file *seq, void *offset)
1552 goto out; 1582 goto out;
1553 } 1583 }
1554 1584
1555 if (primary_if->if_status != IF_ACTIVE) { 1585 if (primary_if->if_status != BATADV_IF_ACTIVE) {
1556 ret = seq_printf(seq, 1586 ret = seq_printf(seq,
1557 "BATMAN mesh %s disabled - primary interface not active\n", 1587 "BATMAN mesh %s disabled - primary interface not active\n",
1558 net_dev->name); 1588 net_dev->name);
1559 goto out; 1589 goto out;
1560 } 1590 }
1561 1591
1592 primary_addr = primary_if->net_dev->dev_addr;
1562 seq_printf(seq, 1593 seq_printf(seq,
1563 "Claims announced for the mesh %s (orig %pM, group id %04x)\n", 1594 "Claims announced for the mesh %s (orig %pM, group id %04x)\n",
1564 net_dev->name, primary_if->net_dev->dev_addr, 1595 net_dev->name, primary_addr,
1565 ntohs(bat_priv->claim_dest.group)); 1596 ntohs(bat_priv->claim_dest.group));
1566 seq_printf(seq, " %-17s %-5s %-17s [o] (%-4s)\n", 1597 seq_printf(seq, " %-17s %-5s %-17s [o] (%-4s)\n",
1567 "Client", "VID", "Originator", "CRC"); 1598 "Client", "VID", "Originator", "CRC");
@@ -1570,8 +1601,8 @@ int bla_claim_table_seq_print_text(struct seq_file *seq, void *offset)
1570 1601
1571 rcu_read_lock(); 1602 rcu_read_lock();
1572 hlist_for_each_entry_rcu(claim, node, head, hash_entry) { 1603 hlist_for_each_entry_rcu(claim, node, head, hash_entry) {
1573 is_own = compare_eth(claim->backbone_gw->orig, 1604 is_own = batadv_compare_eth(claim->backbone_gw->orig,
1574 primary_if->net_dev->dev_addr); 1605 primary_addr);
1575 seq_printf(seq, " * %pM on % 5d by %pM [%c] (%04x)\n", 1606 seq_printf(seq, " * %pM on % 5d by %pM [%c] (%04x)\n",
1576 claim->addr, claim->vid, 1607 claim->addr, claim->vid,
1577 claim->backbone_gw->orig, 1608 claim->backbone_gw->orig,
@@ -1582,6 +1613,6 @@ int bla_claim_table_seq_print_text(struct seq_file *seq, void *offset)
1582 } 1613 }
1583out: 1614out:
1584 if (primary_if) 1615 if (primary_if)
1585 hardif_free_ref(primary_if); 1616 batadv_hardif_free_ref(primary_if);
1586 return ret; 1617 return ret;
1587} 1618}
diff --git a/net/batman-adv/bridge_loop_avoidance.h b/net/batman-adv/bridge_loop_avoidance.h
index dc5227b398d..563cfbf94a7 100644
--- a/net/batman-adv/bridge_loop_avoidance.h
+++ b/net/batman-adv/bridge_loop_avoidance.h
@@ -1,5 +1,4 @@
1/* 1/* Copyright (C) 2011-2012 B.A.T.M.A.N. contributors:
2 * Copyright (C) 2011-2012 B.A.T.M.A.N. contributors:
3 * 2 *
4 * Simon Wunderlich 3 * Simon Wunderlich
5 * 4 *
@@ -16,81 +15,84 @@
16 * along with this program; if not, write to the Free Software 15 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA 17 * 02110-1301, USA
19 *
20 */ 18 */
21 19
22#ifndef _NET_BATMAN_ADV_BLA_H_ 20#ifndef _NET_BATMAN_ADV_BLA_H_
23#define _NET_BATMAN_ADV_BLA_H_ 21#define _NET_BATMAN_ADV_BLA_H_
24 22
25#ifdef CONFIG_BATMAN_ADV_BLA 23#ifdef CONFIG_BATMAN_ADV_BLA
26int bla_rx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid, 24int batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb, short vid,
27 bool is_bcast); 25 bool is_bcast);
28int bla_tx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid); 26int batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb, short vid);
29int bla_is_backbone_gw(struct sk_buff *skb, 27int batadv_bla_is_backbone_gw(struct sk_buff *skb,
30 struct orig_node *orig_node, int hdr_size); 28 struct batadv_orig_node *orig_node, int hdr_size);
31int bla_claim_table_seq_print_text(struct seq_file *seq, void *offset); 29int batadv_bla_claim_table_seq_print_text(struct seq_file *seq, void *offset);
32int bla_is_backbone_gw_orig(struct bat_priv *bat_priv, uint8_t *orig); 30int batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, uint8_t *orig);
33int bla_check_bcast_duplist(struct bat_priv *bat_priv, 31int batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
34 struct bcast_packet *bcast_packet, int hdr_size); 32 struct batadv_bcast_packet *bcast_packet,
35void bla_update_orig_address(struct bat_priv *bat_priv, 33 int hdr_size);
36 struct hard_iface *primary_if, 34void batadv_bla_update_orig_address(struct batadv_priv *bat_priv,
37 struct hard_iface *oldif); 35 struct batadv_hard_iface *primary_if,
38int bla_init(struct bat_priv *bat_priv); 36 struct batadv_hard_iface *oldif);
39void bla_free(struct bat_priv *bat_priv); 37int batadv_bla_init(struct batadv_priv *bat_priv);
38void batadv_bla_free(struct batadv_priv *bat_priv);
40 39
41#define BLA_CRC_INIT 0 40#define BATADV_BLA_CRC_INIT 0
42#else /* ifdef CONFIG_BATMAN_ADV_BLA */ 41#else /* ifdef CONFIG_BATMAN_ADV_BLA */
43 42
44static inline int bla_rx(struct bat_priv *bat_priv, struct sk_buff *skb, 43static inline int batadv_bla_rx(struct batadv_priv *bat_priv,
45 short vid, bool is_bcast) 44 struct sk_buff *skb, short vid,
45 bool is_bcast)
46{ 46{
47 return 0; 47 return 0;
48} 48}
49 49
50static inline int bla_tx(struct bat_priv *bat_priv, struct sk_buff *skb, 50static inline int batadv_bla_tx(struct batadv_priv *bat_priv,
51 short vid) 51 struct sk_buff *skb, short vid)
52{ 52{
53 return 0; 53 return 0;
54} 54}
55 55
56static inline int bla_is_backbone_gw(struct sk_buff *skb, 56static inline int batadv_bla_is_backbone_gw(struct sk_buff *skb,
57 struct orig_node *orig_node, 57 struct batadv_orig_node *orig_node,
58 int hdr_size) 58 int hdr_size)
59{ 59{
60 return 0; 60 return 0;
61} 61}
62 62
63static inline int bla_claim_table_seq_print_text(struct seq_file *seq, 63static inline int batadv_bla_claim_table_seq_print_text(struct seq_file *seq,
64 void *offset) 64 void *offset)
65{ 65{
66 return 0; 66 return 0;
67} 67}
68 68
69static inline int bla_is_backbone_gw_orig(struct bat_priv *bat_priv, 69static inline int batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv,
70 uint8_t *orig) 70 uint8_t *orig)
71{ 71{
72 return 0; 72 return 0;
73} 73}
74 74
75static inline int bla_check_bcast_duplist(struct bat_priv *bat_priv, 75static inline int
76 struct bcast_packet *bcast_packet, 76batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
77 int hdr_size) 77 struct batadv_bcast_packet *bcast_packet,
78 int hdr_size)
78{ 79{
79 return 0; 80 return 0;
80} 81}
81 82
82static inline void bla_update_orig_address(struct bat_priv *bat_priv, 83static inline void
83 struct hard_iface *primary_if, 84batadv_bla_update_orig_address(struct batadv_priv *bat_priv,
84 struct hard_iface *oldif) 85 struct batadv_hard_iface *primary_if,
86 struct batadv_hard_iface *oldif)
85{ 87{
86} 88}
87 89
88static inline int bla_init(struct bat_priv *bat_priv) 90static inline int batadv_bla_init(struct batadv_priv *bat_priv)
89{ 91{
90 return 1; 92 return 1;
91} 93}
92 94
93static inline void bla_free(struct bat_priv *bat_priv) 95static inline void batadv_bla_free(struct batadv_priv *bat_priv)
94{ 96{
95} 97}
96 98
diff --git a/net/batman-adv/debugfs.c b/net/batman-adv/debugfs.c
new file mode 100644
index 00000000000..34fbb1667bc
--- /dev/null
+++ b/net/batman-adv/debugfs.c
@@ -0,0 +1,409 @@
1/* Copyright (C) 2010-2012 B.A.T.M.A.N. contributors:
2 *
3 * Marek Lindner
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of version 2 of the GNU General Public
7 * License as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
17 * 02110-1301, USA
18 */
19
20#include "main.h"
21
22#include <linux/debugfs.h>
23
24#include "debugfs.h"
25#include "translation-table.h"
26#include "originator.h"
27#include "hard-interface.h"
28#include "gateway_common.h"
29#include "gateway_client.h"
30#include "soft-interface.h"
31#include "vis.h"
32#include "icmp_socket.h"
33#include "bridge_loop_avoidance.h"
34
35static struct dentry *batadv_debugfs;
36
37#ifdef CONFIG_BATMAN_ADV_DEBUG
38#define BATADV_LOG_BUFF_MASK (batadv_log_buff_len - 1)
39
40static const int batadv_log_buff_len = BATADV_LOG_BUF_LEN;
41
42static char *batadv_log_char_addr(struct batadv_debug_log *debug_log,
43 size_t idx)
44{
45 return &debug_log->log_buff[idx & BATADV_LOG_BUFF_MASK];
46}
47
48static void batadv_emit_log_char(struct batadv_debug_log *debug_log, char c)
49{
50 char *char_addr;
51
52 char_addr = batadv_log_char_addr(debug_log, debug_log->log_end);
53 *char_addr = c;
54 debug_log->log_end++;
55
56 if (debug_log->log_end - debug_log->log_start > batadv_log_buff_len)
57 debug_log->log_start = debug_log->log_end - batadv_log_buff_len;
58}
59
60__printf(2, 3)
61static int batadv_fdebug_log(struct batadv_debug_log *debug_log,
62 const char *fmt, ...)
63{
64 va_list args;
65 static char debug_log_buf[256];
66 char *p;
67
68 if (!debug_log)
69 return 0;
70
71 spin_lock_bh(&debug_log->lock);
72 va_start(args, fmt);
73 vscnprintf(debug_log_buf, sizeof(debug_log_buf), fmt, args);
74 va_end(args);
75
76 for (p = debug_log_buf; *p != 0; p++)
77 batadv_emit_log_char(debug_log, *p);
78
79 spin_unlock_bh(&debug_log->lock);
80
81 wake_up(&debug_log->queue_wait);
82
83 return 0;
84}
85
86int batadv_debug_log(struct batadv_priv *bat_priv, const char *fmt, ...)
87{
88 va_list args;
89 char tmp_log_buf[256];
90
91 va_start(args, fmt);
92 vscnprintf(tmp_log_buf, sizeof(tmp_log_buf), fmt, args);
93 batadv_fdebug_log(bat_priv->debug_log, "[%10u] %s",
94 jiffies_to_msecs(jiffies), tmp_log_buf);
95 va_end(args);
96
97 return 0;
98}
99
100static int batadv_log_open(struct inode *inode, struct file *file)
101{
102 nonseekable_open(inode, file);
103 file->private_data = inode->i_private;
104 batadv_inc_module_count();
105 return 0;
106}
107
108static int batadv_log_release(struct inode *inode, struct file *file)
109{
110 batadv_dec_module_count();
111 return 0;
112}
113
114static int batadv_log_empty(struct batadv_debug_log *debug_log)
115{
116 return !(debug_log->log_start - debug_log->log_end);
117}
118
119static ssize_t batadv_log_read(struct file *file, char __user *buf,
120 size_t count, loff_t *ppos)
121{
122 struct batadv_priv *bat_priv = file->private_data;
123 struct batadv_debug_log *debug_log = bat_priv->debug_log;
124 int error, i = 0;
125 char *char_addr;
126 char c;
127
128 if ((file->f_flags & O_NONBLOCK) && batadv_log_empty(debug_log))
129 return -EAGAIN;
130
131 if (!buf)
132 return -EINVAL;
133
134 if (count == 0)
135 return 0;
136
137 if (!access_ok(VERIFY_WRITE, buf, count))
138 return -EFAULT;
139
140 error = wait_event_interruptible(debug_log->queue_wait,
141 (!batadv_log_empty(debug_log)));
142
143 if (error)
144 return error;
145
146 spin_lock_bh(&debug_log->lock);
147
148 while ((!error) && (i < count) &&
149 (debug_log->log_start != debug_log->log_end)) {
150 char_addr = batadv_log_char_addr(debug_log,
151 debug_log->log_start);
152 c = *char_addr;
153
154 debug_log->log_start++;
155
156 spin_unlock_bh(&debug_log->lock);
157
158 error = __put_user(c, buf);
159
160 spin_lock_bh(&debug_log->lock);
161
162 buf++;
163 i++;
164
165 }
166
167 spin_unlock_bh(&debug_log->lock);
168
169 if (!error)
170 return i;
171
172 return error;
173}
174
175static unsigned int batadv_log_poll(struct file *file, poll_table *wait)
176{
177 struct batadv_priv *bat_priv = file->private_data;
178 struct batadv_debug_log *debug_log = bat_priv->debug_log;
179
180 poll_wait(file, &debug_log->queue_wait, wait);
181
182 if (!batadv_log_empty(debug_log))
183 return POLLIN | POLLRDNORM;
184
185 return 0;
186}
187
188static const struct file_operations batadv_log_fops = {
189 .open = batadv_log_open,
190 .release = batadv_log_release,
191 .read = batadv_log_read,
192 .poll = batadv_log_poll,
193 .llseek = no_llseek,
194};
195
196static int batadv_debug_log_setup(struct batadv_priv *bat_priv)
197{
198 struct dentry *d;
199
200 if (!bat_priv->debug_dir)
201 goto err;
202
203 bat_priv->debug_log = kzalloc(sizeof(*bat_priv->debug_log), GFP_ATOMIC);
204 if (!bat_priv->debug_log)
205 goto err;
206
207 spin_lock_init(&bat_priv->debug_log->lock);
208 init_waitqueue_head(&bat_priv->debug_log->queue_wait);
209
210 d = debugfs_create_file("log", S_IFREG | S_IRUSR,
211 bat_priv->debug_dir, bat_priv,
212 &batadv_log_fops);
213 if (!d)
214 goto err;
215
216 return 0;
217
218err:
219 return -ENOMEM;
220}
221
222static void batadv_debug_log_cleanup(struct batadv_priv *bat_priv)
223{
224 kfree(bat_priv->debug_log);
225 bat_priv->debug_log = NULL;
226}
227#else /* CONFIG_BATMAN_ADV_DEBUG */
228static int batadv_debug_log_setup(struct batadv_priv *bat_priv)
229{
230 bat_priv->debug_log = NULL;
231 return 0;
232}
233
234static void batadv_debug_log_cleanup(struct batadv_priv *bat_priv)
235{
236 return;
237}
238#endif
239
240static int batadv_algorithms_open(struct inode *inode, struct file *file)
241{
242 return single_open(file, batadv_algo_seq_print_text, NULL);
243}
244
245static int batadv_originators_open(struct inode *inode, struct file *file)
246{
247 struct net_device *net_dev = (struct net_device *)inode->i_private;
248 return single_open(file, batadv_orig_seq_print_text, net_dev);
249}
250
251static int batadv_gateways_open(struct inode *inode, struct file *file)
252{
253 struct net_device *net_dev = (struct net_device *)inode->i_private;
254 return single_open(file, batadv_gw_client_seq_print_text, net_dev);
255}
256
257static int batadv_transtable_global_open(struct inode *inode, struct file *file)
258{
259 struct net_device *net_dev = (struct net_device *)inode->i_private;
260 return single_open(file, batadv_tt_global_seq_print_text, net_dev);
261}
262
263#ifdef CONFIG_BATMAN_ADV_BLA
264static int batadv_bla_claim_table_open(struct inode *inode, struct file *file)
265{
266 struct net_device *net_dev = (struct net_device *)inode->i_private;
267 return single_open(file, batadv_bla_claim_table_seq_print_text,
268 net_dev);
269}
270#endif
271
272static int batadv_transtable_local_open(struct inode *inode, struct file *file)
273{
274 struct net_device *net_dev = (struct net_device *)inode->i_private;
275 return single_open(file, batadv_tt_local_seq_print_text, net_dev);
276}
277
278static int batadv_vis_data_open(struct inode *inode, struct file *file)
279{
280 struct net_device *net_dev = (struct net_device *)inode->i_private;
281 return single_open(file, batadv_vis_seq_print_text, net_dev);
282}
283
284struct batadv_debuginfo {
285 struct attribute attr;
286 const struct file_operations fops;
287};
288
289#define BATADV_DEBUGINFO(_name, _mode, _open) \
290struct batadv_debuginfo batadv_debuginfo_##_name = { \
291 .attr = { .name = __stringify(_name), \
292 .mode = _mode, }, \
293 .fops = { .owner = THIS_MODULE, \
294 .open = _open, \
295 .read = seq_read, \
296 .llseek = seq_lseek, \
297 .release = single_release, \
298 } \
299};
300
301static BATADV_DEBUGINFO(routing_algos, S_IRUGO, batadv_algorithms_open);
302static BATADV_DEBUGINFO(originators, S_IRUGO, batadv_originators_open);
303static BATADV_DEBUGINFO(gateways, S_IRUGO, batadv_gateways_open);
304static BATADV_DEBUGINFO(transtable_global, S_IRUGO,
305 batadv_transtable_global_open);
306#ifdef CONFIG_BATMAN_ADV_BLA
307static BATADV_DEBUGINFO(bla_claim_table, S_IRUGO, batadv_bla_claim_table_open);
308#endif
309static BATADV_DEBUGINFO(transtable_local, S_IRUGO,
310 batadv_transtable_local_open);
311static BATADV_DEBUGINFO(vis_data, S_IRUGO, batadv_vis_data_open);
312
313static struct batadv_debuginfo *batadv_mesh_debuginfos[] = {
314 &batadv_debuginfo_originators,
315 &batadv_debuginfo_gateways,
316 &batadv_debuginfo_transtable_global,
317#ifdef CONFIG_BATMAN_ADV_BLA
318 &batadv_debuginfo_bla_claim_table,
319#endif
320 &batadv_debuginfo_transtable_local,
321 &batadv_debuginfo_vis_data,
322 NULL,
323};
324
325void batadv_debugfs_init(void)
326{
327 struct batadv_debuginfo *bat_debug;
328 struct dentry *file;
329
330 batadv_debugfs = debugfs_create_dir(BATADV_DEBUGFS_SUBDIR, NULL);
331 if (batadv_debugfs == ERR_PTR(-ENODEV))
332 batadv_debugfs = NULL;
333
334 if (!batadv_debugfs)
335 goto out;
336
337 bat_debug = &batadv_debuginfo_routing_algos;
338 file = debugfs_create_file(bat_debug->attr.name,
339 S_IFREG | bat_debug->attr.mode,
340 batadv_debugfs, NULL, &bat_debug->fops);
341 if (!file)
342 pr_err("Can't add debugfs file: %s\n", bat_debug->attr.name);
343
344out:
345 return;
346}
347
348void batadv_debugfs_destroy(void)
349{
350 if (batadv_debugfs) {
351 debugfs_remove_recursive(batadv_debugfs);
352 batadv_debugfs = NULL;
353 }
354}
355
356int batadv_debugfs_add_meshif(struct net_device *dev)
357{
358 struct batadv_priv *bat_priv = netdev_priv(dev);
359 struct batadv_debuginfo **bat_debug;
360 struct dentry *file;
361
362 if (!batadv_debugfs)
363 goto out;
364
365 bat_priv->debug_dir = debugfs_create_dir(dev->name, batadv_debugfs);
366 if (!bat_priv->debug_dir)
367 goto out;
368
369 if (batadv_socket_setup(bat_priv) < 0)
370 goto rem_attr;
371
372 if (batadv_debug_log_setup(bat_priv) < 0)
373 goto rem_attr;
374
375 for (bat_debug = batadv_mesh_debuginfos; *bat_debug; ++bat_debug) {
376 file = debugfs_create_file(((*bat_debug)->attr).name,
377 S_IFREG | ((*bat_debug)->attr).mode,
378 bat_priv->debug_dir,
379 dev, &(*bat_debug)->fops);
380 if (!file) {
381 batadv_err(dev, "Can't add debugfs file: %s/%s\n",
382 dev->name, ((*bat_debug)->attr).name);
383 goto rem_attr;
384 }
385 }
386
387 return 0;
388rem_attr:
389 debugfs_remove_recursive(bat_priv->debug_dir);
390 bat_priv->debug_dir = NULL;
391out:
392#ifdef CONFIG_DEBUG_FS
393 return -ENOMEM;
394#else
395 return 0;
396#endif /* CONFIG_DEBUG_FS */
397}
398
399void batadv_debugfs_del_meshif(struct net_device *dev)
400{
401 struct batadv_priv *bat_priv = netdev_priv(dev);
402
403 batadv_debug_log_cleanup(bat_priv);
404
405 if (batadv_debugfs) {
406 debugfs_remove_recursive(bat_priv->debug_dir);
407 bat_priv->debug_dir = NULL;
408 }
409}
diff --git a/net/batman-adv/bat_debugfs.h b/net/batman-adv/debugfs.h
index d605c674642..3319e1f21f5 100644
--- a/net/batman-adv/bat_debugfs.h
+++ b/net/batman-adv/debugfs.h
@@ -1,5 +1,4 @@
1/* 1/* Copyright (C) 2010-2012 B.A.T.M.A.N. contributors:
2 * Copyright (C) 2010-2012 B.A.T.M.A.N. contributors:
3 * 2 *
4 * Marek Lindner 3 * Marek Lindner
5 * 4 *
@@ -16,18 +15,16 @@
16 * along with this program; if not, write to the Free Software 15 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA 17 * 02110-1301, USA
19 *
20 */ 18 */
21 19
22
23#ifndef _NET_BATMAN_ADV_DEBUGFS_H_ 20#ifndef _NET_BATMAN_ADV_DEBUGFS_H_
24#define _NET_BATMAN_ADV_DEBUGFS_H_ 21#define _NET_BATMAN_ADV_DEBUGFS_H_
25 22
26#define DEBUGFS_BAT_SUBDIR "batman_adv" 23#define BATADV_DEBUGFS_SUBDIR "batman_adv"
27 24
28void debugfs_init(void); 25void batadv_debugfs_init(void);
29void debugfs_destroy(void); 26void batadv_debugfs_destroy(void);
30int debugfs_add_meshif(struct net_device *dev); 27int batadv_debugfs_add_meshif(struct net_device *dev);
31void debugfs_del_meshif(struct net_device *dev); 28void batadv_debugfs_del_meshif(struct net_device *dev);
32 29
33#endif /* _NET_BATMAN_ADV_DEBUGFS_H_ */ 30#endif /* _NET_BATMAN_ADV_DEBUGFS_H_ */
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
index 47f7186dcef..b421cc49d2c 100644
--- a/net/batman-adv/gateway_client.c
+++ b/net/batman-adv/gateway_client.c
@@ -1,5 +1,4 @@
1/* 1/* Copyright (C) 2009-2012 B.A.T.M.A.N. contributors:
2 * Copyright (C) 2009-2012 B.A.T.M.A.N. contributors:
3 * 2 *
4 * Marek Lindner 3 * Marek Lindner
5 * 4 *
@@ -16,11 +15,10 @@
16 * along with this program; if not, write to the Free Software 15 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA 17 * 02110-1301, USA
19 *
20 */ 18 */
21 19
22#include "main.h" 20#include "main.h"
23#include "bat_sysfs.h" 21#include "sysfs.h"
24#include "gateway_client.h" 22#include "gateway_client.h"
25#include "gateway_common.h" 23#include "gateway_common.h"
26#include "hard-interface.h" 24#include "hard-interface.h"
@@ -33,19 +31,21 @@
33#include <linux/if_vlan.h> 31#include <linux/if_vlan.h>
34 32
35/* This is the offset of the options field in a dhcp packet starting at 33/* This is the offset of the options field in a dhcp packet starting at
36 * the beginning of the dhcp header */ 34 * the beginning of the dhcp header
37#define DHCP_OPTIONS_OFFSET 240 35 */
38#define DHCP_REQUEST 3 36#define BATADV_DHCP_OPTIONS_OFFSET 240
37#define BATADV_DHCP_REQUEST 3
39 38
40static void gw_node_free_ref(struct gw_node *gw_node) 39static void batadv_gw_node_free_ref(struct batadv_gw_node *gw_node)
41{ 40{
42 if (atomic_dec_and_test(&gw_node->refcount)) 41 if (atomic_dec_and_test(&gw_node->refcount))
43 kfree_rcu(gw_node, rcu); 42 kfree_rcu(gw_node, rcu);
44} 43}
45 44
46static struct gw_node *gw_get_selected_gw_node(struct bat_priv *bat_priv) 45static struct batadv_gw_node *
46batadv_gw_get_selected_gw_node(struct batadv_priv *bat_priv)
47{ 47{
48 struct gw_node *gw_node; 48 struct batadv_gw_node *gw_node;
49 49
50 rcu_read_lock(); 50 rcu_read_lock();
51 gw_node = rcu_dereference(bat_priv->curr_gw); 51 gw_node = rcu_dereference(bat_priv->curr_gw);
@@ -60,12 +60,13 @@ out:
60 return gw_node; 60 return gw_node;
61} 61}
62 62
63struct orig_node *gw_get_selected_orig(struct bat_priv *bat_priv) 63struct batadv_orig_node *
64batadv_gw_get_selected_orig(struct batadv_priv *bat_priv)
64{ 65{
65 struct gw_node *gw_node; 66 struct batadv_gw_node *gw_node;
66 struct orig_node *orig_node = NULL; 67 struct batadv_orig_node *orig_node = NULL;
67 68
68 gw_node = gw_get_selected_gw_node(bat_priv); 69 gw_node = batadv_gw_get_selected_gw_node(bat_priv);
69 if (!gw_node) 70 if (!gw_node)
70 goto out; 71 goto out;
71 72
@@ -81,13 +82,14 @@ unlock:
81 rcu_read_unlock(); 82 rcu_read_unlock();
82out: 83out:
83 if (gw_node) 84 if (gw_node)
84 gw_node_free_ref(gw_node); 85 batadv_gw_node_free_ref(gw_node);
85 return orig_node; 86 return orig_node;
86} 87}
87 88
88static void gw_select(struct bat_priv *bat_priv, struct gw_node *new_gw_node) 89static void batadv_gw_select(struct batadv_priv *bat_priv,
90 struct batadv_gw_node *new_gw_node)
89{ 91{
90 struct gw_node *curr_gw_node; 92 struct batadv_gw_node *curr_gw_node;
91 93
92 spin_lock_bh(&bat_priv->gw_list_lock); 94 spin_lock_bh(&bat_priv->gw_list_lock);
93 95
@@ -98,31 +100,34 @@ static void gw_select(struct bat_priv *bat_priv, struct gw_node *new_gw_node)
98 rcu_assign_pointer(bat_priv->curr_gw, new_gw_node); 100 rcu_assign_pointer(bat_priv->curr_gw, new_gw_node);
99 101
100 if (curr_gw_node) 102 if (curr_gw_node)
101 gw_node_free_ref(curr_gw_node); 103 batadv_gw_node_free_ref(curr_gw_node);
102 104
103 spin_unlock_bh(&bat_priv->gw_list_lock); 105 spin_unlock_bh(&bat_priv->gw_list_lock);
104} 106}
105 107
106void gw_deselect(struct bat_priv *bat_priv) 108void batadv_gw_deselect(struct batadv_priv *bat_priv)
107{ 109{
108 atomic_set(&bat_priv->gw_reselect, 1); 110 atomic_set(&bat_priv->gw_reselect, 1);
109} 111}
110 112
111static struct gw_node *gw_get_best_gw_node(struct bat_priv *bat_priv) 113static struct batadv_gw_node *
114batadv_gw_get_best_gw_node(struct batadv_priv *bat_priv)
112{ 115{
113 struct neigh_node *router; 116 struct batadv_neigh_node *router;
114 struct hlist_node *node; 117 struct hlist_node *node;
115 struct gw_node *gw_node, *curr_gw = NULL; 118 struct batadv_gw_node *gw_node, *curr_gw = NULL;
116 uint32_t max_gw_factor = 0, tmp_gw_factor = 0; 119 uint32_t max_gw_factor = 0, tmp_gw_factor = 0;
117 uint8_t max_tq = 0; 120 uint8_t max_tq = 0;
118 int down, up; 121 int down, up;
122 struct batadv_orig_node *orig_node;
119 123
120 rcu_read_lock(); 124 rcu_read_lock();
121 hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw_list, list) { 125 hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw_list, list) {
122 if (gw_node->deleted) 126 if (gw_node->deleted)
123 continue; 127 continue;
124 128
125 router = orig_node_get_router(gw_node->orig_node); 129 orig_node = gw_node->orig_node;
130 router = batadv_orig_node_get_router(orig_node);
126 if (!router) 131 if (!router)
127 continue; 132 continue;
128 133
@@ -131,35 +136,34 @@ static struct gw_node *gw_get_best_gw_node(struct bat_priv *bat_priv)
131 136
132 switch (atomic_read(&bat_priv->gw_sel_class)) { 137 switch (atomic_read(&bat_priv->gw_sel_class)) {
133 case 1: /* fast connection */ 138 case 1: /* fast connection */
134 gw_bandwidth_to_kbit(gw_node->orig_node->gw_flags, 139 batadv_gw_bandwidth_to_kbit(orig_node->gw_flags,
135 &down, &up); 140 &down, &up);
136 141
137 tmp_gw_factor = (router->tq_avg * router->tq_avg * 142 tmp_gw_factor = (router->tq_avg * router->tq_avg *
138 down * 100 * 100) / 143 down * 100 * 100) /
139 (TQ_LOCAL_WINDOW_SIZE * 144 (BATADV_TQ_LOCAL_WINDOW_SIZE *
140 TQ_LOCAL_WINDOW_SIZE * 64); 145 BATADV_TQ_LOCAL_WINDOW_SIZE * 64);
141 146
142 if ((tmp_gw_factor > max_gw_factor) || 147 if ((tmp_gw_factor > max_gw_factor) ||
143 ((tmp_gw_factor == max_gw_factor) && 148 ((tmp_gw_factor == max_gw_factor) &&
144 (router->tq_avg > max_tq))) { 149 (router->tq_avg > max_tq))) {
145 if (curr_gw) 150 if (curr_gw)
146 gw_node_free_ref(curr_gw); 151 batadv_gw_node_free_ref(curr_gw);
147 curr_gw = gw_node; 152 curr_gw = gw_node;
148 atomic_inc(&curr_gw->refcount); 153 atomic_inc(&curr_gw->refcount);
149 } 154 }
150 break; 155 break;
151 156
152 default: /** 157 default: /* 2: stable connection (use best statistic)
153 * 2: stable connection (use best statistic)
154 * 3: fast-switch (use best statistic but change as 158 * 3: fast-switch (use best statistic but change as
155 * soon as a better gateway appears) 159 * soon as a better gateway appears)
156 * XX: late-switch (use best statistic but change as 160 * XX: late-switch (use best statistic but change as
157 * soon as a better gateway appears which has 161 * soon as a better gateway appears which has
158 * $routing_class more tq points) 162 * $routing_class more tq points)
159 **/ 163 */
160 if (router->tq_avg > max_tq) { 164 if (router->tq_avg > max_tq) {
161 if (curr_gw) 165 if (curr_gw)
162 gw_node_free_ref(curr_gw); 166 batadv_gw_node_free_ref(curr_gw);
163 curr_gw = gw_node; 167 curr_gw = gw_node;
164 atomic_inc(&curr_gw->refcount); 168 atomic_inc(&curr_gw->refcount);
165 } 169 }
@@ -172,37 +176,36 @@ static struct gw_node *gw_get_best_gw_node(struct bat_priv *bat_priv)
172 if (tmp_gw_factor > max_gw_factor) 176 if (tmp_gw_factor > max_gw_factor)
173 max_gw_factor = tmp_gw_factor; 177 max_gw_factor = tmp_gw_factor;
174 178
175 gw_node_free_ref(gw_node); 179 batadv_gw_node_free_ref(gw_node);
176 180
177next: 181next:
178 neigh_node_free_ref(router); 182 batadv_neigh_node_free_ref(router);
179 } 183 }
180 rcu_read_unlock(); 184 rcu_read_unlock();
181 185
182 return curr_gw; 186 return curr_gw;
183} 187}
184 188
185void gw_election(struct bat_priv *bat_priv) 189void batadv_gw_election(struct batadv_priv *bat_priv)
186{ 190{
187 struct gw_node *curr_gw = NULL, *next_gw = NULL; 191 struct batadv_gw_node *curr_gw = NULL, *next_gw = NULL;
188 struct neigh_node *router = NULL; 192 struct batadv_neigh_node *router = NULL;
189 char gw_addr[18] = { '\0' }; 193 char gw_addr[18] = { '\0' };
190 194
191 /** 195 /* The batman daemon checks here if we already passed a full originator
192 * The batman daemon checks here if we already passed a full originator
193 * cycle in order to make sure we don't choose the first gateway we 196 * cycle in order to make sure we don't choose the first gateway we
194 * hear about. This check is based on the daemon's uptime which we 197 * hear about. This check is based on the daemon's uptime which we
195 * don't have. 198 * don't have.
196 **/ 199 */
197 if (atomic_read(&bat_priv->gw_mode) != GW_MODE_CLIENT) 200 if (atomic_read(&bat_priv->gw_mode) != BATADV_GW_MODE_CLIENT)
198 goto out; 201 goto out;
199 202
200 if (!atomic_dec_not_zero(&bat_priv->gw_reselect)) 203 if (!batadv_atomic_dec_not_zero(&bat_priv->gw_reselect))
201 goto out; 204 goto out;
202 205
203 curr_gw = gw_get_selected_gw_node(bat_priv); 206 curr_gw = batadv_gw_get_selected_gw_node(bat_priv);
204 207
205 next_gw = gw_get_best_gw_node(bat_priv); 208 next_gw = batadv_gw_get_best_gw_node(bat_priv);
206 209
207 if (curr_gw == next_gw) 210 if (curr_gw == next_gw)
208 goto out; 211 goto out;
@@ -210,53 +213,57 @@ void gw_election(struct bat_priv *bat_priv)
210 if (next_gw) { 213 if (next_gw) {
211 sprintf(gw_addr, "%pM", next_gw->orig_node->orig); 214 sprintf(gw_addr, "%pM", next_gw->orig_node->orig);
212 215
213 router = orig_node_get_router(next_gw->orig_node); 216 router = batadv_orig_node_get_router(next_gw->orig_node);
214 if (!router) { 217 if (!router) {
215 gw_deselect(bat_priv); 218 batadv_gw_deselect(bat_priv);
216 goto out; 219 goto out;
217 } 220 }
218 } 221 }
219 222
220 if ((curr_gw) && (!next_gw)) { 223 if ((curr_gw) && (!next_gw)) {
221 bat_dbg(DBG_BATMAN, bat_priv, 224 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
222 "Removing selected gateway - no gateway in range\n"); 225 "Removing selected gateway - no gateway in range\n");
223 throw_uevent(bat_priv, UEV_GW, UEV_DEL, NULL); 226 batadv_throw_uevent(bat_priv, BATADV_UEV_GW, BATADV_UEV_DEL,
227 NULL);
224 } else if ((!curr_gw) && (next_gw)) { 228 } else if ((!curr_gw) && (next_gw)) {
225 bat_dbg(DBG_BATMAN, bat_priv, 229 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
226 "Adding route to gateway %pM (gw_flags: %i, tq: %i)\n", 230 "Adding route to gateway %pM (gw_flags: %i, tq: %i)\n",
227 next_gw->orig_node->orig, next_gw->orig_node->gw_flags, 231 next_gw->orig_node->orig,
228 router->tq_avg); 232 next_gw->orig_node->gw_flags, router->tq_avg);
229 throw_uevent(bat_priv, UEV_GW, UEV_ADD, gw_addr); 233 batadv_throw_uevent(bat_priv, BATADV_UEV_GW, BATADV_UEV_ADD,
234 gw_addr);
230 } else { 235 } else {
231 bat_dbg(DBG_BATMAN, bat_priv, 236 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
232 "Changing route to gateway %pM (gw_flags: %i, tq: %i)\n", 237 "Changing route to gateway %pM (gw_flags: %i, tq: %i)\n",
233 next_gw->orig_node->orig, next_gw->orig_node->gw_flags, 238 next_gw->orig_node->orig,
234 router->tq_avg); 239 next_gw->orig_node->gw_flags, router->tq_avg);
235 throw_uevent(bat_priv, UEV_GW, UEV_CHANGE, gw_addr); 240 batadv_throw_uevent(bat_priv, BATADV_UEV_GW, BATADV_UEV_CHANGE,
241 gw_addr);
236 } 242 }
237 243
238 gw_select(bat_priv, next_gw); 244 batadv_gw_select(bat_priv, next_gw);
239 245
240out: 246out:
241 if (curr_gw) 247 if (curr_gw)
242 gw_node_free_ref(curr_gw); 248 batadv_gw_node_free_ref(curr_gw);
243 if (next_gw) 249 if (next_gw)
244 gw_node_free_ref(next_gw); 250 batadv_gw_node_free_ref(next_gw);
245 if (router) 251 if (router)
246 neigh_node_free_ref(router); 252 batadv_neigh_node_free_ref(router);
247} 253}
248 254
249void gw_check_election(struct bat_priv *bat_priv, struct orig_node *orig_node) 255void batadv_gw_check_election(struct batadv_priv *bat_priv,
256 struct batadv_orig_node *orig_node)
250{ 257{
251 struct orig_node *curr_gw_orig; 258 struct batadv_orig_node *curr_gw_orig;
252 struct neigh_node *router_gw = NULL, *router_orig = NULL; 259 struct batadv_neigh_node *router_gw = NULL, *router_orig = NULL;
253 uint8_t gw_tq_avg, orig_tq_avg; 260 uint8_t gw_tq_avg, orig_tq_avg;
254 261
255 curr_gw_orig = gw_get_selected_orig(bat_priv); 262 curr_gw_orig = batadv_gw_get_selected_orig(bat_priv);
256 if (!curr_gw_orig) 263 if (!curr_gw_orig)
257 goto deselect; 264 goto deselect;
258 265
259 router_gw = orig_node_get_router(curr_gw_orig); 266 router_gw = batadv_orig_node_get_router(curr_gw_orig);
260 if (!router_gw) 267 if (!router_gw)
261 goto deselect; 268 goto deselect;
262 269
@@ -264,7 +271,7 @@ void gw_check_election(struct bat_priv *bat_priv, struct orig_node *orig_node)
264 if (curr_gw_orig == orig_node) 271 if (curr_gw_orig == orig_node)
265 goto out; 272 goto out;
266 273
267 router_orig = orig_node_get_router(orig_node); 274 router_orig = batadv_orig_node_get_router(orig_node);
268 if (!router_orig) 275 if (!router_orig)
269 goto out; 276 goto out;
270 277
@@ -275,35 +282,35 @@ void gw_check_election(struct bat_priv *bat_priv, struct orig_node *orig_node)
275 if (orig_tq_avg < gw_tq_avg) 282 if (orig_tq_avg < gw_tq_avg)
276 goto out; 283 goto out;
277 284
278 /** 285 /* if the routing class is greater than 3 the value tells us how much
279 * if the routing class is greater than 3 the value tells us how much
280 * greater the TQ value of the new gateway must be 286 * greater the TQ value of the new gateway must be
281 **/ 287 */
282 if ((atomic_read(&bat_priv->gw_sel_class) > 3) && 288 if ((atomic_read(&bat_priv->gw_sel_class) > 3) &&
283 (orig_tq_avg - gw_tq_avg < atomic_read(&bat_priv->gw_sel_class))) 289 (orig_tq_avg - gw_tq_avg < atomic_read(&bat_priv->gw_sel_class)))
284 goto out; 290 goto out;
285 291
286 bat_dbg(DBG_BATMAN, bat_priv, 292 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
287 "Restarting gateway selection: better gateway found (tq curr: %i, tq new: %i)\n", 293 "Restarting gateway selection: better gateway found (tq curr: %i, tq new: %i)\n",
288 gw_tq_avg, orig_tq_avg); 294 gw_tq_avg, orig_tq_avg);
289 295
290deselect: 296deselect:
291 gw_deselect(bat_priv); 297 batadv_gw_deselect(bat_priv);
292out: 298out:
293 if (curr_gw_orig) 299 if (curr_gw_orig)
294 orig_node_free_ref(curr_gw_orig); 300 batadv_orig_node_free_ref(curr_gw_orig);
295 if (router_gw) 301 if (router_gw)
296 neigh_node_free_ref(router_gw); 302 batadv_neigh_node_free_ref(router_gw);
297 if (router_orig) 303 if (router_orig)
298 neigh_node_free_ref(router_orig); 304 batadv_neigh_node_free_ref(router_orig);
299 305
300 return; 306 return;
301} 307}
302 308
303static void gw_node_add(struct bat_priv *bat_priv, 309static void batadv_gw_node_add(struct batadv_priv *bat_priv,
304 struct orig_node *orig_node, uint8_t new_gwflags) 310 struct batadv_orig_node *orig_node,
311 uint8_t new_gwflags)
305{ 312{
306 struct gw_node *gw_node; 313 struct batadv_gw_node *gw_node;
307 int down, up; 314 int down, up;
308 315
309 gw_node = kzalloc(sizeof(*gw_node), GFP_ATOMIC); 316 gw_node = kzalloc(sizeof(*gw_node), GFP_ATOMIC);
@@ -318,47 +325,47 @@ static void gw_node_add(struct bat_priv *bat_priv,
318 hlist_add_head_rcu(&gw_node->list, &bat_priv->gw_list); 325 hlist_add_head_rcu(&gw_node->list, &bat_priv->gw_list);
319 spin_unlock_bh(&bat_priv->gw_list_lock); 326 spin_unlock_bh(&bat_priv->gw_list_lock);
320 327
321 gw_bandwidth_to_kbit(new_gwflags, &down, &up); 328 batadv_gw_bandwidth_to_kbit(new_gwflags, &down, &up);
322 bat_dbg(DBG_BATMAN, bat_priv, 329 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
323 "Found new gateway %pM -> gw_class: %i - %i%s/%i%s\n", 330 "Found new gateway %pM -> gw_class: %i - %i%s/%i%s\n",
324 orig_node->orig, new_gwflags, 331 orig_node->orig, new_gwflags,
325 (down > 2048 ? down / 1024 : down), 332 (down > 2048 ? down / 1024 : down),
326 (down > 2048 ? "MBit" : "KBit"), 333 (down > 2048 ? "MBit" : "KBit"),
327 (up > 2048 ? up / 1024 : up), 334 (up > 2048 ? up / 1024 : up),
328 (up > 2048 ? "MBit" : "KBit")); 335 (up > 2048 ? "MBit" : "KBit"));
329} 336}
330 337
331void gw_node_update(struct bat_priv *bat_priv, 338void batadv_gw_node_update(struct batadv_priv *bat_priv,
332 struct orig_node *orig_node, uint8_t new_gwflags) 339 struct batadv_orig_node *orig_node,
340 uint8_t new_gwflags)
333{ 341{
334 struct hlist_node *node; 342 struct hlist_node *node;
335 struct gw_node *gw_node, *curr_gw; 343 struct batadv_gw_node *gw_node, *curr_gw;
336 344
337 /** 345 /* Note: We don't need a NULL check here, since curr_gw never gets
338 * Note: We don't need a NULL check here, since curr_gw never gets
339 * dereferenced. If curr_gw is NULL we also should not exit as we may 346 * dereferenced. If curr_gw is NULL we also should not exit as we may
340 * have this gateway in our list (duplication check!) even though we 347 * have this gateway in our list (duplication check!) even though we
341 * have no currently selected gateway. 348 * have no currently selected gateway.
342 */ 349 */
343 curr_gw = gw_get_selected_gw_node(bat_priv); 350 curr_gw = batadv_gw_get_selected_gw_node(bat_priv);
344 351
345 rcu_read_lock(); 352 rcu_read_lock();
346 hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw_list, list) { 353 hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw_list, list) {
347 if (gw_node->orig_node != orig_node) 354 if (gw_node->orig_node != orig_node)
348 continue; 355 continue;
349 356
350 bat_dbg(DBG_BATMAN, bat_priv, 357 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
351 "Gateway class of originator %pM changed from %i to %i\n", 358 "Gateway class of originator %pM changed from %i to %i\n",
352 orig_node->orig, gw_node->orig_node->gw_flags, 359 orig_node->orig, gw_node->orig_node->gw_flags,
353 new_gwflags); 360 new_gwflags);
354 361
355 gw_node->deleted = 0; 362 gw_node->deleted = 0;
356 363
357 if (new_gwflags == NO_FLAGS) { 364 if (new_gwflags == BATADV_NO_FLAGS) {
358 gw_node->deleted = jiffies; 365 gw_node->deleted = jiffies;
359 bat_dbg(DBG_BATMAN, bat_priv, 366 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
360 "Gateway %pM removed from gateway list\n", 367 "Gateway %pM removed from gateway list\n",
361 orig_node->orig); 368 orig_node->orig);
362 369
363 if (gw_node == curr_gw) 370 if (gw_node == curr_gw)
364 goto deselect; 371 goto deselect;
@@ -367,34 +374,35 @@ void gw_node_update(struct bat_priv *bat_priv,
367 goto unlock; 374 goto unlock;
368 } 375 }
369 376
370 if (new_gwflags == NO_FLAGS) 377 if (new_gwflags == BATADV_NO_FLAGS)
371 goto unlock; 378 goto unlock;
372 379
373 gw_node_add(bat_priv, orig_node, new_gwflags); 380 batadv_gw_node_add(bat_priv, orig_node, new_gwflags);
374 goto unlock; 381 goto unlock;
375 382
376deselect: 383deselect:
377 gw_deselect(bat_priv); 384 batadv_gw_deselect(bat_priv);
378unlock: 385unlock:
379 rcu_read_unlock(); 386 rcu_read_unlock();
380 387
381 if (curr_gw) 388 if (curr_gw)
382 gw_node_free_ref(curr_gw); 389 batadv_gw_node_free_ref(curr_gw);
383} 390}
384 391
385void gw_node_delete(struct bat_priv *bat_priv, struct orig_node *orig_node) 392void batadv_gw_node_delete(struct batadv_priv *bat_priv,
393 struct batadv_orig_node *orig_node)
386{ 394{
387 gw_node_update(bat_priv, orig_node, 0); 395 batadv_gw_node_update(bat_priv, orig_node, 0);
388} 396}
389 397
390void gw_node_purge(struct bat_priv *bat_priv) 398void batadv_gw_node_purge(struct batadv_priv *bat_priv)
391{ 399{
392 struct gw_node *gw_node, *curr_gw; 400 struct batadv_gw_node *gw_node, *curr_gw;
393 struct hlist_node *node, *node_tmp; 401 struct hlist_node *node, *node_tmp;
394 unsigned long timeout = msecs_to_jiffies(2 * PURGE_TIMEOUT); 402 unsigned long timeout = msecs_to_jiffies(2 * BATADV_PURGE_TIMEOUT);
395 int do_deselect = 0; 403 int do_deselect = 0;
396 404
397 curr_gw = gw_get_selected_gw_node(bat_priv); 405 curr_gw = batadv_gw_get_selected_gw_node(bat_priv);
398 406
399 spin_lock_bh(&bat_priv->gw_list_lock); 407 spin_lock_bh(&bat_priv->gw_list_lock);
400 408
@@ -402,43 +410,42 @@ void gw_node_purge(struct bat_priv *bat_priv)
402 &bat_priv->gw_list, list) { 410 &bat_priv->gw_list, list) {
403 if (((!gw_node->deleted) || 411 if (((!gw_node->deleted) ||
404 (time_before(jiffies, gw_node->deleted + timeout))) && 412 (time_before(jiffies, gw_node->deleted + timeout))) &&
405 atomic_read(&bat_priv->mesh_state) == MESH_ACTIVE) 413 atomic_read(&bat_priv->mesh_state) == BATADV_MESH_ACTIVE)
406 continue; 414 continue;
407 415
408 if (curr_gw == gw_node) 416 if (curr_gw == gw_node)
409 do_deselect = 1; 417 do_deselect = 1;
410 418
411 hlist_del_rcu(&gw_node->list); 419 hlist_del_rcu(&gw_node->list);
412 gw_node_free_ref(gw_node); 420 batadv_gw_node_free_ref(gw_node);
413 } 421 }
414 422
415 spin_unlock_bh(&bat_priv->gw_list_lock); 423 spin_unlock_bh(&bat_priv->gw_list_lock);
416 424
417 /* gw_deselect() needs to acquire the gw_list_lock */ 425 /* gw_deselect() needs to acquire the gw_list_lock */
418 if (do_deselect) 426 if (do_deselect)
419 gw_deselect(bat_priv); 427 batadv_gw_deselect(bat_priv);
420 428
421 if (curr_gw) 429 if (curr_gw)
422 gw_node_free_ref(curr_gw); 430 batadv_gw_node_free_ref(curr_gw);
423} 431}
424 432
425/** 433/* fails if orig_node has no router */
426 * fails if orig_node has no router 434static int batadv_write_buffer_text(struct batadv_priv *bat_priv,
427 */ 435 struct seq_file *seq,
428static int _write_buffer_text(struct bat_priv *bat_priv, struct seq_file *seq, 436 const struct batadv_gw_node *gw_node)
429 const struct gw_node *gw_node)
430{ 437{
431 struct gw_node *curr_gw; 438 struct batadv_gw_node *curr_gw;
432 struct neigh_node *router; 439 struct batadv_neigh_node *router;
433 int down, up, ret = -1; 440 int down, up, ret = -1;
434 441
435 gw_bandwidth_to_kbit(gw_node->orig_node->gw_flags, &down, &up); 442 batadv_gw_bandwidth_to_kbit(gw_node->orig_node->gw_flags, &down, &up);
436 443
437 router = orig_node_get_router(gw_node->orig_node); 444 router = batadv_orig_node_get_router(gw_node->orig_node);
438 if (!router) 445 if (!router)
439 goto out; 446 goto out;
440 447
441 curr_gw = gw_get_selected_gw_node(bat_priv); 448 curr_gw = batadv_gw_get_selected_gw_node(bat_priv);
442 449
443 ret = seq_printf(seq, "%s %pM (%3i) %pM [%10s]: %3i - %i%s/%i%s\n", 450 ret = seq_printf(seq, "%s %pM (%3i) %pM [%10s]: %3i - %i%s/%i%s\n",
444 (curr_gw == gw_node ? "=>" : " "), 451 (curr_gw == gw_node ? "=>" : " "),
@@ -451,23 +458,23 @@ static int _write_buffer_text(struct bat_priv *bat_priv, struct seq_file *seq,
451 (up > 2048 ? up / 1024 : up), 458 (up > 2048 ? up / 1024 : up),
452 (up > 2048 ? "MBit" : "KBit")); 459 (up > 2048 ? "MBit" : "KBit"));
453 460
454 neigh_node_free_ref(router); 461 batadv_neigh_node_free_ref(router);
455 if (curr_gw) 462 if (curr_gw)
456 gw_node_free_ref(curr_gw); 463 batadv_gw_node_free_ref(curr_gw);
457out: 464out:
458 return ret; 465 return ret;
459} 466}
460 467
461int gw_client_seq_print_text(struct seq_file *seq, void *offset) 468int batadv_gw_client_seq_print_text(struct seq_file *seq, void *offset)
462{ 469{
463 struct net_device *net_dev = (struct net_device *)seq->private; 470 struct net_device *net_dev = (struct net_device *)seq->private;
464 struct bat_priv *bat_priv = netdev_priv(net_dev); 471 struct batadv_priv *bat_priv = netdev_priv(net_dev);
465 struct hard_iface *primary_if; 472 struct batadv_hard_iface *primary_if;
466 struct gw_node *gw_node; 473 struct batadv_gw_node *gw_node;
467 struct hlist_node *node; 474 struct hlist_node *node;
468 int gw_count = 0, ret = 0; 475 int gw_count = 0, ret = 0;
469 476
470 primary_if = primary_if_get_selected(bat_priv); 477 primary_if = batadv_primary_if_get_selected(bat_priv);
471 if (!primary_if) { 478 if (!primary_if) {
472 ret = seq_printf(seq, 479 ret = seq_printf(seq,
473 "BATMAN mesh %s disabled - please specify interfaces to enable it\n", 480 "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
@@ -475,7 +482,7 @@ int gw_client_seq_print_text(struct seq_file *seq, void *offset)
475 goto out; 482 goto out;
476 } 483 }
477 484
478 if (primary_if->if_status != IF_ACTIVE) { 485 if (primary_if->if_status != BATADV_IF_ACTIVE) {
479 ret = seq_printf(seq, 486 ret = seq_printf(seq,
480 "BATMAN mesh %s disabled - primary interface not active\n", 487 "BATMAN mesh %s disabled - primary interface not active\n",
481 net_dev->name); 488 net_dev->name);
@@ -484,8 +491,8 @@ int gw_client_seq_print_text(struct seq_file *seq, void *offset)
484 491
485 seq_printf(seq, 492 seq_printf(seq,
486 " %-12s (%s/%i) %17s [%10s]: gw_class ... [B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s)]\n", 493 " %-12s (%s/%i) %17s [%10s]: gw_class ... [B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s)]\n",
487 "Gateway", "#", TQ_MAX_VALUE, "Nexthop", "outgoingIF", 494 "Gateway", "#", BATADV_TQ_MAX_VALUE, "Nexthop", "outgoingIF",
488 SOURCE_VERSION, primary_if->net_dev->name, 495 BATADV_SOURCE_VERSION, primary_if->net_dev->name,
489 primary_if->net_dev->dev_addr, net_dev->name); 496 primary_if->net_dev->dev_addr, net_dev->name);
490 497
491 rcu_read_lock(); 498 rcu_read_lock();
@@ -494,7 +501,7 @@ int gw_client_seq_print_text(struct seq_file *seq, void *offset)
494 continue; 501 continue;
495 502
496 /* fails if orig_node has no router */ 503 /* fails if orig_node has no router */
497 if (_write_buffer_text(bat_priv, seq, gw_node) < 0) 504 if (batadv_write_buffer_text(bat_priv, seq, gw_node) < 0)
498 continue; 505 continue;
499 506
500 gw_count++; 507 gw_count++;
@@ -506,11 +513,11 @@ int gw_client_seq_print_text(struct seq_file *seq, void *offset)
506 513
507out: 514out:
508 if (primary_if) 515 if (primary_if)
509 hardif_free_ref(primary_if); 516 batadv_hardif_free_ref(primary_if);
510 return ret; 517 return ret;
511} 518}
512 519
513static bool is_type_dhcprequest(struct sk_buff *skb, int header_len) 520static bool batadv_is_type_dhcprequest(struct sk_buff *skb, int header_len)
514{ 521{
515 int ret = false; 522 int ret = false;
516 unsigned char *p; 523 unsigned char *p;
@@ -521,27 +528,29 @@ static bool is_type_dhcprequest(struct sk_buff *skb, int header_len)
521 528
522 pkt_len = skb_headlen(skb); 529 pkt_len = skb_headlen(skb);
523 530
524 if (pkt_len < header_len + DHCP_OPTIONS_OFFSET + 1) 531 if (pkt_len < header_len + BATADV_DHCP_OPTIONS_OFFSET + 1)
525 goto out; 532 goto out;
526 533
527 p = skb->data + header_len + DHCP_OPTIONS_OFFSET; 534 p = skb->data + header_len + BATADV_DHCP_OPTIONS_OFFSET;
528 pkt_len -= header_len + DHCP_OPTIONS_OFFSET + 1; 535 pkt_len -= header_len + BATADV_DHCP_OPTIONS_OFFSET + 1;
529 536
530 /* Access the dhcp option lists. Each entry is made up by: 537 /* Access the dhcp option lists. Each entry is made up by:
531 * - octet 1: option type 538 * - octet 1: option type
532 * - octet 2: option data len (only if type != 255 and 0) 539 * - octet 2: option data len (only if type != 255 and 0)
533 * - octet 3: option data */ 540 * - octet 3: option data
541 */
534 while (*p != 255 && !ret) { 542 while (*p != 255 && !ret) {
535 /* p now points to the first octet: option type */ 543 /* p now points to the first octet: option type */
536 if (*p == 53) { 544 if (*p == 53) {
537 /* type 53 is the message type option. 545 /* type 53 is the message type option.
538 * Jump the len octet and go to the data octet */ 546 * Jump the len octet and go to the data octet
547 */
539 if (pkt_len < 2) 548 if (pkt_len < 2)
540 goto out; 549 goto out;
541 p += 2; 550 p += 2;
542 551
543 /* check if the message type is what we need */ 552 /* check if the message type is what we need */
544 if (*p == DHCP_REQUEST) 553 if (*p == BATADV_DHCP_REQUEST)
545 ret = true; 554 ret = true;
546 break; 555 break;
547 } else if (*p == 0) { 556 } else if (*p == 0) {
@@ -568,7 +577,7 @@ out:
568 return ret; 577 return ret;
569} 578}
570 579
571bool gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len) 580bool batadv_gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len)
572{ 581{
573 struct ethhdr *ethhdr; 582 struct ethhdr *ethhdr;
574 struct iphdr *iphdr; 583 struct iphdr *iphdr;
@@ -634,40 +643,41 @@ bool gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len)
634 return true; 643 return true;
635} 644}
636 645
637bool gw_out_of_range(struct bat_priv *bat_priv, 646bool batadv_gw_out_of_range(struct batadv_priv *bat_priv,
638 struct sk_buff *skb, struct ethhdr *ethhdr) 647 struct sk_buff *skb, struct ethhdr *ethhdr)
639{ 648{
640 struct neigh_node *neigh_curr = NULL, *neigh_old = NULL; 649 struct batadv_neigh_node *neigh_curr = NULL, *neigh_old = NULL;
641 struct orig_node *orig_dst_node = NULL; 650 struct batadv_orig_node *orig_dst_node = NULL;
642 struct gw_node *curr_gw = NULL; 651 struct batadv_gw_node *curr_gw = NULL;
643 bool ret, out_of_range = false; 652 bool ret, out_of_range = false;
644 unsigned int header_len = 0; 653 unsigned int header_len = 0;
645 uint8_t curr_tq_avg; 654 uint8_t curr_tq_avg;
646 655
647 ret = gw_is_dhcp_target(skb, &header_len); 656 ret = batadv_gw_is_dhcp_target(skb, &header_len);
648 if (!ret) 657 if (!ret)
649 goto out; 658 goto out;
650 659
651 orig_dst_node = transtable_search(bat_priv, ethhdr->h_source, 660 orig_dst_node = batadv_transtable_search(bat_priv, ethhdr->h_source,
652 ethhdr->h_dest); 661 ethhdr->h_dest);
653 if (!orig_dst_node) 662 if (!orig_dst_node)
654 goto out; 663 goto out;
655 664
656 if (!orig_dst_node->gw_flags) 665 if (!orig_dst_node->gw_flags)
657 goto out; 666 goto out;
658 667
659 ret = is_type_dhcprequest(skb, header_len); 668 ret = batadv_is_type_dhcprequest(skb, header_len);
660 if (!ret) 669 if (!ret)
661 goto out; 670 goto out;
662 671
663 switch (atomic_read(&bat_priv->gw_mode)) { 672 switch (atomic_read(&bat_priv->gw_mode)) {
664 case GW_MODE_SERVER: 673 case BATADV_GW_MODE_SERVER:
665 /* If we are a GW then we are our best GW. We can artificially 674 /* If we are a GW then we are our best GW. We can artificially
666 * set the tq towards ourself as the maximum value */ 675 * set the tq towards ourself as the maximum value
667 curr_tq_avg = TQ_MAX_VALUE; 676 */
677 curr_tq_avg = BATADV_TQ_MAX_VALUE;
668 break; 678 break;
669 case GW_MODE_CLIENT: 679 case BATADV_GW_MODE_CLIENT:
670 curr_gw = gw_get_selected_gw_node(bat_priv); 680 curr_gw = batadv_gw_get_selected_gw_node(bat_priv);
671 if (!curr_gw) 681 if (!curr_gw)
672 goto out; 682 goto out;
673 683
@@ -677,33 +687,35 @@ bool gw_out_of_range(struct bat_priv *bat_priv,
677 687
678 /* If the dhcp packet has been sent to a different gw, 688 /* If the dhcp packet has been sent to a different gw,
679 * we have to evaluate whether the old gw is still 689 * we have to evaluate whether the old gw is still
680 * reliable enough */ 690 * reliable enough
681 neigh_curr = find_router(bat_priv, curr_gw->orig_node, NULL); 691 */
692 neigh_curr = batadv_find_router(bat_priv, curr_gw->orig_node,
693 NULL);
682 if (!neigh_curr) 694 if (!neigh_curr)
683 goto out; 695 goto out;
684 696
685 curr_tq_avg = neigh_curr->tq_avg; 697 curr_tq_avg = neigh_curr->tq_avg;
686 break; 698 break;
687 case GW_MODE_OFF: 699 case BATADV_GW_MODE_OFF:
688 default: 700 default:
689 goto out; 701 goto out;
690 } 702 }
691 703
692 neigh_old = find_router(bat_priv, orig_dst_node, NULL); 704 neigh_old = batadv_find_router(bat_priv, orig_dst_node, NULL);
693 if (!neigh_old) 705 if (!neigh_old)
694 goto out; 706 goto out;
695 707
696 if (curr_tq_avg - neigh_old->tq_avg > GW_THRESHOLD) 708 if (curr_tq_avg - neigh_old->tq_avg > BATADV_GW_THRESHOLD)
697 out_of_range = true; 709 out_of_range = true;
698 710
699out: 711out:
700 if (orig_dst_node) 712 if (orig_dst_node)
701 orig_node_free_ref(orig_dst_node); 713 batadv_orig_node_free_ref(orig_dst_node);
702 if (curr_gw) 714 if (curr_gw)
703 gw_node_free_ref(curr_gw); 715 batadv_gw_node_free_ref(curr_gw);
704 if (neigh_old) 716 if (neigh_old)
705 neigh_node_free_ref(neigh_old); 717 batadv_neigh_node_free_ref(neigh_old);
706 if (neigh_curr) 718 if (neigh_curr)
707 neigh_node_free_ref(neigh_curr); 719 batadv_neigh_node_free_ref(neigh_curr);
708 return out_of_range; 720 return out_of_range;
709} 721}
diff --git a/net/batman-adv/gateway_client.h b/net/batman-adv/gateway_client.h
index bf56a5aea10..f0d129e323c 100644
--- a/net/batman-adv/gateway_client.h
+++ b/net/batman-adv/gateway_client.h
@@ -1,5 +1,4 @@
1/* 1/* Copyright (C) 2009-2012 B.A.T.M.A.N. contributors:
2 * Copyright (C) 2009-2012 B.A.T.M.A.N. contributors:
3 * 2 *
4 * Marek Lindner 3 * Marek Lindner
5 * 4 *
@@ -16,23 +15,26 @@
16 * along with this program; if not, write to the Free Software 15 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA 17 * 02110-1301, USA
19 *
20 */ 18 */
21 19
22#ifndef _NET_BATMAN_ADV_GATEWAY_CLIENT_H_ 20#ifndef _NET_BATMAN_ADV_GATEWAY_CLIENT_H_
23#define _NET_BATMAN_ADV_GATEWAY_CLIENT_H_ 21#define _NET_BATMAN_ADV_GATEWAY_CLIENT_H_
24 22
25void gw_deselect(struct bat_priv *bat_priv); 23void batadv_gw_deselect(struct batadv_priv *bat_priv);
26void gw_election(struct bat_priv *bat_priv); 24void batadv_gw_election(struct batadv_priv *bat_priv);
27struct orig_node *gw_get_selected_orig(struct bat_priv *bat_priv); 25struct batadv_orig_node *
28void gw_check_election(struct bat_priv *bat_priv, struct orig_node *orig_node); 26batadv_gw_get_selected_orig(struct batadv_priv *bat_priv);
29void gw_node_update(struct bat_priv *bat_priv, 27void batadv_gw_check_election(struct batadv_priv *bat_priv,
30 struct orig_node *orig_node, uint8_t new_gwflags); 28 struct batadv_orig_node *orig_node);
31void gw_node_delete(struct bat_priv *bat_priv, struct orig_node *orig_node); 29void batadv_gw_node_update(struct batadv_priv *bat_priv,
32void gw_node_purge(struct bat_priv *bat_priv); 30 struct batadv_orig_node *orig_node,
33int gw_client_seq_print_text(struct seq_file *seq, void *offset); 31 uint8_t new_gwflags);
34bool gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len); 32void batadv_gw_node_delete(struct batadv_priv *bat_priv,
35bool gw_out_of_range(struct bat_priv *bat_priv, 33 struct batadv_orig_node *orig_node);
36 struct sk_buff *skb, struct ethhdr *ethhdr); 34void batadv_gw_node_purge(struct batadv_priv *bat_priv);
35int batadv_gw_client_seq_print_text(struct seq_file *seq, void *offset);
36bool batadv_gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len);
37bool batadv_gw_out_of_range(struct batadv_priv *bat_priv,
38 struct sk_buff *skb, struct ethhdr *ethhdr);
37 39
38#endif /* _NET_BATMAN_ADV_GATEWAY_CLIENT_H_ */ 40#endif /* _NET_BATMAN_ADV_GATEWAY_CLIENT_H_ */
diff --git a/net/batman-adv/gateway_common.c b/net/batman-adv/gateway_common.c
index ca57ac7d73b..9001208d175 100644
--- a/net/batman-adv/gateway_common.c
+++ b/net/batman-adv/gateway_common.c
@@ -1,5 +1,4 @@
1/* 1/* Copyright (C) 2009-2012 B.A.T.M.A.N. contributors:
2 * Copyright (C) 2009-2012 B.A.T.M.A.N. contributors:
3 * 2 *
4 * Marek Lindner 3 * Marek Lindner
5 * 4 *
@@ -16,7 +15,6 @@
16 * along with this program; if not, write to the Free Software 15 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA 17 * 02110-1301, USA
19 *
20 */ 18 */
21 19
22#include "main.h" 20#include "main.h"
@@ -24,7 +22,7 @@
24#include "gateway_client.h" 22#include "gateway_client.h"
25 23
26/* calculates the gateway class from kbit */ 24/* calculates the gateway class from kbit */
27static void kbit_to_gw_bandwidth(int down, int up, long *gw_srv_class) 25static void batadv_kbit_to_gw_bandwidth(int down, int up, long *gw_srv_class)
28{ 26{
29 int mdown = 0, tdown, tup, difference; 27 int mdown = 0, tdown, tup, difference;
30 uint8_t sbit, part; 28 uint8_t sbit, part;
@@ -59,7 +57,7 @@ static void kbit_to_gw_bandwidth(int down, int up, long *gw_srv_class)
59} 57}
60 58
61/* returns the up and downspeeds in kbit, calculated from the class */ 59/* returns the up and downspeeds in kbit, calculated from the class */
62void gw_bandwidth_to_kbit(uint8_t gw_srv_class, int *down, int *up) 60void batadv_gw_bandwidth_to_kbit(uint8_t gw_srv_class, int *down, int *up)
63{ 61{
64 int sbit = (gw_srv_class & 0x80) >> 7; 62 int sbit = (gw_srv_class & 0x80) >> 7;
65 int dpart = (gw_srv_class & 0x78) >> 3; 63 int dpart = (gw_srv_class & 0x78) >> 3;
@@ -75,8 +73,8 @@ void gw_bandwidth_to_kbit(uint8_t gw_srv_class, int *down, int *up)
75 *up = ((upart + 1) * (*down)) / 8; 73 *up = ((upart + 1) * (*down)) / 8;
76} 74}
77 75
78static bool parse_gw_bandwidth(struct net_device *net_dev, char *buff, 76static bool batadv_parse_gw_bandwidth(struct net_device *net_dev, char *buff,
79 int *up, int *down) 77 int *up, int *down)
80{ 78{
81 int ret, multi = 1; 79 int ret, multi = 1;
82 char *slash_ptr, *tmp_ptr; 80 char *slash_ptr, *tmp_ptr;
@@ -99,9 +97,9 @@ static bool parse_gw_bandwidth(struct net_device *net_dev, char *buff,
99 97
100 ret = kstrtol(buff, 10, &ldown); 98 ret = kstrtol(buff, 10, &ldown);
101 if (ret) { 99 if (ret) {
102 bat_err(net_dev, 100 batadv_err(net_dev,
103 "Download speed of gateway mode invalid: %s\n", 101 "Download speed of gateway mode invalid: %s\n",
104 buff); 102 buff);
105 return false; 103 return false;
106 } 104 }
107 105
@@ -124,9 +122,9 @@ static bool parse_gw_bandwidth(struct net_device *net_dev, char *buff,
124 122
125 ret = kstrtol(slash_ptr + 1, 10, &lup); 123 ret = kstrtol(slash_ptr + 1, 10, &lup);
126 if (ret) { 124 if (ret) {
127 bat_err(net_dev, 125 batadv_err(net_dev,
128 "Upload speed of gateway mode invalid: %s\n", 126 "Upload speed of gateway mode invalid: %s\n",
129 slash_ptr + 1); 127 slash_ptr + 1);
130 return false; 128 return false;
131 } 129 }
132 130
@@ -136,14 +134,15 @@ static bool parse_gw_bandwidth(struct net_device *net_dev, char *buff,
136 return true; 134 return true;
137} 135}
138 136
139ssize_t gw_bandwidth_set(struct net_device *net_dev, char *buff, size_t count) 137ssize_t batadv_gw_bandwidth_set(struct net_device *net_dev, char *buff,
138 size_t count)
140{ 139{
141 struct bat_priv *bat_priv = netdev_priv(net_dev); 140 struct batadv_priv *bat_priv = netdev_priv(net_dev);
142 long gw_bandwidth_tmp = 0; 141 long gw_bandwidth_tmp = 0;
143 int up = 0, down = 0; 142 int up = 0, down = 0;
144 bool ret; 143 bool ret;
145 144
146 ret = parse_gw_bandwidth(net_dev, buff, &up, &down); 145 ret = batadv_parse_gw_bandwidth(net_dev, buff, &up, &down);
147 if (!ret) 146 if (!ret)
148 goto end; 147 goto end;
149 148
@@ -153,23 +152,25 @@ ssize_t gw_bandwidth_set(struct net_device *net_dev, char *buff, size_t count)
153 if (!up) 152 if (!up)
154 up = down / 5; 153 up = down / 5;
155 154
156 kbit_to_gw_bandwidth(down, up, &gw_bandwidth_tmp); 155 batadv_kbit_to_gw_bandwidth(down, up, &gw_bandwidth_tmp);
157 156
158 /** 157 /* the gw bandwidth we guessed above might not match the given
159 * the gw bandwidth we guessed above might not match the given
160 * speeds, hence we need to calculate it back to show the number 158 * speeds, hence we need to calculate it back to show the number
161 * that is going to be propagated 159 * that is going to be propagated
162 **/ 160 */
163 gw_bandwidth_to_kbit((uint8_t)gw_bandwidth_tmp, &down, &up); 161 batadv_gw_bandwidth_to_kbit((uint8_t)gw_bandwidth_tmp, &down, &up);
164 162
165 gw_deselect(bat_priv); 163 if (atomic_read(&bat_priv->gw_bandwidth) == gw_bandwidth_tmp)
166 bat_info(net_dev, 164 return count;
167 "Changing gateway bandwidth from: '%i' to: '%ld' (propagating: %d%s/%d%s)\n", 165
168 atomic_read(&bat_priv->gw_bandwidth), gw_bandwidth_tmp, 166 batadv_gw_deselect(bat_priv);
169 (down > 2048 ? down / 1024 : down), 167 batadv_info(net_dev,
170 (down > 2048 ? "MBit" : "KBit"), 168 "Changing gateway bandwidth from: '%i' to: '%ld' (propagating: %d%s/%d%s)\n",
171 (up > 2048 ? up / 1024 : up), 169 atomic_read(&bat_priv->gw_bandwidth), gw_bandwidth_tmp,
172 (up > 2048 ? "MBit" : "KBit")); 170 (down > 2048 ? down / 1024 : down),
171 (down > 2048 ? "MBit" : "KBit"),
172 (up > 2048 ? up / 1024 : up),
173 (up > 2048 ? "MBit" : "KBit"));
173 174
174 atomic_set(&bat_priv->gw_bandwidth, gw_bandwidth_tmp); 175 atomic_set(&bat_priv->gw_bandwidth, gw_bandwidth_tmp);
175 176
diff --git a/net/batman-adv/gateway_common.h b/net/batman-adv/gateway_common.h
index b8fb11c4f92..13697f6e711 100644
--- a/net/batman-adv/gateway_common.h
+++ b/net/batman-adv/gateway_common.h
@@ -1,5 +1,4 @@
1/* 1/* Copyright (C) 2009-2012 B.A.T.M.A.N. contributors:
2 * Copyright (C) 2009-2012 B.A.T.M.A.N. contributors:
3 * 2 *
4 * Marek Lindner 3 * Marek Lindner
5 * 4 *
@@ -16,23 +15,23 @@
16 * along with this program; if not, write to the Free Software 15 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA 17 * 02110-1301, USA
19 *
20 */ 18 */
21 19
22#ifndef _NET_BATMAN_ADV_GATEWAY_COMMON_H_ 20#ifndef _NET_BATMAN_ADV_GATEWAY_COMMON_H_
23#define _NET_BATMAN_ADV_GATEWAY_COMMON_H_ 21#define _NET_BATMAN_ADV_GATEWAY_COMMON_H_
24 22
25enum gw_modes { 23enum batadv_gw_modes {
26 GW_MODE_OFF, 24 BATADV_GW_MODE_OFF,
27 GW_MODE_CLIENT, 25 BATADV_GW_MODE_CLIENT,
28 GW_MODE_SERVER, 26 BATADV_GW_MODE_SERVER,
29}; 27};
30 28
31#define GW_MODE_OFF_NAME "off" 29#define BATADV_GW_MODE_OFF_NAME "off"
32#define GW_MODE_CLIENT_NAME "client" 30#define BATADV_GW_MODE_CLIENT_NAME "client"
33#define GW_MODE_SERVER_NAME "server" 31#define BATADV_GW_MODE_SERVER_NAME "server"
34 32
35void gw_bandwidth_to_kbit(uint8_t gw_class, int *down, int *up); 33void batadv_gw_bandwidth_to_kbit(uint8_t gw_class, int *down, int *up);
36ssize_t gw_bandwidth_set(struct net_device *net_dev, char *buff, size_t count); 34ssize_t batadv_gw_bandwidth_set(struct net_device *net_dev, char *buff,
35 size_t count);
37 36
38#endif /* _NET_BATMAN_ADV_GATEWAY_COMMON_H_ */ 37#endif /* _NET_BATMAN_ADV_GATEWAY_COMMON_H_ */
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
index dc334fa8984..282bf6e9353 100644
--- a/net/batman-adv/hard-interface.c
+++ b/net/batman-adv/hard-interface.c
@@ -1,5 +1,4 @@
1/* 1/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
2 * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
3 * 2 *
4 * Marek Lindner, Simon Wunderlich 3 * Marek Lindner, Simon Wunderlich
5 * 4 *
@@ -16,7 +15,6 @@
16 * along with this program; if not, write to the Free Software 15 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA 17 * 02110-1301, USA
19 *
20 */ 18 */
21 19
22#include "main.h" 20#include "main.h"
@@ -25,28 +23,29 @@
25#include "send.h" 23#include "send.h"
26#include "translation-table.h" 24#include "translation-table.h"
27#include "routing.h" 25#include "routing.h"
28#include "bat_sysfs.h" 26#include "sysfs.h"
29#include "originator.h" 27#include "originator.h"
30#include "hash.h" 28#include "hash.h"
31#include "bridge_loop_avoidance.h" 29#include "bridge_loop_avoidance.h"
32 30
33#include <linux/if_arp.h> 31#include <linux/if_arp.h>
34 32
35void hardif_free_rcu(struct rcu_head *rcu) 33void batadv_hardif_free_rcu(struct rcu_head *rcu)
36{ 34{
37 struct hard_iface *hard_iface; 35 struct batadv_hard_iface *hard_iface;
38 36
39 hard_iface = container_of(rcu, struct hard_iface, rcu); 37 hard_iface = container_of(rcu, struct batadv_hard_iface, rcu);
40 dev_put(hard_iface->net_dev); 38 dev_put(hard_iface->net_dev);
41 kfree(hard_iface); 39 kfree(hard_iface);
42} 40}
43 41
44struct hard_iface *hardif_get_by_netdev(const struct net_device *net_dev) 42struct batadv_hard_iface *
43batadv_hardif_get_by_netdev(const struct net_device *net_dev)
45{ 44{
46 struct hard_iface *hard_iface; 45 struct batadv_hard_iface *hard_iface;
47 46
48 rcu_read_lock(); 47 rcu_read_lock();
49 list_for_each_entry_rcu(hard_iface, &hardif_list, list) { 48 list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
50 if (hard_iface->net_dev == net_dev && 49 if (hard_iface->net_dev == net_dev &&
51 atomic_inc_not_zero(&hard_iface->refcount)) 50 atomic_inc_not_zero(&hard_iface->refcount))
52 goto out; 51 goto out;
@@ -59,7 +58,7 @@ out:
59 return hard_iface; 58 return hard_iface;
60} 59}
61 60
62static int is_valid_iface(const struct net_device *net_dev) 61static int batadv_is_valid_iface(const struct net_device *net_dev)
63{ 62{
64 if (net_dev->flags & IFF_LOOPBACK) 63 if (net_dev->flags & IFF_LOOPBACK)
65 return 0; 64 return 0;
@@ -71,26 +70,23 @@ static int is_valid_iface(const struct net_device *net_dev)
71 return 0; 70 return 0;
72 71
73 /* no batman over batman */ 72 /* no batman over batman */
74 if (softif_is_valid(net_dev)) 73 if (batadv_softif_is_valid(net_dev))
75 return 0; 74 return 0;
76 75
77 /* Device is being bridged */
78 /* if (net_dev->priv_flags & IFF_BRIDGE_PORT)
79 return 0; */
80
81 return 1; 76 return 1;
82} 77}
83 78
84static struct hard_iface *hardif_get_active(const struct net_device *soft_iface) 79static struct batadv_hard_iface *
80batadv_hardif_get_active(const struct net_device *soft_iface)
85{ 81{
86 struct hard_iface *hard_iface; 82 struct batadv_hard_iface *hard_iface;
87 83
88 rcu_read_lock(); 84 rcu_read_lock();
89 list_for_each_entry_rcu(hard_iface, &hardif_list, list) { 85 list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
90 if (hard_iface->soft_iface != soft_iface) 86 if (hard_iface->soft_iface != soft_iface)
91 continue; 87 continue;
92 88
93 if (hard_iface->if_status == IF_ACTIVE && 89 if (hard_iface->if_status == BATADV_IF_ACTIVE &&
94 atomic_inc_not_zero(&hard_iface->refcount)) 90 atomic_inc_not_zero(&hard_iface->refcount))
95 goto out; 91 goto out;
96 } 92 }
@@ -102,32 +98,32 @@ out:
102 return hard_iface; 98 return hard_iface;
103} 99}
104 100
105static void primary_if_update_addr(struct bat_priv *bat_priv, 101static void batadv_primary_if_update_addr(struct batadv_priv *bat_priv,
106 struct hard_iface *oldif) 102 struct batadv_hard_iface *oldif)
107{ 103{
108 struct vis_packet *vis_packet; 104 struct batadv_vis_packet *vis_packet;
109 struct hard_iface *primary_if; 105 struct batadv_hard_iface *primary_if;
110 106
111 primary_if = primary_if_get_selected(bat_priv); 107 primary_if = batadv_primary_if_get_selected(bat_priv);
112 if (!primary_if) 108 if (!primary_if)
113 goto out; 109 goto out;
114 110
115 vis_packet = (struct vis_packet *) 111 vis_packet = (struct batadv_vis_packet *)
116 bat_priv->my_vis_info->skb_packet->data; 112 bat_priv->my_vis_info->skb_packet->data;
117 memcpy(vis_packet->vis_orig, primary_if->net_dev->dev_addr, ETH_ALEN); 113 memcpy(vis_packet->vis_orig, primary_if->net_dev->dev_addr, ETH_ALEN);
118 memcpy(vis_packet->sender_orig, 114 memcpy(vis_packet->sender_orig,
119 primary_if->net_dev->dev_addr, ETH_ALEN); 115 primary_if->net_dev->dev_addr, ETH_ALEN);
120 116
121 bla_update_orig_address(bat_priv, primary_if, oldif); 117 batadv_bla_update_orig_address(bat_priv, primary_if, oldif);
122out: 118out:
123 if (primary_if) 119 if (primary_if)
124 hardif_free_ref(primary_if); 120 batadv_hardif_free_ref(primary_if);
125} 121}
126 122
127static void primary_if_select(struct bat_priv *bat_priv, 123static void batadv_primary_if_select(struct batadv_priv *bat_priv,
128 struct hard_iface *new_hard_iface) 124 struct batadv_hard_iface *new_hard_iface)
129{ 125{
130 struct hard_iface *curr_hard_iface; 126 struct batadv_hard_iface *curr_hard_iface;
131 127
132 ASSERT_RTNL(); 128 ASSERT_RTNL();
133 129
@@ -141,14 +137,15 @@ static void primary_if_select(struct bat_priv *bat_priv,
141 goto out; 137 goto out;
142 138
143 bat_priv->bat_algo_ops->bat_primary_iface_set(new_hard_iface); 139 bat_priv->bat_algo_ops->bat_primary_iface_set(new_hard_iface);
144 primary_if_update_addr(bat_priv, curr_hard_iface); 140 batadv_primary_if_update_addr(bat_priv, curr_hard_iface);
145 141
146out: 142out:
147 if (curr_hard_iface) 143 if (curr_hard_iface)
148 hardif_free_ref(curr_hard_iface); 144 batadv_hardif_free_ref(curr_hard_iface);
149} 145}
150 146
151static bool hardif_is_iface_up(const struct hard_iface *hard_iface) 147static bool
148batadv_hardif_is_iface_up(const struct batadv_hard_iface *hard_iface)
152{ 149{
153 if (hard_iface->net_dev->flags & IFF_UP) 150 if (hard_iface->net_dev->flags & IFF_UP)
154 return true; 151 return true;
@@ -156,21 +153,21 @@ static bool hardif_is_iface_up(const struct hard_iface *hard_iface)
156 return false; 153 return false;
157} 154}
158 155
159static void check_known_mac_addr(const struct net_device *net_dev) 156static void batadv_check_known_mac_addr(const struct net_device *net_dev)
160{ 157{
161 const struct hard_iface *hard_iface; 158 const struct batadv_hard_iface *hard_iface;
162 159
163 rcu_read_lock(); 160 rcu_read_lock();
164 list_for_each_entry_rcu(hard_iface, &hardif_list, list) { 161 list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
165 if ((hard_iface->if_status != IF_ACTIVE) && 162 if ((hard_iface->if_status != BATADV_IF_ACTIVE) &&
166 (hard_iface->if_status != IF_TO_BE_ACTIVATED)) 163 (hard_iface->if_status != BATADV_IF_TO_BE_ACTIVATED))
167 continue; 164 continue;
168 165
169 if (hard_iface->net_dev == net_dev) 166 if (hard_iface->net_dev == net_dev)
170 continue; 167 continue;
171 168
172 if (!compare_eth(hard_iface->net_dev->dev_addr, 169 if (!batadv_compare_eth(hard_iface->net_dev->dev_addr,
173 net_dev->dev_addr)) 170 net_dev->dev_addr))
174 continue; 171 continue;
175 172
176 pr_warn("The newly added mac address (%pM) already exists on: %s\n", 173 pr_warn("The newly added mac address (%pM) already exists on: %s\n",
@@ -180,27 +177,29 @@ static void check_known_mac_addr(const struct net_device *net_dev)
180 rcu_read_unlock(); 177 rcu_read_unlock();
181} 178}
182 179
183int hardif_min_mtu(struct net_device *soft_iface) 180int batadv_hardif_min_mtu(struct net_device *soft_iface)
184{ 181{
185 const struct bat_priv *bat_priv = netdev_priv(soft_iface); 182 const struct batadv_priv *bat_priv = netdev_priv(soft_iface);
186 const struct hard_iface *hard_iface; 183 const struct batadv_hard_iface *hard_iface;
187 /* allow big frames if all devices are capable to do so 184 /* allow big frames if all devices are capable to do so
188 * (have MTU > 1500 + BAT_HEADER_LEN) */ 185 * (have MTU > 1500 + BAT_HEADER_LEN)
186 */
189 int min_mtu = ETH_DATA_LEN; 187 int min_mtu = ETH_DATA_LEN;
190 188
191 if (atomic_read(&bat_priv->fragmentation)) 189 if (atomic_read(&bat_priv->fragmentation))
192 goto out; 190 goto out;
193 191
194 rcu_read_lock(); 192 rcu_read_lock();
195 list_for_each_entry_rcu(hard_iface, &hardif_list, list) { 193 list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
196 if ((hard_iface->if_status != IF_ACTIVE) && 194 if ((hard_iface->if_status != BATADV_IF_ACTIVE) &&
197 (hard_iface->if_status != IF_TO_BE_ACTIVATED)) 195 (hard_iface->if_status != BATADV_IF_TO_BE_ACTIVATED))
198 continue; 196 continue;
199 197
200 if (hard_iface->soft_iface != soft_iface) 198 if (hard_iface->soft_iface != soft_iface)
201 continue; 199 continue;
202 200
203 min_mtu = min_t(int, hard_iface->net_dev->mtu - BAT_HEADER_LEN, 201 min_mtu = min_t(int,
202 hard_iface->net_dev->mtu - BATADV_HEADER_LEN,
204 min_mtu); 203 min_mtu);
205 } 204 }
206 rcu_read_unlock(); 205 rcu_read_unlock();
@@ -209,68 +208,70 @@ out:
209} 208}
210 209
211/* adjusts the MTU if a new interface with a smaller MTU appeared. */ 210/* adjusts the MTU if a new interface with a smaller MTU appeared. */
212void update_min_mtu(struct net_device *soft_iface) 211void batadv_update_min_mtu(struct net_device *soft_iface)
213{ 212{
214 int min_mtu; 213 int min_mtu;
215 214
216 min_mtu = hardif_min_mtu(soft_iface); 215 min_mtu = batadv_hardif_min_mtu(soft_iface);
217 if (soft_iface->mtu != min_mtu) 216 if (soft_iface->mtu != min_mtu)
218 soft_iface->mtu = min_mtu; 217 soft_iface->mtu = min_mtu;
219} 218}
220 219
221static void hardif_activate_interface(struct hard_iface *hard_iface) 220static void
221batadv_hardif_activate_interface(struct batadv_hard_iface *hard_iface)
222{ 222{
223 struct bat_priv *bat_priv; 223 struct batadv_priv *bat_priv;
224 struct hard_iface *primary_if = NULL; 224 struct batadv_hard_iface *primary_if = NULL;
225 225
226 if (hard_iface->if_status != IF_INACTIVE) 226 if (hard_iface->if_status != BATADV_IF_INACTIVE)
227 goto out; 227 goto out;
228 228
229 bat_priv = netdev_priv(hard_iface->soft_iface); 229 bat_priv = netdev_priv(hard_iface->soft_iface);
230 230
231 bat_priv->bat_algo_ops->bat_iface_update_mac(hard_iface); 231 bat_priv->bat_algo_ops->bat_iface_update_mac(hard_iface);
232 hard_iface->if_status = IF_TO_BE_ACTIVATED; 232 hard_iface->if_status = BATADV_IF_TO_BE_ACTIVATED;
233 233
234 /** 234 /* the first active interface becomes our primary interface or
235 * the first active interface becomes our primary interface or
236 * the next active interface after the old primary interface was removed 235 * the next active interface after the old primary interface was removed
237 */ 236 */
238 primary_if = primary_if_get_selected(bat_priv); 237 primary_if = batadv_primary_if_get_selected(bat_priv);
239 if (!primary_if) 238 if (!primary_if)
240 primary_if_select(bat_priv, hard_iface); 239 batadv_primary_if_select(bat_priv, hard_iface);
241 240
242 bat_info(hard_iface->soft_iface, "Interface activated: %s\n", 241 batadv_info(hard_iface->soft_iface, "Interface activated: %s\n",
243 hard_iface->net_dev->name); 242 hard_iface->net_dev->name);
244 243
245 update_min_mtu(hard_iface->soft_iface); 244 batadv_update_min_mtu(hard_iface->soft_iface);
246 245
247out: 246out:
248 if (primary_if) 247 if (primary_if)
249 hardif_free_ref(primary_if); 248 batadv_hardif_free_ref(primary_if);
250} 249}
251 250
252static void hardif_deactivate_interface(struct hard_iface *hard_iface) 251static void
252batadv_hardif_deactivate_interface(struct batadv_hard_iface *hard_iface)
253{ 253{
254 if ((hard_iface->if_status != IF_ACTIVE) && 254 if ((hard_iface->if_status != BATADV_IF_ACTIVE) &&
255 (hard_iface->if_status != IF_TO_BE_ACTIVATED)) 255 (hard_iface->if_status != BATADV_IF_TO_BE_ACTIVATED))
256 return; 256 return;
257 257
258 hard_iface->if_status = IF_INACTIVE; 258 hard_iface->if_status = BATADV_IF_INACTIVE;
259 259
260 bat_info(hard_iface->soft_iface, "Interface deactivated: %s\n", 260 batadv_info(hard_iface->soft_iface, "Interface deactivated: %s\n",
261 hard_iface->net_dev->name); 261 hard_iface->net_dev->name);
262 262
263 update_min_mtu(hard_iface->soft_iface); 263 batadv_update_min_mtu(hard_iface->soft_iface);
264} 264}
265 265
266int hardif_enable_interface(struct hard_iface *hard_iface, 266int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface,
267 const char *iface_name) 267 const char *iface_name)
268{ 268{
269 struct bat_priv *bat_priv; 269 struct batadv_priv *bat_priv;
270 struct net_device *soft_iface; 270 struct net_device *soft_iface;
271 __be16 ethertype = __constant_htons(BATADV_ETH_P_BATMAN);
271 int ret; 272 int ret;
272 273
273 if (hard_iface->if_status != IF_NOT_IN_USE) 274 if (hard_iface->if_status != BATADV_IF_NOT_IN_USE)
274 goto out; 275 goto out;
275 276
276 if (!atomic_inc_not_zero(&hard_iface->refcount)) 277 if (!atomic_inc_not_zero(&hard_iface->refcount))
@@ -284,7 +285,7 @@ int hardif_enable_interface(struct hard_iface *hard_iface,
284 soft_iface = dev_get_by_name(&init_net, iface_name); 285 soft_iface = dev_get_by_name(&init_net, iface_name);
285 286
286 if (!soft_iface) { 287 if (!soft_iface) {
287 soft_iface = softif_create(iface_name); 288 soft_iface = batadv_softif_create(iface_name);
288 289
289 if (!soft_iface) { 290 if (!soft_iface) {
290 ret = -ENOMEM; 291 ret = -ENOMEM;
@@ -295,7 +296,7 @@ int hardif_enable_interface(struct hard_iface *hard_iface,
295 dev_hold(soft_iface); 296 dev_hold(soft_iface);
296 } 297 }
297 298
298 if (!softif_is_valid(soft_iface)) { 299 if (!batadv_softif_is_valid(soft_iface)) {
299 pr_err("Can't create batman mesh interface %s: already exists as regular interface\n", 300 pr_err("Can't create batman mesh interface %s: already exists as regular interface\n",
300 soft_iface->name); 301 soft_iface->name);
301 ret = -EINVAL; 302 ret = -EINVAL;
@@ -306,48 +307,46 @@ int hardif_enable_interface(struct hard_iface *hard_iface,
306 bat_priv = netdev_priv(hard_iface->soft_iface); 307 bat_priv = netdev_priv(hard_iface->soft_iface);
307 308
308 ret = bat_priv->bat_algo_ops->bat_iface_enable(hard_iface); 309 ret = bat_priv->bat_algo_ops->bat_iface_enable(hard_iface);
309 if (ret < 0) { 310 if (ret < 0)
310 ret = -ENOMEM;
311 goto err_dev; 311 goto err_dev;
312 }
313 312
314 hard_iface->if_num = bat_priv->num_ifaces; 313 hard_iface->if_num = bat_priv->num_ifaces;
315 bat_priv->num_ifaces++; 314 bat_priv->num_ifaces++;
316 hard_iface->if_status = IF_INACTIVE; 315 hard_iface->if_status = BATADV_IF_INACTIVE;
317 orig_hash_add_if(hard_iface, bat_priv->num_ifaces); 316 batadv_orig_hash_add_if(hard_iface, bat_priv->num_ifaces);
318 317
319 hard_iface->batman_adv_ptype.type = __constant_htons(ETH_P_BATMAN); 318 hard_iface->batman_adv_ptype.type = ethertype;
320 hard_iface->batman_adv_ptype.func = batman_skb_recv; 319 hard_iface->batman_adv_ptype.func = batadv_batman_skb_recv;
321 hard_iface->batman_adv_ptype.dev = hard_iface->net_dev; 320 hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
322 dev_add_pack(&hard_iface->batman_adv_ptype); 321 dev_add_pack(&hard_iface->batman_adv_ptype);
323 322
324 atomic_set(&hard_iface->frag_seqno, 1); 323 atomic_set(&hard_iface->frag_seqno, 1);
325 bat_info(hard_iface->soft_iface, "Adding interface: %s\n", 324 batadv_info(hard_iface->soft_iface, "Adding interface: %s\n",
326 hard_iface->net_dev->name); 325 hard_iface->net_dev->name);
327 326
328 if (atomic_read(&bat_priv->fragmentation) && hard_iface->net_dev->mtu < 327 if (atomic_read(&bat_priv->fragmentation) &&
329 ETH_DATA_LEN + BAT_HEADER_LEN) 328 hard_iface->net_dev->mtu < ETH_DATA_LEN + BATADV_HEADER_LEN)
330 bat_info(hard_iface->soft_iface, 329 batadv_info(hard_iface->soft_iface,
331 "The MTU of interface %s is too small (%i) to handle the transport of batman-adv packets. Packets going over this interface will be fragmented on layer2 which could impact the performance. Setting the MTU to %zi would solve the problem.\n", 330 "The MTU of interface %s is too small (%i) to handle the transport of batman-adv packets. Packets going over this interface will be fragmented on layer2 which could impact the performance. Setting the MTU to %zi would solve the problem.\n",
332 hard_iface->net_dev->name, hard_iface->net_dev->mtu, 331 hard_iface->net_dev->name, hard_iface->net_dev->mtu,
333 ETH_DATA_LEN + BAT_HEADER_LEN); 332 ETH_DATA_LEN + BATADV_HEADER_LEN);
334 333
335 if (!atomic_read(&bat_priv->fragmentation) && hard_iface->net_dev->mtu < 334 if (!atomic_read(&bat_priv->fragmentation) &&
336 ETH_DATA_LEN + BAT_HEADER_LEN) 335 hard_iface->net_dev->mtu < ETH_DATA_LEN + BATADV_HEADER_LEN)
337 bat_info(hard_iface->soft_iface, 336 batadv_info(hard_iface->soft_iface,
338 "The MTU of interface %s is too small (%i) to handle the transport of batman-adv packets. If you experience problems getting traffic through try increasing the MTU to %zi.\n", 337 "The MTU of interface %s is too small (%i) to handle the transport of batman-adv packets. If you experience problems getting traffic through try increasing the MTU to %zi.\n",
339 hard_iface->net_dev->name, hard_iface->net_dev->mtu, 338 hard_iface->net_dev->name, hard_iface->net_dev->mtu,
340 ETH_DATA_LEN + BAT_HEADER_LEN); 339 ETH_DATA_LEN + BATADV_HEADER_LEN);
341 340
342 if (hardif_is_iface_up(hard_iface)) 341 if (batadv_hardif_is_iface_up(hard_iface))
343 hardif_activate_interface(hard_iface); 342 batadv_hardif_activate_interface(hard_iface);
344 else 343 else
345 bat_err(hard_iface->soft_iface, 344 batadv_err(hard_iface->soft_iface,
346 "Not using interface %s (retrying later): interface not active\n", 345 "Not using interface %s (retrying later): interface not active\n",
347 hard_iface->net_dev->name); 346 hard_iface->net_dev->name);
348 347
349 /* begin scheduling originator messages on that interface */ 348 /* begin scheduling originator messages on that interface */
350 schedule_bat_ogm(hard_iface); 349 batadv_schedule_bat_ogm(hard_iface);
351 350
352out: 351out:
353 return 0; 352 return 0;
@@ -355,67 +354,68 @@ out:
355err_dev: 354err_dev:
356 dev_put(soft_iface); 355 dev_put(soft_iface);
357err: 356err:
358 hardif_free_ref(hard_iface); 357 batadv_hardif_free_ref(hard_iface);
359 return ret; 358 return ret;
360} 359}
361 360
362void hardif_disable_interface(struct hard_iface *hard_iface) 361void batadv_hardif_disable_interface(struct batadv_hard_iface *hard_iface)
363{ 362{
364 struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface); 363 struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
365 struct hard_iface *primary_if = NULL; 364 struct batadv_hard_iface *primary_if = NULL;
366 365
367 if (hard_iface->if_status == IF_ACTIVE) 366 if (hard_iface->if_status == BATADV_IF_ACTIVE)
368 hardif_deactivate_interface(hard_iface); 367 batadv_hardif_deactivate_interface(hard_iface);
369 368
370 if (hard_iface->if_status != IF_INACTIVE) 369 if (hard_iface->if_status != BATADV_IF_INACTIVE)
371 goto out; 370 goto out;
372 371
373 bat_info(hard_iface->soft_iface, "Removing interface: %s\n", 372 batadv_info(hard_iface->soft_iface, "Removing interface: %s\n",
374 hard_iface->net_dev->name); 373 hard_iface->net_dev->name);
375 dev_remove_pack(&hard_iface->batman_adv_ptype); 374 dev_remove_pack(&hard_iface->batman_adv_ptype);
376 375
377 bat_priv->num_ifaces--; 376 bat_priv->num_ifaces--;
378 orig_hash_del_if(hard_iface, bat_priv->num_ifaces); 377 batadv_orig_hash_del_if(hard_iface, bat_priv->num_ifaces);
379 378
380 primary_if = primary_if_get_selected(bat_priv); 379 primary_if = batadv_primary_if_get_selected(bat_priv);
381 if (hard_iface == primary_if) { 380 if (hard_iface == primary_if) {
382 struct hard_iface *new_if; 381 struct batadv_hard_iface *new_if;
383 382
384 new_if = hardif_get_active(hard_iface->soft_iface); 383 new_if = batadv_hardif_get_active(hard_iface->soft_iface);
385 primary_if_select(bat_priv, new_if); 384 batadv_primary_if_select(bat_priv, new_if);
386 385
387 if (new_if) 386 if (new_if)
388 hardif_free_ref(new_if); 387 batadv_hardif_free_ref(new_if);
389 } 388 }
390 389
391 bat_priv->bat_algo_ops->bat_iface_disable(hard_iface); 390 bat_priv->bat_algo_ops->bat_iface_disable(hard_iface);
392 hard_iface->if_status = IF_NOT_IN_USE; 391 hard_iface->if_status = BATADV_IF_NOT_IN_USE;
393 392
394 /* delete all references to this hard_iface */ 393 /* delete all references to this hard_iface */
395 purge_orig_ref(bat_priv); 394 batadv_purge_orig_ref(bat_priv);
396 purge_outstanding_packets(bat_priv, hard_iface); 395 batadv_purge_outstanding_packets(bat_priv, hard_iface);
397 dev_put(hard_iface->soft_iface); 396 dev_put(hard_iface->soft_iface);
398 397
399 /* nobody uses this interface anymore */ 398 /* nobody uses this interface anymore */
400 if (!bat_priv->num_ifaces) 399 if (!bat_priv->num_ifaces)
401 softif_destroy(hard_iface->soft_iface); 400 batadv_softif_destroy(hard_iface->soft_iface);
402 401
403 hard_iface->soft_iface = NULL; 402 hard_iface->soft_iface = NULL;
404 hardif_free_ref(hard_iface); 403 batadv_hardif_free_ref(hard_iface);
405 404
406out: 405out:
407 if (primary_if) 406 if (primary_if)
408 hardif_free_ref(primary_if); 407 batadv_hardif_free_ref(primary_if);
409} 408}
410 409
411static struct hard_iface *hardif_add_interface(struct net_device *net_dev) 410static struct batadv_hard_iface *
411batadv_hardif_add_interface(struct net_device *net_dev)
412{ 412{
413 struct hard_iface *hard_iface; 413 struct batadv_hard_iface *hard_iface;
414 int ret; 414 int ret;
415 415
416 ASSERT_RTNL(); 416 ASSERT_RTNL();
417 417
418 ret = is_valid_iface(net_dev); 418 ret = batadv_is_valid_iface(net_dev);
419 if (ret != 1) 419 if (ret != 1)
420 goto out; 420 goto out;
421 421
@@ -425,23 +425,22 @@ static struct hard_iface *hardif_add_interface(struct net_device *net_dev)
425 if (!hard_iface) 425 if (!hard_iface)
426 goto release_dev; 426 goto release_dev;
427 427
428 ret = sysfs_add_hardif(&hard_iface->hardif_obj, net_dev); 428 ret = batadv_sysfs_add_hardif(&hard_iface->hardif_obj, net_dev);
429 if (ret) 429 if (ret)
430 goto free_if; 430 goto free_if;
431 431
432 hard_iface->if_num = -1; 432 hard_iface->if_num = -1;
433 hard_iface->net_dev = net_dev; 433 hard_iface->net_dev = net_dev;
434 hard_iface->soft_iface = NULL; 434 hard_iface->soft_iface = NULL;
435 hard_iface->if_status = IF_NOT_IN_USE; 435 hard_iface->if_status = BATADV_IF_NOT_IN_USE;
436 INIT_LIST_HEAD(&hard_iface->list); 436 INIT_LIST_HEAD(&hard_iface->list);
437 /* extra reference for return */ 437 /* extra reference for return */
438 atomic_set(&hard_iface->refcount, 2); 438 atomic_set(&hard_iface->refcount, 2);
439 439
440 check_known_mac_addr(hard_iface->net_dev); 440 batadv_check_known_mac_addr(hard_iface->net_dev);
441 list_add_tail_rcu(&hard_iface->list, &hardif_list); 441 list_add_tail_rcu(&hard_iface->list, &batadv_hardif_list);
442 442
443 /** 443 /* This can't be called via a bat_priv callback because
444 * This can't be called via a bat_priv callback because
445 * we have no bat_priv yet. 444 * we have no bat_priv yet.
446 */ 445 */
447 atomic_set(&hard_iface->seqno, 1); 446 atomic_set(&hard_iface->seqno, 1);
@@ -457,102 +456,104 @@ out:
457 return NULL; 456 return NULL;
458} 457}
459 458
460static void hardif_remove_interface(struct hard_iface *hard_iface) 459static void batadv_hardif_remove_interface(struct batadv_hard_iface *hard_iface)
461{ 460{
462 ASSERT_RTNL(); 461 ASSERT_RTNL();
463 462
464 /* first deactivate interface */ 463 /* first deactivate interface */
465 if (hard_iface->if_status != IF_NOT_IN_USE) 464 if (hard_iface->if_status != BATADV_IF_NOT_IN_USE)
466 hardif_disable_interface(hard_iface); 465 batadv_hardif_disable_interface(hard_iface);
467 466
468 if (hard_iface->if_status != IF_NOT_IN_USE) 467 if (hard_iface->if_status != BATADV_IF_NOT_IN_USE)
469 return; 468 return;
470 469
471 hard_iface->if_status = IF_TO_BE_REMOVED; 470 hard_iface->if_status = BATADV_IF_TO_BE_REMOVED;
472 sysfs_del_hardif(&hard_iface->hardif_obj); 471 batadv_sysfs_del_hardif(&hard_iface->hardif_obj);
473 hardif_free_ref(hard_iface); 472 batadv_hardif_free_ref(hard_iface);
474} 473}
475 474
476void hardif_remove_interfaces(void) 475void batadv_hardif_remove_interfaces(void)
477{ 476{
478 struct hard_iface *hard_iface, *hard_iface_tmp; 477 struct batadv_hard_iface *hard_iface, *hard_iface_tmp;
479 478
480 rtnl_lock(); 479 rtnl_lock();
481 list_for_each_entry_safe(hard_iface, hard_iface_tmp, 480 list_for_each_entry_safe(hard_iface, hard_iface_tmp,
482 &hardif_list, list) { 481 &batadv_hardif_list, list) {
483 list_del_rcu(&hard_iface->list); 482 list_del_rcu(&hard_iface->list);
484 hardif_remove_interface(hard_iface); 483 batadv_hardif_remove_interface(hard_iface);
485 } 484 }
486 rtnl_unlock(); 485 rtnl_unlock();
487} 486}
488 487
489static int hard_if_event(struct notifier_block *this, 488static int batadv_hard_if_event(struct notifier_block *this,
490 unsigned long event, void *ptr) 489 unsigned long event, void *ptr)
491{ 490{
492 struct net_device *net_dev = ptr; 491 struct net_device *net_dev = ptr;
493 struct hard_iface *hard_iface = hardif_get_by_netdev(net_dev); 492 struct batadv_hard_iface *hard_iface;
494 struct hard_iface *primary_if = NULL; 493 struct batadv_hard_iface *primary_if = NULL;
495 struct bat_priv *bat_priv; 494 struct batadv_priv *bat_priv;
496 495
496 hard_iface = batadv_hardif_get_by_netdev(net_dev);
497 if (!hard_iface && event == NETDEV_REGISTER) 497 if (!hard_iface && event == NETDEV_REGISTER)
498 hard_iface = hardif_add_interface(net_dev); 498 hard_iface = batadv_hardif_add_interface(net_dev);
499 499
500 if (!hard_iface) 500 if (!hard_iface)
501 goto out; 501 goto out;
502 502
503 switch (event) { 503 switch (event) {
504 case NETDEV_UP: 504 case NETDEV_UP:
505 hardif_activate_interface(hard_iface); 505 batadv_hardif_activate_interface(hard_iface);
506 break; 506 break;
507 case NETDEV_GOING_DOWN: 507 case NETDEV_GOING_DOWN:
508 case NETDEV_DOWN: 508 case NETDEV_DOWN:
509 hardif_deactivate_interface(hard_iface); 509 batadv_hardif_deactivate_interface(hard_iface);
510 break; 510 break;
511 case NETDEV_UNREGISTER: 511 case NETDEV_UNREGISTER:
512 list_del_rcu(&hard_iface->list); 512 list_del_rcu(&hard_iface->list);
513 513
514 hardif_remove_interface(hard_iface); 514 batadv_hardif_remove_interface(hard_iface);
515 break; 515 break;
516 case NETDEV_CHANGEMTU: 516 case NETDEV_CHANGEMTU:
517 if (hard_iface->soft_iface) 517 if (hard_iface->soft_iface)
518 update_min_mtu(hard_iface->soft_iface); 518 batadv_update_min_mtu(hard_iface->soft_iface);
519 break; 519 break;
520 case NETDEV_CHANGEADDR: 520 case NETDEV_CHANGEADDR:
521 if (hard_iface->if_status == IF_NOT_IN_USE) 521 if (hard_iface->if_status == BATADV_IF_NOT_IN_USE)
522 goto hardif_put; 522 goto hardif_put;
523 523
524 check_known_mac_addr(hard_iface->net_dev); 524 batadv_check_known_mac_addr(hard_iface->net_dev);
525 525
526 bat_priv = netdev_priv(hard_iface->soft_iface); 526 bat_priv = netdev_priv(hard_iface->soft_iface);
527 bat_priv->bat_algo_ops->bat_iface_update_mac(hard_iface); 527 bat_priv->bat_algo_ops->bat_iface_update_mac(hard_iface);
528 528
529 primary_if = primary_if_get_selected(bat_priv); 529 primary_if = batadv_primary_if_get_selected(bat_priv);
530 if (!primary_if) 530 if (!primary_if)
531 goto hardif_put; 531 goto hardif_put;
532 532
533 if (hard_iface == primary_if) 533 if (hard_iface == primary_if)
534 primary_if_update_addr(bat_priv, NULL); 534 batadv_primary_if_update_addr(bat_priv, NULL);
535 break; 535 break;
536 default: 536 default:
537 break; 537 break;
538 } 538 }
539 539
540hardif_put: 540hardif_put:
541 hardif_free_ref(hard_iface); 541 batadv_hardif_free_ref(hard_iface);
542out: 542out:
543 if (primary_if) 543 if (primary_if)
544 hardif_free_ref(primary_if); 544 batadv_hardif_free_ref(primary_if);
545 return NOTIFY_DONE; 545 return NOTIFY_DONE;
546} 546}
547 547
548/* This function returns true if the interface represented by ifindex is a 548/* This function returns true if the interface represented by ifindex is a
549 * 802.11 wireless device */ 549 * 802.11 wireless device
550bool is_wifi_iface(int ifindex) 550 */
551bool batadv_is_wifi_iface(int ifindex)
551{ 552{
552 struct net_device *net_device = NULL; 553 struct net_device *net_device = NULL;
553 bool ret = false; 554 bool ret = false;
554 555
555 if (ifindex == NULL_IFINDEX) 556 if (ifindex == BATADV_NULL_IFINDEX)
556 goto out; 557 goto out;
557 558
558 net_device = dev_get_by_index(&init_net, ifindex); 559 net_device = dev_get_by_index(&init_net, ifindex);
@@ -561,7 +562,8 @@ bool is_wifi_iface(int ifindex)
561 562
562#ifdef CONFIG_WIRELESS_EXT 563#ifdef CONFIG_WIRELESS_EXT
563 /* pre-cfg80211 drivers have to implement WEXT, so it is possible to 564 /* pre-cfg80211 drivers have to implement WEXT, so it is possible to
564 * check for wireless_handlers != NULL */ 565 * check for wireless_handlers != NULL
566 */
565 if (net_device->wireless_handlers) 567 if (net_device->wireless_handlers)
566 ret = true; 568 ret = true;
567 else 569 else
@@ -575,6 +577,6 @@ out:
575 return ret; 577 return ret;
576} 578}
577 579
578struct notifier_block hard_if_notifier = { 580struct notifier_block batadv_hard_if_notifier = {
579 .notifier_call = hard_if_event, 581 .notifier_call = batadv_hard_if_event,
580}; 582};
diff --git a/net/batman-adv/hard-interface.h b/net/batman-adv/hard-interface.h
index e68c5655e61..3732366e744 100644
--- a/net/batman-adv/hard-interface.h
+++ b/net/batman-adv/hard-interface.h
@@ -1,5 +1,4 @@
1/* 1/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
2 * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
3 * 2 *
4 * Marek Lindner, Simon Wunderlich 3 * Marek Lindner, Simon Wunderlich
5 * 4 *
@@ -16,44 +15,44 @@
16 * along with this program; if not, write to the Free Software 15 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA 17 * 02110-1301, USA
19 *
20 */ 18 */
21 19
22#ifndef _NET_BATMAN_ADV_HARD_INTERFACE_H_ 20#ifndef _NET_BATMAN_ADV_HARD_INTERFACE_H_
23#define _NET_BATMAN_ADV_HARD_INTERFACE_H_ 21#define _NET_BATMAN_ADV_HARD_INTERFACE_H_
24 22
25enum hard_if_state { 23enum batadv_hard_if_state {
26 IF_NOT_IN_USE, 24 BATADV_IF_NOT_IN_USE,
27 IF_TO_BE_REMOVED, 25 BATADV_IF_TO_BE_REMOVED,
28 IF_INACTIVE, 26 BATADV_IF_INACTIVE,
29 IF_ACTIVE, 27 BATADV_IF_ACTIVE,
30 IF_TO_BE_ACTIVATED, 28 BATADV_IF_TO_BE_ACTIVATED,
31 IF_I_WANT_YOU 29 BATADV_IF_I_WANT_YOU,
32}; 30};
33 31
34extern struct notifier_block hard_if_notifier; 32extern struct notifier_block batadv_hard_if_notifier;
35 33
36struct hard_iface* 34struct batadv_hard_iface*
37hardif_get_by_netdev(const struct net_device *net_dev); 35batadv_hardif_get_by_netdev(const struct net_device *net_dev);
38int hardif_enable_interface(struct hard_iface *hard_iface, 36int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface,
39 const char *iface_name); 37 const char *iface_name);
40void hardif_disable_interface(struct hard_iface *hard_iface); 38void batadv_hardif_disable_interface(struct batadv_hard_iface *hard_iface);
41void hardif_remove_interfaces(void); 39void batadv_hardif_remove_interfaces(void);
42int hardif_min_mtu(struct net_device *soft_iface); 40int batadv_hardif_min_mtu(struct net_device *soft_iface);
43void update_min_mtu(struct net_device *soft_iface); 41void batadv_update_min_mtu(struct net_device *soft_iface);
44void hardif_free_rcu(struct rcu_head *rcu); 42void batadv_hardif_free_rcu(struct rcu_head *rcu);
45bool is_wifi_iface(int ifindex); 43bool batadv_is_wifi_iface(int ifindex);
46 44
47static inline void hardif_free_ref(struct hard_iface *hard_iface) 45static inline void
46batadv_hardif_free_ref(struct batadv_hard_iface *hard_iface)
48{ 47{
49 if (atomic_dec_and_test(&hard_iface->refcount)) 48 if (atomic_dec_and_test(&hard_iface->refcount))
50 call_rcu(&hard_iface->rcu, hardif_free_rcu); 49 call_rcu(&hard_iface->rcu, batadv_hardif_free_rcu);
51} 50}
52 51
53static inline struct hard_iface *primary_if_get_selected( 52static inline struct batadv_hard_iface *
54 struct bat_priv *bat_priv) 53batadv_primary_if_get_selected(struct batadv_priv *bat_priv)
55{ 54{
56 struct hard_iface *hard_iface; 55 struct batadv_hard_iface *hard_iface;
57 56
58 rcu_read_lock(); 57 rcu_read_lock();
59 hard_iface = rcu_dereference(bat_priv->primary_if); 58 hard_iface = rcu_dereference(bat_priv->primary_if);
diff --git a/net/batman-adv/hash.c b/net/batman-adv/hash.c
index 117687bedf2..15a849c2d41 100644
--- a/net/batman-adv/hash.c
+++ b/net/batman-adv/hash.c
@@ -1,5 +1,4 @@
1/* 1/* Copyright (C) 2006-2012 B.A.T.M.A.N. contributors:
2 * Copyright (C) 2006-2012 B.A.T.M.A.N. contributors:
3 * 2 *
4 * Simon Wunderlich, Marek Lindner 3 * Simon Wunderlich, Marek Lindner
5 * 4 *
@@ -16,25 +15,24 @@
16 * along with this program; if not, write to the Free Software 15 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA 17 * 02110-1301, USA
19 *
20 */ 18 */
21 19
22#include "main.h" 20#include "main.h"
23#include "hash.h" 21#include "hash.h"
24 22
25/* clears the hash */ 23/* clears the hash */
26static void hash_init(struct hashtable_t *hash) 24static void batadv_hash_init(struct batadv_hashtable *hash)
27{ 25{
28 uint32_t i; 26 uint32_t i;
29 27
30 for (i = 0 ; i < hash->size; i++) { 28 for (i = 0; i < hash->size; i++) {
31 INIT_HLIST_HEAD(&hash->table[i]); 29 INIT_HLIST_HEAD(&hash->table[i]);
32 spin_lock_init(&hash->list_locks[i]); 30 spin_lock_init(&hash->list_locks[i]);
33 } 31 }
34} 32}
35 33
36/* free only the hashtable and the hash itself. */ 34/* free only the hashtable and the hash itself. */
37void hash_destroy(struct hashtable_t *hash) 35void batadv_hash_destroy(struct batadv_hashtable *hash)
38{ 36{
39 kfree(hash->list_locks); 37 kfree(hash->list_locks);
40 kfree(hash->table); 38 kfree(hash->table);
@@ -42,9 +40,9 @@ void hash_destroy(struct hashtable_t *hash)
42} 40}
43 41
44/* allocates and clears the hash */ 42/* allocates and clears the hash */
45struct hashtable_t *hash_new(uint32_t size) 43struct batadv_hashtable *batadv_hash_new(uint32_t size)
46{ 44{
47 struct hashtable_t *hash; 45 struct batadv_hashtable *hash;
48 46
49 hash = kmalloc(sizeof(*hash), GFP_ATOMIC); 47 hash = kmalloc(sizeof(*hash), GFP_ATOMIC);
50 if (!hash) 48 if (!hash)
@@ -60,7 +58,7 @@ struct hashtable_t *hash_new(uint32_t size)
60 goto free_table; 58 goto free_table;
61 59
62 hash->size = size; 60 hash->size = size;
63 hash_init(hash); 61 batadv_hash_init(hash);
64 return hash; 62 return hash;
65 63
66free_table: 64free_table:
@@ -69,3 +67,12 @@ free_hash:
69 kfree(hash); 67 kfree(hash);
70 return NULL; 68 return NULL;
71} 69}
70
71void batadv_hash_set_lock_class(struct batadv_hashtable *hash,
72 struct lock_class_key *key)
73{
74 uint32_t i;
75
76 for (i = 0; i < hash->size; i++)
77 lockdep_set_class(&hash->list_locks[i], key);
78}
diff --git a/net/batman-adv/hash.h b/net/batman-adv/hash.h
index d4bd7862719..977de9c75fc 100644
--- a/net/batman-adv/hash.h
+++ b/net/batman-adv/hash.h
@@ -1,5 +1,4 @@
1/* 1/* Copyright (C) 2006-2012 B.A.T.M.A.N. contributors:
2 * Copyright (C) 2006-2012 B.A.T.M.A.N. contributors:
3 * 2 *
4 * Simon Wunderlich, Marek Lindner 3 * Simon Wunderlich, Marek Lindner
5 * 4 *
@@ -16,7 +15,6 @@
16 * along with this program; if not, write to the Free Software 15 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA 17 * 02110-1301, USA
19 *
20 */ 18 */
21 19
22#ifndef _NET_BATMAN_ADV_HASH_H_ 20#ifndef _NET_BATMAN_ADV_HASH_H_
@@ -24,35 +22,42 @@
24 22
25#include <linux/list.h> 23#include <linux/list.h>
26 24
27/* callback to a compare function. should 25/* callback to a compare function. should compare 2 element datas for their
28 * compare 2 element datas for their keys, 26 * keys, return 0 if same and not 0 if not same
29 * return 0 if same and not 0 if not 27 */
30 * same */ 28typedef int (*batadv_hashdata_compare_cb)(const struct hlist_node *,
31typedef int (*hashdata_compare_cb)(const struct hlist_node *, const void *); 29 const void *);
32 30
33/* the hashfunction, should return an index 31/* the hashfunction, should return an index
34 * based on the key in the data of the first 32 * based on the key in the data of the first
35 * argument and the size the second */ 33 * argument and the size the second
36typedef uint32_t (*hashdata_choose_cb)(const void *, uint32_t); 34 */
37typedef void (*hashdata_free_cb)(struct hlist_node *, void *); 35typedef uint32_t (*batadv_hashdata_choose_cb)(const void *, uint32_t);
36typedef void (*batadv_hashdata_free_cb)(struct hlist_node *, void *);
38 37
39struct hashtable_t { 38struct batadv_hashtable {
40 struct hlist_head *table; /* the hashtable itself with the buckets */ 39 struct hlist_head *table; /* the hashtable itself with the buckets */
41 spinlock_t *list_locks; /* spinlock for each hash list entry */ 40 spinlock_t *list_locks; /* spinlock for each hash list entry */
42 uint32_t size; /* size of hashtable */ 41 uint32_t size; /* size of hashtable */
43}; 42};
44 43
45/* allocates and clears the hash */ 44/* allocates and clears the hash */
46struct hashtable_t *hash_new(uint32_t size); 45struct batadv_hashtable *batadv_hash_new(uint32_t size);
46
47/* set class key for all locks */
48void batadv_hash_set_lock_class(struct batadv_hashtable *hash,
49 struct lock_class_key *key);
47 50
48/* free only the hashtable and the hash itself. */ 51/* free only the hashtable and the hash itself. */
49void hash_destroy(struct hashtable_t *hash); 52void batadv_hash_destroy(struct batadv_hashtable *hash);
50 53
51/* remove the hash structure. if hashdata_free_cb != NULL, this function will be 54/* remove the hash structure. if hashdata_free_cb != NULL, this function will be
52 * called to remove the elements inside of the hash. if you don't remove the 55 * called to remove the elements inside of the hash. if you don't remove the
53 * elements, memory might be leaked. */ 56 * elements, memory might be leaked.
54static inline void hash_delete(struct hashtable_t *hash, 57 */
55 hashdata_free_cb free_cb, void *arg) 58static inline void batadv_hash_delete(struct batadv_hashtable *hash,
59 batadv_hashdata_free_cb free_cb,
60 void *arg)
56{ 61{
57 struct hlist_head *head; 62 struct hlist_head *head;
58 struct hlist_node *node, *node_tmp; 63 struct hlist_node *node, *node_tmp;
@@ -73,11 +78,11 @@ static inline void hash_delete(struct hashtable_t *hash,
73 spin_unlock_bh(list_lock); 78 spin_unlock_bh(list_lock);
74 } 79 }
75 80
76 hash_destroy(hash); 81 batadv_hash_destroy(hash);
77} 82}
78 83
79/** 84/**
80 * hash_add - adds data to the hashtable 85 * batadv_hash_add - adds data to the hashtable
81 * @hash: storage hash table 86 * @hash: storage hash table
82 * @compare: callback to determine if 2 hash elements are identical 87 * @compare: callback to determine if 2 hash elements are identical
83 * @choose: callback calculating the hash index 88 * @choose: callback calculating the hash index
@@ -87,11 +92,11 @@ static inline void hash_delete(struct hashtable_t *hash,
87 * Returns 0 on success, 1 if the element already is in the hash 92 * Returns 0 on success, 1 if the element already is in the hash
88 * and -1 on error. 93 * and -1 on error.
89 */ 94 */
90 95static inline int batadv_hash_add(struct batadv_hashtable *hash,
91static inline int hash_add(struct hashtable_t *hash, 96 batadv_hashdata_compare_cb compare,
92 hashdata_compare_cb compare, 97 batadv_hashdata_choose_cb choose,
93 hashdata_choose_cb choose, 98 const void *data,
94 const void *data, struct hlist_node *data_node) 99 struct hlist_node *data_node)
95{ 100{
96 uint32_t index; 101 uint32_t index;
97 int ret = -1; 102 int ret = -1;
@@ -106,26 +111,23 @@ static inline int hash_add(struct hashtable_t *hash,
106 head = &hash->table[index]; 111 head = &hash->table[index];
107 list_lock = &hash->list_locks[index]; 112 list_lock = &hash->list_locks[index];
108 113
109 rcu_read_lock(); 114 spin_lock_bh(list_lock);
110 __hlist_for_each_rcu(node, head) { 115
116 hlist_for_each(node, head) {
111 if (!compare(node, data)) 117 if (!compare(node, data))
112 continue; 118 continue;
113 119
114 ret = 1; 120 ret = 1;
115 goto err_unlock; 121 goto unlock;
116 } 122 }
117 rcu_read_unlock();
118 123
119 /* no duplicate found in list, add new element */ 124 /* no duplicate found in list, add new element */
120 spin_lock_bh(list_lock);
121 hlist_add_head_rcu(data_node, head); 125 hlist_add_head_rcu(data_node, head);
122 spin_unlock_bh(list_lock);
123 126
124 ret = 0; 127 ret = 0;
125 goto out;
126 128
127err_unlock: 129unlock:
128 rcu_read_unlock(); 130 spin_unlock_bh(list_lock);
129out: 131out:
130 return ret; 132 return ret;
131} 133}
@@ -133,10 +135,12 @@ out:
133/* removes data from hash, if found. returns pointer do data on success, so you 135/* removes data from hash, if found. returns pointer do data on success, so you
134 * can remove the used structure yourself, or NULL on error . data could be the 136 * can remove the used structure yourself, or NULL on error . data could be the
135 * structure you use with just the key filled, we just need the key for 137 * structure you use with just the key filled, we just need the key for
136 * comparing. */ 138 * comparing.
137static inline void *hash_remove(struct hashtable_t *hash, 139 */
138 hashdata_compare_cb compare, 140static inline void *batadv_hash_remove(struct batadv_hashtable *hash,
139 hashdata_choose_cb choose, void *data) 141 batadv_hashdata_compare_cb compare,
142 batadv_hashdata_choose_cb choose,
143 void *data)
140{ 144{
141 uint32_t index; 145 uint32_t index;
142 struct hlist_node *node; 146 struct hlist_node *node;
diff --git a/net/batman-adv/icmp_socket.c b/net/batman-adv/icmp_socket.c
index 2e98a57f340..bde3cf74750 100644
--- a/net/batman-adv/icmp_socket.c
+++ b/net/batman-adv/icmp_socket.c
@@ -1,5 +1,4 @@
1/* 1/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
2 * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
3 * 2 *
4 * Marek Lindner 3 * Marek Lindner
5 * 4 *
@@ -16,7 +15,6 @@
16 * along with this program; if not, write to the Free Software 15 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA 17 * 02110-1301, USA
19 *
20 */ 18 */
21 19
22#include "main.h" 20#include "main.h"
@@ -28,21 +26,21 @@
28#include "originator.h" 26#include "originator.h"
29#include "hard-interface.h" 27#include "hard-interface.h"
30 28
31static struct socket_client *socket_client_hash[256]; 29static struct batadv_socket_client *batadv_socket_client_hash[256];
32 30
33static void bat_socket_add_packet(struct socket_client *socket_client, 31static void batadv_socket_add_packet(struct batadv_socket_client *socket_client,
34 struct icmp_packet_rr *icmp_packet, 32 struct batadv_icmp_packet_rr *icmp_packet,
35 size_t icmp_len); 33 size_t icmp_len);
36 34
37void bat_socket_init(void) 35void batadv_socket_init(void)
38{ 36{
39 memset(socket_client_hash, 0, sizeof(socket_client_hash)); 37 memset(batadv_socket_client_hash, 0, sizeof(batadv_socket_client_hash));
40} 38}
41 39
42static int bat_socket_open(struct inode *inode, struct file *file) 40static int batadv_socket_open(struct inode *inode, struct file *file)
43{ 41{
44 unsigned int i; 42 unsigned int i;
45 struct socket_client *socket_client; 43 struct batadv_socket_client *socket_client;
46 44
47 nonseekable_open(inode, file); 45 nonseekable_open(inode, file);
48 46
@@ -51,14 +49,14 @@ static int bat_socket_open(struct inode *inode, struct file *file)
51 if (!socket_client) 49 if (!socket_client)
52 return -ENOMEM; 50 return -ENOMEM;
53 51
54 for (i = 0; i < ARRAY_SIZE(socket_client_hash); i++) { 52 for (i = 0; i < ARRAY_SIZE(batadv_socket_client_hash); i++) {
55 if (!socket_client_hash[i]) { 53 if (!batadv_socket_client_hash[i]) {
56 socket_client_hash[i] = socket_client; 54 batadv_socket_client_hash[i] = socket_client;
57 break; 55 break;
58 } 56 }
59 } 57 }
60 58
61 if (i == ARRAY_SIZE(socket_client_hash)) { 59 if (i == ARRAY_SIZE(batadv_socket_client_hash)) {
62 pr_err("Error - can't add another packet client: maximum number of clients reached\n"); 60 pr_err("Error - can't add another packet client: maximum number of clients reached\n");
63 kfree(socket_client); 61 kfree(socket_client);
64 return -EXFULL; 62 return -EXFULL;
@@ -73,14 +71,14 @@ static int bat_socket_open(struct inode *inode, struct file *file)
73 71
74 file->private_data = socket_client; 72 file->private_data = socket_client;
75 73
76 inc_module_count(); 74 batadv_inc_module_count();
77 return 0; 75 return 0;
78} 76}
79 77
80static int bat_socket_release(struct inode *inode, struct file *file) 78static int batadv_socket_release(struct inode *inode, struct file *file)
81{ 79{
82 struct socket_client *socket_client = file->private_data; 80 struct batadv_socket_client *socket_client = file->private_data;
83 struct socket_packet *socket_packet; 81 struct batadv_socket_packet *socket_packet;
84 struct list_head *list_pos, *list_pos_tmp; 82 struct list_head *list_pos, *list_pos_tmp;
85 83
86 spin_lock_bh(&socket_client->lock); 84 spin_lock_bh(&socket_client->lock);
@@ -88,33 +86,33 @@ static int bat_socket_release(struct inode *inode, struct file *file)
88 /* for all packets in the queue ... */ 86 /* for all packets in the queue ... */
89 list_for_each_safe(list_pos, list_pos_tmp, &socket_client->queue_list) { 87 list_for_each_safe(list_pos, list_pos_tmp, &socket_client->queue_list) {
90 socket_packet = list_entry(list_pos, 88 socket_packet = list_entry(list_pos,
91 struct socket_packet, list); 89 struct batadv_socket_packet, list);
92 90
93 list_del(list_pos); 91 list_del(list_pos);
94 kfree(socket_packet); 92 kfree(socket_packet);
95 } 93 }
96 94
97 socket_client_hash[socket_client->index] = NULL; 95 batadv_socket_client_hash[socket_client->index] = NULL;
98 spin_unlock_bh(&socket_client->lock); 96 spin_unlock_bh(&socket_client->lock);
99 97
100 kfree(socket_client); 98 kfree(socket_client);
101 dec_module_count(); 99 batadv_dec_module_count();
102 100
103 return 0; 101 return 0;
104} 102}
105 103
106static ssize_t bat_socket_read(struct file *file, char __user *buf, 104static ssize_t batadv_socket_read(struct file *file, char __user *buf,
107 size_t count, loff_t *ppos) 105 size_t count, loff_t *ppos)
108{ 106{
109 struct socket_client *socket_client = file->private_data; 107 struct batadv_socket_client *socket_client = file->private_data;
110 struct socket_packet *socket_packet; 108 struct batadv_socket_packet *socket_packet;
111 size_t packet_len; 109 size_t packet_len;
112 int error; 110 int error;
113 111
114 if ((file->f_flags & O_NONBLOCK) && (socket_client->queue_len == 0)) 112 if ((file->f_flags & O_NONBLOCK) && (socket_client->queue_len == 0))
115 return -EAGAIN; 113 return -EAGAIN;
116 114
117 if ((!buf) || (count < sizeof(struct icmp_packet))) 115 if ((!buf) || (count < sizeof(struct batadv_icmp_packet)))
118 return -EINVAL; 116 return -EINVAL;
119 117
120 if (!access_ok(VERIFY_WRITE, buf, count)) 118 if (!access_ok(VERIFY_WRITE, buf, count))
@@ -129,7 +127,7 @@ static ssize_t bat_socket_read(struct file *file, char __user *buf,
129 spin_lock_bh(&socket_client->lock); 127 spin_lock_bh(&socket_client->lock);
130 128
131 socket_packet = list_first_entry(&socket_client->queue_list, 129 socket_packet = list_first_entry(&socket_client->queue_list,
132 struct socket_packet, list); 130 struct batadv_socket_packet, list);
133 list_del(&socket_packet->list); 131 list_del(&socket_packet->list);
134 socket_client->queue_len--; 132 socket_client->queue_len--;
135 133
@@ -146,34 +144,34 @@ static ssize_t bat_socket_read(struct file *file, char __user *buf,
146 return packet_len; 144 return packet_len;
147} 145}
148 146
149static ssize_t bat_socket_write(struct file *file, const char __user *buff, 147static ssize_t batadv_socket_write(struct file *file, const char __user *buff,
150 size_t len, loff_t *off) 148 size_t len, loff_t *off)
151{ 149{
152 struct socket_client *socket_client = file->private_data; 150 struct batadv_socket_client *socket_client = file->private_data;
153 struct bat_priv *bat_priv = socket_client->bat_priv; 151 struct batadv_priv *bat_priv = socket_client->bat_priv;
154 struct hard_iface *primary_if = NULL; 152 struct batadv_hard_iface *primary_if = NULL;
155 struct sk_buff *skb; 153 struct sk_buff *skb;
156 struct icmp_packet_rr *icmp_packet; 154 struct batadv_icmp_packet_rr *icmp_packet;
157 155
158 struct orig_node *orig_node = NULL; 156 struct batadv_orig_node *orig_node = NULL;
159 struct neigh_node *neigh_node = NULL; 157 struct batadv_neigh_node *neigh_node = NULL;
160 size_t packet_len = sizeof(struct icmp_packet); 158 size_t packet_len = sizeof(struct batadv_icmp_packet);
161 159
162 if (len < sizeof(struct icmp_packet)) { 160 if (len < sizeof(struct batadv_icmp_packet)) {
163 bat_dbg(DBG_BATMAN, bat_priv, 161 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
164 "Error - can't send packet from char device: invalid packet size\n"); 162 "Error - can't send packet from char device: invalid packet size\n");
165 return -EINVAL; 163 return -EINVAL;
166 } 164 }
167 165
168 primary_if = primary_if_get_selected(bat_priv); 166 primary_if = batadv_primary_if_get_selected(bat_priv);
169 167
170 if (!primary_if) { 168 if (!primary_if) {
171 len = -EFAULT; 169 len = -EFAULT;
172 goto out; 170 goto out;
173 } 171 }
174 172
175 if (len >= sizeof(struct icmp_packet_rr)) 173 if (len >= sizeof(struct batadv_icmp_packet_rr))
176 packet_len = sizeof(struct icmp_packet_rr); 174 packet_len = sizeof(struct batadv_icmp_packet_rr);
177 175
178 skb = dev_alloc_skb(packet_len + ETH_HLEN); 176 skb = dev_alloc_skb(packet_len + ETH_HLEN);
179 if (!skb) { 177 if (!skb) {
@@ -182,81 +180,82 @@ static ssize_t bat_socket_write(struct file *file, const char __user *buff,
182 } 180 }
183 181
184 skb_reserve(skb, ETH_HLEN); 182 skb_reserve(skb, ETH_HLEN);
185 icmp_packet = (struct icmp_packet_rr *)skb_put(skb, packet_len); 183 icmp_packet = (struct batadv_icmp_packet_rr *)skb_put(skb, packet_len);
186 184
187 if (copy_from_user(icmp_packet, buff, packet_len)) { 185 if (copy_from_user(icmp_packet, buff, packet_len)) {
188 len = -EFAULT; 186 len = -EFAULT;
189 goto free_skb; 187 goto free_skb;
190 } 188 }
191 189
192 if (icmp_packet->header.packet_type != BAT_ICMP) { 190 if (icmp_packet->header.packet_type != BATADV_ICMP) {
193 bat_dbg(DBG_BATMAN, bat_priv, 191 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
194 "Error - can't send packet from char device: got bogus packet type (expected: BAT_ICMP)\n"); 192 "Error - can't send packet from char device: got bogus packet type (expected: BAT_ICMP)\n");
195 len = -EINVAL; 193 len = -EINVAL;
196 goto free_skb; 194 goto free_skb;
197 } 195 }
198 196
199 if (icmp_packet->msg_type != ECHO_REQUEST) { 197 if (icmp_packet->msg_type != BATADV_ECHO_REQUEST) {
200 bat_dbg(DBG_BATMAN, bat_priv, 198 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
201 "Error - can't send packet from char device: got bogus message type (expected: ECHO_REQUEST)\n"); 199 "Error - can't send packet from char device: got bogus message type (expected: ECHO_REQUEST)\n");
202 len = -EINVAL; 200 len = -EINVAL;
203 goto free_skb; 201 goto free_skb;
204 } 202 }
205 203
206 icmp_packet->uid = socket_client->index; 204 icmp_packet->uid = socket_client->index;
207 205
208 if (icmp_packet->header.version != COMPAT_VERSION) { 206 if (icmp_packet->header.version != BATADV_COMPAT_VERSION) {
209 icmp_packet->msg_type = PARAMETER_PROBLEM; 207 icmp_packet->msg_type = BATADV_PARAMETER_PROBLEM;
210 icmp_packet->header.version = COMPAT_VERSION; 208 icmp_packet->header.version = BATADV_COMPAT_VERSION;
211 bat_socket_add_packet(socket_client, icmp_packet, packet_len); 209 batadv_socket_add_packet(socket_client, icmp_packet,
210 packet_len);
212 goto free_skb; 211 goto free_skb;
213 } 212 }
214 213
215 if (atomic_read(&bat_priv->mesh_state) != MESH_ACTIVE) 214 if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE)
216 goto dst_unreach; 215 goto dst_unreach;
217 216
218 orig_node = orig_hash_find(bat_priv, icmp_packet->dst); 217 orig_node = batadv_orig_hash_find(bat_priv, icmp_packet->dst);
219 if (!orig_node) 218 if (!orig_node)
220 goto dst_unreach; 219 goto dst_unreach;
221 220
222 neigh_node = orig_node_get_router(orig_node); 221 neigh_node = batadv_orig_node_get_router(orig_node);
223 if (!neigh_node) 222 if (!neigh_node)
224 goto dst_unreach; 223 goto dst_unreach;
225 224
226 if (!neigh_node->if_incoming) 225 if (!neigh_node->if_incoming)
227 goto dst_unreach; 226 goto dst_unreach;
228 227
229 if (neigh_node->if_incoming->if_status != IF_ACTIVE) 228 if (neigh_node->if_incoming->if_status != BATADV_IF_ACTIVE)
230 goto dst_unreach; 229 goto dst_unreach;
231 230
232 memcpy(icmp_packet->orig, 231 memcpy(icmp_packet->orig,
233 primary_if->net_dev->dev_addr, ETH_ALEN); 232 primary_if->net_dev->dev_addr, ETH_ALEN);
234 233
235 if (packet_len == sizeof(struct icmp_packet_rr)) 234 if (packet_len == sizeof(struct batadv_icmp_packet_rr))
236 memcpy(icmp_packet->rr, 235 memcpy(icmp_packet->rr,
237 neigh_node->if_incoming->net_dev->dev_addr, ETH_ALEN); 236 neigh_node->if_incoming->net_dev->dev_addr, ETH_ALEN);
238 237
239 send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr); 238 batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
240 goto out; 239 goto out;
241 240
242dst_unreach: 241dst_unreach:
243 icmp_packet->msg_type = DESTINATION_UNREACHABLE; 242 icmp_packet->msg_type = BATADV_DESTINATION_UNREACHABLE;
244 bat_socket_add_packet(socket_client, icmp_packet, packet_len); 243 batadv_socket_add_packet(socket_client, icmp_packet, packet_len);
245free_skb: 244free_skb:
246 kfree_skb(skb); 245 kfree_skb(skb);
247out: 246out:
248 if (primary_if) 247 if (primary_if)
249 hardif_free_ref(primary_if); 248 batadv_hardif_free_ref(primary_if);
250 if (neigh_node) 249 if (neigh_node)
251 neigh_node_free_ref(neigh_node); 250 batadv_neigh_node_free_ref(neigh_node);
252 if (orig_node) 251 if (orig_node)
253 orig_node_free_ref(orig_node); 252 batadv_orig_node_free_ref(orig_node);
254 return len; 253 return len;
255} 254}
256 255
257static unsigned int bat_socket_poll(struct file *file, poll_table *wait) 256static unsigned int batadv_socket_poll(struct file *file, poll_table *wait)
258{ 257{
259 struct socket_client *socket_client = file->private_data; 258 struct batadv_socket_client *socket_client = file->private_data;
260 259
261 poll_wait(file, &socket_client->queue_wait, wait); 260 poll_wait(file, &socket_client->queue_wait, wait);
262 261
@@ -266,39 +265,39 @@ static unsigned int bat_socket_poll(struct file *file, poll_table *wait)
266 return 0; 265 return 0;
267} 266}
268 267
269static const struct file_operations fops = { 268static const struct file_operations batadv_fops = {
270 .owner = THIS_MODULE, 269 .owner = THIS_MODULE,
271 .open = bat_socket_open, 270 .open = batadv_socket_open,
272 .release = bat_socket_release, 271 .release = batadv_socket_release,
273 .read = bat_socket_read, 272 .read = batadv_socket_read,
274 .write = bat_socket_write, 273 .write = batadv_socket_write,
275 .poll = bat_socket_poll, 274 .poll = batadv_socket_poll,
276 .llseek = no_llseek, 275 .llseek = no_llseek,
277}; 276};
278 277
279int bat_socket_setup(struct bat_priv *bat_priv) 278int batadv_socket_setup(struct batadv_priv *bat_priv)
280{ 279{
281 struct dentry *d; 280 struct dentry *d;
282 281
283 if (!bat_priv->debug_dir) 282 if (!bat_priv->debug_dir)
284 goto err; 283 goto err;
285 284
286 d = debugfs_create_file(ICMP_SOCKET, S_IFREG | S_IWUSR | S_IRUSR, 285 d = debugfs_create_file(BATADV_ICMP_SOCKET, S_IFREG | S_IWUSR | S_IRUSR,
287 bat_priv->debug_dir, bat_priv, &fops); 286 bat_priv->debug_dir, bat_priv, &batadv_fops);
288 if (d) 287 if (!d)
289 goto err; 288 goto err;
290 289
291 return 0; 290 return 0;
292 291
293err: 292err:
294 return 1; 293 return -ENOMEM;
295} 294}
296 295
297static void bat_socket_add_packet(struct socket_client *socket_client, 296static void batadv_socket_add_packet(struct batadv_socket_client *socket_client,
298 struct icmp_packet_rr *icmp_packet, 297 struct batadv_icmp_packet_rr *icmp_packet,
299 size_t icmp_len) 298 size_t icmp_len)
300{ 299{
301 struct socket_packet *socket_packet; 300 struct batadv_socket_packet *socket_packet;
302 301
303 socket_packet = kmalloc(sizeof(*socket_packet), GFP_ATOMIC); 302 socket_packet = kmalloc(sizeof(*socket_packet), GFP_ATOMIC);
304 303
@@ -312,8 +311,9 @@ static void bat_socket_add_packet(struct socket_client *socket_client,
312 spin_lock_bh(&socket_client->lock); 311 spin_lock_bh(&socket_client->lock);
313 312
314 /* while waiting for the lock the socket_client could have been 313 /* while waiting for the lock the socket_client could have been
315 * deleted */ 314 * deleted
316 if (!socket_client_hash[icmp_packet->uid]) { 315 */
316 if (!batadv_socket_client_hash[icmp_packet->uid]) {
317 spin_unlock_bh(&socket_client->lock); 317 spin_unlock_bh(&socket_client->lock);
318 kfree(socket_packet); 318 kfree(socket_packet);
319 return; 319 return;
@@ -324,7 +324,8 @@ static void bat_socket_add_packet(struct socket_client *socket_client,
324 324
325 if (socket_client->queue_len > 100) { 325 if (socket_client->queue_len > 100) {
326 socket_packet = list_first_entry(&socket_client->queue_list, 326 socket_packet = list_first_entry(&socket_client->queue_list,
327 struct socket_packet, list); 327 struct batadv_socket_packet,
328 list);
328 329
329 list_del(&socket_packet->list); 330 list_del(&socket_packet->list);
330 kfree(socket_packet); 331 kfree(socket_packet);
@@ -336,11 +337,12 @@ static void bat_socket_add_packet(struct socket_client *socket_client,
336 wake_up(&socket_client->queue_wait); 337 wake_up(&socket_client->queue_wait);
337} 338}
338 339
339void bat_socket_receive_packet(struct icmp_packet_rr *icmp_packet, 340void batadv_socket_receive_packet(struct batadv_icmp_packet_rr *icmp_packet,
340 size_t icmp_len) 341 size_t icmp_len)
341{ 342{
342 struct socket_client *hash = socket_client_hash[icmp_packet->uid]; 343 struct batadv_socket_client *hash;
343 344
345 hash = batadv_socket_client_hash[icmp_packet->uid];
344 if (hash) 346 if (hash)
345 bat_socket_add_packet(hash, icmp_packet, icmp_len); 347 batadv_socket_add_packet(hash, icmp_packet, icmp_len);
346} 348}
diff --git a/net/batman-adv/icmp_socket.h b/net/batman-adv/icmp_socket.h
index 380ed4c2443..29443a1dbb5 100644
--- a/net/batman-adv/icmp_socket.h
+++ b/net/batman-adv/icmp_socket.h
@@ -1,5 +1,4 @@
1/* 1/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
2 * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
3 * 2 *
4 * Marek Lindner 3 * Marek Lindner
5 * 4 *
@@ -16,17 +15,16 @@
16 * along with this program; if not, write to the Free Software 15 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA 17 * 02110-1301, USA
19 *
20 */ 18 */
21 19
22#ifndef _NET_BATMAN_ADV_ICMP_SOCKET_H_ 20#ifndef _NET_BATMAN_ADV_ICMP_SOCKET_H_
23#define _NET_BATMAN_ADV_ICMP_SOCKET_H_ 21#define _NET_BATMAN_ADV_ICMP_SOCKET_H_
24 22
25#define ICMP_SOCKET "socket" 23#define BATADV_ICMP_SOCKET "socket"
26 24
27void bat_socket_init(void); 25void batadv_socket_init(void);
28int bat_socket_setup(struct bat_priv *bat_priv); 26int batadv_socket_setup(struct batadv_priv *bat_priv);
29void bat_socket_receive_packet(struct icmp_packet_rr *icmp_packet, 27void batadv_socket_receive_packet(struct batadv_icmp_packet_rr *icmp_packet,
30 size_t icmp_len); 28 size_t icmp_len);
31 29
32#endif /* _NET_BATMAN_ADV_ICMP_SOCKET_H_ */ 30#endif /* _NET_BATMAN_ADV_ICMP_SOCKET_H_ */
diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c
index 083a2993efe..13c88b25ab3 100644
--- a/net/batman-adv/main.c
+++ b/net/batman-adv/main.c
@@ -1,5 +1,4 @@
1/* 1/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
2 * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
3 * 2 *
4 * Marek Lindner, Simon Wunderlich 3 * Marek Lindner, Simon Wunderlich
5 * 4 *
@@ -16,12 +15,11 @@
16 * along with this program; if not, write to the Free Software 15 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA 17 * 02110-1301, USA
19 *
20 */ 18 */
21 19
22#include "main.h" 20#include "main.h"
23#include "bat_sysfs.h" 21#include "sysfs.h"
24#include "bat_debugfs.h" 22#include "debugfs.h"
25#include "routing.h" 23#include "routing.h"
26#include "send.h" 24#include "send.h"
27#include "originator.h" 25#include "originator.h"
@@ -37,61 +35,65 @@
37 35
38 36
39/* List manipulations on hardif_list have to be rtnl_lock()'ed, 37/* List manipulations on hardif_list have to be rtnl_lock()'ed,
40 * list traversals just rcu-locked */ 38 * list traversals just rcu-locked
41struct list_head hardif_list; 39 */
42static int (*recv_packet_handler[256])(struct sk_buff *, struct hard_iface *); 40struct list_head batadv_hardif_list;
43char bat_routing_algo[20] = "BATMAN IV"; 41static int (*batadv_rx_handler[256])(struct sk_buff *,
44static struct hlist_head bat_algo_list; 42 struct batadv_hard_iface *);
43char batadv_routing_algo[20] = "BATMAN_IV";
44static struct hlist_head batadv_algo_list;
45 45
46unsigned char broadcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; 46unsigned char batadv_broadcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
47 47
48struct workqueue_struct *bat_event_workqueue; 48struct workqueue_struct *batadv_event_workqueue;
49 49
50static void recv_handler_init(void); 50static void batadv_recv_handler_init(void);
51 51
52static int __init batman_init(void) 52static int __init batadv_init(void)
53{ 53{
54 INIT_LIST_HEAD(&hardif_list); 54 INIT_LIST_HEAD(&batadv_hardif_list);
55 INIT_HLIST_HEAD(&bat_algo_list); 55 INIT_HLIST_HEAD(&batadv_algo_list);
56 56
57 recv_handler_init(); 57 batadv_recv_handler_init();
58 58
59 bat_iv_init(); 59 batadv_iv_init();
60 60
61 /* the name should not be longer than 10 chars - see 61 /* the name should not be longer than 10 chars - see
62 * http://lwn.net/Articles/23634/ */ 62 * http://lwn.net/Articles/23634/
63 bat_event_workqueue = create_singlethread_workqueue("bat_events"); 63 */
64 batadv_event_workqueue = create_singlethread_workqueue("bat_events");
64 65
65 if (!bat_event_workqueue) 66 if (!batadv_event_workqueue)
66 return -ENOMEM; 67 return -ENOMEM;
67 68
68 bat_socket_init(); 69 batadv_socket_init();
69 debugfs_init(); 70 batadv_debugfs_init();
70 71
71 register_netdevice_notifier(&hard_if_notifier); 72 register_netdevice_notifier(&batadv_hard_if_notifier);
72 73
73 pr_info("B.A.T.M.A.N. advanced %s (compatibility version %i) loaded\n", 74 pr_info("B.A.T.M.A.N. advanced %s (compatibility version %i) loaded\n",
74 SOURCE_VERSION, COMPAT_VERSION); 75 BATADV_SOURCE_VERSION, BATADV_COMPAT_VERSION);
75 76
76 return 0; 77 return 0;
77} 78}
78 79
79static void __exit batman_exit(void) 80static void __exit batadv_exit(void)
80{ 81{
81 debugfs_destroy(); 82 batadv_debugfs_destroy();
82 unregister_netdevice_notifier(&hard_if_notifier); 83 unregister_netdevice_notifier(&batadv_hard_if_notifier);
83 hardif_remove_interfaces(); 84 batadv_hardif_remove_interfaces();
84 85
85 flush_workqueue(bat_event_workqueue); 86 flush_workqueue(batadv_event_workqueue);
86 destroy_workqueue(bat_event_workqueue); 87 destroy_workqueue(batadv_event_workqueue);
87 bat_event_workqueue = NULL; 88 batadv_event_workqueue = NULL;
88 89
89 rcu_barrier(); 90 rcu_barrier();
90} 91}
91 92
92int mesh_init(struct net_device *soft_iface) 93int batadv_mesh_init(struct net_device *soft_iface)
93{ 94{
94 struct bat_priv *bat_priv = netdev_priv(soft_iface); 95 struct batadv_priv *bat_priv = netdev_priv(soft_iface);
96 int ret;
95 97
96 spin_lock_init(&bat_priv->forw_bat_list_lock); 98 spin_lock_init(&bat_priv->forw_bat_list_lock);
97 spin_lock_init(&bat_priv->forw_bcast_list_lock); 99 spin_lock_init(&bat_priv->forw_bcast_list_lock);
@@ -110,72 +112,77 @@ int mesh_init(struct net_device *soft_iface)
110 INIT_LIST_HEAD(&bat_priv->tt_req_list); 112 INIT_LIST_HEAD(&bat_priv->tt_req_list);
111 INIT_LIST_HEAD(&bat_priv->tt_roam_list); 113 INIT_LIST_HEAD(&bat_priv->tt_roam_list);
112 114
113 if (originator_init(bat_priv) < 1) 115 ret = batadv_originator_init(bat_priv);
116 if (ret < 0)
114 goto err; 117 goto err;
115 118
116 if (tt_init(bat_priv) < 1) 119 ret = batadv_tt_init(bat_priv);
120 if (ret < 0)
117 goto err; 121 goto err;
118 122
119 tt_local_add(soft_iface, soft_iface->dev_addr, NULL_IFINDEX); 123 batadv_tt_local_add(soft_iface, soft_iface->dev_addr,
124 BATADV_NULL_IFINDEX);
120 125
121 if (vis_init(bat_priv) < 1) 126 ret = batadv_vis_init(bat_priv);
127 if (ret < 0)
122 goto err; 128 goto err;
123 129
124 if (bla_init(bat_priv) < 1) 130 ret = batadv_bla_init(bat_priv);
131 if (ret < 0)
125 goto err; 132 goto err;
126 133
127 atomic_set(&bat_priv->gw_reselect, 0); 134 atomic_set(&bat_priv->gw_reselect, 0);
128 atomic_set(&bat_priv->mesh_state, MESH_ACTIVE); 135 atomic_set(&bat_priv->mesh_state, BATADV_MESH_ACTIVE);
129 goto end;
130
131err:
132 mesh_free(soft_iface);
133 return -1;
134 136
135end:
136 return 0; 137 return 0;
138
139err:
140 batadv_mesh_free(soft_iface);
141 return ret;
137} 142}
138 143
139void mesh_free(struct net_device *soft_iface) 144void batadv_mesh_free(struct net_device *soft_iface)
140{ 145{
141 struct bat_priv *bat_priv = netdev_priv(soft_iface); 146 struct batadv_priv *bat_priv = netdev_priv(soft_iface);
142 147
143 atomic_set(&bat_priv->mesh_state, MESH_DEACTIVATING); 148 atomic_set(&bat_priv->mesh_state, BATADV_MESH_DEACTIVATING);
144 149
145 purge_outstanding_packets(bat_priv, NULL); 150 batadv_purge_outstanding_packets(bat_priv, NULL);
146 151
147 vis_quit(bat_priv); 152 batadv_vis_quit(bat_priv);
148 153
149 gw_node_purge(bat_priv); 154 batadv_gw_node_purge(bat_priv);
150 originator_free(bat_priv); 155 batadv_originator_free(bat_priv);
151 156
152 tt_free(bat_priv); 157 batadv_tt_free(bat_priv);
153 158
154 bla_free(bat_priv); 159 batadv_bla_free(bat_priv);
155 160
156 atomic_set(&bat_priv->mesh_state, MESH_INACTIVE); 161 free_percpu(bat_priv->bat_counters);
162
163 atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
157} 164}
158 165
159void inc_module_count(void) 166void batadv_inc_module_count(void)
160{ 167{
161 try_module_get(THIS_MODULE); 168 try_module_get(THIS_MODULE);
162} 169}
163 170
164void dec_module_count(void) 171void batadv_dec_module_count(void)
165{ 172{
166 module_put(THIS_MODULE); 173 module_put(THIS_MODULE);
167} 174}
168 175
169int is_my_mac(const uint8_t *addr) 176int batadv_is_my_mac(const uint8_t *addr)
170{ 177{
171 const struct hard_iface *hard_iface; 178 const struct batadv_hard_iface *hard_iface;
172 179
173 rcu_read_lock(); 180 rcu_read_lock();
174 list_for_each_entry_rcu(hard_iface, &hardif_list, list) { 181 list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
175 if (hard_iface->if_status != IF_ACTIVE) 182 if (hard_iface->if_status != BATADV_IF_ACTIVE)
176 continue; 183 continue;
177 184
178 if (compare_eth(hard_iface->net_dev->dev_addr, addr)) { 185 if (batadv_compare_eth(hard_iface->net_dev->dev_addr, addr)) {
179 rcu_read_unlock(); 186 rcu_read_unlock();
180 return 1; 187 return 1;
181 } 188 }
@@ -184,8 +191,8 @@ int is_my_mac(const uint8_t *addr)
184 return 0; 191 return 0;
185} 192}
186 193
187static int recv_unhandled_packet(struct sk_buff *skb, 194static int batadv_recv_unhandled_packet(struct sk_buff *skb,
188 struct hard_iface *recv_if) 195 struct batadv_hard_iface *recv_if)
189{ 196{
190 return NET_RX_DROP; 197 return NET_RX_DROP;
191} 198}
@@ -193,16 +200,18 @@ static int recv_unhandled_packet(struct sk_buff *skb,
193/* incoming packets with the batman ethertype received on any active hard 200/* incoming packets with the batman ethertype received on any active hard
194 * interface 201 * interface
195 */ 202 */
196int batman_skb_recv(struct sk_buff *skb, struct net_device *dev, 203int batadv_batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
197 struct packet_type *ptype, struct net_device *orig_dev) 204 struct packet_type *ptype,
205 struct net_device *orig_dev)
198{ 206{
199 struct bat_priv *bat_priv; 207 struct batadv_priv *bat_priv;
200 struct batman_ogm_packet *batman_ogm_packet; 208 struct batadv_ogm_packet *batadv_ogm_packet;
201 struct hard_iface *hard_iface; 209 struct batadv_hard_iface *hard_iface;
202 uint8_t idx; 210 uint8_t idx;
203 int ret; 211 int ret;
204 212
205 hard_iface = container_of(ptype, struct hard_iface, batman_adv_ptype); 213 hard_iface = container_of(ptype, struct batadv_hard_iface,
214 batman_adv_ptype);
206 skb = skb_share_check(skb, GFP_ATOMIC); 215 skb = skb_share_check(skb, GFP_ATOMIC);
207 216
208 /* skb was released by skb_share_check() */ 217 /* skb was released by skb_share_check() */
@@ -222,27 +231,27 @@ int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
222 231
223 bat_priv = netdev_priv(hard_iface->soft_iface); 232 bat_priv = netdev_priv(hard_iface->soft_iface);
224 233
225 if (atomic_read(&bat_priv->mesh_state) != MESH_ACTIVE) 234 if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE)
226 goto err_free; 235 goto err_free;
227 236
228 /* discard frames on not active interfaces */ 237 /* discard frames on not active interfaces */
229 if (hard_iface->if_status != IF_ACTIVE) 238 if (hard_iface->if_status != BATADV_IF_ACTIVE)
230 goto err_free; 239 goto err_free;
231 240
232 batman_ogm_packet = (struct batman_ogm_packet *)skb->data; 241 batadv_ogm_packet = (struct batadv_ogm_packet *)skb->data;
233 242
234 if (batman_ogm_packet->header.version != COMPAT_VERSION) { 243 if (batadv_ogm_packet->header.version != BATADV_COMPAT_VERSION) {
235 bat_dbg(DBG_BATMAN, bat_priv, 244 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
236 "Drop packet: incompatible batman version (%i)\n", 245 "Drop packet: incompatible batman version (%i)\n",
237 batman_ogm_packet->header.version); 246 batadv_ogm_packet->header.version);
238 goto err_free; 247 goto err_free;
239 } 248 }
240 249
241 /* all receive handlers return whether they received or reused 250 /* all receive handlers return whether they received or reused
242 * the supplied skb. if not, we have to free the skb. 251 * the supplied skb. if not, we have to free the skb.
243 */ 252 */
244 idx = batman_ogm_packet->header.packet_type; 253 idx = batadv_ogm_packet->header.packet_type;
245 ret = (*recv_packet_handler[idx])(skb, hard_iface); 254 ret = (*batadv_rx_handler[idx])(skb, hard_iface);
246 255
247 if (ret == NET_RX_DROP) 256 if (ret == NET_RX_DROP)
248 kfree_skb(skb); 257 kfree_skb(skb);
@@ -259,51 +268,52 @@ err_out:
259 return NET_RX_DROP; 268 return NET_RX_DROP;
260} 269}
261 270
262static void recv_handler_init(void) 271static void batadv_recv_handler_init(void)
263{ 272{
264 int i; 273 int i;
265 274
266 for (i = 0; i < ARRAY_SIZE(recv_packet_handler); i++) 275 for (i = 0; i < ARRAY_SIZE(batadv_rx_handler); i++)
267 recv_packet_handler[i] = recv_unhandled_packet; 276 batadv_rx_handler[i] = batadv_recv_unhandled_packet;
268 277
269 /* batman icmp packet */ 278 /* batman icmp packet */
270 recv_packet_handler[BAT_ICMP] = recv_icmp_packet; 279 batadv_rx_handler[BATADV_ICMP] = batadv_recv_icmp_packet;
271 /* unicast packet */ 280 /* unicast packet */
272 recv_packet_handler[BAT_UNICAST] = recv_unicast_packet; 281 batadv_rx_handler[BATADV_UNICAST] = batadv_recv_unicast_packet;
273 /* fragmented unicast packet */ 282 /* fragmented unicast packet */
274 recv_packet_handler[BAT_UNICAST_FRAG] = recv_ucast_frag_packet; 283 batadv_rx_handler[BATADV_UNICAST_FRAG] = batadv_recv_ucast_frag_packet;
275 /* broadcast packet */ 284 /* broadcast packet */
276 recv_packet_handler[BAT_BCAST] = recv_bcast_packet; 285 batadv_rx_handler[BATADV_BCAST] = batadv_recv_bcast_packet;
277 /* vis packet */ 286 /* vis packet */
278 recv_packet_handler[BAT_VIS] = recv_vis_packet; 287 batadv_rx_handler[BATADV_VIS] = batadv_recv_vis_packet;
279 /* Translation table query (request or response) */ 288 /* Translation table query (request or response) */
280 recv_packet_handler[BAT_TT_QUERY] = recv_tt_query; 289 batadv_rx_handler[BATADV_TT_QUERY] = batadv_recv_tt_query;
281 /* Roaming advertisement */ 290 /* Roaming advertisement */
282 recv_packet_handler[BAT_ROAM_ADV] = recv_roam_adv; 291 batadv_rx_handler[BATADV_ROAM_ADV] = batadv_recv_roam_adv;
283} 292}
284 293
285int recv_handler_register(uint8_t packet_type, 294int
286 int (*recv_handler)(struct sk_buff *, 295batadv_recv_handler_register(uint8_t packet_type,
287 struct hard_iface *)) 296 int (*recv_handler)(struct sk_buff *,
297 struct batadv_hard_iface *))
288{ 298{
289 if (recv_packet_handler[packet_type] != &recv_unhandled_packet) 299 if (batadv_rx_handler[packet_type] != &batadv_recv_unhandled_packet)
290 return -EBUSY; 300 return -EBUSY;
291 301
292 recv_packet_handler[packet_type] = recv_handler; 302 batadv_rx_handler[packet_type] = recv_handler;
293 return 0; 303 return 0;
294} 304}
295 305
296void recv_handler_unregister(uint8_t packet_type) 306void batadv_recv_handler_unregister(uint8_t packet_type)
297{ 307{
298 recv_packet_handler[packet_type] = recv_unhandled_packet; 308 batadv_rx_handler[packet_type] = batadv_recv_unhandled_packet;
299} 309}
300 310
301static struct bat_algo_ops *bat_algo_get(char *name) 311static struct batadv_algo_ops *batadv_algo_get(char *name)
302{ 312{
303 struct bat_algo_ops *bat_algo_ops = NULL, *bat_algo_ops_tmp; 313 struct batadv_algo_ops *bat_algo_ops = NULL, *bat_algo_ops_tmp;
304 struct hlist_node *node; 314 struct hlist_node *node;
305 315
306 hlist_for_each_entry(bat_algo_ops_tmp, node, &bat_algo_list, list) { 316 hlist_for_each_entry(bat_algo_ops_tmp, node, &batadv_algo_list, list) {
307 if (strcmp(bat_algo_ops_tmp->name, name) != 0) 317 if (strcmp(bat_algo_ops_tmp->name, name) != 0)
308 continue; 318 continue;
309 319
@@ -314,15 +324,16 @@ static struct bat_algo_ops *bat_algo_get(char *name)
314 return bat_algo_ops; 324 return bat_algo_ops;
315} 325}
316 326
317int bat_algo_register(struct bat_algo_ops *bat_algo_ops) 327int batadv_algo_register(struct batadv_algo_ops *bat_algo_ops)
318{ 328{
319 struct bat_algo_ops *bat_algo_ops_tmp; 329 struct batadv_algo_ops *bat_algo_ops_tmp;
320 int ret = -1; 330 int ret;
321 331
322 bat_algo_ops_tmp = bat_algo_get(bat_algo_ops->name); 332 bat_algo_ops_tmp = batadv_algo_get(bat_algo_ops->name);
323 if (bat_algo_ops_tmp) { 333 if (bat_algo_ops_tmp) {
324 pr_info("Trying to register already registered routing algorithm: %s\n", 334 pr_info("Trying to register already registered routing algorithm: %s\n",
325 bat_algo_ops->name); 335 bat_algo_ops->name);
336 ret = -EEXIST;
326 goto out; 337 goto out;
327 } 338 }
328 339
@@ -335,23 +346,24 @@ int bat_algo_register(struct bat_algo_ops *bat_algo_ops)
335 !bat_algo_ops->bat_ogm_emit) { 346 !bat_algo_ops->bat_ogm_emit) {
336 pr_info("Routing algo '%s' does not implement required ops\n", 347 pr_info("Routing algo '%s' does not implement required ops\n",
337 bat_algo_ops->name); 348 bat_algo_ops->name);
349 ret = -EINVAL;
338 goto out; 350 goto out;
339 } 351 }
340 352
341 INIT_HLIST_NODE(&bat_algo_ops->list); 353 INIT_HLIST_NODE(&bat_algo_ops->list);
342 hlist_add_head(&bat_algo_ops->list, &bat_algo_list); 354 hlist_add_head(&bat_algo_ops->list, &batadv_algo_list);
343 ret = 0; 355 ret = 0;
344 356
345out: 357out:
346 return ret; 358 return ret;
347} 359}
348 360
349int bat_algo_select(struct bat_priv *bat_priv, char *name) 361int batadv_algo_select(struct batadv_priv *bat_priv, char *name)
350{ 362{
351 struct bat_algo_ops *bat_algo_ops; 363 struct batadv_algo_ops *bat_algo_ops;
352 int ret = -1; 364 int ret = -EINVAL;
353 365
354 bat_algo_ops = bat_algo_get(name); 366 bat_algo_ops = batadv_algo_get(name);
355 if (!bat_algo_ops) 367 if (!bat_algo_ops)
356 goto out; 368 goto out;
357 369
@@ -362,50 +374,56 @@ out:
362 return ret; 374 return ret;
363} 375}
364 376
365int bat_algo_seq_print_text(struct seq_file *seq, void *offset) 377int batadv_algo_seq_print_text(struct seq_file *seq, void *offset)
366{ 378{
367 struct bat_algo_ops *bat_algo_ops; 379 struct batadv_algo_ops *bat_algo_ops;
368 struct hlist_node *node; 380 struct hlist_node *node;
369 381
370 seq_printf(seq, "Available routing algorithms:\n"); 382 seq_printf(seq, "Available routing algorithms:\n");
371 383
372 hlist_for_each_entry(bat_algo_ops, node, &bat_algo_list, list) { 384 hlist_for_each_entry(bat_algo_ops, node, &batadv_algo_list, list) {
373 seq_printf(seq, "%s\n", bat_algo_ops->name); 385 seq_printf(seq, "%s\n", bat_algo_ops->name);
374 } 386 }
375 387
376 return 0; 388 return 0;
377} 389}
378 390
379static int param_set_ra(const char *val, const struct kernel_param *kp) 391static int batadv_param_set_ra(const char *val, const struct kernel_param *kp)
380{ 392{
381 struct bat_algo_ops *bat_algo_ops; 393 struct batadv_algo_ops *bat_algo_ops;
394 char *algo_name = (char *)val;
395 size_t name_len = strlen(algo_name);
396
397 if (algo_name[name_len - 1] == '\n')
398 algo_name[name_len - 1] = '\0';
382 399
383 bat_algo_ops = bat_algo_get((char *)val); 400 bat_algo_ops = batadv_algo_get(algo_name);
384 if (!bat_algo_ops) { 401 if (!bat_algo_ops) {
385 pr_err("Routing algorithm '%s' is not supported\n", val); 402 pr_err("Routing algorithm '%s' is not supported\n", algo_name);
386 return -EINVAL; 403 return -EINVAL;
387 } 404 }
388 405
389 return param_set_copystring(val, kp); 406 return param_set_copystring(algo_name, kp);
390} 407}
391 408
392static const struct kernel_param_ops param_ops_ra = { 409static const struct kernel_param_ops batadv_param_ops_ra = {
393 .set = param_set_ra, 410 .set = batadv_param_set_ra,
394 .get = param_get_string, 411 .get = param_get_string,
395}; 412};
396 413
397static struct kparam_string __param_string_ra = { 414static struct kparam_string batadv_param_string_ra = {
398 .maxlen = sizeof(bat_routing_algo), 415 .maxlen = sizeof(batadv_routing_algo),
399 .string = bat_routing_algo, 416 .string = batadv_routing_algo,
400}; 417};
401 418
402module_param_cb(routing_algo, &param_ops_ra, &__param_string_ra, 0644); 419module_param_cb(routing_algo, &batadv_param_ops_ra, &batadv_param_string_ra,
403module_init(batman_init); 420 0644);
404module_exit(batman_exit); 421module_init(batadv_init);
422module_exit(batadv_exit);
405 423
406MODULE_LICENSE("GPL"); 424MODULE_LICENSE("GPL");
407 425
408MODULE_AUTHOR(DRIVER_AUTHOR); 426MODULE_AUTHOR(BATADV_DRIVER_AUTHOR);
409MODULE_DESCRIPTION(DRIVER_DESC); 427MODULE_DESCRIPTION(BATADV_DRIVER_DESC);
410MODULE_SUPPORTED_DEVICE(DRIVER_DEVICE); 428MODULE_SUPPORTED_DEVICE(BATADV_DRIVER_DEVICE);
411MODULE_VERSION(SOURCE_VERSION); 429MODULE_VERSION(BATADV_SOURCE_VERSION);
diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h
index f4a3ec00347..5d8fa075794 100644
--- a/net/batman-adv/main.h
+++ b/net/batman-adv/main.h
@@ -1,5 +1,4 @@
1/* 1/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
2 * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
3 * 2 *
4 * Marek Lindner, Simon Wunderlich 3 * Marek Lindner, Simon Wunderlich
5 * 4 *
@@ -16,100 +15,106 @@
16 * along with this program; if not, write to the Free Software 15 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA 17 * 02110-1301, USA
19 *
20 */ 18 */
21 19
22#ifndef _NET_BATMAN_ADV_MAIN_H_ 20#ifndef _NET_BATMAN_ADV_MAIN_H_
23#define _NET_BATMAN_ADV_MAIN_H_ 21#define _NET_BATMAN_ADV_MAIN_H_
24 22
25#define DRIVER_AUTHOR "Marek Lindner <lindner_marek@yahoo.de>, " \ 23#define BATADV_DRIVER_AUTHOR "Marek Lindner <lindner_marek@yahoo.de>, " \
26 "Simon Wunderlich <siwu@hrz.tu-chemnitz.de>" 24 "Simon Wunderlich <siwu@hrz.tu-chemnitz.de>"
27#define DRIVER_DESC "B.A.T.M.A.N. advanced" 25#define BATADV_DRIVER_DESC "B.A.T.M.A.N. advanced"
28#define DRIVER_DEVICE "batman-adv" 26#define BATADV_DRIVER_DEVICE "batman-adv"
29 27
30#ifndef SOURCE_VERSION 28#ifndef BATADV_SOURCE_VERSION
31#define SOURCE_VERSION "2012.2.0" 29#define BATADV_SOURCE_VERSION "2012.3.0"
32#endif 30#endif
33 31
34/* B.A.T.M.A.N. parameters */ 32/* B.A.T.M.A.N. parameters */
35 33
36#define TQ_MAX_VALUE 255 34#define BATADV_TQ_MAX_VALUE 255
37#define JITTER 20 35#define BATADV_JITTER 20
38 36
39 /* Time To Live of broadcast messages */ 37/* Time To Live of broadcast messages */
40#define TTL 50 38#define BATADV_TTL 50
41 39
42/* purge originators after time in seconds if no valid packet comes in 40/* purge originators after time in seconds if no valid packet comes in
43 * -> TODO: check influence on TQ_LOCAL_WINDOW_SIZE */ 41 * -> TODO: check influence on BATADV_TQ_LOCAL_WINDOW_SIZE
44#define PURGE_TIMEOUT 200000 /* 200 seconds */ 42 */
45#define TT_LOCAL_TIMEOUT 3600000 /* in miliseconds */ 43#define BATADV_PURGE_TIMEOUT 200000 /* 200 seconds */
46#define TT_CLIENT_ROAM_TIMEOUT 600000 /* in miliseconds */ 44#define BATADV_TT_LOCAL_TIMEOUT 3600000 /* in miliseconds */
45#define BATADV_TT_CLIENT_ROAM_TIMEOUT 600000 /* in miliseconds */
47/* sliding packet range of received originator messages in sequence numbers 46/* sliding packet range of received originator messages in sequence numbers
48 * (should be a multiple of our word size) */ 47 * (should be a multiple of our word size)
49#define TQ_LOCAL_WINDOW_SIZE 64 48 */
50#define TT_REQUEST_TIMEOUT 3000 /* miliseconds we have to keep 49#define BATADV_TQ_LOCAL_WINDOW_SIZE 64
51 * pending tt_req */ 50/* miliseconds we have to keep pending tt_req */
51#define BATADV_TT_REQUEST_TIMEOUT 3000
52 52
53#define TQ_GLOBAL_WINDOW_SIZE 5 53#define BATADV_TQ_GLOBAL_WINDOW_SIZE 5
54#define TQ_LOCAL_BIDRECT_SEND_MINIMUM 1 54#define BATADV_TQ_LOCAL_BIDRECT_SEND_MINIMUM 1
55#define TQ_LOCAL_BIDRECT_RECV_MINIMUM 1 55#define BATADV_TQ_LOCAL_BIDRECT_RECV_MINIMUM 1
56#define TQ_TOTAL_BIDRECT_LIMIT 1 56#define BATADV_TQ_TOTAL_BIDRECT_LIMIT 1
57 57
58#define TT_OGM_APPEND_MAX 3 /* number of OGMs sent with the last tt diff */ 58/* number of OGMs sent with the last tt diff */
59#define BATADV_TT_OGM_APPEND_MAX 3
59 60
60#define ROAMING_MAX_TIME 20000 /* Time in which a client can roam at most 61/* Time in which a client can roam at most ROAMING_MAX_COUNT times in
61 * ROAMING_MAX_COUNT times in miliseconds*/ 62 * miliseconds
62#define ROAMING_MAX_COUNT 5 63 */
64#define BATADV_ROAMING_MAX_TIME 20000
65#define BATADV_ROAMING_MAX_COUNT 5
63 66
64#define NO_FLAGS 0 67#define BATADV_NO_FLAGS 0
65 68
66#define NULL_IFINDEX 0 /* dummy ifindex used to avoid iface checks */ 69#define BATADV_NULL_IFINDEX 0 /* dummy ifindex used to avoid iface checks */
67 70
68#define NUM_WORDS BITS_TO_LONGS(TQ_LOCAL_WINDOW_SIZE) 71#define BATADV_NUM_WORDS BITS_TO_LONGS(BATADV_TQ_LOCAL_WINDOW_SIZE)
69 72
70#define LOG_BUF_LEN 8192 /* has to be a power of 2 */ 73#define BATADV_LOG_BUF_LEN 8192 /* has to be a power of 2 */
71 74
72#define VIS_INTERVAL 5000 /* 5 seconds */ 75#define BATADV_VIS_INTERVAL 5000 /* 5 seconds */
73 76
74/* how much worse secondary interfaces may be to be considered as bonding 77/* how much worse secondary interfaces may be to be considered as bonding
75 * candidates */ 78 * candidates
76#define BONDING_TQ_THRESHOLD 50 79 */
80#define BATADV_BONDING_TQ_THRESHOLD 50
77 81
78/* should not be bigger than 512 bytes or change the size of 82/* should not be bigger than 512 bytes or change the size of
79 * forw_packet->direct_link_flags */ 83 * forw_packet->direct_link_flags
80#define MAX_AGGREGATION_BYTES 512 84 */
81#define MAX_AGGREGATION_MS 100 85#define BATADV_MAX_AGGREGATION_BYTES 512
86#define BATADV_MAX_AGGREGATION_MS 100
82 87
83#define BLA_PERIOD_LENGTH 10000 /* 10 seconds */ 88#define BATADV_BLA_PERIOD_LENGTH 10000 /* 10 seconds */
84#define BLA_BACKBONE_TIMEOUT (BLA_PERIOD_LENGTH * 3) 89#define BATADV_BLA_BACKBONE_TIMEOUT (BATADV_BLA_PERIOD_LENGTH * 3)
85#define BLA_CLAIM_TIMEOUT (BLA_PERIOD_LENGTH * 10) 90#define BATADV_BLA_CLAIM_TIMEOUT (BATADV_BLA_PERIOD_LENGTH * 10)
86 91
87#define DUPLIST_SIZE 16 92#define BATADV_DUPLIST_SIZE 16
88#define DUPLIST_TIMEOUT 500 /* 500 ms */ 93#define BATADV_DUPLIST_TIMEOUT 500 /* 500 ms */
89/* don't reset again within 30 seconds */ 94/* don't reset again within 30 seconds */
90#define RESET_PROTECTION_MS 30000 95#define BATADV_RESET_PROTECTION_MS 30000
91#define EXPECTED_SEQNO_RANGE 65536 96#define BATADV_EXPECTED_SEQNO_RANGE 65536
92 97
93enum mesh_state { 98enum batadv_mesh_state {
94 MESH_INACTIVE, 99 BATADV_MESH_INACTIVE,
95 MESH_ACTIVE, 100 BATADV_MESH_ACTIVE,
96 MESH_DEACTIVATING 101 BATADV_MESH_DEACTIVATING,
97}; 102};
98 103
99#define BCAST_QUEUE_LEN 256 104#define BATADV_BCAST_QUEUE_LEN 256
100#define BATMAN_QUEUE_LEN 256 105#define BATADV_BATMAN_QUEUE_LEN 256
101 106
102enum uev_action { 107enum batadv_uev_action {
103 UEV_ADD = 0, 108 BATADV_UEV_ADD = 0,
104 UEV_DEL, 109 BATADV_UEV_DEL,
105 UEV_CHANGE 110 BATADV_UEV_CHANGE,
106}; 111};
107 112
108enum uev_type { 113enum batadv_uev_type {
109 UEV_GW = 0 114 BATADV_UEV_GW = 0,
110}; 115};
111 116
112#define GW_THRESHOLD 50 117#define BATADV_GW_THRESHOLD 50
113 118
114/* Debug Messages */ 119/* Debug Messages */
115#ifdef pr_fmt 120#ifdef pr_fmt
@@ -119,12 +124,12 @@ enum uev_type {
119#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 124#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
120 125
121/* all messages related to routing / flooding / broadcasting / etc */ 126/* all messages related to routing / flooding / broadcasting / etc */
122enum dbg_level { 127enum batadv_dbg_level {
123 DBG_BATMAN = 1 << 0, 128 BATADV_DBG_BATMAN = 1 << 0,
124 DBG_ROUTES = 1 << 1, /* route added / changed / deleted */ 129 BATADV_DBG_ROUTES = 1 << 1, /* route added / changed / deleted */
125 DBG_TT = 1 << 2, /* translation table operations */ 130 BATADV_DBG_TT = 1 << 2, /* translation table operations */
126 DBG_BLA = 1 << 3, /* bridge loop avoidance */ 131 BATADV_DBG_BLA = 1 << 3, /* bridge loop avoidance */
127 DBG_ALL = 15 132 BATADV_DBG_ALL = 15,
128}; 133};
129 134
130/* Kernel headers */ 135/* Kernel headers */
@@ -138,73 +143,75 @@ enum dbg_level {
138#include <linux/kthread.h> /* kernel threads */ 143#include <linux/kthread.h> /* kernel threads */
139#include <linux/pkt_sched.h> /* schedule types */ 144#include <linux/pkt_sched.h> /* schedule types */
140#include <linux/workqueue.h> /* workqueue */ 145#include <linux/workqueue.h> /* workqueue */
146#include <linux/percpu.h>
141#include <linux/slab.h> 147#include <linux/slab.h>
142#include <net/sock.h> /* struct sock */ 148#include <net/sock.h> /* struct sock */
143#include <linux/jiffies.h> 149#include <linux/jiffies.h>
144#include <linux/seq_file.h> 150#include <linux/seq_file.h>
145#include "types.h" 151#include "types.h"
146 152
147extern char bat_routing_algo[]; 153extern char batadv_routing_algo[];
148extern struct list_head hardif_list; 154extern struct list_head batadv_hardif_list;
149 155
150extern unsigned char broadcast_addr[]; 156extern unsigned char batadv_broadcast_addr[];
151extern struct workqueue_struct *bat_event_workqueue; 157extern struct workqueue_struct *batadv_event_workqueue;
152 158
153int mesh_init(struct net_device *soft_iface); 159int batadv_mesh_init(struct net_device *soft_iface);
154void mesh_free(struct net_device *soft_iface); 160void batadv_mesh_free(struct net_device *soft_iface);
155void inc_module_count(void); 161void batadv_inc_module_count(void);
156void dec_module_count(void); 162void batadv_dec_module_count(void);
157int is_my_mac(const uint8_t *addr); 163int batadv_is_my_mac(const uint8_t *addr);
158int batman_skb_recv(struct sk_buff *skb, struct net_device *dev, 164int batadv_batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
159 struct packet_type *ptype, struct net_device *orig_dev); 165 struct packet_type *ptype,
160int recv_handler_register(uint8_t packet_type, 166 struct net_device *orig_dev);
161 int (*recv_handler)(struct sk_buff *, 167int
162 struct hard_iface *)); 168batadv_recv_handler_register(uint8_t packet_type,
163void recv_handler_unregister(uint8_t packet_type); 169 int (*recv_handler)(struct sk_buff *,
164int bat_algo_register(struct bat_algo_ops *bat_algo_ops); 170 struct batadv_hard_iface *));
165int bat_algo_select(struct bat_priv *bat_priv, char *name); 171void batadv_recv_handler_unregister(uint8_t packet_type);
166int bat_algo_seq_print_text(struct seq_file *seq, void *offset); 172int batadv_algo_register(struct batadv_algo_ops *bat_algo_ops);
173int batadv_algo_select(struct batadv_priv *bat_priv, char *name);
174int batadv_algo_seq_print_text(struct seq_file *seq, void *offset);
167 175
168#ifdef CONFIG_BATMAN_ADV_DEBUG 176#ifdef CONFIG_BATMAN_ADV_DEBUG
169int debug_log(struct bat_priv *bat_priv, const char *fmt, ...) __printf(2, 3); 177int batadv_debug_log(struct batadv_priv *bat_priv, const char *fmt, ...)
178__printf(2, 3);
170 179
171#define bat_dbg(type, bat_priv, fmt, arg...) \ 180#define batadv_dbg(type, bat_priv, fmt, arg...) \
172 do { \ 181 do { \
173 if (atomic_read(&bat_priv->log_level) & type) \ 182 if (atomic_read(&bat_priv->log_level) & type) \
174 debug_log(bat_priv, fmt, ## arg); \ 183 batadv_debug_log(bat_priv, fmt, ## arg);\
175 } \ 184 } \
176 while (0) 185 while (0)
177#else /* !CONFIG_BATMAN_ADV_DEBUG */ 186#else /* !CONFIG_BATMAN_ADV_DEBUG */
178__printf(3, 4) 187__printf(3, 4)
179static inline void bat_dbg(int type __always_unused, 188static inline void batadv_dbg(int type __always_unused,
180 struct bat_priv *bat_priv __always_unused, 189 struct batadv_priv *bat_priv __always_unused,
181 const char *fmt __always_unused, ...) 190 const char *fmt __always_unused, ...)
182{ 191{
183} 192}
184#endif 193#endif
185 194
186#define bat_info(net_dev, fmt, arg...) \ 195#define batadv_info(net_dev, fmt, arg...) \
187 do { \ 196 do { \
188 struct net_device *_netdev = (net_dev); \ 197 struct net_device *_netdev = (net_dev); \
189 struct bat_priv *_batpriv = netdev_priv(_netdev); \ 198 struct batadv_priv *_batpriv = netdev_priv(_netdev); \
190 bat_dbg(DBG_ALL, _batpriv, fmt, ## arg); \ 199 batadv_dbg(BATADV_DBG_ALL, _batpriv, fmt, ## arg); \
191 pr_info("%s: " fmt, _netdev->name, ## arg); \ 200 pr_info("%s: " fmt, _netdev->name, ## arg); \
192 } while (0) 201 } while (0)
193#define bat_err(net_dev, fmt, arg...) \ 202#define batadv_err(net_dev, fmt, arg...) \
194 do { \ 203 do { \
195 struct net_device *_netdev = (net_dev); \ 204 struct net_device *_netdev = (net_dev); \
196 struct bat_priv *_batpriv = netdev_priv(_netdev); \ 205 struct batadv_priv *_batpriv = netdev_priv(_netdev); \
197 bat_dbg(DBG_ALL, _batpriv, fmt, ## arg); \ 206 batadv_dbg(BATADV_DBG_ALL, _batpriv, fmt, ## arg); \
198 pr_err("%s: " fmt, _netdev->name, ## arg); \ 207 pr_err("%s: " fmt, _netdev->name, ## arg); \
199 } while (0) 208 } while (0)
200 209
201/** 210/* returns 1 if they are the same ethernet addr
202 * returns 1 if they are the same ethernet addr
203 * 211 *
204 * note: can't use compare_ether_addr() as it requires aligned memory 212 * note: can't use compare_ether_addr() as it requires aligned memory
205 */ 213 */
206 214static inline int batadv_compare_eth(const void *data1, const void *data2)
207static inline int compare_eth(const void *data1, const void *data2)
208{ 215{
209 return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0); 216 return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
210} 217}
@@ -216,15 +223,16 @@ static inline int compare_eth(const void *data1, const void *data2)
216 * 223 *
217 * Returns true if current time is after timestamp + timeout 224 * Returns true if current time is after timestamp + timeout
218 */ 225 */
219static inline bool has_timed_out(unsigned long timestamp, unsigned int timeout) 226static inline bool batadv_has_timed_out(unsigned long timestamp,
227 unsigned int timeout)
220{ 228{
221 return time_is_before_jiffies(timestamp + msecs_to_jiffies(timeout)); 229 return time_is_before_jiffies(timestamp + msecs_to_jiffies(timeout));
222} 230}
223 231
224#define atomic_dec_not_zero(v) atomic_add_unless((v), -1, 0) 232#define batadv_atomic_dec_not_zero(v) atomic_add_unless((v), -1, 0)
225 233
226/* Returns the smallest signed integer in two's complement with the sizeof x */ 234/* Returns the smallest signed integer in two's complement with the sizeof x */
227#define smallest_signed_int(x) (1u << (7u + 8u * (sizeof(x) - 1u))) 235#define batadv_smallest_signed_int(x) (1u << (7u + 8u * (sizeof(x) - 1u)))
228 236
229/* Checks if a sequence number x is a predecessor/successor of y. 237/* Checks if a sequence number x is a predecessor/successor of y.
230 * they handle overflows/underflows and can correctly check for a 238 * they handle overflows/underflows and can correctly check for a
@@ -234,12 +242,39 @@ static inline bool has_timed_out(unsigned long timestamp, unsigned int timeout)
234 * - when adding nothing - it is neither a predecessor nor a successor 242 * - when adding nothing - it is neither a predecessor nor a successor
235 * - before adding more than 127 to the starting value - it is a predecessor, 243 * - before adding more than 127 to the starting value - it is a predecessor,
236 * - when adding 128 - it is neither a predecessor nor a successor, 244 * - when adding 128 - it is neither a predecessor nor a successor,
237 * - after adding more than 127 to the starting value - it is a successor */ 245 * - after adding more than 127 to the starting value - it is a successor
238#define seq_before(x, y) ({typeof(x) _d1 = (x); \ 246 */
239 typeof(y) _d2 = (y); \ 247#define batadv_seq_before(x, y) ({typeof(x) _d1 = (x); \
240 typeof(x) _dummy = (_d1 - _d2); \ 248 typeof(y) _d2 = (y); \
241 (void) (&_d1 == &_d2); \ 249 typeof(x) _dummy = (_d1 - _d2); \
242 _dummy > smallest_signed_int(_dummy); }) 250 (void) (&_d1 == &_d2); \
243#define seq_after(x, y) seq_before(y, x) 251 _dummy > batadv_smallest_signed_int(_dummy); })
252#define batadv_seq_after(x, y) batadv_seq_before(y, x)
253
254/* Stop preemption on local cpu while incrementing the counter */
255static inline void batadv_add_counter(struct batadv_priv *bat_priv, size_t idx,
256 size_t count)
257{
258 int cpu = get_cpu();
259 per_cpu_ptr(bat_priv->bat_counters, cpu)[idx] += count;
260 put_cpu();
261}
262
263#define batadv_inc_counter(b, i) batadv_add_counter(b, i, 1)
264
265/* Sum and return the cpu-local counters for index 'idx' */
266static inline uint64_t batadv_sum_counter(struct batadv_priv *bat_priv,
267 size_t idx)
268{
269 uint64_t *counters, sum = 0;
270 int cpu;
271
272 for_each_possible_cpu(cpu) {
273 counters = per_cpu_ptr(bat_priv->bat_counters, cpu);
274 sum += counters[idx];
275 }
276
277 return sum;
278}
244 279
245#endif /* _NET_BATMAN_ADV_MAIN_H_ */ 280#endif /* _NET_BATMAN_ADV_MAIN_H_ */
diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c
index 41147942ba5..ac9bdf8f80a 100644
--- a/net/batman-adv/originator.c
+++ b/net/batman-adv/originator.c
@@ -1,5 +1,4 @@
1/* 1/* Copyright (C) 2009-2012 B.A.T.M.A.N. contributors:
2 * Copyright (C) 2009-2012 B.A.T.M.A.N. contributors:
3 * 2 *
4 * Marek Lindner, Simon Wunderlich 3 * Marek Lindner, Simon Wunderlich
5 * 4 *
@@ -16,7 +15,6 @@
16 * along with this program; if not, write to the Free Software 15 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA 17 * 02110-1301, USA
19 *
20 */ 18 */
21 19
22#include "main.h" 20#include "main.h"
@@ -30,50 +28,52 @@
30#include "soft-interface.h" 28#include "soft-interface.h"
31#include "bridge_loop_avoidance.h" 29#include "bridge_loop_avoidance.h"
32 30
33static void purge_orig(struct work_struct *work); 31static void batadv_purge_orig(struct work_struct *work);
34 32
35static void start_purge_timer(struct bat_priv *bat_priv) 33static void batadv_start_purge_timer(struct batadv_priv *bat_priv)
36{ 34{
37 INIT_DELAYED_WORK(&bat_priv->orig_work, purge_orig); 35 INIT_DELAYED_WORK(&bat_priv->orig_work, batadv_purge_orig);
38 queue_delayed_work(bat_event_workqueue, 36 queue_delayed_work(batadv_event_workqueue,
39 &bat_priv->orig_work, msecs_to_jiffies(1000)); 37 &bat_priv->orig_work, msecs_to_jiffies(1000));
40} 38}
41 39
42/* returns 1 if they are the same originator */ 40/* returns 1 if they are the same originator */
43static int compare_orig(const struct hlist_node *node, const void *data2) 41static int batadv_compare_orig(const struct hlist_node *node, const void *data2)
44{ 42{
45 const void *data1 = container_of(node, struct orig_node, hash_entry); 43 const void *data1 = container_of(node, struct batadv_orig_node,
44 hash_entry);
46 45
47 return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0); 46 return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
48} 47}
49 48
50int originator_init(struct bat_priv *bat_priv) 49int batadv_originator_init(struct batadv_priv *bat_priv)
51{ 50{
52 if (bat_priv->orig_hash) 51 if (bat_priv->orig_hash)
53 return 1; 52 return 0;
54 53
55 bat_priv->orig_hash = hash_new(1024); 54 bat_priv->orig_hash = batadv_hash_new(1024);
56 55
57 if (!bat_priv->orig_hash) 56 if (!bat_priv->orig_hash)
58 goto err; 57 goto err;
59 58
60 start_purge_timer(bat_priv); 59 batadv_start_purge_timer(bat_priv);
61 return 1; 60 return 0;
62 61
63err: 62err:
64 return 0; 63 return -ENOMEM;
65} 64}
66 65
67void neigh_node_free_ref(struct neigh_node *neigh_node) 66void batadv_neigh_node_free_ref(struct batadv_neigh_node *neigh_node)
68{ 67{
69 if (atomic_dec_and_test(&neigh_node->refcount)) 68 if (atomic_dec_and_test(&neigh_node->refcount))
70 kfree_rcu(neigh_node, rcu); 69 kfree_rcu(neigh_node, rcu);
71} 70}
72 71
73/* increases the refcounter of a found router */ 72/* increases the refcounter of a found router */
74struct neigh_node *orig_node_get_router(struct orig_node *orig_node) 73struct batadv_neigh_node *
74batadv_orig_node_get_router(struct batadv_orig_node *orig_node)
75{ 75{
76 struct neigh_node *router; 76 struct batadv_neigh_node *router;
77 77
78 rcu_read_lock(); 78 rcu_read_lock();
79 router = rcu_dereference(orig_node->router); 79 router = rcu_dereference(orig_node->router);
@@ -85,12 +85,12 @@ struct neigh_node *orig_node_get_router(struct orig_node *orig_node)
85 return router; 85 return router;
86} 86}
87 87
88struct neigh_node *batadv_neigh_node_new(struct hard_iface *hard_iface, 88struct batadv_neigh_node *
89 const uint8_t *neigh_addr, 89batadv_neigh_node_new(struct batadv_hard_iface *hard_iface,
90 uint32_t seqno) 90 const uint8_t *neigh_addr, uint32_t seqno)
91{ 91{
92 struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface); 92 struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
93 struct neigh_node *neigh_node; 93 struct batadv_neigh_node *neigh_node;
94 94
95 neigh_node = kzalloc(sizeof(*neigh_node), GFP_ATOMIC); 95 neigh_node = kzalloc(sizeof(*neigh_node), GFP_ATOMIC);
96 if (!neigh_node) 96 if (!neigh_node)
@@ -104,21 +104,21 @@ struct neigh_node *batadv_neigh_node_new(struct hard_iface *hard_iface,
104 /* extra reference for return */ 104 /* extra reference for return */
105 atomic_set(&neigh_node->refcount, 2); 105 atomic_set(&neigh_node->refcount, 2);
106 106
107 bat_dbg(DBG_BATMAN, bat_priv, 107 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
108 "Creating new neighbor %pM, initial seqno %d\n", 108 "Creating new neighbor %pM, initial seqno %d\n",
109 neigh_addr, seqno); 109 neigh_addr, seqno);
110 110
111out: 111out:
112 return neigh_node; 112 return neigh_node;
113} 113}
114 114
115static void orig_node_free_rcu(struct rcu_head *rcu) 115static void batadv_orig_node_free_rcu(struct rcu_head *rcu)
116{ 116{
117 struct hlist_node *node, *node_tmp; 117 struct hlist_node *node, *node_tmp;
118 struct neigh_node *neigh_node, *tmp_neigh_node; 118 struct batadv_neigh_node *neigh_node, *tmp_neigh_node;
119 struct orig_node *orig_node; 119 struct batadv_orig_node *orig_node;
120 120
121 orig_node = container_of(rcu, struct orig_node, rcu); 121 orig_node = container_of(rcu, struct batadv_orig_node, rcu);
122 122
123 spin_lock_bh(&orig_node->neigh_list_lock); 123 spin_lock_bh(&orig_node->neigh_list_lock);
124 124
@@ -126,21 +126,21 @@ static void orig_node_free_rcu(struct rcu_head *rcu)
126 list_for_each_entry_safe(neigh_node, tmp_neigh_node, 126 list_for_each_entry_safe(neigh_node, tmp_neigh_node,
127 &orig_node->bond_list, bonding_list) { 127 &orig_node->bond_list, bonding_list) {
128 list_del_rcu(&neigh_node->bonding_list); 128 list_del_rcu(&neigh_node->bonding_list);
129 neigh_node_free_ref(neigh_node); 129 batadv_neigh_node_free_ref(neigh_node);
130 } 130 }
131 131
132 /* for all neighbors towards this originator ... */ 132 /* for all neighbors towards this originator ... */
133 hlist_for_each_entry_safe(neigh_node, node, node_tmp, 133 hlist_for_each_entry_safe(neigh_node, node, node_tmp,
134 &orig_node->neigh_list, list) { 134 &orig_node->neigh_list, list) {
135 hlist_del_rcu(&neigh_node->list); 135 hlist_del_rcu(&neigh_node->list);
136 neigh_node_free_ref(neigh_node); 136 batadv_neigh_node_free_ref(neigh_node);
137 } 137 }
138 138
139 spin_unlock_bh(&orig_node->neigh_list_lock); 139 spin_unlock_bh(&orig_node->neigh_list_lock);
140 140
141 frag_list_free(&orig_node->frag_list); 141 batadv_frag_list_free(&orig_node->frag_list);
142 tt_global_del_orig(orig_node->bat_priv, orig_node, 142 batadv_tt_global_del_orig(orig_node->bat_priv, orig_node,
143 "originator timed out"); 143 "originator timed out");
144 144
145 kfree(orig_node->tt_buff); 145 kfree(orig_node->tt_buff);
146 kfree(orig_node->bcast_own); 146 kfree(orig_node->bcast_own);
@@ -148,19 +148,19 @@ static void orig_node_free_rcu(struct rcu_head *rcu)
148 kfree(orig_node); 148 kfree(orig_node);
149} 149}
150 150
151void orig_node_free_ref(struct orig_node *orig_node) 151void batadv_orig_node_free_ref(struct batadv_orig_node *orig_node)
152{ 152{
153 if (atomic_dec_and_test(&orig_node->refcount)) 153 if (atomic_dec_and_test(&orig_node->refcount))
154 call_rcu(&orig_node->rcu, orig_node_free_rcu); 154 call_rcu(&orig_node->rcu, batadv_orig_node_free_rcu);
155} 155}
156 156
157void originator_free(struct bat_priv *bat_priv) 157void batadv_originator_free(struct batadv_priv *bat_priv)
158{ 158{
159 struct hashtable_t *hash = bat_priv->orig_hash; 159 struct batadv_hashtable *hash = bat_priv->orig_hash;
160 struct hlist_node *node, *node_tmp; 160 struct hlist_node *node, *node_tmp;
161 struct hlist_head *head; 161 struct hlist_head *head;
162 spinlock_t *list_lock; /* spinlock to protect write access */ 162 spinlock_t *list_lock; /* spinlock to protect write access */
163 struct orig_node *orig_node; 163 struct batadv_orig_node *orig_node;
164 uint32_t i; 164 uint32_t i;
165 165
166 if (!hash) 166 if (!hash)
@@ -179,28 +179,31 @@ void originator_free(struct bat_priv *bat_priv)
179 head, hash_entry) { 179 head, hash_entry) {
180 180
181 hlist_del_rcu(node); 181 hlist_del_rcu(node);
182 orig_node_free_ref(orig_node); 182 batadv_orig_node_free_ref(orig_node);
183 } 183 }
184 spin_unlock_bh(list_lock); 184 spin_unlock_bh(list_lock);
185 } 185 }
186 186
187 hash_destroy(hash); 187 batadv_hash_destroy(hash);
188} 188}
189 189
190/* this function finds or creates an originator entry for the given 190/* this function finds or creates an originator entry for the given
191 * address if it does not exits */ 191 * address if it does not exits
192struct orig_node *get_orig_node(struct bat_priv *bat_priv, const uint8_t *addr) 192 */
193struct batadv_orig_node *batadv_get_orig_node(struct batadv_priv *bat_priv,
194 const uint8_t *addr)
193{ 195{
194 struct orig_node *orig_node; 196 struct batadv_orig_node *orig_node;
195 int size; 197 int size;
196 int hash_added; 198 int hash_added;
199 unsigned long reset_time;
197 200
198 orig_node = orig_hash_find(bat_priv, addr); 201 orig_node = batadv_orig_hash_find(bat_priv, addr);
199 if (orig_node) 202 if (orig_node)
200 return orig_node; 203 return orig_node;
201 204
202 bat_dbg(DBG_BATMAN, bat_priv, 205 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
203 "Creating new originator: %pM\n", addr); 206 "Creating new originator: %pM\n", addr);
204 207
205 orig_node = kzalloc(sizeof(*orig_node), GFP_ATOMIC); 208 orig_node = kzalloc(sizeof(*orig_node), GFP_ATOMIC);
206 if (!orig_node) 209 if (!orig_node)
@@ -226,14 +229,13 @@ struct orig_node *get_orig_node(struct bat_priv *bat_priv, const uint8_t *addr)
226 orig_node->tt_buff = NULL; 229 orig_node->tt_buff = NULL;
227 orig_node->tt_buff_len = 0; 230 orig_node->tt_buff_len = 0;
228 atomic_set(&orig_node->tt_size, 0); 231 atomic_set(&orig_node->tt_size, 0);
229 orig_node->bcast_seqno_reset = jiffies - 1 232 reset_time = jiffies - 1 - msecs_to_jiffies(BATADV_RESET_PROTECTION_MS);
230 - msecs_to_jiffies(RESET_PROTECTION_MS); 233 orig_node->bcast_seqno_reset = reset_time;
231 orig_node->batman_seqno_reset = jiffies - 1 234 orig_node->batman_seqno_reset = reset_time;
232 - msecs_to_jiffies(RESET_PROTECTION_MS);
233 235
234 atomic_set(&orig_node->bond_candidates, 0); 236 atomic_set(&orig_node->bond_candidates, 0);
235 237
236 size = bat_priv->num_ifaces * sizeof(unsigned long) * NUM_WORDS; 238 size = bat_priv->num_ifaces * sizeof(unsigned long) * BATADV_NUM_WORDS;
237 239
238 orig_node->bcast_own = kzalloc(size, GFP_ATOMIC); 240 orig_node->bcast_own = kzalloc(size, GFP_ATOMIC);
239 if (!orig_node->bcast_own) 241 if (!orig_node->bcast_own)
@@ -248,8 +250,9 @@ struct orig_node *get_orig_node(struct bat_priv *bat_priv, const uint8_t *addr)
248 if (!orig_node->bcast_own_sum) 250 if (!orig_node->bcast_own_sum)
249 goto free_bcast_own; 251 goto free_bcast_own;
250 252
251 hash_added = hash_add(bat_priv->orig_hash, compare_orig, 253 hash_added = batadv_hash_add(bat_priv->orig_hash, batadv_compare_orig,
252 choose_orig, orig_node, &orig_node->hash_entry); 254 batadv_choose_orig, orig_node,
255 &orig_node->hash_entry);
253 if (hash_added != 0) 256 if (hash_added != 0)
254 goto free_bcast_own_sum; 257 goto free_bcast_own_sum;
255 258
@@ -263,14 +266,16 @@ free_orig_node:
263 return NULL; 266 return NULL;
264} 267}
265 268
266static bool purge_orig_neighbors(struct bat_priv *bat_priv, 269static bool
267 struct orig_node *orig_node, 270batadv_purge_orig_neighbors(struct batadv_priv *bat_priv,
268 struct neigh_node **best_neigh_node) 271 struct batadv_orig_node *orig_node,
272 struct batadv_neigh_node **best_neigh_node)
269{ 273{
270 struct hlist_node *node, *node_tmp; 274 struct hlist_node *node, *node_tmp;
271 struct neigh_node *neigh_node; 275 struct batadv_neigh_node *neigh_node;
272 bool neigh_purged = false; 276 bool neigh_purged = false;
273 unsigned long last_seen; 277 unsigned long last_seen;
278 struct batadv_hard_iface *if_incoming;
274 279
275 *best_neigh_node = NULL; 280 *best_neigh_node = NULL;
276 281
@@ -280,34 +285,32 @@ static bool purge_orig_neighbors(struct bat_priv *bat_priv,
280 hlist_for_each_entry_safe(neigh_node, node, node_tmp, 285 hlist_for_each_entry_safe(neigh_node, node, node_tmp,
281 &orig_node->neigh_list, list) { 286 &orig_node->neigh_list, list) {
282 287
283 if ((has_timed_out(neigh_node->last_seen, PURGE_TIMEOUT)) || 288 last_seen = neigh_node->last_seen;
284 (neigh_node->if_incoming->if_status == IF_INACTIVE) || 289 if_incoming = neigh_node->if_incoming;
285 (neigh_node->if_incoming->if_status == IF_NOT_IN_USE) || 290
286 (neigh_node->if_incoming->if_status == IF_TO_BE_REMOVED)) { 291 if ((batadv_has_timed_out(last_seen, BATADV_PURGE_TIMEOUT)) ||
287 292 (if_incoming->if_status == BATADV_IF_INACTIVE) ||
288 last_seen = neigh_node->last_seen; 293 (if_incoming->if_status == BATADV_IF_NOT_IN_USE) ||
289 294 (if_incoming->if_status == BATADV_IF_TO_BE_REMOVED)) {
290 if ((neigh_node->if_incoming->if_status == 295
291 IF_INACTIVE) || 296 if ((if_incoming->if_status == BATADV_IF_INACTIVE) ||
292 (neigh_node->if_incoming->if_status == 297 (if_incoming->if_status == BATADV_IF_NOT_IN_USE) ||
293 IF_NOT_IN_USE) || 298 (if_incoming->if_status == BATADV_IF_TO_BE_REMOVED))
294 (neigh_node->if_incoming->if_status == 299 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
295 IF_TO_BE_REMOVED)) 300 "neighbor purge: originator %pM, neighbor: %pM, iface: %s\n",
296 bat_dbg(DBG_BATMAN, bat_priv, 301 orig_node->orig, neigh_node->addr,
297 "neighbor purge: originator %pM, neighbor: %pM, iface: %s\n", 302 if_incoming->net_dev->name);
298 orig_node->orig, neigh_node->addr,
299 neigh_node->if_incoming->net_dev->name);
300 else 303 else
301 bat_dbg(DBG_BATMAN, bat_priv, 304 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
302 "neighbor timeout: originator %pM, neighbor: %pM, last_seen: %u\n", 305 "neighbor timeout: originator %pM, neighbor: %pM, last_seen: %u\n",
303 orig_node->orig, neigh_node->addr, 306 orig_node->orig, neigh_node->addr,
304 jiffies_to_msecs(last_seen)); 307 jiffies_to_msecs(last_seen));
305 308
306 neigh_purged = true; 309 neigh_purged = true;
307 310
308 hlist_del_rcu(&neigh_node->list); 311 hlist_del_rcu(&neigh_node->list);
309 bonding_candidate_del(orig_node, neigh_node); 312 batadv_bonding_candidate_del(orig_node, neigh_node);
310 neigh_node_free_ref(neigh_node); 313 batadv_neigh_node_free_ref(neigh_node);
311 } else { 314 } else {
312 if ((!*best_neigh_node) || 315 if ((!*best_neigh_node) ||
313 (neigh_node->tq_avg > (*best_neigh_node)->tq_avg)) 316 (neigh_node->tq_avg > (*best_neigh_node)->tq_avg))
@@ -319,33 +322,35 @@ static bool purge_orig_neighbors(struct bat_priv *bat_priv,
319 return neigh_purged; 322 return neigh_purged;
320} 323}
321 324
322static bool purge_orig_node(struct bat_priv *bat_priv, 325static bool batadv_purge_orig_node(struct batadv_priv *bat_priv,
323 struct orig_node *orig_node) 326 struct batadv_orig_node *orig_node)
324{ 327{
325 struct neigh_node *best_neigh_node; 328 struct batadv_neigh_node *best_neigh_node;
326 329
327 if (has_timed_out(orig_node->last_seen, 2 * PURGE_TIMEOUT)) { 330 if (batadv_has_timed_out(orig_node->last_seen,
328 bat_dbg(DBG_BATMAN, bat_priv, 331 2 * BATADV_PURGE_TIMEOUT)) {
329 "Originator timeout: originator %pM, last_seen %u\n", 332 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
330 orig_node->orig, 333 "Originator timeout: originator %pM, last_seen %u\n",
331 jiffies_to_msecs(orig_node->last_seen)); 334 orig_node->orig,
335 jiffies_to_msecs(orig_node->last_seen));
332 return true; 336 return true;
333 } else { 337 } else {
334 if (purge_orig_neighbors(bat_priv, orig_node, 338 if (batadv_purge_orig_neighbors(bat_priv, orig_node,
335 &best_neigh_node)) 339 &best_neigh_node))
336 update_route(bat_priv, orig_node, best_neigh_node); 340 batadv_update_route(bat_priv, orig_node,
341 best_neigh_node);
337 } 342 }
338 343
339 return false; 344 return false;
340} 345}
341 346
342static void _purge_orig(struct bat_priv *bat_priv) 347static void _batadv_purge_orig(struct batadv_priv *bat_priv)
343{ 348{
344 struct hashtable_t *hash = bat_priv->orig_hash; 349 struct batadv_hashtable *hash = bat_priv->orig_hash;
345 struct hlist_node *node, *node_tmp; 350 struct hlist_node *node, *node_tmp;
346 struct hlist_head *head; 351 struct hlist_head *head;
347 spinlock_t *list_lock; /* spinlock to protect write access */ 352 spinlock_t *list_lock; /* spinlock to protect write access */
348 struct orig_node *orig_node; 353 struct batadv_orig_node *orig_node;
349 uint32_t i; 354 uint32_t i;
350 355
351 if (!hash) 356 if (!hash)
@@ -359,58 +364,60 @@ static void _purge_orig(struct bat_priv *bat_priv)
359 spin_lock_bh(list_lock); 364 spin_lock_bh(list_lock);
360 hlist_for_each_entry_safe(orig_node, node, node_tmp, 365 hlist_for_each_entry_safe(orig_node, node, node_tmp,
361 head, hash_entry) { 366 head, hash_entry) {
362 if (purge_orig_node(bat_priv, orig_node)) { 367 if (batadv_purge_orig_node(bat_priv, orig_node)) {
363 if (orig_node->gw_flags) 368 if (orig_node->gw_flags)
364 gw_node_delete(bat_priv, orig_node); 369 batadv_gw_node_delete(bat_priv,
370 orig_node);
365 hlist_del_rcu(node); 371 hlist_del_rcu(node);
366 orig_node_free_ref(orig_node); 372 batadv_orig_node_free_ref(orig_node);
367 continue; 373 continue;
368 } 374 }
369 375
370 if (has_timed_out(orig_node->last_frag_packet, 376 if (batadv_has_timed_out(orig_node->last_frag_packet,
371 FRAG_TIMEOUT)) 377 BATADV_FRAG_TIMEOUT))
372 frag_list_free(&orig_node->frag_list); 378 batadv_frag_list_free(&orig_node->frag_list);
373 } 379 }
374 spin_unlock_bh(list_lock); 380 spin_unlock_bh(list_lock);
375 } 381 }
376 382
377 gw_node_purge(bat_priv); 383 batadv_gw_node_purge(bat_priv);
378 gw_election(bat_priv); 384 batadv_gw_election(bat_priv);
379} 385}
380 386
381static void purge_orig(struct work_struct *work) 387static void batadv_purge_orig(struct work_struct *work)
382{ 388{
383 struct delayed_work *delayed_work = 389 struct delayed_work *delayed_work;
384 container_of(work, struct delayed_work, work); 390 struct batadv_priv *bat_priv;
385 struct bat_priv *bat_priv =
386 container_of(delayed_work, struct bat_priv, orig_work);
387 391
388 _purge_orig(bat_priv); 392 delayed_work = container_of(work, struct delayed_work, work);
389 start_purge_timer(bat_priv); 393 bat_priv = container_of(delayed_work, struct batadv_priv, orig_work);
394 _batadv_purge_orig(bat_priv);
395 batadv_start_purge_timer(bat_priv);
390} 396}
391 397
392void purge_orig_ref(struct bat_priv *bat_priv) 398void batadv_purge_orig_ref(struct batadv_priv *bat_priv)
393{ 399{
394 _purge_orig(bat_priv); 400 _batadv_purge_orig(bat_priv);
395} 401}
396 402
397int orig_seq_print_text(struct seq_file *seq, void *offset) 403int batadv_orig_seq_print_text(struct seq_file *seq, void *offset)
398{ 404{
399 struct net_device *net_dev = (struct net_device *)seq->private; 405 struct net_device *net_dev = (struct net_device *)seq->private;
400 struct bat_priv *bat_priv = netdev_priv(net_dev); 406 struct batadv_priv *bat_priv = netdev_priv(net_dev);
401 struct hashtable_t *hash = bat_priv->orig_hash; 407 struct batadv_hashtable *hash = bat_priv->orig_hash;
402 struct hlist_node *node, *node_tmp; 408 struct hlist_node *node, *node_tmp;
403 struct hlist_head *head; 409 struct hlist_head *head;
404 struct hard_iface *primary_if; 410 struct batadv_hard_iface *primary_if;
405 struct orig_node *orig_node; 411 struct batadv_orig_node *orig_node;
406 struct neigh_node *neigh_node, *neigh_node_tmp; 412 struct batadv_neigh_node *neigh_node, *neigh_node_tmp;
407 int batman_count = 0; 413 int batman_count = 0;
408 int last_seen_secs; 414 int last_seen_secs;
409 int last_seen_msecs; 415 int last_seen_msecs;
416 unsigned long last_seen_jiffies;
410 uint32_t i; 417 uint32_t i;
411 int ret = 0; 418 int ret = 0;
412 419
413 primary_if = primary_if_get_selected(bat_priv); 420 primary_if = batadv_primary_if_get_selected(bat_priv);
414 421
415 if (!primary_if) { 422 if (!primary_if) {
416 ret = seq_printf(seq, 423 ret = seq_printf(seq,
@@ -419,7 +426,7 @@ int orig_seq_print_text(struct seq_file *seq, void *offset)
419 goto out; 426 goto out;
420 } 427 }
421 428
422 if (primary_if->if_status != IF_ACTIVE) { 429 if (primary_if->if_status != BATADV_IF_ACTIVE) {
423 ret = seq_printf(seq, 430 ret = seq_printf(seq,
424 "BATMAN mesh %s disabled - primary interface not active\n", 431 "BATMAN mesh %s disabled - primary interface not active\n",
425 net_dev->name); 432 net_dev->name);
@@ -427,28 +434,28 @@ int orig_seq_print_text(struct seq_file *seq, void *offset)
427 } 434 }
428 435
429 seq_printf(seq, "[B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s)]\n", 436 seq_printf(seq, "[B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s)]\n",
430 SOURCE_VERSION, primary_if->net_dev->name, 437 BATADV_SOURCE_VERSION, primary_if->net_dev->name,
431 primary_if->net_dev->dev_addr, net_dev->name); 438 primary_if->net_dev->dev_addr, net_dev->name);
432 seq_printf(seq, " %-15s %s (%s/%i) %17s [%10s]: %20s ...\n", 439 seq_printf(seq, " %-15s %s (%s/%i) %17s [%10s]: %20s ...\n",
433 "Originator", "last-seen", "#", TQ_MAX_VALUE, "Nexthop", 440 "Originator", "last-seen", "#", BATADV_TQ_MAX_VALUE,
434 "outgoingIF", "Potential nexthops"); 441 "Nexthop", "outgoingIF", "Potential nexthops");
435 442
436 for (i = 0; i < hash->size; i++) { 443 for (i = 0; i < hash->size; i++) {
437 head = &hash->table[i]; 444 head = &hash->table[i];
438 445
439 rcu_read_lock(); 446 rcu_read_lock();
440 hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) { 447 hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
441 neigh_node = orig_node_get_router(orig_node); 448 neigh_node = batadv_orig_node_get_router(orig_node);
442 if (!neigh_node) 449 if (!neigh_node)
443 continue; 450 continue;
444 451
445 if (neigh_node->tq_avg == 0) 452 if (neigh_node->tq_avg == 0)
446 goto next; 453 goto next;
447 454
448 last_seen_secs = jiffies_to_msecs(jiffies - 455 last_seen_jiffies = jiffies - orig_node->last_seen;
449 orig_node->last_seen) / 1000; 456 last_seen_msecs = jiffies_to_msecs(last_seen_jiffies);
450 last_seen_msecs = jiffies_to_msecs(jiffies - 457 last_seen_secs = last_seen_msecs / 1000;
451 orig_node->last_seen) % 1000; 458 last_seen_msecs = last_seen_msecs % 1000;
452 459
453 seq_printf(seq, "%pM %4i.%03is (%3i) %pM [%10s]:", 460 seq_printf(seq, "%pM %4i.%03is (%3i) %pM [%10s]:",
454 orig_node->orig, last_seen_secs, 461 orig_node->orig, last_seen_secs,
@@ -467,7 +474,7 @@ int orig_seq_print_text(struct seq_file *seq, void *offset)
467 batman_count++; 474 batman_count++;
468 475
469next: 476next:
470 neigh_node_free_ref(neigh_node); 477 batadv_neigh_node_free_ref(neigh_node);
471 } 478 }
472 rcu_read_unlock(); 479 rcu_read_unlock();
473 } 480 }
@@ -477,27 +484,29 @@ next:
477 484
478out: 485out:
479 if (primary_if) 486 if (primary_if)
480 hardif_free_ref(primary_if); 487 batadv_hardif_free_ref(primary_if);
481 return ret; 488 return ret;
482} 489}
483 490
484static int orig_node_add_if(struct orig_node *orig_node, int max_if_num) 491static int batadv_orig_node_add_if(struct batadv_orig_node *orig_node,
492 int max_if_num)
485{ 493{
486 void *data_ptr; 494 void *data_ptr;
495 size_t data_size, old_size;
487 496
488 data_ptr = kmalloc(max_if_num * sizeof(unsigned long) * NUM_WORDS, 497 data_size = max_if_num * sizeof(unsigned long) * BATADV_NUM_WORDS;
489 GFP_ATOMIC); 498 old_size = (max_if_num - 1) * sizeof(unsigned long) * BATADV_NUM_WORDS;
499 data_ptr = kmalloc(data_size, GFP_ATOMIC);
490 if (!data_ptr) 500 if (!data_ptr)
491 return -1; 501 return -ENOMEM;
492 502
493 memcpy(data_ptr, orig_node->bcast_own, 503 memcpy(data_ptr, orig_node->bcast_own, old_size);
494 (max_if_num - 1) * sizeof(unsigned long) * NUM_WORDS);
495 kfree(orig_node->bcast_own); 504 kfree(orig_node->bcast_own);
496 orig_node->bcast_own = data_ptr; 505 orig_node->bcast_own = data_ptr;
497 506
498 data_ptr = kmalloc(max_if_num * sizeof(uint8_t), GFP_ATOMIC); 507 data_ptr = kmalloc(max_if_num * sizeof(uint8_t), GFP_ATOMIC);
499 if (!data_ptr) 508 if (!data_ptr)
500 return -1; 509 return -ENOMEM;
501 510
502 memcpy(data_ptr, orig_node->bcast_own_sum, 511 memcpy(data_ptr, orig_node->bcast_own_sum,
503 (max_if_num - 1) * sizeof(uint8_t)); 512 (max_if_num - 1) * sizeof(uint8_t));
@@ -507,28 +516,30 @@ static int orig_node_add_if(struct orig_node *orig_node, int max_if_num)
507 return 0; 516 return 0;
508} 517}
509 518
510int orig_hash_add_if(struct hard_iface *hard_iface, int max_if_num) 519int batadv_orig_hash_add_if(struct batadv_hard_iface *hard_iface,
520 int max_if_num)
511{ 521{
512 struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface); 522 struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
513 struct hashtable_t *hash = bat_priv->orig_hash; 523 struct batadv_hashtable *hash = bat_priv->orig_hash;
514 struct hlist_node *node; 524 struct hlist_node *node;
515 struct hlist_head *head; 525 struct hlist_head *head;
516 struct orig_node *orig_node; 526 struct batadv_orig_node *orig_node;
517 uint32_t i; 527 uint32_t i;
518 int ret; 528 int ret;
519 529
520 /* resize all orig nodes because orig_node->bcast_own(_sum) depend on 530 /* resize all orig nodes because orig_node->bcast_own(_sum) depend on
521 * if_num */ 531 * if_num
532 */
522 for (i = 0; i < hash->size; i++) { 533 for (i = 0; i < hash->size; i++) {
523 head = &hash->table[i]; 534 head = &hash->table[i];
524 535
525 rcu_read_lock(); 536 rcu_read_lock();
526 hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) { 537 hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
527 spin_lock_bh(&orig_node->ogm_cnt_lock); 538 spin_lock_bh(&orig_node->ogm_cnt_lock);
528 ret = orig_node_add_if(orig_node, max_if_num); 539 ret = batadv_orig_node_add_if(orig_node, max_if_num);
529 spin_unlock_bh(&orig_node->ogm_cnt_lock); 540 spin_unlock_bh(&orig_node->ogm_cnt_lock);
530 541
531 if (ret == -1) 542 if (ret == -ENOMEM)
532 goto err; 543 goto err;
533 } 544 }
534 rcu_read_unlock(); 545 rcu_read_unlock();
@@ -541,8 +552,8 @@ err:
541 return -ENOMEM; 552 return -ENOMEM;
542} 553}
543 554
544static int orig_node_del_if(struct orig_node *orig_node, 555static int batadv_orig_node_del_if(struct batadv_orig_node *orig_node,
545 int max_if_num, int del_if_num) 556 int max_if_num, int del_if_num)
546{ 557{
547 void *data_ptr = NULL; 558 void *data_ptr = NULL;
548 int chunk_size; 559 int chunk_size;
@@ -551,10 +562,10 @@ static int orig_node_del_if(struct orig_node *orig_node,
551 if (max_if_num == 0) 562 if (max_if_num == 0)
552 goto free_bcast_own; 563 goto free_bcast_own;
553 564
554 chunk_size = sizeof(unsigned long) * NUM_WORDS; 565 chunk_size = sizeof(unsigned long) * BATADV_NUM_WORDS;
555 data_ptr = kmalloc(max_if_num * chunk_size, GFP_ATOMIC); 566 data_ptr = kmalloc(max_if_num * chunk_size, GFP_ATOMIC);
556 if (!data_ptr) 567 if (!data_ptr)
557 return -1; 568 return -ENOMEM;
558 569
559 /* copy first part */ 570 /* copy first part */
560 memcpy(data_ptr, orig_node->bcast_own, del_if_num * chunk_size); 571 memcpy(data_ptr, orig_node->bcast_own, del_if_num * chunk_size);
@@ -573,7 +584,7 @@ free_bcast_own:
573 584
574 data_ptr = kmalloc(max_if_num * sizeof(uint8_t), GFP_ATOMIC); 585 data_ptr = kmalloc(max_if_num * sizeof(uint8_t), GFP_ATOMIC);
575 if (!data_ptr) 586 if (!data_ptr)
576 return -1; 587 return -ENOMEM;
577 588
578 memcpy(data_ptr, orig_node->bcast_own_sum, 589 memcpy(data_ptr, orig_node->bcast_own_sum,
579 del_if_num * sizeof(uint8_t)); 590 del_if_num * sizeof(uint8_t));
@@ -589,30 +600,32 @@ free_own_sum:
589 return 0; 600 return 0;
590} 601}
591 602
592int orig_hash_del_if(struct hard_iface *hard_iface, int max_if_num) 603int batadv_orig_hash_del_if(struct batadv_hard_iface *hard_iface,
604 int max_if_num)
593{ 605{
594 struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface); 606 struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
595 struct hashtable_t *hash = bat_priv->orig_hash; 607 struct batadv_hashtable *hash = bat_priv->orig_hash;
596 struct hlist_node *node; 608 struct hlist_node *node;
597 struct hlist_head *head; 609 struct hlist_head *head;
598 struct hard_iface *hard_iface_tmp; 610 struct batadv_hard_iface *hard_iface_tmp;
599 struct orig_node *orig_node; 611 struct batadv_orig_node *orig_node;
600 uint32_t i; 612 uint32_t i;
601 int ret; 613 int ret;
602 614
603 /* resize all orig nodes because orig_node->bcast_own(_sum) depend on 615 /* resize all orig nodes because orig_node->bcast_own(_sum) depend on
604 * if_num */ 616 * if_num
617 */
605 for (i = 0; i < hash->size; i++) { 618 for (i = 0; i < hash->size; i++) {
606 head = &hash->table[i]; 619 head = &hash->table[i];
607 620
608 rcu_read_lock(); 621 rcu_read_lock();
609 hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) { 622 hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
610 spin_lock_bh(&orig_node->ogm_cnt_lock); 623 spin_lock_bh(&orig_node->ogm_cnt_lock);
611 ret = orig_node_del_if(orig_node, max_if_num, 624 ret = batadv_orig_node_del_if(orig_node, max_if_num,
612 hard_iface->if_num); 625 hard_iface->if_num);
613 spin_unlock_bh(&orig_node->ogm_cnt_lock); 626 spin_unlock_bh(&orig_node->ogm_cnt_lock);
614 627
615 if (ret == -1) 628 if (ret == -ENOMEM)
616 goto err; 629 goto err;
617 } 630 }
618 rcu_read_unlock(); 631 rcu_read_unlock();
@@ -620,8 +633,8 @@ int orig_hash_del_if(struct hard_iface *hard_iface, int max_if_num)
620 633
621 /* renumber remaining batman interfaces _inside_ of orig_hash_lock */ 634 /* renumber remaining batman interfaces _inside_ of orig_hash_lock */
622 rcu_read_lock(); 635 rcu_read_lock();
623 list_for_each_entry_rcu(hard_iface_tmp, &hardif_list, list) { 636 list_for_each_entry_rcu(hard_iface_tmp, &batadv_hardif_list, list) {
624 if (hard_iface_tmp->if_status == IF_NOT_IN_USE) 637 if (hard_iface_tmp->if_status == BATADV_IF_NOT_IN_USE)
625 continue; 638 continue;
626 639
627 if (hard_iface == hard_iface_tmp) 640 if (hard_iface == hard_iface_tmp)
diff --git a/net/batman-adv/originator.h b/net/batman-adv/originator.h
index f74d0d69335..9778e656dec 100644
--- a/net/batman-adv/originator.h
+++ b/net/batman-adv/originator.h
@@ -1,5 +1,4 @@
1/* 1/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
2 * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
3 * 2 *
4 * Marek Lindner, Simon Wunderlich 3 * Marek Lindner, Simon Wunderlich
5 * 4 *
@@ -16,7 +15,6 @@
16 * along with this program; if not, write to the Free Software 15 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA 17 * 02110-1301, USA
19 *
20 */ 18 */
21 19
22#ifndef _NET_BATMAN_ADV_ORIGINATOR_H_ 20#ifndef _NET_BATMAN_ADV_ORIGINATOR_H_
@@ -24,24 +22,29 @@
24 22
25#include "hash.h" 23#include "hash.h"
26 24
27int originator_init(struct bat_priv *bat_priv); 25int batadv_originator_init(struct batadv_priv *bat_priv);
28void originator_free(struct bat_priv *bat_priv); 26void batadv_originator_free(struct batadv_priv *bat_priv);
29void purge_orig_ref(struct bat_priv *bat_priv); 27void batadv_purge_orig_ref(struct batadv_priv *bat_priv);
30void orig_node_free_ref(struct orig_node *orig_node); 28void batadv_orig_node_free_ref(struct batadv_orig_node *orig_node);
31struct orig_node *get_orig_node(struct bat_priv *bat_priv, const uint8_t *addr); 29struct batadv_orig_node *batadv_get_orig_node(struct batadv_priv *bat_priv,
32struct neigh_node *batadv_neigh_node_new(struct hard_iface *hard_iface, 30 const uint8_t *addr);
33 const uint8_t *neigh_addr, 31struct batadv_neigh_node *
34 uint32_t seqno); 32batadv_neigh_node_new(struct batadv_hard_iface *hard_iface,
35void neigh_node_free_ref(struct neigh_node *neigh_node); 33 const uint8_t *neigh_addr, uint32_t seqno);
36struct neigh_node *orig_node_get_router(struct orig_node *orig_node); 34void batadv_neigh_node_free_ref(struct batadv_neigh_node *neigh_node);
37int orig_seq_print_text(struct seq_file *seq, void *offset); 35struct batadv_neigh_node *
38int orig_hash_add_if(struct hard_iface *hard_iface, int max_if_num); 36batadv_orig_node_get_router(struct batadv_orig_node *orig_node);
39int orig_hash_del_if(struct hard_iface *hard_iface, int max_if_num); 37int batadv_orig_seq_print_text(struct seq_file *seq, void *offset);
40 38int batadv_orig_hash_add_if(struct batadv_hard_iface *hard_iface,
41 39 int max_if_num);
42/* hashfunction to choose an entry in a hash table of given size */ 40int batadv_orig_hash_del_if(struct batadv_hard_iface *hard_iface,
43/* hash algorithm from http://en.wikipedia.org/wiki/Hash_table */ 41 int max_if_num);
44static inline uint32_t choose_orig(const void *data, uint32_t size) 42
43
44/* hashfunction to choose an entry in a hash table of given size
45 * hash algorithm from http://en.wikipedia.org/wiki/Hash_table
46 */
47static inline uint32_t batadv_choose_orig(const void *data, uint32_t size)
45{ 48{
46 const unsigned char *key = data; 49 const unsigned char *key = data;
47 uint32_t hash = 0; 50 uint32_t hash = 0;
@@ -60,24 +63,24 @@ static inline uint32_t choose_orig(const void *data, uint32_t size)
60 return hash % size; 63 return hash % size;
61} 64}
62 65
63static inline struct orig_node *orig_hash_find(struct bat_priv *bat_priv, 66static inline struct batadv_orig_node *
64 const void *data) 67batadv_orig_hash_find(struct batadv_priv *bat_priv, const void *data)
65{ 68{
66 struct hashtable_t *hash = bat_priv->orig_hash; 69 struct batadv_hashtable *hash = bat_priv->orig_hash;
67 struct hlist_head *head; 70 struct hlist_head *head;
68 struct hlist_node *node; 71 struct hlist_node *node;
69 struct orig_node *orig_node, *orig_node_tmp = NULL; 72 struct batadv_orig_node *orig_node, *orig_node_tmp = NULL;
70 int index; 73 int index;
71 74
72 if (!hash) 75 if (!hash)
73 return NULL; 76 return NULL;
74 77
75 index = choose_orig(data, hash->size); 78 index = batadv_choose_orig(data, hash->size);
76 head = &hash->table[index]; 79 head = &hash->table[index];
77 80
78 rcu_read_lock(); 81 rcu_read_lock();
79 hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) { 82 hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
80 if (!compare_eth(orig_node, data)) 83 if (!batadv_compare_eth(orig_node, data))
81 continue; 84 continue;
82 85
83 if (!atomic_inc_not_zero(&orig_node->refcount)) 86 if (!atomic_inc_not_zero(&orig_node->refcount))
diff --git a/net/batman-adv/packet.h b/net/batman-adv/packet.h
index 0ee1af77079..8d3e55a96ad 100644
--- a/net/batman-adv/packet.h
+++ b/net/batman-adv/packet.h
@@ -1,5 +1,4 @@
1/* 1/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
2 * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
3 * 2 *
4 * Marek Lindner, Simon Wunderlich 3 * Marek Lindner, Simon Wunderlich
5 * 4 *
@@ -16,171 +15,172 @@
16 * along with this program; if not, write to the Free Software 15 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA 17 * 02110-1301, USA
19 *
20 */ 18 */
21 19
22#ifndef _NET_BATMAN_ADV_PACKET_H_ 20#ifndef _NET_BATMAN_ADV_PACKET_H_
23#define _NET_BATMAN_ADV_PACKET_H_ 21#define _NET_BATMAN_ADV_PACKET_H_
24 22
25#define ETH_P_BATMAN 0x4305 /* unofficial/not registered Ethertype */ 23#define BATADV_ETH_P_BATMAN 0x4305 /* unofficial/not registered Ethertype */
26 24
27enum bat_packettype { 25enum batadv_packettype {
28 BAT_IV_OGM = 0x01, 26 BATADV_IV_OGM = 0x01,
29 BAT_ICMP = 0x02, 27 BATADV_ICMP = 0x02,
30 BAT_UNICAST = 0x03, 28 BATADV_UNICAST = 0x03,
31 BAT_BCAST = 0x04, 29 BATADV_BCAST = 0x04,
32 BAT_VIS = 0x05, 30 BATADV_VIS = 0x05,
33 BAT_UNICAST_FRAG = 0x06, 31 BATADV_UNICAST_FRAG = 0x06,
34 BAT_TT_QUERY = 0x07, 32 BATADV_TT_QUERY = 0x07,
35 BAT_ROAM_ADV = 0x08 33 BATADV_ROAM_ADV = 0x08,
36}; 34};
37 35
38/* this file is included by batctl which needs these defines */ 36/* this file is included by batctl which needs these defines */
39#define COMPAT_VERSION 14 37#define BATADV_COMPAT_VERSION 14
40 38
41enum batman_iv_flags { 39enum batadv_iv_flags {
42 NOT_BEST_NEXT_HOP = 1 << 3, 40 BATADV_NOT_BEST_NEXT_HOP = 1 << 3,
43 PRIMARIES_FIRST_HOP = 1 << 4, 41 BATADV_PRIMARIES_FIRST_HOP = 1 << 4,
44 VIS_SERVER = 1 << 5, 42 BATADV_VIS_SERVER = 1 << 5,
45 DIRECTLINK = 1 << 6 43 BATADV_DIRECTLINK = 1 << 6,
46}; 44};
47 45
48/* ICMP message types */ 46/* ICMP message types */
49enum icmp_packettype { 47enum batadv_icmp_packettype {
50 ECHO_REPLY = 0, 48 BATADV_ECHO_REPLY = 0,
51 DESTINATION_UNREACHABLE = 3, 49 BATADV_DESTINATION_UNREACHABLE = 3,
52 ECHO_REQUEST = 8, 50 BATADV_ECHO_REQUEST = 8,
53 TTL_EXCEEDED = 11, 51 BATADV_TTL_EXCEEDED = 11,
54 PARAMETER_PROBLEM = 12 52 BATADV_PARAMETER_PROBLEM = 12,
55}; 53};
56 54
57/* vis defines */ 55/* vis defines */
58enum vis_packettype { 56enum batadv_vis_packettype {
59 VIS_TYPE_SERVER_SYNC = 0, 57 BATADV_VIS_TYPE_SERVER_SYNC = 0,
60 VIS_TYPE_CLIENT_UPDATE = 1 58 BATADV_VIS_TYPE_CLIENT_UPDATE = 1,
61}; 59};
62 60
63/* fragmentation defines */ 61/* fragmentation defines */
64enum unicast_frag_flags { 62enum batadv_unicast_frag_flags {
65 UNI_FRAG_HEAD = 1 << 0, 63 BATADV_UNI_FRAG_HEAD = 1 << 0,
66 UNI_FRAG_LARGETAIL = 1 << 1 64 BATADV_UNI_FRAG_LARGETAIL = 1 << 1,
67}; 65};
68 66
69/* TT_QUERY subtypes */ 67/* TT_QUERY subtypes */
70#define TT_QUERY_TYPE_MASK 0x3 68#define BATADV_TT_QUERY_TYPE_MASK 0x3
71 69
72enum tt_query_packettype { 70enum batadv_tt_query_packettype {
73 TT_REQUEST = 0, 71 BATADV_TT_REQUEST = 0,
74 TT_RESPONSE = 1 72 BATADV_TT_RESPONSE = 1,
75}; 73};
76 74
77/* TT_QUERY flags */ 75/* TT_QUERY flags */
78enum tt_query_flags { 76enum batadv_tt_query_flags {
79 TT_FULL_TABLE = 1 << 2 77 BATADV_TT_FULL_TABLE = 1 << 2,
80}; 78};
81 79
82/* TT_CLIENT flags. 80/* BATADV_TT_CLIENT flags.
83 * Flags from 1 to 1 << 7 are sent on the wire, while flags from 1 << 8 to 81 * Flags from 1 to 1 << 7 are sent on the wire, while flags from 1 << 8 to
84 * 1 << 15 are used for local computation only */ 82 * 1 << 15 are used for local computation only
85enum tt_client_flags { 83 */
86 TT_CLIENT_DEL = 1 << 0, 84enum batadv_tt_client_flags {
87 TT_CLIENT_ROAM = 1 << 1, 85 BATADV_TT_CLIENT_DEL = 1 << 0,
88 TT_CLIENT_WIFI = 1 << 2, 86 BATADV_TT_CLIENT_ROAM = 1 << 1,
89 TT_CLIENT_NOPURGE = 1 << 8, 87 BATADV_TT_CLIENT_WIFI = 1 << 2,
90 TT_CLIENT_NEW = 1 << 9, 88 BATADV_TT_CLIENT_NOPURGE = 1 << 8,
91 TT_CLIENT_PENDING = 1 << 10 89 BATADV_TT_CLIENT_NEW = 1 << 9,
90 BATADV_TT_CLIENT_PENDING = 1 << 10,
92}; 91};
93 92
94/* claim frame types for the bridge loop avoidance */ 93/* claim frame types for the bridge loop avoidance */
95enum bla_claimframe { 94enum batadv_bla_claimframe {
96 CLAIM_TYPE_ADD = 0x00, 95 BATADV_CLAIM_TYPE_ADD = 0x00,
97 CLAIM_TYPE_DEL = 0x01, 96 BATADV_CLAIM_TYPE_DEL = 0x01,
98 CLAIM_TYPE_ANNOUNCE = 0x02, 97 BATADV_CLAIM_TYPE_ANNOUNCE = 0x02,
99 CLAIM_TYPE_REQUEST = 0x03 98 BATADV_CLAIM_TYPE_REQUEST = 0x03,
100}; 99};
101 100
102/* the destination hardware field in the ARP frame is used to 101/* the destination hardware field in the ARP frame is used to
103 * transport the claim type and the group id 102 * transport the claim type and the group id
104 */ 103 */
105struct bla_claim_dst { 104struct batadv_bla_claim_dst {
106 uint8_t magic[3]; /* FF:43:05 */ 105 uint8_t magic[3]; /* FF:43:05 */
107 uint8_t type; /* bla_claimframe */ 106 uint8_t type; /* bla_claimframe */
108 uint16_t group; /* group id */ 107 __be16 group; /* group id */
109} __packed; 108} __packed;
110 109
111struct batman_header { 110struct batadv_header {
112 uint8_t packet_type; 111 uint8_t packet_type;
113 uint8_t version; /* batman version field */ 112 uint8_t version; /* batman version field */
114 uint8_t ttl; 113 uint8_t ttl;
115} __packed; 114} __packed;
116 115
117struct batman_ogm_packet { 116struct batadv_ogm_packet {
118 struct batman_header header; 117 struct batadv_header header;
119 uint8_t flags; /* 0x40: DIRECTLINK flag, 0x20 VIS_SERVER flag... */ 118 uint8_t flags; /* 0x40: DIRECTLINK flag, 0x20 VIS_SERVER flag... */
120 uint32_t seqno; 119 __be32 seqno;
121 uint8_t orig[ETH_ALEN]; 120 uint8_t orig[ETH_ALEN];
122 uint8_t prev_sender[ETH_ALEN]; 121 uint8_t prev_sender[ETH_ALEN];
123 uint8_t gw_flags; /* flags related to gateway class */ 122 uint8_t gw_flags; /* flags related to gateway class */
124 uint8_t tq; 123 uint8_t tq;
125 uint8_t tt_num_changes; 124 uint8_t tt_num_changes;
126 uint8_t ttvn; /* translation table version number */ 125 uint8_t ttvn; /* translation table version number */
127 uint16_t tt_crc; 126 __be16 tt_crc;
128} __packed; 127} __packed;
129 128
130#define BATMAN_OGM_HLEN sizeof(struct batman_ogm_packet) 129#define BATADV_OGM_HLEN sizeof(struct batadv_ogm_packet)
131 130
132struct icmp_packet { 131struct batadv_icmp_packet {
133 struct batman_header header; 132 struct batadv_header header;
134 uint8_t msg_type; /* see ICMP message types above */ 133 uint8_t msg_type; /* see ICMP message types above */
135 uint8_t dst[ETH_ALEN]; 134 uint8_t dst[ETH_ALEN];
136 uint8_t orig[ETH_ALEN]; 135 uint8_t orig[ETH_ALEN];
137 uint16_t seqno; 136 __be16 seqno;
138 uint8_t uid; 137 uint8_t uid;
139 uint8_t reserved; 138 uint8_t reserved;
140} __packed; 139} __packed;
141 140
142#define BAT_RR_LEN 16 141#define BATADV_RR_LEN 16
143 142
144/* icmp_packet_rr must start with all fields from imcp_packet 143/* icmp_packet_rr must start with all fields from imcp_packet
145 * as this is assumed by code that handles ICMP packets */ 144 * as this is assumed by code that handles ICMP packets
146struct icmp_packet_rr { 145 */
147 struct batman_header header; 146struct batadv_icmp_packet_rr {
147 struct batadv_header header;
148 uint8_t msg_type; /* see ICMP message types above */ 148 uint8_t msg_type; /* see ICMP message types above */
149 uint8_t dst[ETH_ALEN]; 149 uint8_t dst[ETH_ALEN];
150 uint8_t orig[ETH_ALEN]; 150 uint8_t orig[ETH_ALEN];
151 uint16_t seqno; 151 __be16 seqno;
152 uint8_t uid; 152 uint8_t uid;
153 uint8_t rr_cur; 153 uint8_t rr_cur;
154 uint8_t rr[BAT_RR_LEN][ETH_ALEN]; 154 uint8_t rr[BATADV_RR_LEN][ETH_ALEN];
155} __packed; 155} __packed;
156 156
157struct unicast_packet { 157struct batadv_unicast_packet {
158 struct batman_header header; 158 struct batadv_header header;
159 uint8_t ttvn; /* destination translation table version number */ 159 uint8_t ttvn; /* destination translation table version number */
160 uint8_t dest[ETH_ALEN]; 160 uint8_t dest[ETH_ALEN];
161} __packed; 161} __packed;
162 162
163struct unicast_frag_packet { 163struct batadv_unicast_frag_packet {
164 struct batman_header header; 164 struct batadv_header header;
165 uint8_t ttvn; /* destination translation table version number */ 165 uint8_t ttvn; /* destination translation table version number */
166 uint8_t dest[ETH_ALEN]; 166 uint8_t dest[ETH_ALEN];
167 uint8_t flags; 167 uint8_t flags;
168 uint8_t align; 168 uint8_t align;
169 uint8_t orig[ETH_ALEN]; 169 uint8_t orig[ETH_ALEN];
170 uint16_t seqno; 170 __be16 seqno;
171} __packed; 171} __packed;
172 172
173struct bcast_packet { 173struct batadv_bcast_packet {
174 struct batman_header header; 174 struct batadv_header header;
175 uint8_t reserved; 175 uint8_t reserved;
176 uint32_t seqno; 176 __be32 seqno;
177 uint8_t orig[ETH_ALEN]; 177 uint8_t orig[ETH_ALEN];
178} __packed; 178} __packed;
179 179
180struct vis_packet { 180struct batadv_vis_packet {
181 struct batman_header header; 181 struct batadv_header header;
182 uint8_t vis_type; /* which type of vis-participant sent this? */ 182 uint8_t vis_type; /* which type of vis-participant sent this? */
183 uint32_t seqno; /* sequence number */ 183 __be32 seqno; /* sequence number */
184 uint8_t entries; /* number of entries behind this struct */ 184 uint8_t entries; /* number of entries behind this struct */
185 uint8_t reserved; 185 uint8_t reserved;
186 uint8_t vis_orig[ETH_ALEN]; /* originator reporting its neighbors */ 186 uint8_t vis_orig[ETH_ALEN]; /* originator reporting its neighbors */
@@ -188,11 +188,12 @@ struct vis_packet {
188 uint8_t sender_orig[ETH_ALEN]; /* who sent or forwarded this packet */ 188 uint8_t sender_orig[ETH_ALEN]; /* who sent or forwarded this packet */
189} __packed; 189} __packed;
190 190
191struct tt_query_packet { 191struct batadv_tt_query_packet {
192 struct batman_header header; 192 struct batadv_header header;
193 /* the flag field is a combination of: 193 /* the flag field is a combination of:
194 * - TT_REQUEST or TT_RESPONSE 194 * - TT_REQUEST or TT_RESPONSE
195 * - TT_FULL_TABLE */ 195 * - TT_FULL_TABLE
196 */
196 uint8_t flags; 197 uint8_t flags;
197 uint8_t dst[ETH_ALEN]; 198 uint8_t dst[ETH_ALEN];
198 uint8_t src[ETH_ALEN]; 199 uint8_t src[ETH_ALEN];
@@ -200,24 +201,26 @@ struct tt_query_packet {
200 * if TT_REQUEST: ttvn that triggered the 201 * if TT_REQUEST: ttvn that triggered the
201 * request 202 * request
202 * if TT_RESPONSE: new ttvn for the src 203 * if TT_RESPONSE: new ttvn for the src
203 * orig_node */ 204 * orig_node
205 */
204 uint8_t ttvn; 206 uint8_t ttvn;
205 /* tt_data field is: 207 /* tt_data field is:
206 * if TT_REQUEST: crc associated with the 208 * if TT_REQUEST: crc associated with the
207 * ttvn 209 * ttvn
208 * if TT_RESPONSE: table_size */ 210 * if TT_RESPONSE: table_size
209 uint16_t tt_data; 211 */
212 __be16 tt_data;
210} __packed; 213} __packed;
211 214
212struct roam_adv_packet { 215struct batadv_roam_adv_packet {
213 struct batman_header header; 216 struct batadv_header header;
214 uint8_t reserved; 217 uint8_t reserved;
215 uint8_t dst[ETH_ALEN]; 218 uint8_t dst[ETH_ALEN];
216 uint8_t src[ETH_ALEN]; 219 uint8_t src[ETH_ALEN];
217 uint8_t client[ETH_ALEN]; 220 uint8_t client[ETH_ALEN];
218} __packed; 221} __packed;
219 222
220struct tt_change { 223struct batadv_tt_change {
221 uint8_t flags; 224 uint8_t flags;
222 uint8_t addr[ETH_ALEN]; 225 uint8_t addr[ETH_ALEN];
223} __packed; 226} __packed;
diff --git a/net/batman-adv/ring_buffer.c b/net/batman-adv/ring_buffer.c
index fd63951d118..c8f61e395b7 100644
--- a/net/batman-adv/ring_buffer.c
+++ b/net/batman-adv/ring_buffer.c
@@ -1,5 +1,4 @@
1/* 1/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
2 * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
3 * 2 *
4 * Marek Lindner 3 * Marek Lindner
5 * 4 *
@@ -16,26 +15,26 @@
16 * along with this program; if not, write to the Free Software 15 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA 17 * 02110-1301, USA
19 *
20 */ 18 */
21 19
22#include "main.h" 20#include "main.h"
23#include "ring_buffer.h" 21#include "ring_buffer.h"
24 22
25void ring_buffer_set(uint8_t lq_recv[], uint8_t *lq_index, uint8_t value) 23void batadv_ring_buffer_set(uint8_t lq_recv[], uint8_t *lq_index,
24 uint8_t value)
26{ 25{
27 lq_recv[*lq_index] = value; 26 lq_recv[*lq_index] = value;
28 *lq_index = (*lq_index + 1) % TQ_GLOBAL_WINDOW_SIZE; 27 *lq_index = (*lq_index + 1) % BATADV_TQ_GLOBAL_WINDOW_SIZE;
29} 28}
30 29
31uint8_t ring_buffer_avg(const uint8_t lq_recv[]) 30uint8_t batadv_ring_buffer_avg(const uint8_t lq_recv[])
32{ 31{
33 const uint8_t *ptr; 32 const uint8_t *ptr;
34 uint16_t count = 0, i = 0, sum = 0; 33 uint16_t count = 0, i = 0, sum = 0;
35 34
36 ptr = lq_recv; 35 ptr = lq_recv;
37 36
38 while (i < TQ_GLOBAL_WINDOW_SIZE) { 37 while (i < BATADV_TQ_GLOBAL_WINDOW_SIZE) {
39 if (*ptr != 0) { 38 if (*ptr != 0) {
40 count++; 39 count++;
41 sum += *ptr; 40 sum += *ptr;
diff --git a/net/batman-adv/ring_buffer.h b/net/batman-adv/ring_buffer.h
index 8b58bd82767..fda8c17df27 100644
--- a/net/batman-adv/ring_buffer.h
+++ b/net/batman-adv/ring_buffer.h
@@ -1,5 +1,4 @@
1/* 1/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
2 * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
3 * 2 *
4 * Marek Lindner 3 * Marek Lindner
5 * 4 *
@@ -16,13 +15,13 @@
16 * along with this program; if not, write to the Free Software 15 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA 17 * 02110-1301, USA
19 *
20 */ 18 */
21 19
22#ifndef _NET_BATMAN_ADV_RING_BUFFER_H_ 20#ifndef _NET_BATMAN_ADV_RING_BUFFER_H_
23#define _NET_BATMAN_ADV_RING_BUFFER_H_ 21#define _NET_BATMAN_ADV_RING_BUFFER_H_
24 22
25void ring_buffer_set(uint8_t lq_recv[], uint8_t *lq_index, uint8_t value); 23void batadv_ring_buffer_set(uint8_t lq_recv[], uint8_t *lq_index,
26uint8_t ring_buffer_avg(const uint8_t lq_recv[]); 24 uint8_t value);
25uint8_t batadv_ring_buffer_avg(const uint8_t lq_recv[]);
27 26
28#endif /* _NET_BATMAN_ADV_RING_BUFFER_H_ */ 27#endif /* _NET_BATMAN_ADV_RING_BUFFER_H_ */
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
index 015471d801b..bc2b88bbea1 100644
--- a/net/batman-adv/routing.c
+++ b/net/batman-adv/routing.c
@@ -1,5 +1,4 @@
1/* 1/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
2 * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
3 * 2 *
4 * Marek Lindner, Simon Wunderlich 3 * Marek Lindner, Simon Wunderlich
5 * 4 *
@@ -16,7 +15,6 @@
16 * along with this program; if not, write to the Free Software 15 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA 17 * 02110-1301, USA
19 *
20 */ 18 */
21 19
22#include "main.h" 20#include "main.h"
@@ -31,19 +29,20 @@
31#include "unicast.h" 29#include "unicast.h"
32#include "bridge_loop_avoidance.h" 30#include "bridge_loop_avoidance.h"
33 31
34static int route_unicast_packet(struct sk_buff *skb, 32static int batadv_route_unicast_packet(struct sk_buff *skb,
35 struct hard_iface *recv_if); 33 struct batadv_hard_iface *recv_if);
36 34
37void slide_own_bcast_window(struct hard_iface *hard_iface) 35void batadv_slide_own_bcast_window(struct batadv_hard_iface *hard_iface)
38{ 36{
39 struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface); 37 struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
40 struct hashtable_t *hash = bat_priv->orig_hash; 38 struct batadv_hashtable *hash = bat_priv->orig_hash;
41 struct hlist_node *node; 39 struct hlist_node *node;
42 struct hlist_head *head; 40 struct hlist_head *head;
43 struct orig_node *orig_node; 41 struct batadv_orig_node *orig_node;
44 unsigned long *word; 42 unsigned long *word;
45 uint32_t i; 43 uint32_t i;
46 size_t word_index; 44 size_t word_index;
45 uint8_t *w;
47 46
48 for (i = 0; i < hash->size; i++) { 47 for (i = 0; i < hash->size; i++) {
49 head = &hash->table[i]; 48 head = &hash->table[i];
@@ -51,49 +50,49 @@ void slide_own_bcast_window(struct hard_iface *hard_iface)
51 rcu_read_lock(); 50 rcu_read_lock();
52 hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) { 51 hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
53 spin_lock_bh(&orig_node->ogm_cnt_lock); 52 spin_lock_bh(&orig_node->ogm_cnt_lock);
54 word_index = hard_iface->if_num * NUM_WORDS; 53 word_index = hard_iface->if_num * BATADV_NUM_WORDS;
55 word = &(orig_node->bcast_own[word_index]); 54 word = &(orig_node->bcast_own[word_index]);
56 55
57 bit_get_packet(bat_priv, word, 1, 0); 56 batadv_bit_get_packet(bat_priv, word, 1, 0);
58 orig_node->bcast_own_sum[hard_iface->if_num] = 57 w = &orig_node->bcast_own_sum[hard_iface->if_num];
59 bitmap_weight(word, TQ_LOCAL_WINDOW_SIZE); 58 *w = bitmap_weight(word, BATADV_TQ_LOCAL_WINDOW_SIZE);
60 spin_unlock_bh(&orig_node->ogm_cnt_lock); 59 spin_unlock_bh(&orig_node->ogm_cnt_lock);
61 } 60 }
62 rcu_read_unlock(); 61 rcu_read_unlock();
63 } 62 }
64} 63}
65 64
66static void _update_route(struct bat_priv *bat_priv, 65static void _batadv_update_route(struct batadv_priv *bat_priv,
67 struct orig_node *orig_node, 66 struct batadv_orig_node *orig_node,
68 struct neigh_node *neigh_node) 67 struct batadv_neigh_node *neigh_node)
69{ 68{
70 struct neigh_node *curr_router; 69 struct batadv_neigh_node *curr_router;
71 70
72 curr_router = orig_node_get_router(orig_node); 71 curr_router = batadv_orig_node_get_router(orig_node);
73 72
74 /* route deleted */ 73 /* route deleted */
75 if ((curr_router) && (!neigh_node)) { 74 if ((curr_router) && (!neigh_node)) {
76 bat_dbg(DBG_ROUTES, bat_priv, "Deleting route towards: %pM\n", 75 batadv_dbg(BATADV_DBG_ROUTES, bat_priv,
77 orig_node->orig); 76 "Deleting route towards: %pM\n", orig_node->orig);
78 tt_global_del_orig(bat_priv, orig_node, 77 batadv_tt_global_del_orig(bat_priv, orig_node,
79 "Deleted route towards originator"); 78 "Deleted route towards originator");
80 79
81 /* route added */ 80 /* route added */
82 } else if ((!curr_router) && (neigh_node)) { 81 } else if ((!curr_router) && (neigh_node)) {
83 82
84 bat_dbg(DBG_ROUTES, bat_priv, 83 batadv_dbg(BATADV_DBG_ROUTES, bat_priv,
85 "Adding route towards: %pM (via %pM)\n", 84 "Adding route towards: %pM (via %pM)\n",
86 orig_node->orig, neigh_node->addr); 85 orig_node->orig, neigh_node->addr);
87 /* route changed */ 86 /* route changed */
88 } else if (neigh_node && curr_router) { 87 } else if (neigh_node && curr_router) {
89 bat_dbg(DBG_ROUTES, bat_priv, 88 batadv_dbg(BATADV_DBG_ROUTES, bat_priv,
90 "Changing route towards: %pM (now via %pM - was via %pM)\n", 89 "Changing route towards: %pM (now via %pM - was via %pM)\n",
91 orig_node->orig, neigh_node->addr, 90 orig_node->orig, neigh_node->addr,
92 curr_router->addr); 91 curr_router->addr);
93 } 92 }
94 93
95 if (curr_router) 94 if (curr_router)
96 neigh_node_free_ref(curr_router); 95 batadv_neigh_node_free_ref(curr_router);
97 96
98 /* increase refcount of new best neighbor */ 97 /* increase refcount of new best neighbor */
99 if (neigh_node && !atomic_inc_not_zero(&neigh_node->refcount)) 98 if (neigh_node && !atomic_inc_not_zero(&neigh_node->refcount))
@@ -105,30 +104,31 @@ static void _update_route(struct bat_priv *bat_priv,
105 104
106 /* decrease refcount of previous best neighbor */ 105 /* decrease refcount of previous best neighbor */
107 if (curr_router) 106 if (curr_router)
108 neigh_node_free_ref(curr_router); 107 batadv_neigh_node_free_ref(curr_router);
109} 108}
110 109
111void update_route(struct bat_priv *bat_priv, struct orig_node *orig_node, 110void batadv_update_route(struct batadv_priv *bat_priv,
112 struct neigh_node *neigh_node) 111 struct batadv_orig_node *orig_node,
112 struct batadv_neigh_node *neigh_node)
113{ 113{
114 struct neigh_node *router = NULL; 114 struct batadv_neigh_node *router = NULL;
115 115
116 if (!orig_node) 116 if (!orig_node)
117 goto out; 117 goto out;
118 118
119 router = orig_node_get_router(orig_node); 119 router = batadv_orig_node_get_router(orig_node);
120 120
121 if (router != neigh_node) 121 if (router != neigh_node)
122 _update_route(bat_priv, orig_node, neigh_node); 122 _batadv_update_route(bat_priv, orig_node, neigh_node);
123 123
124out: 124out:
125 if (router) 125 if (router)
126 neigh_node_free_ref(router); 126 batadv_neigh_node_free_ref(router);
127} 127}
128 128
129/* caller must hold the neigh_list_lock */ 129/* caller must hold the neigh_list_lock */
130void bonding_candidate_del(struct orig_node *orig_node, 130void batadv_bonding_candidate_del(struct batadv_orig_node *orig_node,
131 struct neigh_node *neigh_node) 131 struct batadv_neigh_node *neigh_node)
132{ 132{
133 /* this neighbor is not part of our candidate list */ 133 /* this neighbor is not part of our candidate list */
134 if (list_empty(&neigh_node->bonding_list)) 134 if (list_empty(&neigh_node->bonding_list))
@@ -136,37 +136,36 @@ void bonding_candidate_del(struct orig_node *orig_node,
136 136
137 list_del_rcu(&neigh_node->bonding_list); 137 list_del_rcu(&neigh_node->bonding_list);
138 INIT_LIST_HEAD(&neigh_node->bonding_list); 138 INIT_LIST_HEAD(&neigh_node->bonding_list);
139 neigh_node_free_ref(neigh_node); 139 batadv_neigh_node_free_ref(neigh_node);
140 atomic_dec(&orig_node->bond_candidates); 140 atomic_dec(&orig_node->bond_candidates);
141 141
142out: 142out:
143 return; 143 return;
144} 144}
145 145
146void bonding_candidate_add(struct orig_node *orig_node, 146void batadv_bonding_candidate_add(struct batadv_orig_node *orig_node,
147 struct neigh_node *neigh_node) 147 struct batadv_neigh_node *neigh_node)
148{ 148{
149 struct hlist_node *node; 149 struct hlist_node *node;
150 struct neigh_node *tmp_neigh_node, *router = NULL; 150 struct batadv_neigh_node *tmp_neigh_node, *router = NULL;
151 uint8_t interference_candidate = 0; 151 uint8_t interference_candidate = 0;
152 152
153 spin_lock_bh(&orig_node->neigh_list_lock); 153 spin_lock_bh(&orig_node->neigh_list_lock);
154 154
155 /* only consider if it has the same primary address ... */ 155 /* only consider if it has the same primary address ... */
156 if (!compare_eth(orig_node->orig, 156 if (!batadv_compare_eth(orig_node->orig,
157 neigh_node->orig_node->primary_addr)) 157 neigh_node->orig_node->primary_addr))
158 goto candidate_del; 158 goto candidate_del;
159 159
160 router = orig_node_get_router(orig_node); 160 router = batadv_orig_node_get_router(orig_node);
161 if (!router) 161 if (!router)
162 goto candidate_del; 162 goto candidate_del;
163 163
164 /* ... and is good enough to be considered */ 164 /* ... and is good enough to be considered */
165 if (neigh_node->tq_avg < router->tq_avg - BONDING_TQ_THRESHOLD) 165 if (neigh_node->tq_avg < router->tq_avg - BATADV_BONDING_TQ_THRESHOLD)
166 goto candidate_del; 166 goto candidate_del;
167 167
168 /** 168 /* check if we have another candidate with the same mac address or
169 * check if we have another candidate with the same mac address or
170 * interface. If we do, we won't select this candidate because of 169 * interface. If we do, we won't select this candidate because of
171 * possible interference. 170 * possible interference.
172 */ 171 */
@@ -177,12 +176,14 @@ void bonding_candidate_add(struct orig_node *orig_node,
177 continue; 176 continue;
178 177
179 /* we only care if the other candidate is even 178 /* we only care if the other candidate is even
180 * considered as candidate. */ 179 * considered as candidate.
180 */
181 if (list_empty(&tmp_neigh_node->bonding_list)) 181 if (list_empty(&tmp_neigh_node->bonding_list))
182 continue; 182 continue;
183 183
184 if ((neigh_node->if_incoming == tmp_neigh_node->if_incoming) || 184 if ((neigh_node->if_incoming == tmp_neigh_node->if_incoming) ||
185 (compare_eth(neigh_node->addr, tmp_neigh_node->addr))) { 185 (batadv_compare_eth(neigh_node->addr,
186 tmp_neigh_node->addr))) {
186 interference_candidate = 1; 187 interference_candidate = 1;
187 break; 188 break;
188 } 189 }
@@ -204,21 +205,22 @@ void bonding_candidate_add(struct orig_node *orig_node,
204 goto out; 205 goto out;
205 206
206candidate_del: 207candidate_del:
207 bonding_candidate_del(orig_node, neigh_node); 208 batadv_bonding_candidate_del(orig_node, neigh_node);
208 209
209out: 210out:
210 spin_unlock_bh(&orig_node->neigh_list_lock); 211 spin_unlock_bh(&orig_node->neigh_list_lock);
211 212
212 if (router) 213 if (router)
213 neigh_node_free_ref(router); 214 batadv_neigh_node_free_ref(router);
214} 215}
215 216
216/* copy primary address for bonding */ 217/* copy primary address for bonding */
217void bonding_save_primary(const struct orig_node *orig_node, 218void
218 struct orig_node *orig_neigh_node, 219batadv_bonding_save_primary(const struct batadv_orig_node *orig_node,
219 const struct batman_ogm_packet *batman_ogm_packet) 220 struct batadv_orig_node *orig_neigh_node,
221 const struct batadv_ogm_packet *batman_ogm_packet)
220{ 222{
221 if (!(batman_ogm_packet->flags & PRIMARIES_FIRST_HOP)) 223 if (!(batman_ogm_packet->flags & BATADV_PRIMARIES_FIRST_HOP))
222 return; 224 return;
223 225
224 memcpy(orig_neigh_node->primary_addr, orig_node->orig, ETH_ALEN); 226 memcpy(orig_neigh_node->primary_addr, orig_node->orig, ETH_ALEN);
@@ -229,25 +231,26 @@ void bonding_save_primary(const struct orig_node *orig_node,
229 * 0 if the packet is to be accepted 231 * 0 if the packet is to be accepted
230 * 1 if the packet is to be ignored. 232 * 1 if the packet is to be ignored.
231 */ 233 */
232int window_protected(struct bat_priv *bat_priv, int32_t seq_num_diff, 234int batadv_window_protected(struct batadv_priv *bat_priv, int32_t seq_num_diff,
233 unsigned long *last_reset) 235 unsigned long *last_reset)
234{ 236{
235 if ((seq_num_diff <= -TQ_LOCAL_WINDOW_SIZE) || 237 if (seq_num_diff <= -BATADV_TQ_LOCAL_WINDOW_SIZE ||
236 (seq_num_diff >= EXPECTED_SEQNO_RANGE)) { 238 seq_num_diff >= BATADV_EXPECTED_SEQNO_RANGE) {
237 if (!has_timed_out(*last_reset, RESET_PROTECTION_MS)) 239 if (!batadv_has_timed_out(*last_reset,
240 BATADV_RESET_PROTECTION_MS))
238 return 1; 241 return 1;
239 242
240 *last_reset = jiffies; 243 *last_reset = jiffies;
241 bat_dbg(DBG_BATMAN, bat_priv, 244 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
242 "old packet received, start protection\n"); 245 "old packet received, start protection\n");
243 } 246 }
244 247
245 return 0; 248 return 0;
246} 249}
247 250
248bool check_management_packet(struct sk_buff *skb, 251bool batadv_check_management_packet(struct sk_buff *skb,
249 struct hard_iface *hard_iface, 252 struct batadv_hard_iface *hard_iface,
250 int header_len) 253 int header_len)
251{ 254{
252 struct ethhdr *ethhdr; 255 struct ethhdr *ethhdr;
253 256
@@ -276,34 +279,34 @@ bool check_management_packet(struct sk_buff *skb,
276 return true; 279 return true;
277} 280}
278 281
279static int recv_my_icmp_packet(struct bat_priv *bat_priv, 282static int batadv_recv_my_icmp_packet(struct batadv_priv *bat_priv,
280 struct sk_buff *skb, size_t icmp_len) 283 struct sk_buff *skb, size_t icmp_len)
281{ 284{
282 struct hard_iface *primary_if = NULL; 285 struct batadv_hard_iface *primary_if = NULL;
283 struct orig_node *orig_node = NULL; 286 struct batadv_orig_node *orig_node = NULL;
284 struct neigh_node *router = NULL; 287 struct batadv_neigh_node *router = NULL;
285 struct icmp_packet_rr *icmp_packet; 288 struct batadv_icmp_packet_rr *icmp_packet;
286 int ret = NET_RX_DROP; 289 int ret = NET_RX_DROP;
287 290
288 icmp_packet = (struct icmp_packet_rr *)skb->data; 291 icmp_packet = (struct batadv_icmp_packet_rr *)skb->data;
289 292
290 /* add data to device queue */ 293 /* add data to device queue */
291 if (icmp_packet->msg_type != ECHO_REQUEST) { 294 if (icmp_packet->msg_type != BATADV_ECHO_REQUEST) {
292 bat_socket_receive_packet(icmp_packet, icmp_len); 295 batadv_socket_receive_packet(icmp_packet, icmp_len);
293 goto out; 296 goto out;
294 } 297 }
295 298
296 primary_if = primary_if_get_selected(bat_priv); 299 primary_if = batadv_primary_if_get_selected(bat_priv);
297 if (!primary_if) 300 if (!primary_if)
298 goto out; 301 goto out;
299 302
300 /* answer echo request (ping) */ 303 /* answer echo request (ping) */
301 /* get routing information */ 304 /* get routing information */
302 orig_node = orig_hash_find(bat_priv, icmp_packet->orig); 305 orig_node = batadv_orig_hash_find(bat_priv, icmp_packet->orig);
303 if (!orig_node) 306 if (!orig_node)
304 goto out; 307 goto out;
305 308
306 router = orig_node_get_router(orig_node); 309 router = batadv_orig_node_get_router(orig_node);
307 if (!router) 310 if (!router)
308 goto out; 311 goto out;
309 312
@@ -311,54 +314,54 @@ static int recv_my_icmp_packet(struct bat_priv *bat_priv,
311 if (skb_cow(skb, ETH_HLEN) < 0) 314 if (skb_cow(skb, ETH_HLEN) < 0)
312 goto out; 315 goto out;
313 316
314 icmp_packet = (struct icmp_packet_rr *)skb->data; 317 icmp_packet = (struct batadv_icmp_packet_rr *)skb->data;
315 318
316 memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN); 319 memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
317 memcpy(icmp_packet->orig, primary_if->net_dev->dev_addr, ETH_ALEN); 320 memcpy(icmp_packet->orig, primary_if->net_dev->dev_addr, ETH_ALEN);
318 icmp_packet->msg_type = ECHO_REPLY; 321 icmp_packet->msg_type = BATADV_ECHO_REPLY;
319 icmp_packet->header.ttl = TTL; 322 icmp_packet->header.ttl = BATADV_TTL;
320 323
321 send_skb_packet(skb, router->if_incoming, router->addr); 324 batadv_send_skb_packet(skb, router->if_incoming, router->addr);
322 ret = NET_RX_SUCCESS; 325 ret = NET_RX_SUCCESS;
323 326
324out: 327out:
325 if (primary_if) 328 if (primary_if)
326 hardif_free_ref(primary_if); 329 batadv_hardif_free_ref(primary_if);
327 if (router) 330 if (router)
328 neigh_node_free_ref(router); 331 batadv_neigh_node_free_ref(router);
329 if (orig_node) 332 if (orig_node)
330 orig_node_free_ref(orig_node); 333 batadv_orig_node_free_ref(orig_node);
331 return ret; 334 return ret;
332} 335}
333 336
334static int recv_icmp_ttl_exceeded(struct bat_priv *bat_priv, 337static int batadv_recv_icmp_ttl_exceeded(struct batadv_priv *bat_priv,
335 struct sk_buff *skb) 338 struct sk_buff *skb)
336{ 339{
337 struct hard_iface *primary_if = NULL; 340 struct batadv_hard_iface *primary_if = NULL;
338 struct orig_node *orig_node = NULL; 341 struct batadv_orig_node *orig_node = NULL;
339 struct neigh_node *router = NULL; 342 struct batadv_neigh_node *router = NULL;
340 struct icmp_packet *icmp_packet; 343 struct batadv_icmp_packet *icmp_packet;
341 int ret = NET_RX_DROP; 344 int ret = NET_RX_DROP;
342 345
343 icmp_packet = (struct icmp_packet *)skb->data; 346 icmp_packet = (struct batadv_icmp_packet *)skb->data;
344 347
345 /* send TTL exceeded if packet is an echo request (traceroute) */ 348 /* send TTL exceeded if packet is an echo request (traceroute) */
346 if (icmp_packet->msg_type != ECHO_REQUEST) { 349 if (icmp_packet->msg_type != BATADV_ECHO_REQUEST) {
347 pr_debug("Warning - can't forward icmp packet from %pM to %pM: ttl exceeded\n", 350 pr_debug("Warning - can't forward icmp packet from %pM to %pM: ttl exceeded\n",
348 icmp_packet->orig, icmp_packet->dst); 351 icmp_packet->orig, icmp_packet->dst);
349 goto out; 352 goto out;
350 } 353 }
351 354
352 primary_if = primary_if_get_selected(bat_priv); 355 primary_if = batadv_primary_if_get_selected(bat_priv);
353 if (!primary_if) 356 if (!primary_if)
354 goto out; 357 goto out;
355 358
356 /* get routing information */ 359 /* get routing information */
357 orig_node = orig_hash_find(bat_priv, icmp_packet->orig); 360 orig_node = batadv_orig_hash_find(bat_priv, icmp_packet->orig);
358 if (!orig_node) 361 if (!orig_node)
359 goto out; 362 goto out;
360 363
361 router = orig_node_get_router(orig_node); 364 router = batadv_orig_node_get_router(orig_node);
362 if (!router) 365 if (!router)
363 goto out; 366 goto out;
364 367
@@ -366,42 +369,41 @@ static int recv_icmp_ttl_exceeded(struct bat_priv *bat_priv,
366 if (skb_cow(skb, ETH_HLEN) < 0) 369 if (skb_cow(skb, ETH_HLEN) < 0)
367 goto out; 370 goto out;
368 371
369 icmp_packet = (struct icmp_packet *)skb->data; 372 icmp_packet = (struct batadv_icmp_packet *)skb->data;
370 373
371 memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN); 374 memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
372 memcpy(icmp_packet->orig, primary_if->net_dev->dev_addr, ETH_ALEN); 375 memcpy(icmp_packet->orig, primary_if->net_dev->dev_addr, ETH_ALEN);
373 icmp_packet->msg_type = TTL_EXCEEDED; 376 icmp_packet->msg_type = BATADV_TTL_EXCEEDED;
374 icmp_packet->header.ttl = TTL; 377 icmp_packet->header.ttl = BATADV_TTL;
375 378
376 send_skb_packet(skb, router->if_incoming, router->addr); 379 batadv_send_skb_packet(skb, router->if_incoming, router->addr);
377 ret = NET_RX_SUCCESS; 380 ret = NET_RX_SUCCESS;
378 381
379out: 382out:
380 if (primary_if) 383 if (primary_if)
381 hardif_free_ref(primary_if); 384 batadv_hardif_free_ref(primary_if);
382 if (router) 385 if (router)
383 neigh_node_free_ref(router); 386 batadv_neigh_node_free_ref(router);
384 if (orig_node) 387 if (orig_node)
385 orig_node_free_ref(orig_node); 388 batadv_orig_node_free_ref(orig_node);
386 return ret; 389 return ret;
387} 390}
388 391
389 392
390int recv_icmp_packet(struct sk_buff *skb, struct hard_iface *recv_if) 393int batadv_recv_icmp_packet(struct sk_buff *skb,
394 struct batadv_hard_iface *recv_if)
391{ 395{
392 struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface); 396 struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
393 struct icmp_packet_rr *icmp_packet; 397 struct batadv_icmp_packet_rr *icmp_packet;
394 struct ethhdr *ethhdr; 398 struct ethhdr *ethhdr;
395 struct orig_node *orig_node = NULL; 399 struct batadv_orig_node *orig_node = NULL;
396 struct neigh_node *router = NULL; 400 struct batadv_neigh_node *router = NULL;
397 int hdr_size = sizeof(struct icmp_packet); 401 int hdr_size = sizeof(struct batadv_icmp_packet);
398 int ret = NET_RX_DROP; 402 int ret = NET_RX_DROP;
399 403
400 /** 404 /* we truncate all incoming icmp packets if they don't match our size */
401 * we truncate all incoming icmp packets if they don't match our size 405 if (skb->len >= sizeof(struct batadv_icmp_packet_rr))
402 */ 406 hdr_size = sizeof(struct batadv_icmp_packet_rr);
403 if (skb->len >= sizeof(struct icmp_packet_rr))
404 hdr_size = sizeof(struct icmp_packet_rr);
405 407
406 /* drop packet if it has not necessary minimum size */ 408 /* drop packet if it has not necessary minimum size */
407 if (unlikely(!pskb_may_pull(skb, hdr_size))) 409 if (unlikely(!pskb_may_pull(skb, hdr_size)))
@@ -418,33 +420,33 @@ int recv_icmp_packet(struct sk_buff *skb, struct hard_iface *recv_if)
418 goto out; 420 goto out;
419 421
420 /* not for me */ 422 /* not for me */
421 if (!is_my_mac(ethhdr->h_dest)) 423 if (!batadv_is_my_mac(ethhdr->h_dest))
422 goto out; 424 goto out;
423 425
424 icmp_packet = (struct icmp_packet_rr *)skb->data; 426 icmp_packet = (struct batadv_icmp_packet_rr *)skb->data;
425 427
426 /* add record route information if not full */ 428 /* add record route information if not full */
427 if ((hdr_size == sizeof(struct icmp_packet_rr)) && 429 if ((hdr_size == sizeof(struct batadv_icmp_packet_rr)) &&
428 (icmp_packet->rr_cur < BAT_RR_LEN)) { 430 (icmp_packet->rr_cur < BATADV_RR_LEN)) {
429 memcpy(&(icmp_packet->rr[icmp_packet->rr_cur]), 431 memcpy(&(icmp_packet->rr[icmp_packet->rr_cur]),
430 ethhdr->h_dest, ETH_ALEN); 432 ethhdr->h_dest, ETH_ALEN);
431 icmp_packet->rr_cur++; 433 icmp_packet->rr_cur++;
432 } 434 }
433 435
434 /* packet for me */ 436 /* packet for me */
435 if (is_my_mac(icmp_packet->dst)) 437 if (batadv_is_my_mac(icmp_packet->dst))
436 return recv_my_icmp_packet(bat_priv, skb, hdr_size); 438 return batadv_recv_my_icmp_packet(bat_priv, skb, hdr_size);
437 439
438 /* TTL exceeded */ 440 /* TTL exceeded */
439 if (icmp_packet->header.ttl < 2) 441 if (icmp_packet->header.ttl < 2)
440 return recv_icmp_ttl_exceeded(bat_priv, skb); 442 return batadv_recv_icmp_ttl_exceeded(bat_priv, skb);
441 443
442 /* get routing information */ 444 /* get routing information */
443 orig_node = orig_hash_find(bat_priv, icmp_packet->dst); 445 orig_node = batadv_orig_hash_find(bat_priv, icmp_packet->dst);
444 if (!orig_node) 446 if (!orig_node)
445 goto out; 447 goto out;
446 448
447 router = orig_node_get_router(orig_node); 449 router = batadv_orig_node_get_router(orig_node);
448 if (!router) 450 if (!router)
449 goto out; 451 goto out;
450 452
@@ -452,20 +454,20 @@ int recv_icmp_packet(struct sk_buff *skb, struct hard_iface *recv_if)
452 if (skb_cow(skb, ETH_HLEN) < 0) 454 if (skb_cow(skb, ETH_HLEN) < 0)
453 goto out; 455 goto out;
454 456
455 icmp_packet = (struct icmp_packet_rr *)skb->data; 457 icmp_packet = (struct batadv_icmp_packet_rr *)skb->data;
456 458
457 /* decrement ttl */ 459 /* decrement ttl */
458 icmp_packet->header.ttl--; 460 icmp_packet->header.ttl--;
459 461
460 /* route it */ 462 /* route it */
461 send_skb_packet(skb, router->if_incoming, router->addr); 463 batadv_send_skb_packet(skb, router->if_incoming, router->addr);
462 ret = NET_RX_SUCCESS; 464 ret = NET_RX_SUCCESS;
463 465
464out: 466out:
465 if (router) 467 if (router)
466 neigh_node_free_ref(router); 468 batadv_neigh_node_free_ref(router);
467 if (orig_node) 469 if (orig_node)
468 orig_node_free_ref(orig_node); 470 batadv_orig_node_free_ref(orig_node);
469 return ret; 471 return ret;
470} 472}
471 473
@@ -473,12 +475,14 @@ out:
473 * robin fashion over the remaining interfaces. 475 * robin fashion over the remaining interfaces.
474 * 476 *
475 * This method rotates the bonding list and increases the 477 * This method rotates the bonding list and increases the
476 * returned router's refcount. */ 478 * returned router's refcount.
477static struct neigh_node *find_bond_router(struct orig_node *primary_orig, 479 */
478 const struct hard_iface *recv_if) 480static struct batadv_neigh_node *
481batadv_find_bond_router(struct batadv_orig_node *primary_orig,
482 const struct batadv_hard_iface *recv_if)
479{ 483{
480 struct neigh_node *tmp_neigh_node; 484 struct batadv_neigh_node *tmp_neigh_node;
481 struct neigh_node *router = NULL, *first_candidate = NULL; 485 struct batadv_neigh_node *router = NULL, *first_candidate = NULL;
482 486
483 rcu_read_lock(); 487 rcu_read_lock();
484 list_for_each_entry_rcu(tmp_neigh_node, &primary_orig->bond_list, 488 list_for_each_entry_rcu(tmp_neigh_node, &primary_orig->bond_list,
@@ -506,10 +510,12 @@ static struct neigh_node *find_bond_router(struct orig_node *primary_orig,
506 goto out; 510 goto out;
507 511
508 /* selected should point to the next element 512 /* selected should point to the next element
509 * after the current router */ 513 * after the current router
514 */
510 spin_lock_bh(&primary_orig->neigh_list_lock); 515 spin_lock_bh(&primary_orig->neigh_list_lock);
511 /* this is a list_move(), which unfortunately 516 /* this is a list_move(), which unfortunately
512 * does not exist as rcu version */ 517 * does not exist as rcu version
518 */
513 list_del_rcu(&primary_orig->bond_list); 519 list_del_rcu(&primary_orig->bond_list);
514 list_add_rcu(&primary_orig->bond_list, 520 list_add_rcu(&primary_orig->bond_list,
515 &router->bonding_list); 521 &router->bonding_list);
@@ -524,12 +530,14 @@ out:
524 * remaining candidates which are not using 530 * remaining candidates which are not using
525 * this interface. 531 * this interface.
526 * 532 *
527 * Increases the returned router's refcount */ 533 * Increases the returned router's refcount
528static struct neigh_node *find_ifalter_router(struct orig_node *primary_orig, 534 */
529 const struct hard_iface *recv_if) 535static struct batadv_neigh_node *
536batadv_find_ifalter_router(struct batadv_orig_node *primary_orig,
537 const struct batadv_hard_iface *recv_if)
530{ 538{
531 struct neigh_node *tmp_neigh_node; 539 struct batadv_neigh_node *tmp_neigh_node;
532 struct neigh_node *router = NULL, *first_candidate = NULL; 540 struct batadv_neigh_node *router = NULL, *first_candidate = NULL;
533 541
534 rcu_read_lock(); 542 rcu_read_lock();
535 list_for_each_entry_rcu(tmp_neigh_node, &primary_orig->bond_list, 543 list_for_each_entry_rcu(tmp_neigh_node, &primary_orig->bond_list,
@@ -545,19 +553,21 @@ static struct neigh_node *find_ifalter_router(struct orig_node *primary_orig,
545 continue; 553 continue;
546 554
547 /* if we don't have a router yet 555 /* if we don't have a router yet
548 * or this one is better, choose it. */ 556 * or this one is better, choose it.
557 */
549 if ((!router) || 558 if ((!router) ||
550 (tmp_neigh_node->tq_avg > router->tq_avg)) { 559 (tmp_neigh_node->tq_avg > router->tq_avg)) {
551 /* decrement refcount of 560 /* decrement refcount of
552 * previously selected router */ 561 * previously selected router
562 */
553 if (router) 563 if (router)
554 neigh_node_free_ref(router); 564 batadv_neigh_node_free_ref(router);
555 565
556 router = tmp_neigh_node; 566 router = tmp_neigh_node;
557 atomic_inc_not_zero(&router->refcount); 567 atomic_inc_not_zero(&router->refcount);
558 } 568 }
559 569
560 neigh_node_free_ref(tmp_neigh_node); 570 batadv_neigh_node_free_ref(tmp_neigh_node);
561 } 571 }
562 572
563 /* use the first candidate if nothing was found. */ 573 /* use the first candidate if nothing was found. */
@@ -569,19 +579,22 @@ static struct neigh_node *find_ifalter_router(struct orig_node *primary_orig,
569 return router; 579 return router;
570} 580}
571 581
572int recv_tt_query(struct sk_buff *skb, struct hard_iface *recv_if) 582int batadv_recv_tt_query(struct sk_buff *skb, struct batadv_hard_iface *recv_if)
573{ 583{
574 struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface); 584 struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
575 struct tt_query_packet *tt_query; 585 struct batadv_tt_query_packet *tt_query;
576 uint16_t tt_len; 586 uint16_t tt_size;
577 struct ethhdr *ethhdr; 587 struct ethhdr *ethhdr;
588 char tt_flag;
589 size_t packet_size;
578 590
579 /* drop packet if it has not necessary minimum size */ 591 /* drop packet if it has not necessary minimum size */
580 if (unlikely(!pskb_may_pull(skb, sizeof(struct tt_query_packet)))) 592 if (unlikely(!pskb_may_pull(skb,
593 sizeof(struct batadv_tt_query_packet))))
581 goto out; 594 goto out;
582 595
583 /* I could need to modify it */ 596 /* I could need to modify it */
584 if (skb_cow(skb, sizeof(struct tt_query_packet)) < 0) 597 if (skb_cow(skb, sizeof(struct batadv_tt_query_packet)) < 0)
585 goto out; 598 goto out;
586 599
587 ethhdr = (struct ethhdr *)skb_mac_header(skb); 600 ethhdr = (struct ethhdr *)skb_mac_header(skb);
@@ -594,47 +607,59 @@ int recv_tt_query(struct sk_buff *skb, struct hard_iface *recv_if)
594 if (is_broadcast_ether_addr(ethhdr->h_source)) 607 if (is_broadcast_ether_addr(ethhdr->h_source))
595 goto out; 608 goto out;
596 609
597 tt_query = (struct tt_query_packet *)skb->data; 610 tt_query = (struct batadv_tt_query_packet *)skb->data;
598 611
599 tt_query->tt_data = ntohs(tt_query->tt_data); 612 switch (tt_query->flags & BATADV_TT_QUERY_TYPE_MASK) {
613 case BATADV_TT_REQUEST:
614 batadv_inc_counter(bat_priv, BATADV_CNT_TT_REQUEST_RX);
600 615
601 switch (tt_query->flags & TT_QUERY_TYPE_MASK) {
602 case TT_REQUEST:
603 /* If we cannot provide an answer the tt_request is 616 /* If we cannot provide an answer the tt_request is
604 * forwarded */ 617 * forwarded
605 if (!send_tt_response(bat_priv, tt_query)) { 618 */
606 bat_dbg(DBG_TT, bat_priv, 619 if (!batadv_send_tt_response(bat_priv, tt_query)) {
607 "Routing TT_REQUEST to %pM [%c]\n", 620 if (tt_query->flags & BATADV_TT_FULL_TABLE)
608 tt_query->dst, 621 tt_flag = 'F';
609 (tt_query->flags & TT_FULL_TABLE ? 'F' : '.')); 622 else
610 tt_query->tt_data = htons(tt_query->tt_data); 623 tt_flag = '.';
611 return route_unicast_packet(skb, recv_if); 624
625 batadv_dbg(BATADV_DBG_TT, bat_priv,
626 "Routing TT_REQUEST to %pM [%c]\n",
627 tt_query->dst,
628 tt_flag);
629 return batadv_route_unicast_packet(skb, recv_if);
612 } 630 }
613 break; 631 break;
614 case TT_RESPONSE: 632 case BATADV_TT_RESPONSE:
615 if (is_my_mac(tt_query->dst)) { 633 batadv_inc_counter(bat_priv, BATADV_CNT_TT_RESPONSE_RX);
634
635 if (batadv_is_my_mac(tt_query->dst)) {
616 /* packet needs to be linearized to access the TT 636 /* packet needs to be linearized to access the TT
617 * changes */ 637 * changes
638 */
618 if (skb_linearize(skb) < 0) 639 if (skb_linearize(skb) < 0)
619 goto out; 640 goto out;
620 /* skb_linearize() possibly changed skb->data */ 641 /* skb_linearize() possibly changed skb->data */
621 tt_query = (struct tt_query_packet *)skb->data; 642 tt_query = (struct batadv_tt_query_packet *)skb->data;
622 643
623 tt_len = tt_query->tt_data * sizeof(struct tt_change); 644 tt_size = batadv_tt_len(ntohs(tt_query->tt_data));
624 645
625 /* Ensure we have all the claimed data */ 646 /* Ensure we have all the claimed data */
626 if (unlikely(skb_headlen(skb) < 647 packet_size = sizeof(struct batadv_tt_query_packet);
627 sizeof(struct tt_query_packet) + tt_len)) 648 packet_size += tt_size;
649 if (unlikely(skb_headlen(skb) < packet_size))
628 goto out; 650 goto out;
629 651
630 handle_tt_response(bat_priv, tt_query); 652 batadv_handle_tt_response(bat_priv, tt_query);
631 } else { 653 } else {
632 bat_dbg(DBG_TT, bat_priv, 654 if (tt_query->flags & BATADV_TT_FULL_TABLE)
633 "Routing TT_RESPONSE to %pM [%c]\n", 655 tt_flag = 'F';
634 tt_query->dst, 656 else
635 (tt_query->flags & TT_FULL_TABLE ? 'F' : '.')); 657 tt_flag = '.';
636 tt_query->tt_data = htons(tt_query->tt_data); 658 batadv_dbg(BATADV_DBG_TT, bat_priv,
637 return route_unicast_packet(skb, recv_if); 659 "Routing TT_RESPONSE to %pM [%c]\n",
660 tt_query->dst,
661 tt_flag);
662 return batadv_route_unicast_packet(skb, recv_if);
638 } 663 }
639 break; 664 break;
640 } 665 }
@@ -644,15 +669,16 @@ out:
644 return NET_RX_DROP; 669 return NET_RX_DROP;
645} 670}
646 671
647int recv_roam_adv(struct sk_buff *skb, struct hard_iface *recv_if) 672int batadv_recv_roam_adv(struct sk_buff *skb, struct batadv_hard_iface *recv_if)
648{ 673{
649 struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface); 674 struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
650 struct roam_adv_packet *roam_adv_packet; 675 struct batadv_roam_adv_packet *roam_adv_packet;
651 struct orig_node *orig_node; 676 struct batadv_orig_node *orig_node;
652 struct ethhdr *ethhdr; 677 struct ethhdr *ethhdr;
653 678
654 /* drop packet if it has not necessary minimum size */ 679 /* drop packet if it has not necessary minimum size */
655 if (unlikely(!pskb_may_pull(skb, sizeof(struct roam_adv_packet)))) 680 if (unlikely(!pskb_may_pull(skb,
681 sizeof(struct batadv_roam_adv_packet))))
656 goto out; 682 goto out;
657 683
658 ethhdr = (struct ethhdr *)skb_mac_header(skb); 684 ethhdr = (struct ethhdr *)skb_mac_header(skb);
@@ -665,35 +691,39 @@ int recv_roam_adv(struct sk_buff *skb, struct hard_iface *recv_if)
665 if (is_broadcast_ether_addr(ethhdr->h_source)) 691 if (is_broadcast_ether_addr(ethhdr->h_source))
666 goto out; 692 goto out;
667 693
668 roam_adv_packet = (struct roam_adv_packet *)skb->data; 694 batadv_inc_counter(bat_priv, BATADV_CNT_TT_ROAM_ADV_RX);
695
696 roam_adv_packet = (struct batadv_roam_adv_packet *)skb->data;
669 697
670 if (!is_my_mac(roam_adv_packet->dst)) 698 if (!batadv_is_my_mac(roam_adv_packet->dst))
671 return route_unicast_packet(skb, recv_if); 699 return batadv_route_unicast_packet(skb, recv_if);
672 700
673 /* check if it is a backbone gateway. we don't accept 701 /* check if it is a backbone gateway. we don't accept
674 * roaming advertisement from it, as it has the same 702 * roaming advertisement from it, as it has the same
675 * entries as we have. 703 * entries as we have.
676 */ 704 */
677 if (bla_is_backbone_gw_orig(bat_priv, roam_adv_packet->src)) 705 if (batadv_bla_is_backbone_gw_orig(bat_priv, roam_adv_packet->src))
678 goto out; 706 goto out;
679 707
680 orig_node = orig_hash_find(bat_priv, roam_adv_packet->src); 708 orig_node = batadv_orig_hash_find(bat_priv, roam_adv_packet->src);
681 if (!orig_node) 709 if (!orig_node)
682 goto out; 710 goto out;
683 711
684 bat_dbg(DBG_TT, bat_priv, 712 batadv_dbg(BATADV_DBG_TT, bat_priv,
685 "Received ROAMING_ADV from %pM (client %pM)\n", 713 "Received ROAMING_ADV from %pM (client %pM)\n",
686 roam_adv_packet->src, roam_adv_packet->client); 714 roam_adv_packet->src, roam_adv_packet->client);
687 715
688 tt_global_add(bat_priv, orig_node, roam_adv_packet->client, 716 batadv_tt_global_add(bat_priv, orig_node, roam_adv_packet->client,
689 atomic_read(&orig_node->last_ttvn) + 1, true, false); 717 BATADV_TT_CLIENT_ROAM,
718 atomic_read(&orig_node->last_ttvn) + 1);
690 719
691 /* Roaming phase starts: I have new information but the ttvn has not 720 /* Roaming phase starts: I have new information but the ttvn has not
692 * been incremented yet. This flag will make me check all the incoming 721 * been incremented yet. This flag will make me check all the incoming
693 * packets for the correct destination. */ 722 * packets for the correct destination.
723 */
694 bat_priv->tt_poss_change = true; 724 bat_priv->tt_poss_change = true;
695 725
696 orig_node_free_ref(orig_node); 726 batadv_orig_node_free_ref(orig_node);
697out: 727out:
698 /* returning NET_RX_DROP will make the caller function kfree the skb */ 728 /* returning NET_RX_DROP will make the caller function kfree the skb */
699 return NET_RX_DROP; 729 return NET_RX_DROP;
@@ -701,26 +731,30 @@ out:
701 731
702/* find a suitable router for this originator, and use 732/* find a suitable router for this originator, and use
703 * bonding if possible. increases the found neighbors 733 * bonding if possible. increases the found neighbors
704 * refcount.*/ 734 * refcount.
705struct neigh_node *find_router(struct bat_priv *bat_priv, 735 */
706 struct orig_node *orig_node, 736struct batadv_neigh_node *
707 const struct hard_iface *recv_if) 737batadv_find_router(struct batadv_priv *bat_priv,
738 struct batadv_orig_node *orig_node,
739 const struct batadv_hard_iface *recv_if)
708{ 740{
709 struct orig_node *primary_orig_node; 741 struct batadv_orig_node *primary_orig_node;
710 struct orig_node *router_orig; 742 struct batadv_orig_node *router_orig;
711 struct neigh_node *router; 743 struct batadv_neigh_node *router;
712 static uint8_t zero_mac[ETH_ALEN] = {0, 0, 0, 0, 0, 0}; 744 static uint8_t zero_mac[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
713 int bonding_enabled; 745 int bonding_enabled;
746 uint8_t *primary_addr;
714 747
715 if (!orig_node) 748 if (!orig_node)
716 return NULL; 749 return NULL;
717 750
718 router = orig_node_get_router(orig_node); 751 router = batadv_orig_node_get_router(orig_node);
719 if (!router) 752 if (!router)
720 goto err; 753 goto err;
721 754
722 /* without bonding, the first node should 755 /* without bonding, the first node should
723 * always choose the default router. */ 756 * always choose the default router.
757 */
724 bonding_enabled = atomic_read(&bat_priv->bonding); 758 bonding_enabled = atomic_read(&bat_priv->bonding);
725 759
726 rcu_read_lock(); 760 rcu_read_lock();
@@ -732,43 +766,47 @@ struct neigh_node *find_router(struct bat_priv *bat_priv,
732 if ((!recv_if) && (!bonding_enabled)) 766 if ((!recv_if) && (!bonding_enabled))
733 goto return_router; 767 goto return_router;
734 768
769 primary_addr = router_orig->primary_addr;
770
735 /* if we have something in the primary_addr, we can search 771 /* if we have something in the primary_addr, we can search
736 * for a potential bonding candidate. */ 772 * for a potential bonding candidate.
737 if (compare_eth(router_orig->primary_addr, zero_mac)) 773 */
774 if (batadv_compare_eth(primary_addr, zero_mac))
738 goto return_router; 775 goto return_router;
739 776
740 /* find the orig_node which has the primary interface. might 777 /* find the orig_node which has the primary interface. might
741 * even be the same as our router_orig in many cases */ 778 * even be the same as our router_orig in many cases
742 779 */
743 if (compare_eth(router_orig->primary_addr, router_orig->orig)) { 780 if (batadv_compare_eth(primary_addr, router_orig->orig)) {
744 primary_orig_node = router_orig; 781 primary_orig_node = router_orig;
745 } else { 782 } else {
746 primary_orig_node = orig_hash_find(bat_priv, 783 primary_orig_node = batadv_orig_hash_find(bat_priv,
747 router_orig->primary_addr); 784 primary_addr);
748 if (!primary_orig_node) 785 if (!primary_orig_node)
749 goto return_router; 786 goto return_router;
750 787
751 orig_node_free_ref(primary_orig_node); 788 batadv_orig_node_free_ref(primary_orig_node);
752 } 789 }
753 790
754 /* with less than 2 candidates, we can't do any 791 /* with less than 2 candidates, we can't do any
755 * bonding and prefer the original router. */ 792 * bonding and prefer the original router.
793 */
756 if (atomic_read(&primary_orig_node->bond_candidates) < 2) 794 if (atomic_read(&primary_orig_node->bond_candidates) < 2)
757 goto return_router; 795 goto return_router;
758 796
759 /* all nodes between should choose a candidate which 797 /* all nodes between should choose a candidate which
760 * is is not on the interface where the packet came 798 * is is not on the interface where the packet came
761 * in. */ 799 * in.
762 800 */
763 neigh_node_free_ref(router); 801 batadv_neigh_node_free_ref(router);
764 802
765 if (bonding_enabled) 803 if (bonding_enabled)
766 router = find_bond_router(primary_orig_node, recv_if); 804 router = batadv_find_bond_router(primary_orig_node, recv_if);
767 else 805 else
768 router = find_ifalter_router(primary_orig_node, recv_if); 806 router = batadv_find_ifalter_router(primary_orig_node, recv_if);
769 807
770return_router: 808return_router:
771 if (router && router->if_incoming->if_status != IF_ACTIVE) 809 if (router && router->if_incoming->if_status != BATADV_IF_ACTIVE)
772 goto err_unlock; 810 goto err_unlock;
773 811
774 rcu_read_unlock(); 812 rcu_read_unlock();
@@ -777,11 +815,11 @@ err_unlock:
777 rcu_read_unlock(); 815 rcu_read_unlock();
778err: 816err:
779 if (router) 817 if (router)
780 neigh_node_free_ref(router); 818 batadv_neigh_node_free_ref(router);
781 return NULL; 819 return NULL;
782} 820}
783 821
784static int check_unicast_packet(struct sk_buff *skb, int hdr_size) 822static int batadv_check_unicast_packet(struct sk_buff *skb, int hdr_size)
785{ 823{
786 struct ethhdr *ethhdr; 824 struct ethhdr *ethhdr;
787 825
@@ -800,23 +838,24 @@ static int check_unicast_packet(struct sk_buff *skb, int hdr_size)
800 return -1; 838 return -1;
801 839
802 /* not for me */ 840 /* not for me */
803 if (!is_my_mac(ethhdr->h_dest)) 841 if (!batadv_is_my_mac(ethhdr->h_dest))
804 return -1; 842 return -1;
805 843
806 return 0; 844 return 0;
807} 845}
808 846
809static int route_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if) 847static int batadv_route_unicast_packet(struct sk_buff *skb,
848 struct batadv_hard_iface *recv_if)
810{ 849{
811 struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface); 850 struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
812 struct orig_node *orig_node = NULL; 851 struct batadv_orig_node *orig_node = NULL;
813 struct neigh_node *neigh_node = NULL; 852 struct batadv_neigh_node *neigh_node = NULL;
814 struct unicast_packet *unicast_packet; 853 struct batadv_unicast_packet *unicast_packet;
815 struct ethhdr *ethhdr = (struct ethhdr *)skb_mac_header(skb); 854 struct ethhdr *ethhdr = (struct ethhdr *)skb_mac_header(skb);
816 int ret = NET_RX_DROP; 855 int ret = NET_RX_DROP;
817 struct sk_buff *new_skb; 856 struct sk_buff *new_skb;
818 857
819 unicast_packet = (struct unicast_packet *)skb->data; 858 unicast_packet = (struct batadv_unicast_packet *)skb->data;
820 859
821 /* TTL exceeded */ 860 /* TTL exceeded */
822 if (unicast_packet->header.ttl < 2) { 861 if (unicast_packet->header.ttl < 2) {
@@ -826,13 +865,13 @@ static int route_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
826 } 865 }
827 866
828 /* get routing information */ 867 /* get routing information */
829 orig_node = orig_hash_find(bat_priv, unicast_packet->dest); 868 orig_node = batadv_orig_hash_find(bat_priv, unicast_packet->dest);
830 869
831 if (!orig_node) 870 if (!orig_node)
832 goto out; 871 goto out;
833 872
834 /* find_router() increases neigh_nodes refcount if found. */ 873 /* find_router() increases neigh_nodes refcount if found. */
835 neigh_node = find_router(bat_priv, orig_node, recv_if); 874 neigh_node = batadv_find_router(bat_priv, orig_node, recv_if);
836 875
837 if (!neigh_node) 876 if (!neigh_node)
838 goto out; 877 goto out;
@@ -841,20 +880,22 @@ static int route_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
841 if (skb_cow(skb, ETH_HLEN) < 0) 880 if (skb_cow(skb, ETH_HLEN) < 0)
842 goto out; 881 goto out;
843 882
844 unicast_packet = (struct unicast_packet *)skb->data; 883 unicast_packet = (struct batadv_unicast_packet *)skb->data;
845 884
846 if (unicast_packet->header.packet_type == BAT_UNICAST && 885 if (unicast_packet->header.packet_type == BATADV_UNICAST &&
847 atomic_read(&bat_priv->fragmentation) && 886 atomic_read(&bat_priv->fragmentation) &&
848 skb->len > neigh_node->if_incoming->net_dev->mtu) { 887 skb->len > neigh_node->if_incoming->net_dev->mtu) {
849 ret = frag_send_skb(skb, bat_priv, 888 ret = batadv_frag_send_skb(skb, bat_priv,
850 neigh_node->if_incoming, neigh_node->addr); 889 neigh_node->if_incoming,
890 neigh_node->addr);
851 goto out; 891 goto out;
852 } 892 }
853 893
854 if (unicast_packet->header.packet_type == BAT_UNICAST_FRAG && 894 if (unicast_packet->header.packet_type == BATADV_UNICAST_FRAG &&
855 frag_can_reassemble(skb, neigh_node->if_incoming->net_dev->mtu)) { 895 batadv_frag_can_reassemble(skb,
896 neigh_node->if_incoming->net_dev->mtu)) {
856 897
857 ret = frag_reassemble_skb(skb, bat_priv, &new_skb); 898 ret = batadv_frag_reassemble_skb(skb, bat_priv, &new_skb);
858 899
859 if (ret == NET_RX_DROP) 900 if (ret == NET_RX_DROP)
860 goto out; 901 goto out;
@@ -866,141 +907,153 @@ static int route_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
866 } 907 }
867 908
868 skb = new_skb; 909 skb = new_skb;
869 unicast_packet = (struct unicast_packet *)skb->data; 910 unicast_packet = (struct batadv_unicast_packet *)skb->data;
870 } 911 }
871 912
872 /* decrement ttl */ 913 /* decrement ttl */
873 unicast_packet->header.ttl--; 914 unicast_packet->header.ttl--;
874 915
916 /* Update stats counter */
917 batadv_inc_counter(bat_priv, BATADV_CNT_FORWARD);
918 batadv_add_counter(bat_priv, BATADV_CNT_FORWARD_BYTES,
919 skb->len + ETH_HLEN);
920
875 /* route it */ 921 /* route it */
876 send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr); 922 batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
877 ret = NET_RX_SUCCESS; 923 ret = NET_RX_SUCCESS;
878 924
879out: 925out:
880 if (neigh_node) 926 if (neigh_node)
881 neigh_node_free_ref(neigh_node); 927 batadv_neigh_node_free_ref(neigh_node);
882 if (orig_node) 928 if (orig_node)
883 orig_node_free_ref(orig_node); 929 batadv_orig_node_free_ref(orig_node);
884 return ret; 930 return ret;
885} 931}
886 932
887static int check_unicast_ttvn(struct bat_priv *bat_priv, 933static int batadv_check_unicast_ttvn(struct batadv_priv *bat_priv,
888 struct sk_buff *skb) { 934 struct sk_buff *skb) {
889 uint8_t curr_ttvn; 935 uint8_t curr_ttvn;
890 struct orig_node *orig_node; 936 struct batadv_orig_node *orig_node;
891 struct ethhdr *ethhdr; 937 struct ethhdr *ethhdr;
892 struct hard_iface *primary_if; 938 struct batadv_hard_iface *primary_if;
893 struct unicast_packet *unicast_packet; 939 struct batadv_unicast_packet *unicast_packet;
894 bool tt_poss_change; 940 bool tt_poss_change;
941 int is_old_ttvn;
895 942
896 /* I could need to modify it */ 943 /* I could need to modify it */
897 if (skb_cow(skb, sizeof(struct unicast_packet)) < 0) 944 if (skb_cow(skb, sizeof(struct batadv_unicast_packet)) < 0)
898 return 0; 945 return 0;
899 946
900 unicast_packet = (struct unicast_packet *)skb->data; 947 unicast_packet = (struct batadv_unicast_packet *)skb->data;
901 948
902 if (is_my_mac(unicast_packet->dest)) { 949 if (batadv_is_my_mac(unicast_packet->dest)) {
903 tt_poss_change = bat_priv->tt_poss_change; 950 tt_poss_change = bat_priv->tt_poss_change;
904 curr_ttvn = (uint8_t)atomic_read(&bat_priv->ttvn); 951 curr_ttvn = (uint8_t)atomic_read(&bat_priv->ttvn);
905 } else { 952 } else {
906 orig_node = orig_hash_find(bat_priv, unicast_packet->dest); 953 orig_node = batadv_orig_hash_find(bat_priv,
954 unicast_packet->dest);
907 955
908 if (!orig_node) 956 if (!orig_node)
909 return 0; 957 return 0;
910 958
911 curr_ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn); 959 curr_ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);
912 tt_poss_change = orig_node->tt_poss_change; 960 tt_poss_change = orig_node->tt_poss_change;
913 orig_node_free_ref(orig_node); 961 batadv_orig_node_free_ref(orig_node);
914 } 962 }
915 963
916 /* Check whether I have to reroute the packet */ 964 /* Check whether I have to reroute the packet */
917 if (seq_before(unicast_packet->ttvn, curr_ttvn) || tt_poss_change) { 965 is_old_ttvn = batadv_seq_before(unicast_packet->ttvn, curr_ttvn);
966 if (is_old_ttvn || tt_poss_change) {
918 /* check if there is enough data before accessing it */ 967 /* check if there is enough data before accessing it */
919 if (pskb_may_pull(skb, sizeof(struct unicast_packet) + 968 if (pskb_may_pull(skb, sizeof(struct batadv_unicast_packet) +
920 ETH_HLEN) < 0) 969 ETH_HLEN) < 0)
921 return 0; 970 return 0;
922 971
923 ethhdr = (struct ethhdr *)(skb->data + 972 ethhdr = (struct ethhdr *)(skb->data + sizeof(*unicast_packet));
924 sizeof(struct unicast_packet));
925 973
926 /* we don't have an updated route for this client, so we should 974 /* we don't have an updated route for this client, so we should
927 * not try to reroute the packet!! 975 * not try to reroute the packet!!
928 */ 976 */
929 if (tt_global_client_is_roaming(bat_priv, ethhdr->h_dest)) 977 if (batadv_tt_global_client_is_roaming(bat_priv,
978 ethhdr->h_dest))
930 return 1; 979 return 1;
931 980
932 orig_node = transtable_search(bat_priv, NULL, ethhdr->h_dest); 981 orig_node = batadv_transtable_search(bat_priv, NULL,
982 ethhdr->h_dest);
933 983
934 if (!orig_node) { 984 if (!orig_node) {
935 if (!is_my_client(bat_priv, ethhdr->h_dest)) 985 if (!batadv_is_my_client(bat_priv, ethhdr->h_dest))
936 return 0; 986 return 0;
937 primary_if = primary_if_get_selected(bat_priv); 987 primary_if = batadv_primary_if_get_selected(bat_priv);
938 if (!primary_if) 988 if (!primary_if)
939 return 0; 989 return 0;
940 memcpy(unicast_packet->dest, 990 memcpy(unicast_packet->dest,
941 primary_if->net_dev->dev_addr, ETH_ALEN); 991 primary_if->net_dev->dev_addr, ETH_ALEN);
942 hardif_free_ref(primary_if); 992 batadv_hardif_free_ref(primary_if);
943 } else { 993 } else {
944 memcpy(unicast_packet->dest, orig_node->orig, 994 memcpy(unicast_packet->dest, orig_node->orig,
945 ETH_ALEN); 995 ETH_ALEN);
946 curr_ttvn = (uint8_t) 996 curr_ttvn = (uint8_t)
947 atomic_read(&orig_node->last_ttvn); 997 atomic_read(&orig_node->last_ttvn);
948 orig_node_free_ref(orig_node); 998 batadv_orig_node_free_ref(orig_node);
949 } 999 }
950 1000
951 bat_dbg(DBG_ROUTES, bat_priv, 1001 batadv_dbg(BATADV_DBG_ROUTES, bat_priv,
952 "TTVN mismatch (old_ttvn %u new_ttvn %u)! Rerouting unicast packet (for %pM) to %pM\n", 1002 "TTVN mismatch (old_ttvn %u new_ttvn %u)! Rerouting unicast packet (for %pM) to %pM\n",
953 unicast_packet->ttvn, curr_ttvn, ethhdr->h_dest, 1003 unicast_packet->ttvn, curr_ttvn, ethhdr->h_dest,
954 unicast_packet->dest); 1004 unicast_packet->dest);
955 1005
956 unicast_packet->ttvn = curr_ttvn; 1006 unicast_packet->ttvn = curr_ttvn;
957 } 1007 }
958 return 1; 1008 return 1;
959} 1009}
960 1010
961int recv_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if) 1011int batadv_recv_unicast_packet(struct sk_buff *skb,
1012 struct batadv_hard_iface *recv_if)
962{ 1013{
963 struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface); 1014 struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
964 struct unicast_packet *unicast_packet; 1015 struct batadv_unicast_packet *unicast_packet;
965 int hdr_size = sizeof(*unicast_packet); 1016 int hdr_size = sizeof(*unicast_packet);
966 1017
967 if (check_unicast_packet(skb, hdr_size) < 0) 1018 if (batadv_check_unicast_packet(skb, hdr_size) < 0)
968 return NET_RX_DROP; 1019 return NET_RX_DROP;
969 1020
970 if (!check_unicast_ttvn(bat_priv, skb)) 1021 if (!batadv_check_unicast_ttvn(bat_priv, skb))
971 return NET_RX_DROP; 1022 return NET_RX_DROP;
972 1023
973 unicast_packet = (struct unicast_packet *)skb->data; 1024 unicast_packet = (struct batadv_unicast_packet *)skb->data;
974 1025
975 /* packet for me */ 1026 /* packet for me */
976 if (is_my_mac(unicast_packet->dest)) { 1027 if (batadv_is_my_mac(unicast_packet->dest)) {
977 interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size); 1028 batadv_interface_rx(recv_if->soft_iface, skb, recv_if,
1029 hdr_size);
978 return NET_RX_SUCCESS; 1030 return NET_RX_SUCCESS;
979 } 1031 }
980 1032
981 return route_unicast_packet(skb, recv_if); 1033 return batadv_route_unicast_packet(skb, recv_if);
982} 1034}
983 1035
984int recv_ucast_frag_packet(struct sk_buff *skb, struct hard_iface *recv_if) 1036int batadv_recv_ucast_frag_packet(struct sk_buff *skb,
1037 struct batadv_hard_iface *recv_if)
985{ 1038{
986 struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface); 1039 struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
987 struct unicast_frag_packet *unicast_packet; 1040 struct batadv_unicast_frag_packet *unicast_packet;
988 int hdr_size = sizeof(*unicast_packet); 1041 int hdr_size = sizeof(*unicast_packet);
989 struct sk_buff *new_skb = NULL; 1042 struct sk_buff *new_skb = NULL;
990 int ret; 1043 int ret;
991 1044
992 if (check_unicast_packet(skb, hdr_size) < 0) 1045 if (batadv_check_unicast_packet(skb, hdr_size) < 0)
993 return NET_RX_DROP; 1046 return NET_RX_DROP;
994 1047
995 if (!check_unicast_ttvn(bat_priv, skb)) 1048 if (!batadv_check_unicast_ttvn(bat_priv, skb))
996 return NET_RX_DROP; 1049 return NET_RX_DROP;
997 1050
998 unicast_packet = (struct unicast_frag_packet *)skb->data; 1051 unicast_packet = (struct batadv_unicast_frag_packet *)skb->data;
999 1052
1000 /* packet for me */ 1053 /* packet for me */
1001 if (is_my_mac(unicast_packet->dest)) { 1054 if (batadv_is_my_mac(unicast_packet->dest)) {
1002 1055
1003 ret = frag_reassemble_skb(skb, bat_priv, &new_skb); 1056 ret = batadv_frag_reassemble_skb(skb, bat_priv, &new_skb);
1004 1057
1005 if (ret == NET_RX_DROP) 1058 if (ret == NET_RX_DROP)
1006 return NET_RX_DROP; 1059 return NET_RX_DROP;
@@ -1009,20 +1062,21 @@ int recv_ucast_frag_packet(struct sk_buff *skb, struct hard_iface *recv_if)
1009 if (!new_skb) 1062 if (!new_skb)
1010 return NET_RX_SUCCESS; 1063 return NET_RX_SUCCESS;
1011 1064
1012 interface_rx(recv_if->soft_iface, new_skb, recv_if, 1065 batadv_interface_rx(recv_if->soft_iface, new_skb, recv_if,
1013 sizeof(struct unicast_packet)); 1066 sizeof(struct batadv_unicast_packet));
1014 return NET_RX_SUCCESS; 1067 return NET_RX_SUCCESS;
1015 } 1068 }
1016 1069
1017 return route_unicast_packet(skb, recv_if); 1070 return batadv_route_unicast_packet(skb, recv_if);
1018} 1071}
1019 1072
1020 1073
1021int recv_bcast_packet(struct sk_buff *skb, struct hard_iface *recv_if) 1074int batadv_recv_bcast_packet(struct sk_buff *skb,
1075 struct batadv_hard_iface *recv_if)
1022{ 1076{
1023 struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface); 1077 struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
1024 struct orig_node *orig_node = NULL; 1078 struct batadv_orig_node *orig_node = NULL;
1025 struct bcast_packet *bcast_packet; 1079 struct batadv_bcast_packet *bcast_packet;
1026 struct ethhdr *ethhdr; 1080 struct ethhdr *ethhdr;
1027 int hdr_size = sizeof(*bcast_packet); 1081 int hdr_size = sizeof(*bcast_packet);
1028 int ret = NET_RX_DROP; 1082 int ret = NET_RX_DROP;
@@ -1043,19 +1097,19 @@ int recv_bcast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
1043 goto out; 1097 goto out;
1044 1098
1045 /* ignore broadcasts sent by myself */ 1099 /* ignore broadcasts sent by myself */
1046 if (is_my_mac(ethhdr->h_source)) 1100 if (batadv_is_my_mac(ethhdr->h_source))
1047 goto out; 1101 goto out;
1048 1102
1049 bcast_packet = (struct bcast_packet *)skb->data; 1103 bcast_packet = (struct batadv_bcast_packet *)skb->data;
1050 1104
1051 /* ignore broadcasts originated by myself */ 1105 /* ignore broadcasts originated by myself */
1052 if (is_my_mac(bcast_packet->orig)) 1106 if (batadv_is_my_mac(bcast_packet->orig))
1053 goto out; 1107 goto out;
1054 1108
1055 if (bcast_packet->header.ttl < 2) 1109 if (bcast_packet->header.ttl < 2)
1056 goto out; 1110 goto out;
1057 1111
1058 orig_node = orig_hash_find(bat_priv, bcast_packet->orig); 1112 orig_node = batadv_orig_hash_find(bat_priv, bcast_packet->orig);
1059 1113
1060 if (!orig_node) 1114 if (!orig_node)
1061 goto out; 1115 goto out;
@@ -1063,39 +1117,40 @@ int recv_bcast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
1063 spin_lock_bh(&orig_node->bcast_seqno_lock); 1117 spin_lock_bh(&orig_node->bcast_seqno_lock);
1064 1118
1065 /* check whether the packet is a duplicate */ 1119 /* check whether the packet is a duplicate */
1066 if (bat_test_bit(orig_node->bcast_bits, orig_node->last_bcast_seqno, 1120 if (batadv_test_bit(orig_node->bcast_bits, orig_node->last_bcast_seqno,
1067 ntohl(bcast_packet->seqno))) 1121 ntohl(bcast_packet->seqno)))
1068 goto spin_unlock; 1122 goto spin_unlock;
1069 1123
1070 seq_diff = ntohl(bcast_packet->seqno) - orig_node->last_bcast_seqno; 1124 seq_diff = ntohl(bcast_packet->seqno) - orig_node->last_bcast_seqno;
1071 1125
1072 /* check whether the packet is old and the host just restarted. */ 1126 /* check whether the packet is old and the host just restarted. */
1073 if (window_protected(bat_priv, seq_diff, 1127 if (batadv_window_protected(bat_priv, seq_diff,
1074 &orig_node->bcast_seqno_reset)) 1128 &orig_node->bcast_seqno_reset))
1075 goto spin_unlock; 1129 goto spin_unlock;
1076 1130
1077 /* mark broadcast in flood history, update window position 1131 /* mark broadcast in flood history, update window position
1078 * if required. */ 1132 * if required.
1079 if (bit_get_packet(bat_priv, orig_node->bcast_bits, seq_diff, 1)) 1133 */
1134 if (batadv_bit_get_packet(bat_priv, orig_node->bcast_bits, seq_diff, 1))
1080 orig_node->last_bcast_seqno = ntohl(bcast_packet->seqno); 1135 orig_node->last_bcast_seqno = ntohl(bcast_packet->seqno);
1081 1136
1082 spin_unlock_bh(&orig_node->bcast_seqno_lock); 1137 spin_unlock_bh(&orig_node->bcast_seqno_lock);
1083 1138
1084 /* check whether this has been sent by another originator before */ 1139 /* check whether this has been sent by another originator before */
1085 if (bla_check_bcast_duplist(bat_priv, bcast_packet, hdr_size)) 1140 if (batadv_bla_check_bcast_duplist(bat_priv, bcast_packet, hdr_size))
1086 goto out; 1141 goto out;
1087 1142
1088 /* rebroadcast packet */ 1143 /* rebroadcast packet */
1089 add_bcast_packet_to_list(bat_priv, skb, 1); 1144 batadv_add_bcast_packet_to_list(bat_priv, skb, 1);
1090 1145
1091 /* don't hand the broadcast up if it is from an originator 1146 /* don't hand the broadcast up if it is from an originator
1092 * from the same backbone. 1147 * from the same backbone.
1093 */ 1148 */
1094 if (bla_is_backbone_gw(skb, orig_node, hdr_size)) 1149 if (batadv_bla_is_backbone_gw(skb, orig_node, hdr_size))
1095 goto out; 1150 goto out;
1096 1151
1097 /* broadcast for me */ 1152 /* broadcast for me */
1098 interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size); 1153 batadv_interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size);
1099 ret = NET_RX_SUCCESS; 1154 ret = NET_RX_SUCCESS;
1100 goto out; 1155 goto out;
1101 1156
@@ -1103,15 +1158,16 @@ spin_unlock:
1103 spin_unlock_bh(&orig_node->bcast_seqno_lock); 1158 spin_unlock_bh(&orig_node->bcast_seqno_lock);
1104out: 1159out:
1105 if (orig_node) 1160 if (orig_node)
1106 orig_node_free_ref(orig_node); 1161 batadv_orig_node_free_ref(orig_node);
1107 return ret; 1162 return ret;
1108} 1163}
1109 1164
1110int recv_vis_packet(struct sk_buff *skb, struct hard_iface *recv_if) 1165int batadv_recv_vis_packet(struct sk_buff *skb,
1166 struct batadv_hard_iface *recv_if)
1111{ 1167{
1112 struct vis_packet *vis_packet; 1168 struct batadv_vis_packet *vis_packet;
1113 struct ethhdr *ethhdr; 1169 struct ethhdr *ethhdr;
1114 struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface); 1170 struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
1115 int hdr_size = sizeof(*vis_packet); 1171 int hdr_size = sizeof(*vis_packet);
1116 1172
1117 /* keep skb linear */ 1173 /* keep skb linear */
@@ -1121,29 +1177,29 @@ int recv_vis_packet(struct sk_buff *skb, struct hard_iface *recv_if)
1121 if (unlikely(!pskb_may_pull(skb, hdr_size))) 1177 if (unlikely(!pskb_may_pull(skb, hdr_size)))
1122 return NET_RX_DROP; 1178 return NET_RX_DROP;
1123 1179
1124 vis_packet = (struct vis_packet *)skb->data; 1180 vis_packet = (struct batadv_vis_packet *)skb->data;
1125 ethhdr = (struct ethhdr *)skb_mac_header(skb); 1181 ethhdr = (struct ethhdr *)skb_mac_header(skb);
1126 1182
1127 /* not for me */ 1183 /* not for me */
1128 if (!is_my_mac(ethhdr->h_dest)) 1184 if (!batadv_is_my_mac(ethhdr->h_dest))
1129 return NET_RX_DROP; 1185 return NET_RX_DROP;
1130 1186
1131 /* ignore own packets */ 1187 /* ignore own packets */
1132 if (is_my_mac(vis_packet->vis_orig)) 1188 if (batadv_is_my_mac(vis_packet->vis_orig))
1133 return NET_RX_DROP; 1189 return NET_RX_DROP;
1134 1190
1135 if (is_my_mac(vis_packet->sender_orig)) 1191 if (batadv_is_my_mac(vis_packet->sender_orig))
1136 return NET_RX_DROP; 1192 return NET_RX_DROP;
1137 1193
1138 switch (vis_packet->vis_type) { 1194 switch (vis_packet->vis_type) {
1139 case VIS_TYPE_SERVER_SYNC: 1195 case BATADV_VIS_TYPE_SERVER_SYNC:
1140 receive_server_sync_packet(bat_priv, vis_packet, 1196 batadv_receive_server_sync_packet(bat_priv, vis_packet,
1141 skb_headlen(skb)); 1197 skb_headlen(skb));
1142 break; 1198 break;
1143 1199
1144 case VIS_TYPE_CLIENT_UPDATE: 1200 case BATADV_VIS_TYPE_CLIENT_UPDATE:
1145 receive_client_update_packet(bat_priv, vis_packet, 1201 batadv_receive_client_update_packet(bat_priv, vis_packet,
1146 skb_headlen(skb)); 1202 skb_headlen(skb));
1147 break; 1203 break;
1148 1204
1149 default: /* ignore unknown packet */ 1205 default: /* ignore unknown packet */
@@ -1151,6 +1207,7 @@ int recv_vis_packet(struct sk_buff *skb, struct hard_iface *recv_if)
1151 } 1207 }
1152 1208
1153 /* We take a copy of the data in the packet, so we should 1209 /* We take a copy of the data in the packet, so we should
1154 always free the skbuf. */ 1210 * always free the skbuf.
1211 */
1155 return NET_RX_DROP; 1212 return NET_RX_DROP;
1156} 1213}
diff --git a/net/batman-adv/routing.h b/net/batman-adv/routing.h
index d6bbbebb656..9262279ea66 100644
--- a/net/batman-adv/routing.h
+++ b/net/batman-adv/routing.h
@@ -1,5 +1,4 @@
1/* 1/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
2 * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
3 * 2 *
4 * Marek Lindner, Simon Wunderlich 3 * Marek Lindner, Simon Wunderlich
5 * 4 *
@@ -16,36 +15,45 @@
16 * along with this program; if not, write to the Free Software 15 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA 17 * 02110-1301, USA
19 *
20 */ 18 */
21 19
22#ifndef _NET_BATMAN_ADV_ROUTING_H_ 20#ifndef _NET_BATMAN_ADV_ROUTING_H_
23#define _NET_BATMAN_ADV_ROUTING_H_ 21#define _NET_BATMAN_ADV_ROUTING_H_
24 22
25void slide_own_bcast_window(struct hard_iface *hard_iface); 23void batadv_slide_own_bcast_window(struct batadv_hard_iface *hard_iface);
26bool check_management_packet(struct sk_buff *skb, 24bool batadv_check_management_packet(struct sk_buff *skb,
27 struct hard_iface *hard_iface, 25 struct batadv_hard_iface *hard_iface,
28 int header_len); 26 int header_len);
29void update_route(struct bat_priv *bat_priv, struct orig_node *orig_node, 27void batadv_update_route(struct batadv_priv *bat_priv,
30 struct neigh_node *neigh_node); 28 struct batadv_orig_node *orig_node,
31int recv_icmp_packet(struct sk_buff *skb, struct hard_iface *recv_if); 29 struct batadv_neigh_node *neigh_node);
32int recv_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if); 30int batadv_recv_icmp_packet(struct sk_buff *skb,
33int recv_ucast_frag_packet(struct sk_buff *skb, struct hard_iface *recv_if); 31 struct batadv_hard_iface *recv_if);
34int recv_bcast_packet(struct sk_buff *skb, struct hard_iface *recv_if); 32int batadv_recv_unicast_packet(struct sk_buff *skb,
35int recv_vis_packet(struct sk_buff *skb, struct hard_iface *recv_if); 33 struct batadv_hard_iface *recv_if);
36int recv_tt_query(struct sk_buff *skb, struct hard_iface *recv_if); 34int batadv_recv_ucast_frag_packet(struct sk_buff *skb,
37int recv_roam_adv(struct sk_buff *skb, struct hard_iface *recv_if); 35 struct batadv_hard_iface *recv_if);
38struct neigh_node *find_router(struct bat_priv *bat_priv, 36int batadv_recv_bcast_packet(struct sk_buff *skb,
39 struct orig_node *orig_node, 37 struct batadv_hard_iface *recv_if);
40 const struct hard_iface *recv_if); 38int batadv_recv_vis_packet(struct sk_buff *skb,
41void bonding_candidate_del(struct orig_node *orig_node, 39 struct batadv_hard_iface *recv_if);
42 struct neigh_node *neigh_node); 40int batadv_recv_tt_query(struct sk_buff *skb,
43void bonding_candidate_add(struct orig_node *orig_node, 41 struct batadv_hard_iface *recv_if);
44 struct neigh_node *neigh_node); 42int batadv_recv_roam_adv(struct sk_buff *skb,
45void bonding_save_primary(const struct orig_node *orig_node, 43 struct batadv_hard_iface *recv_if);
46 struct orig_node *orig_neigh_node, 44struct batadv_neigh_node *
47 const struct batman_ogm_packet *batman_ogm_packet); 45batadv_find_router(struct batadv_priv *bat_priv,
48int window_protected(struct bat_priv *bat_priv, int32_t seq_num_diff, 46 struct batadv_orig_node *orig_node,
49 unsigned long *last_reset); 47 const struct batadv_hard_iface *recv_if);
48void batadv_bonding_candidate_del(struct batadv_orig_node *orig_node,
49 struct batadv_neigh_node *neigh_node);
50void batadv_bonding_candidate_add(struct batadv_orig_node *orig_node,
51 struct batadv_neigh_node *neigh_node);
52void batadv_bonding_save_primary(const struct batadv_orig_node *orig_node,
53 struct batadv_orig_node *orig_neigh_node,
54 const struct batadv_ogm_packet
55 *batman_ogm_packet);
56int batadv_window_protected(struct batadv_priv *bat_priv, int32_t seq_num_diff,
57 unsigned long *last_reset);
50 58
51#endif /* _NET_BATMAN_ADV_ROUTING_H_ */ 59#endif /* _NET_BATMAN_ADV_ROUTING_H_ */
diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c
index f47299f22c6..3b4b2daa3b3 100644
--- a/net/batman-adv/send.c
+++ b/net/batman-adv/send.c
@@ -1,5 +1,4 @@
1/* 1/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
2 * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
3 * 2 *
4 * Marek Lindner, Simon Wunderlich 3 * Marek Lindner, Simon Wunderlich
5 * 4 *
@@ -16,7 +15,6 @@
16 * along with this program; if not, write to the Free Software 15 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA 17 * 02110-1301, USA
19 *
20 */ 18 */
21 19
22#include "main.h" 20#include "main.h"
@@ -29,16 +27,18 @@
29#include "gateway_common.h" 27#include "gateway_common.h"
30#include "originator.h" 28#include "originator.h"
31 29
32static void send_outstanding_bcast_packet(struct work_struct *work); 30static void batadv_send_outstanding_bcast_packet(struct work_struct *work);
33 31
34/* send out an already prepared packet to the given address via the 32/* send out an already prepared packet to the given address via the
35 * specified batman interface */ 33 * specified batman interface
36int send_skb_packet(struct sk_buff *skb, struct hard_iface *hard_iface, 34 */
37 const uint8_t *dst_addr) 35int batadv_send_skb_packet(struct sk_buff *skb,
36 struct batadv_hard_iface *hard_iface,
37 const uint8_t *dst_addr)
38{ 38{
39 struct ethhdr *ethhdr; 39 struct ethhdr *ethhdr;
40 40
41 if (hard_iface->if_status != IF_ACTIVE) 41 if (hard_iface->if_status != BATADV_IF_ACTIVE)
42 goto send_skb_err; 42 goto send_skb_err;
43 43
44 if (unlikely(!hard_iface->net_dev)) 44 if (unlikely(!hard_iface->net_dev))
@@ -51,7 +51,7 @@ int send_skb_packet(struct sk_buff *skb, struct hard_iface *hard_iface,
51 } 51 }
52 52
53 /* push to the ethernet header. */ 53 /* push to the ethernet header. */
54 if (my_skb_head_push(skb, ETH_HLEN) < 0) 54 if (batadv_skb_head_push(skb, ETH_HLEN) < 0)
55 goto send_skb_err; 55 goto send_skb_err;
56 56
57 skb_reset_mac_header(skb); 57 skb_reset_mac_header(skb);
@@ -59,129 +59,57 @@ int send_skb_packet(struct sk_buff *skb, struct hard_iface *hard_iface,
59 ethhdr = (struct ethhdr *)skb_mac_header(skb); 59 ethhdr = (struct ethhdr *)skb_mac_header(skb);
60 memcpy(ethhdr->h_source, hard_iface->net_dev->dev_addr, ETH_ALEN); 60 memcpy(ethhdr->h_source, hard_iface->net_dev->dev_addr, ETH_ALEN);
61 memcpy(ethhdr->h_dest, dst_addr, ETH_ALEN); 61 memcpy(ethhdr->h_dest, dst_addr, ETH_ALEN);
62 ethhdr->h_proto = __constant_htons(ETH_P_BATMAN); 62 ethhdr->h_proto = __constant_htons(BATADV_ETH_P_BATMAN);
63 63
64 skb_set_network_header(skb, ETH_HLEN); 64 skb_set_network_header(skb, ETH_HLEN);
65 skb->priority = TC_PRIO_CONTROL; 65 skb->priority = TC_PRIO_CONTROL;
66 skb->protocol = __constant_htons(ETH_P_BATMAN); 66 skb->protocol = __constant_htons(BATADV_ETH_P_BATMAN);
67 67
68 skb->dev = hard_iface->net_dev; 68 skb->dev = hard_iface->net_dev;
69 69
70 /* dev_queue_xmit() returns a negative result on error. However on 70 /* dev_queue_xmit() returns a negative result on error. However on
71 * congestion and traffic shaping, it drops and returns NET_XMIT_DROP 71 * congestion and traffic shaping, it drops and returns NET_XMIT_DROP
72 * (which is > 0). This will not be treated as an error. */ 72 * (which is > 0). This will not be treated as an error.
73 73 */
74 return dev_queue_xmit(skb); 74 return dev_queue_xmit(skb);
75send_skb_err: 75send_skb_err:
76 kfree_skb(skb); 76 kfree_skb(skb);
77 return NET_XMIT_DROP; 77 return NET_XMIT_DROP;
78} 78}
79 79
80static void realloc_packet_buffer(struct hard_iface *hard_iface, 80void batadv_schedule_bat_ogm(struct batadv_hard_iface *hard_iface)
81 int new_len)
82{ 81{
83 unsigned char *new_buff; 82 struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
84
85 new_buff = kmalloc(new_len, GFP_ATOMIC);
86
87 /* keep old buffer if kmalloc should fail */
88 if (new_buff) {
89 memcpy(new_buff, hard_iface->packet_buff,
90 BATMAN_OGM_HLEN);
91
92 kfree(hard_iface->packet_buff);
93 hard_iface->packet_buff = new_buff;
94 hard_iface->packet_len = new_len;
95 }
96}
97 83
98/* when calling this function (hard_iface == primary_if) has to be true */ 84 if ((hard_iface->if_status == BATADV_IF_NOT_IN_USE) ||
99static int prepare_packet_buffer(struct bat_priv *bat_priv, 85 (hard_iface->if_status == BATADV_IF_TO_BE_REMOVED))
100 struct hard_iface *hard_iface)
101{
102 int new_len;
103
104 new_len = BATMAN_OGM_HLEN +
105 tt_len((uint8_t)atomic_read(&bat_priv->tt_local_changes));
106
107 /* if we have too many changes for one packet don't send any
108 * and wait for the tt table request which will be fragmented */
109 if (new_len > hard_iface->soft_iface->mtu)
110 new_len = BATMAN_OGM_HLEN;
111
112 realloc_packet_buffer(hard_iface, new_len);
113
114 atomic_set(&bat_priv->tt_crc, tt_local_crc(bat_priv));
115
116 /* reset the sending counter */
117 atomic_set(&bat_priv->tt_ogm_append_cnt, TT_OGM_APPEND_MAX);
118
119 return tt_changes_fill_buffer(bat_priv,
120 hard_iface->packet_buff + BATMAN_OGM_HLEN,
121 hard_iface->packet_len - BATMAN_OGM_HLEN);
122}
123
124static int reset_packet_buffer(struct bat_priv *bat_priv,
125 struct hard_iface *hard_iface)
126{
127 realloc_packet_buffer(hard_iface, BATMAN_OGM_HLEN);
128 return 0;
129}
130
131void schedule_bat_ogm(struct hard_iface *hard_iface)
132{
133 struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
134 struct hard_iface *primary_if;
135 int tt_num_changes = -1;
136
137 if ((hard_iface->if_status == IF_NOT_IN_USE) ||
138 (hard_iface->if_status == IF_TO_BE_REMOVED))
139 return; 86 return;
140 87
141 /** 88 /* the interface gets activated here to avoid race conditions between
142 * the interface gets activated here to avoid race conditions between
143 * the moment of activating the interface in 89 * the moment of activating the interface in
144 * hardif_activate_interface() where the originator mac is set and 90 * hardif_activate_interface() where the originator mac is set and
145 * outdated packets (especially uninitialized mac addresses) in the 91 * outdated packets (especially uninitialized mac addresses) in the
146 * packet queue 92 * packet queue
147 */ 93 */
148 if (hard_iface->if_status == IF_TO_BE_ACTIVATED) 94 if (hard_iface->if_status == BATADV_IF_TO_BE_ACTIVATED)
149 hard_iface->if_status = IF_ACTIVE; 95 hard_iface->if_status = BATADV_IF_ACTIVE;
150
151 primary_if = primary_if_get_selected(bat_priv);
152
153 if (hard_iface == primary_if) {
154 /* if at least one change happened */
155 if (atomic_read(&bat_priv->tt_local_changes) > 0) {
156 tt_commit_changes(bat_priv);
157 tt_num_changes = prepare_packet_buffer(bat_priv,
158 hard_iface);
159 }
160
161 /* if the changes have been sent often enough */
162 if (!atomic_dec_not_zero(&bat_priv->tt_ogm_append_cnt))
163 tt_num_changes = reset_packet_buffer(bat_priv,
164 hard_iface);
165 }
166
167 if (primary_if)
168 hardif_free_ref(primary_if);
169 96
170 bat_priv->bat_algo_ops->bat_ogm_schedule(hard_iface, tt_num_changes); 97 bat_priv->bat_algo_ops->bat_ogm_schedule(hard_iface);
171} 98}
172 99
173static void forw_packet_free(struct forw_packet *forw_packet) 100static void batadv_forw_packet_free(struct batadv_forw_packet *forw_packet)
174{ 101{
175 if (forw_packet->skb) 102 if (forw_packet->skb)
176 kfree_skb(forw_packet->skb); 103 kfree_skb(forw_packet->skb);
177 if (forw_packet->if_incoming) 104 if (forw_packet->if_incoming)
178 hardif_free_ref(forw_packet->if_incoming); 105 batadv_hardif_free_ref(forw_packet->if_incoming);
179 kfree(forw_packet); 106 kfree(forw_packet);
180} 107}
181 108
182static void _add_bcast_packet_to_list(struct bat_priv *bat_priv, 109static void
183 struct forw_packet *forw_packet, 110_batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
184 unsigned long send_time) 111 struct batadv_forw_packet *forw_packet,
112 unsigned long send_time)
185{ 113{
186 INIT_HLIST_NODE(&forw_packet->list); 114 INIT_HLIST_NODE(&forw_packet->list);
187 115
@@ -192,8 +120,8 @@ static void _add_bcast_packet_to_list(struct bat_priv *bat_priv,
192 120
193 /* start timer for this packet */ 121 /* start timer for this packet */
194 INIT_DELAYED_WORK(&forw_packet->delayed_work, 122 INIT_DELAYED_WORK(&forw_packet->delayed_work,
195 send_outstanding_bcast_packet); 123 batadv_send_outstanding_bcast_packet);
196 queue_delayed_work(bat_event_workqueue, &forw_packet->delayed_work, 124 queue_delayed_work(batadv_event_workqueue, &forw_packet->delayed_work,
197 send_time); 125 send_time);
198} 126}
199 127
@@ -204,21 +132,24 @@ static void _add_bcast_packet_to_list(struct bat_priv *bat_priv,
204 * errors. 132 * errors.
205 * 133 *
206 * The skb is not consumed, so the caller should make sure that the 134 * The skb is not consumed, so the caller should make sure that the
207 * skb is freed. */ 135 * skb is freed.
208int add_bcast_packet_to_list(struct bat_priv *bat_priv, 136 */
209 const struct sk_buff *skb, unsigned long delay) 137int batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
138 const struct sk_buff *skb,
139 unsigned long delay)
210{ 140{
211 struct hard_iface *primary_if = NULL; 141 struct batadv_hard_iface *primary_if = NULL;
212 struct forw_packet *forw_packet; 142 struct batadv_forw_packet *forw_packet;
213 struct bcast_packet *bcast_packet; 143 struct batadv_bcast_packet *bcast_packet;
214 struct sk_buff *newskb; 144 struct sk_buff *newskb;
215 145
216 if (!atomic_dec_not_zero(&bat_priv->bcast_queue_left)) { 146 if (!batadv_atomic_dec_not_zero(&bat_priv->bcast_queue_left)) {
217 bat_dbg(DBG_BATMAN, bat_priv, "bcast packet queue full\n"); 147 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
148 "bcast packet queue full\n");
218 goto out; 149 goto out;
219 } 150 }
220 151
221 primary_if = primary_if_get_selected(bat_priv); 152 primary_if = batadv_primary_if_get_selected(bat_priv);
222 if (!primary_if) 153 if (!primary_if)
223 goto out_and_inc; 154 goto out_and_inc;
224 155
@@ -232,7 +163,7 @@ int add_bcast_packet_to_list(struct bat_priv *bat_priv,
232 goto packet_free; 163 goto packet_free;
233 164
234 /* as we have a copy now, it is safe to decrease the TTL */ 165 /* as we have a copy now, it is safe to decrease the TTL */
235 bcast_packet = (struct bcast_packet *)newskb->data; 166 bcast_packet = (struct batadv_bcast_packet *)newskb->data;
236 bcast_packet->header.ttl--; 167 bcast_packet->header.ttl--;
237 168
238 skb_reset_mac_header(newskb); 169 skb_reset_mac_header(newskb);
@@ -243,7 +174,7 @@ int add_bcast_packet_to_list(struct bat_priv *bat_priv,
243 /* how often did we send the bcast packet ? */ 174 /* how often did we send the bcast packet ? */
244 forw_packet->num_packets = 0; 175 forw_packet->num_packets = 0;
245 176
246 _add_bcast_packet_to_list(bat_priv, forw_packet, delay); 177 _batadv_add_bcast_packet_to_list(bat_priv, forw_packet, delay);
247 return NETDEV_TX_OK; 178 return NETDEV_TX_OK;
248 179
249packet_free: 180packet_free:
@@ -252,38 +183,43 @@ out_and_inc:
252 atomic_inc(&bat_priv->bcast_queue_left); 183 atomic_inc(&bat_priv->bcast_queue_left);
253out: 184out:
254 if (primary_if) 185 if (primary_if)
255 hardif_free_ref(primary_if); 186 batadv_hardif_free_ref(primary_if);
256 return NETDEV_TX_BUSY; 187 return NETDEV_TX_BUSY;
257} 188}
258 189
259static void send_outstanding_bcast_packet(struct work_struct *work) 190static void batadv_send_outstanding_bcast_packet(struct work_struct *work)
260{ 191{
261 struct hard_iface *hard_iface; 192 struct batadv_hard_iface *hard_iface;
262 struct delayed_work *delayed_work = 193 struct delayed_work *delayed_work =
263 container_of(work, struct delayed_work, work); 194 container_of(work, struct delayed_work, work);
264 struct forw_packet *forw_packet = 195 struct batadv_forw_packet *forw_packet;
265 container_of(delayed_work, struct forw_packet, delayed_work);
266 struct sk_buff *skb1; 196 struct sk_buff *skb1;
267 struct net_device *soft_iface = forw_packet->if_incoming->soft_iface; 197 struct net_device *soft_iface;
268 struct bat_priv *bat_priv = netdev_priv(soft_iface); 198 struct batadv_priv *bat_priv;
199
200 forw_packet = container_of(delayed_work, struct batadv_forw_packet,
201 delayed_work);
202 soft_iface = forw_packet->if_incoming->soft_iface;
203 bat_priv = netdev_priv(soft_iface);
269 204
270 spin_lock_bh(&bat_priv->forw_bcast_list_lock); 205 spin_lock_bh(&bat_priv->forw_bcast_list_lock);
271 hlist_del(&forw_packet->list); 206 hlist_del(&forw_packet->list);
272 spin_unlock_bh(&bat_priv->forw_bcast_list_lock); 207 spin_unlock_bh(&bat_priv->forw_bcast_list_lock);
273 208
274 if (atomic_read(&bat_priv->mesh_state) == MESH_DEACTIVATING) 209 if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING)
275 goto out; 210 goto out;
276 211
277 /* rebroadcast packet */ 212 /* rebroadcast packet */
278 rcu_read_lock(); 213 rcu_read_lock();
279 list_for_each_entry_rcu(hard_iface, &hardif_list, list) { 214 list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
280 if (hard_iface->soft_iface != soft_iface) 215 if (hard_iface->soft_iface != soft_iface)
281 continue; 216 continue;
282 217
283 /* send a copy of the saved skb */ 218 /* send a copy of the saved skb */
284 skb1 = skb_clone(forw_packet->skb, GFP_ATOMIC); 219 skb1 = skb_clone(forw_packet->skb, GFP_ATOMIC);
285 if (skb1) 220 if (skb1)
286 send_skb_packet(skb1, hard_iface, broadcast_addr); 221 batadv_send_skb_packet(skb1, hard_iface,
222 batadv_broadcast_addr);
287 } 223 }
288 rcu_read_unlock(); 224 rcu_read_unlock();
289 225
@@ -291,72 +227,72 @@ static void send_outstanding_bcast_packet(struct work_struct *work)
291 227
292 /* if we still have some more bcasts to send */ 228 /* if we still have some more bcasts to send */
293 if (forw_packet->num_packets < 3) { 229 if (forw_packet->num_packets < 3) {
294 _add_bcast_packet_to_list(bat_priv, forw_packet, 230 _batadv_add_bcast_packet_to_list(bat_priv, forw_packet,
295 msecs_to_jiffies(5)); 231 msecs_to_jiffies(5));
296 return; 232 return;
297 } 233 }
298 234
299out: 235out:
300 forw_packet_free(forw_packet); 236 batadv_forw_packet_free(forw_packet);
301 atomic_inc(&bat_priv->bcast_queue_left); 237 atomic_inc(&bat_priv->bcast_queue_left);
302} 238}
303 239
304void send_outstanding_bat_ogm_packet(struct work_struct *work) 240void batadv_send_outstanding_bat_ogm_packet(struct work_struct *work)
305{ 241{
306 struct delayed_work *delayed_work = 242 struct delayed_work *delayed_work =
307 container_of(work, struct delayed_work, work); 243 container_of(work, struct delayed_work, work);
308 struct forw_packet *forw_packet = 244 struct batadv_forw_packet *forw_packet;
309 container_of(delayed_work, struct forw_packet, delayed_work); 245 struct batadv_priv *bat_priv;
310 struct bat_priv *bat_priv;
311 246
247 forw_packet = container_of(delayed_work, struct batadv_forw_packet,
248 delayed_work);
312 bat_priv = netdev_priv(forw_packet->if_incoming->soft_iface); 249 bat_priv = netdev_priv(forw_packet->if_incoming->soft_iface);
313 spin_lock_bh(&bat_priv->forw_bat_list_lock); 250 spin_lock_bh(&bat_priv->forw_bat_list_lock);
314 hlist_del(&forw_packet->list); 251 hlist_del(&forw_packet->list);
315 spin_unlock_bh(&bat_priv->forw_bat_list_lock); 252 spin_unlock_bh(&bat_priv->forw_bat_list_lock);
316 253
317 if (atomic_read(&bat_priv->mesh_state) == MESH_DEACTIVATING) 254 if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING)
318 goto out; 255 goto out;
319 256
320 bat_priv->bat_algo_ops->bat_ogm_emit(forw_packet); 257 bat_priv->bat_algo_ops->bat_ogm_emit(forw_packet);
321 258
322 /** 259 /* we have to have at least one packet in the queue
323 * we have to have at least one packet in the queue
324 * to determine the queues wake up time unless we are 260 * to determine the queues wake up time unless we are
325 * shutting down 261 * shutting down
326 */ 262 */
327 if (forw_packet->own) 263 if (forw_packet->own)
328 schedule_bat_ogm(forw_packet->if_incoming); 264 batadv_schedule_bat_ogm(forw_packet->if_incoming);
329 265
330out: 266out:
331 /* don't count own packet */ 267 /* don't count own packet */
332 if (!forw_packet->own) 268 if (!forw_packet->own)
333 atomic_inc(&bat_priv->batman_queue_left); 269 atomic_inc(&bat_priv->batman_queue_left);
334 270
335 forw_packet_free(forw_packet); 271 batadv_forw_packet_free(forw_packet);
336} 272}
337 273
338void purge_outstanding_packets(struct bat_priv *bat_priv, 274void
339 const struct hard_iface *hard_iface) 275batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
276 const struct batadv_hard_iface *hard_iface)
340{ 277{
341 struct forw_packet *forw_packet; 278 struct batadv_forw_packet *forw_packet;
342 struct hlist_node *tmp_node, *safe_tmp_node; 279 struct hlist_node *tmp_node, *safe_tmp_node;
343 bool pending; 280 bool pending;
344 281
345 if (hard_iface) 282 if (hard_iface)
346 bat_dbg(DBG_BATMAN, bat_priv, 283 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
347 "purge_outstanding_packets(): %s\n", 284 "purge_outstanding_packets(): %s\n",
348 hard_iface->net_dev->name); 285 hard_iface->net_dev->name);
349 else 286 else
350 bat_dbg(DBG_BATMAN, bat_priv, 287 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
351 "purge_outstanding_packets()\n"); 288 "purge_outstanding_packets()\n");
352 289
353 /* free bcast list */ 290 /* free bcast list */
354 spin_lock_bh(&bat_priv->forw_bcast_list_lock); 291 spin_lock_bh(&bat_priv->forw_bcast_list_lock);
355 hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node, 292 hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
356 &bat_priv->forw_bcast_list, list) { 293 &bat_priv->forw_bcast_list, list) {
357 294
358 /** 295 /* if purge_outstanding_packets() was called with an argument
359 * if purge_outstanding_packets() was called with an argument
360 * we delete only packets belonging to the given interface 296 * we delete only packets belonging to the given interface
361 */ 297 */
362 if ((hard_iface) && 298 if ((hard_iface) &&
@@ -365,8 +301,7 @@ void purge_outstanding_packets(struct bat_priv *bat_priv,
365 301
366 spin_unlock_bh(&bat_priv->forw_bcast_list_lock); 302 spin_unlock_bh(&bat_priv->forw_bcast_list_lock);
367 303
368 /** 304 /* batadv_send_outstanding_bcast_packet() will lock the list to
369 * send_outstanding_bcast_packet() will lock the list to
370 * delete the item from the list 305 * delete the item from the list
371 */ 306 */
372 pending = cancel_delayed_work_sync(&forw_packet->delayed_work); 307 pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
@@ -374,7 +309,7 @@ void purge_outstanding_packets(struct bat_priv *bat_priv,
374 309
375 if (pending) { 310 if (pending) {
376 hlist_del(&forw_packet->list); 311 hlist_del(&forw_packet->list);
377 forw_packet_free(forw_packet); 312 batadv_forw_packet_free(forw_packet);
378 } 313 }
379 } 314 }
380 spin_unlock_bh(&bat_priv->forw_bcast_list_lock); 315 spin_unlock_bh(&bat_priv->forw_bcast_list_lock);
@@ -384,8 +319,7 @@ void purge_outstanding_packets(struct bat_priv *bat_priv,
384 hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node, 319 hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
385 &bat_priv->forw_bat_list, list) { 320 &bat_priv->forw_bat_list, list) {
386 321
387 /** 322 /* if purge_outstanding_packets() was called with an argument
388 * if purge_outstanding_packets() was called with an argument
389 * we delete only packets belonging to the given interface 323 * we delete only packets belonging to the given interface
390 */ 324 */
391 if ((hard_iface) && 325 if ((hard_iface) &&
@@ -394,8 +328,7 @@ void purge_outstanding_packets(struct bat_priv *bat_priv,
394 328
395 spin_unlock_bh(&bat_priv->forw_bat_list_lock); 329 spin_unlock_bh(&bat_priv->forw_bat_list_lock);
396 330
397 /** 331 /* send_outstanding_bat_packet() will lock the list to
398 * send_outstanding_bat_packet() will lock the list to
399 * delete the item from the list 332 * delete the item from the list
400 */ 333 */
401 pending = cancel_delayed_work_sync(&forw_packet->delayed_work); 334 pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
@@ -403,7 +336,7 @@ void purge_outstanding_packets(struct bat_priv *bat_priv,
403 336
404 if (pending) { 337 if (pending) {
405 hlist_del(&forw_packet->list); 338 hlist_del(&forw_packet->list);
406 forw_packet_free(forw_packet); 339 batadv_forw_packet_free(forw_packet);
407 } 340 }
408 } 341 }
409 spin_unlock_bh(&bat_priv->forw_bat_list_lock); 342 spin_unlock_bh(&bat_priv->forw_bat_list_lock);
diff --git a/net/batman-adv/send.h b/net/batman-adv/send.h
index 824ef06f9b0..643329b787e 100644
--- a/net/batman-adv/send.h
+++ b/net/batman-adv/send.h
@@ -1,5 +1,4 @@
1/* 1/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
2 * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
3 * 2 *
4 * Marek Lindner, Simon Wunderlich 3 * Marek Lindner, Simon Wunderlich
5 * 4 *
@@ -16,19 +15,21 @@
16 * along with this program; if not, write to the Free Software 15 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA 17 * 02110-1301, USA
19 *
20 */ 18 */
21 19
22#ifndef _NET_BATMAN_ADV_SEND_H_ 20#ifndef _NET_BATMAN_ADV_SEND_H_
23#define _NET_BATMAN_ADV_SEND_H_ 21#define _NET_BATMAN_ADV_SEND_H_
24 22
25int send_skb_packet(struct sk_buff *skb, struct hard_iface *hard_iface, 23int batadv_send_skb_packet(struct sk_buff *skb,
26 const uint8_t *dst_addr); 24 struct batadv_hard_iface *hard_iface,
27void schedule_bat_ogm(struct hard_iface *hard_iface); 25 const uint8_t *dst_addr);
28int add_bcast_packet_to_list(struct bat_priv *bat_priv, 26void batadv_schedule_bat_ogm(struct batadv_hard_iface *hard_iface);
29 const struct sk_buff *skb, unsigned long delay); 27int batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
30void send_outstanding_bat_ogm_packet(struct work_struct *work); 28 const struct sk_buff *skb,
31void purge_outstanding_packets(struct bat_priv *bat_priv, 29 unsigned long delay);
32 const struct hard_iface *hard_iface); 30void batadv_send_outstanding_bat_ogm_packet(struct work_struct *work);
31void
32batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
33 const struct batadv_hard_iface *hard_iface);
33 34
34#endif /* _NET_BATMAN_ADV_SEND_H_ */ 35#endif /* _NET_BATMAN_ADV_SEND_H_ */
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index a0ec0e4ada4..109ea2aae96 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -1,5 +1,4 @@
1/* 1/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
2 * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
3 * 2 *
4 * Marek Lindner, Simon Wunderlich 3 * Marek Lindner, Simon Wunderlich
5 * 4 *
@@ -16,7 +15,6 @@
16 * along with this program; if not, write to the Free Software 15 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA 17 * 02110-1301, USA
19 *
20 */ 18 */
21 19
22#include "main.h" 20#include "main.h"
@@ -24,12 +22,12 @@
24#include "hard-interface.h" 22#include "hard-interface.h"
25#include "routing.h" 23#include "routing.h"
26#include "send.h" 24#include "send.h"
27#include "bat_debugfs.h" 25#include "debugfs.h"
28#include "translation-table.h" 26#include "translation-table.h"
29#include "hash.h" 27#include "hash.h"
30#include "gateway_common.h" 28#include "gateway_common.h"
31#include "gateway_client.h" 29#include "gateway_client.h"
32#include "bat_sysfs.h" 30#include "sysfs.h"
33#include "originator.h" 31#include "originator.h"
34#include <linux/slab.h> 32#include <linux/slab.h>
35#include <linux/ethtool.h> 33#include <linux/ethtool.h>
@@ -39,27 +37,33 @@
39#include "bridge_loop_avoidance.h" 37#include "bridge_loop_avoidance.h"
40 38
41 39
42static int bat_get_settings(struct net_device *dev, struct ethtool_cmd *cmd); 40static int batadv_get_settings(struct net_device *dev, struct ethtool_cmd *cmd);
43static void bat_get_drvinfo(struct net_device *dev, 41static void batadv_get_drvinfo(struct net_device *dev,
44 struct ethtool_drvinfo *info); 42 struct ethtool_drvinfo *info);
45static u32 bat_get_msglevel(struct net_device *dev); 43static u32 batadv_get_msglevel(struct net_device *dev);
46static void bat_set_msglevel(struct net_device *dev, u32 value); 44static void batadv_set_msglevel(struct net_device *dev, u32 value);
47static u32 bat_get_link(struct net_device *dev); 45static u32 batadv_get_link(struct net_device *dev);
48 46static void batadv_get_strings(struct net_device *dev, u32 stringset, u8 *data);
49static const struct ethtool_ops bat_ethtool_ops = { 47static void batadv_get_ethtool_stats(struct net_device *dev,
50 .get_settings = bat_get_settings, 48 struct ethtool_stats *stats, u64 *data);
51 .get_drvinfo = bat_get_drvinfo, 49static int batadv_get_sset_count(struct net_device *dev, int stringset);
52 .get_msglevel = bat_get_msglevel, 50
53 .set_msglevel = bat_set_msglevel, 51static const struct ethtool_ops batadv_ethtool_ops = {
54 .get_link = bat_get_link, 52 .get_settings = batadv_get_settings,
53 .get_drvinfo = batadv_get_drvinfo,
54 .get_msglevel = batadv_get_msglevel,
55 .set_msglevel = batadv_set_msglevel,
56 .get_link = batadv_get_link,
57 .get_strings = batadv_get_strings,
58 .get_ethtool_stats = batadv_get_ethtool_stats,
59 .get_sset_count = batadv_get_sset_count,
55}; 60};
56 61
57int my_skb_head_push(struct sk_buff *skb, unsigned int len) 62int batadv_skb_head_push(struct sk_buff *skb, unsigned int len)
58{ 63{
59 int result; 64 int result;
60 65
61 /** 66 /* TODO: We must check if we can release all references to non-payload
62 * TODO: We must check if we can release all references to non-payload
63 * data using skb_header_release in our skbs to allow skb_cow_header to 67 * data using skb_header_release in our skbs to allow skb_cow_header to
64 * work optimally. This means that those skbs are not allowed to read 68 * work optimally. This means that those skbs are not allowed to read
65 * or write any data which is before the current position of skb->data 69 * or write any data which is before the current position of skb->data
@@ -74,37 +78,37 @@ int my_skb_head_push(struct sk_buff *skb, unsigned int len)
74 return 0; 78 return 0;
75} 79}
76 80
77static int interface_open(struct net_device *dev) 81static int batadv_interface_open(struct net_device *dev)
78{ 82{
79 netif_start_queue(dev); 83 netif_start_queue(dev);
80 return 0; 84 return 0;
81} 85}
82 86
83static int interface_release(struct net_device *dev) 87static int batadv_interface_release(struct net_device *dev)
84{ 88{
85 netif_stop_queue(dev); 89 netif_stop_queue(dev);
86 return 0; 90 return 0;
87} 91}
88 92
89static struct net_device_stats *interface_stats(struct net_device *dev) 93static struct net_device_stats *batadv_interface_stats(struct net_device *dev)
90{ 94{
91 struct bat_priv *bat_priv = netdev_priv(dev); 95 struct batadv_priv *bat_priv = netdev_priv(dev);
92 return &bat_priv->stats; 96 return &bat_priv->stats;
93} 97}
94 98
95static int interface_set_mac_addr(struct net_device *dev, void *p) 99static int batadv_interface_set_mac_addr(struct net_device *dev, void *p)
96{ 100{
97 struct bat_priv *bat_priv = netdev_priv(dev); 101 struct batadv_priv *bat_priv = netdev_priv(dev);
98 struct sockaddr *addr = p; 102 struct sockaddr *addr = p;
99 103
100 if (!is_valid_ether_addr(addr->sa_data)) 104 if (!is_valid_ether_addr(addr->sa_data))
101 return -EADDRNOTAVAIL; 105 return -EADDRNOTAVAIL;
102 106
103 /* only modify transtable if it has been initialized before */ 107 /* only modify transtable if it has been initialized before */
104 if (atomic_read(&bat_priv->mesh_state) == MESH_ACTIVE) { 108 if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_ACTIVE) {
105 tt_local_remove(bat_priv, dev->dev_addr, 109 batadv_tt_local_remove(bat_priv, dev->dev_addr,
106 "mac address changed", false); 110 "mac address changed", false);
107 tt_local_add(dev, addr->sa_data, NULL_IFINDEX); 111 batadv_tt_local_add(dev, addr->sa_data, BATADV_NULL_IFINDEX);
108 } 112 }
109 113
110 memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); 114 memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
@@ -112,10 +116,10 @@ static int interface_set_mac_addr(struct net_device *dev, void *p)
112 return 0; 116 return 0;
113} 117}
114 118
115static int interface_change_mtu(struct net_device *dev, int new_mtu) 119static int batadv_interface_change_mtu(struct net_device *dev, int new_mtu)
116{ 120{
117 /* check ranges */ 121 /* check ranges */
118 if ((new_mtu < 68) || (new_mtu > hardif_min_mtu(dev))) 122 if ((new_mtu < 68) || (new_mtu > batadv_hardif_min_mtu(dev)))
119 return -EINVAL; 123 return -EINVAL;
120 124
121 dev->mtu = new_mtu; 125 dev->mtu = new_mtu;
@@ -123,13 +127,15 @@ static int interface_change_mtu(struct net_device *dev, int new_mtu)
123 return 0; 127 return 0;
124} 128}
125 129
126static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface) 130static int batadv_interface_tx(struct sk_buff *skb,
131 struct net_device *soft_iface)
127{ 132{
128 struct ethhdr *ethhdr = (struct ethhdr *)skb->data; 133 struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
129 struct bat_priv *bat_priv = netdev_priv(soft_iface); 134 struct batadv_priv *bat_priv = netdev_priv(soft_iface);
130 struct hard_iface *primary_if = NULL; 135 struct batadv_hard_iface *primary_if = NULL;
131 struct bcast_packet *bcast_packet; 136 struct batadv_bcast_packet *bcast_packet;
132 struct vlan_ethhdr *vhdr; 137 struct vlan_ethhdr *vhdr;
138 __be16 ethertype = __constant_htons(BATADV_ETH_P_BATMAN);
133 static const uint8_t stp_addr[ETH_ALEN] = {0x01, 0x80, 0xC2, 0x00, 0x00, 139 static const uint8_t stp_addr[ETH_ALEN] = {0x01, 0x80, 0xC2, 0x00, 0x00,
134 0x00}; 140 0x00};
135 unsigned int header_len = 0; 141 unsigned int header_len = 0;
@@ -137,7 +143,7 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
137 short vid __maybe_unused = -1; 143 short vid __maybe_unused = -1;
138 bool do_bcast = false; 144 bool do_bcast = false;
139 145
140 if (atomic_read(&bat_priv->mesh_state) != MESH_ACTIVE) 146 if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE)
141 goto dropped; 147 goto dropped;
142 148
143 soft_iface->trans_start = jiffies; 149 soft_iface->trans_start = jiffies;
@@ -147,45 +153,47 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
147 vhdr = (struct vlan_ethhdr *)skb->data; 153 vhdr = (struct vlan_ethhdr *)skb->data;
148 vid = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK; 154 vid = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK;
149 155
150 if (ntohs(vhdr->h_vlan_encapsulated_proto) != ETH_P_BATMAN) 156 if (vhdr->h_vlan_encapsulated_proto != ethertype)
151 break; 157 break;
152 158
153 /* fall through */ 159 /* fall through */
154 case ETH_P_BATMAN: 160 case BATADV_ETH_P_BATMAN:
155 goto dropped; 161 goto dropped;
156 } 162 }
157 163
158 if (bla_tx(bat_priv, skb, vid)) 164 if (batadv_bla_tx(bat_priv, skb, vid))
159 goto dropped; 165 goto dropped;
160 166
161 /* Register the client MAC in the transtable */ 167 /* Register the client MAC in the transtable */
162 tt_local_add(soft_iface, ethhdr->h_source, skb->skb_iif); 168 batadv_tt_local_add(soft_iface, ethhdr->h_source, skb->skb_iif);
163 169
164 /* don't accept stp packets. STP does not help in meshes. 170 /* don't accept stp packets. STP does not help in meshes.
165 * better use the bridge loop avoidance ... 171 * better use the bridge loop avoidance ...
166 */ 172 */
167 if (compare_eth(ethhdr->h_dest, stp_addr)) 173 if (batadv_compare_eth(ethhdr->h_dest, stp_addr))
168 goto dropped; 174 goto dropped;
169 175
170 if (is_multicast_ether_addr(ethhdr->h_dest)) { 176 if (is_multicast_ether_addr(ethhdr->h_dest)) {
171 do_bcast = true; 177 do_bcast = true;
172 178
173 switch (atomic_read(&bat_priv->gw_mode)) { 179 switch (atomic_read(&bat_priv->gw_mode)) {
174 case GW_MODE_SERVER: 180 case BATADV_GW_MODE_SERVER:
175 /* gateway servers should not send dhcp 181 /* gateway servers should not send dhcp
176 * requests into the mesh */ 182 * requests into the mesh
177 ret = gw_is_dhcp_target(skb, &header_len); 183 */
184 ret = batadv_gw_is_dhcp_target(skb, &header_len);
178 if (ret) 185 if (ret)
179 goto dropped; 186 goto dropped;
180 break; 187 break;
181 case GW_MODE_CLIENT: 188 case BATADV_GW_MODE_CLIENT:
182 /* gateway clients should send dhcp requests 189 /* gateway clients should send dhcp requests
183 * via unicast to their gateway */ 190 * via unicast to their gateway
184 ret = gw_is_dhcp_target(skb, &header_len); 191 */
192 ret = batadv_gw_is_dhcp_target(skb, &header_len);
185 if (ret) 193 if (ret)
186 do_bcast = false; 194 do_bcast = false;
187 break; 195 break;
188 case GW_MODE_OFF: 196 case BATADV_GW_MODE_OFF:
189 default: 197 default:
190 break; 198 break;
191 } 199 }
@@ -193,22 +201,24 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
193 201
194 /* ethernet packet should be broadcasted */ 202 /* ethernet packet should be broadcasted */
195 if (do_bcast) { 203 if (do_bcast) {
196 primary_if = primary_if_get_selected(bat_priv); 204 primary_if = batadv_primary_if_get_selected(bat_priv);
197 if (!primary_if) 205 if (!primary_if)
198 goto dropped; 206 goto dropped;
199 207
200 if (my_skb_head_push(skb, sizeof(*bcast_packet)) < 0) 208 if (batadv_skb_head_push(skb, sizeof(*bcast_packet)) < 0)
201 goto dropped; 209 goto dropped;
202 210
203 bcast_packet = (struct bcast_packet *)skb->data; 211 bcast_packet = (struct batadv_bcast_packet *)skb->data;
204 bcast_packet->header.version = COMPAT_VERSION; 212 bcast_packet->header.version = BATADV_COMPAT_VERSION;
205 bcast_packet->header.ttl = TTL; 213 bcast_packet->header.ttl = BATADV_TTL;
206 214
207 /* batman packet type: broadcast */ 215 /* batman packet type: broadcast */
208 bcast_packet->header.packet_type = BAT_BCAST; 216 bcast_packet->header.packet_type = BATADV_BCAST;
217 bcast_packet->reserved = 0;
209 218
210 /* hw address of first interface is the orig mac because only 219 /* hw address of first interface is the orig mac because only
211 * this mac is known throughout the mesh */ 220 * this mac is known throughout the mesh
221 */
212 memcpy(bcast_packet->orig, 222 memcpy(bcast_packet->orig,
213 primary_if->net_dev->dev_addr, ETH_ALEN); 223 primary_if->net_dev->dev_addr, ETH_ALEN);
214 224
@@ -216,21 +226,22 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
216 bcast_packet->seqno = 226 bcast_packet->seqno =
217 htonl(atomic_inc_return(&bat_priv->bcast_seqno)); 227 htonl(atomic_inc_return(&bat_priv->bcast_seqno));
218 228
219 add_bcast_packet_to_list(bat_priv, skb, 1); 229 batadv_add_bcast_packet_to_list(bat_priv, skb, 1);
220 230
221 /* a copy is stored in the bcast list, therefore removing 231 /* a copy is stored in the bcast list, therefore removing
222 * the original skb. */ 232 * the original skb.
233 */
223 kfree_skb(skb); 234 kfree_skb(skb);
224 235
225 /* unicast packet */ 236 /* unicast packet */
226 } else { 237 } else {
227 if (atomic_read(&bat_priv->gw_mode) != GW_MODE_OFF) { 238 if (atomic_read(&bat_priv->gw_mode) != BATADV_GW_MODE_OFF) {
228 ret = gw_out_of_range(bat_priv, skb, ethhdr); 239 ret = batadv_gw_out_of_range(bat_priv, skb, ethhdr);
229 if (ret) 240 if (ret)
230 goto dropped; 241 goto dropped;
231 } 242 }
232 243
233 ret = unicast_send_skb(skb, bat_priv); 244 ret = batadv_unicast_send_skb(skb, bat_priv);
234 if (ret != 0) 245 if (ret != 0)
235 goto dropped_freed; 246 goto dropped_freed;
236 } 247 }
@@ -245,22 +256,23 @@ dropped_freed:
245 bat_priv->stats.tx_dropped++; 256 bat_priv->stats.tx_dropped++;
246end: 257end:
247 if (primary_if) 258 if (primary_if)
248 hardif_free_ref(primary_if); 259 batadv_hardif_free_ref(primary_if);
249 return NETDEV_TX_OK; 260 return NETDEV_TX_OK;
250} 261}
251 262
252void interface_rx(struct net_device *soft_iface, 263void batadv_interface_rx(struct net_device *soft_iface,
253 struct sk_buff *skb, struct hard_iface *recv_if, 264 struct sk_buff *skb, struct batadv_hard_iface *recv_if,
254 int hdr_size) 265 int hdr_size)
255{ 266{
256 struct bat_priv *bat_priv = netdev_priv(soft_iface); 267 struct batadv_priv *bat_priv = netdev_priv(soft_iface);
257 struct ethhdr *ethhdr; 268 struct ethhdr *ethhdr;
258 struct vlan_ethhdr *vhdr; 269 struct vlan_ethhdr *vhdr;
259 struct batman_header *batadv_header = (struct batman_header *)skb->data; 270 struct batadv_header *batadv_header = (struct batadv_header *)skb->data;
260 short vid __maybe_unused = -1; 271 short vid __maybe_unused = -1;
272 __be16 ethertype = __constant_htons(BATADV_ETH_P_BATMAN);
261 bool is_bcast; 273 bool is_bcast;
262 274
263 is_bcast = (batadv_header->packet_type == BAT_BCAST); 275 is_bcast = (batadv_header->packet_type == BATADV_BCAST);
264 276
265 /* check if enough space is available for pulling, and pull */ 277 /* check if enough space is available for pulling, and pull */
266 if (!pskb_may_pull(skb, hdr_size)) 278 if (!pskb_may_pull(skb, hdr_size))
@@ -276,11 +288,11 @@ void interface_rx(struct net_device *soft_iface,
276 vhdr = (struct vlan_ethhdr *)skb->data; 288 vhdr = (struct vlan_ethhdr *)skb->data;
277 vid = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK; 289 vid = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK;
278 290
279 if (ntohs(vhdr->h_vlan_encapsulated_proto) != ETH_P_BATMAN) 291 if (vhdr->h_vlan_encapsulated_proto != ethertype)
280 break; 292 break;
281 293
282 /* fall through */ 294 /* fall through */
283 case ETH_P_BATMAN: 295 case BATADV_ETH_P_BATMAN:
284 goto dropped; 296 goto dropped;
285 } 297 }
286 298
@@ -291,22 +303,23 @@ void interface_rx(struct net_device *soft_iface,
291 303
292 /* should not be necessary anymore as we use skb_pull_rcsum() 304 /* should not be necessary anymore as we use skb_pull_rcsum()
293 * TODO: please verify this and remove this TODO 305 * TODO: please verify this and remove this TODO
294 * -- Dec 21st 2009, Simon Wunderlich */ 306 * -- Dec 21st 2009, Simon Wunderlich
307 */
295 308
296/* skb->ip_summed = CHECKSUM_UNNECESSARY;*/ 309 /* skb->ip_summed = CHECKSUM_UNNECESSARY; */
297 310
298 bat_priv->stats.rx_packets++; 311 bat_priv->stats.rx_packets++;
299 bat_priv->stats.rx_bytes += skb->len + ETH_HLEN; 312 bat_priv->stats.rx_bytes += skb->len + ETH_HLEN;
300 313
301 soft_iface->last_rx = jiffies; 314 soft_iface->last_rx = jiffies;
302 315
303 if (is_ap_isolated(bat_priv, ethhdr->h_source, ethhdr->h_dest)) 316 if (batadv_is_ap_isolated(bat_priv, ethhdr->h_source, ethhdr->h_dest))
304 goto dropped; 317 goto dropped;
305 318
306 /* Let the bridge loop avoidance check the packet. If will 319 /* Let the bridge loop avoidance check the packet. If will
307 * not handle it, we can safely push it up. 320 * not handle it, we can safely push it up.
308 */ 321 */
309 if (bla_rx(bat_priv, skb, vid, is_bcast)) 322 if (batadv_bla_rx(bat_priv, skb, vid, is_bcast))
310 goto out; 323 goto out;
311 324
312 netif_rx(skb); 325 netif_rx(skb);
@@ -318,49 +331,50 @@ out:
318 return; 331 return;
319} 332}
320 333
321static const struct net_device_ops bat_netdev_ops = { 334static const struct net_device_ops batadv_netdev_ops = {
322 .ndo_open = interface_open, 335 .ndo_open = batadv_interface_open,
323 .ndo_stop = interface_release, 336 .ndo_stop = batadv_interface_release,
324 .ndo_get_stats = interface_stats, 337 .ndo_get_stats = batadv_interface_stats,
325 .ndo_set_mac_address = interface_set_mac_addr, 338 .ndo_set_mac_address = batadv_interface_set_mac_addr,
326 .ndo_change_mtu = interface_change_mtu, 339 .ndo_change_mtu = batadv_interface_change_mtu,
327 .ndo_start_xmit = interface_tx, 340 .ndo_start_xmit = batadv_interface_tx,
328 .ndo_validate_addr = eth_validate_addr 341 .ndo_validate_addr = eth_validate_addr
329}; 342};
330 343
331static void interface_setup(struct net_device *dev) 344static void batadv_interface_setup(struct net_device *dev)
332{ 345{
333 struct bat_priv *priv = netdev_priv(dev); 346 struct batadv_priv *priv = netdev_priv(dev);
334 347
335 ether_setup(dev); 348 ether_setup(dev);
336 349
337 dev->netdev_ops = &bat_netdev_ops; 350 dev->netdev_ops = &batadv_netdev_ops;
338 dev->destructor = free_netdev; 351 dev->destructor = free_netdev;
339 dev->tx_queue_len = 0; 352 dev->tx_queue_len = 0;
340 353
341 /** 354 /* can't call min_mtu, because the needed variables
342 * can't call min_mtu, because the needed variables
343 * have not been initialized yet 355 * have not been initialized yet
344 */ 356 */
345 dev->mtu = ETH_DATA_LEN; 357 dev->mtu = ETH_DATA_LEN;
346 /* reserve more space in the skbuff for our header */ 358 /* reserve more space in the skbuff for our header */
347 dev->hard_header_len = BAT_HEADER_LEN; 359 dev->hard_header_len = BATADV_HEADER_LEN;
348 360
349 /* generate random address */ 361 /* generate random address */
350 eth_hw_addr_random(dev); 362 eth_hw_addr_random(dev);
351 363
352 SET_ETHTOOL_OPS(dev, &bat_ethtool_ops); 364 SET_ETHTOOL_OPS(dev, &batadv_ethtool_ops);
353 365
354 memset(priv, 0, sizeof(*priv)); 366 memset(priv, 0, sizeof(*priv));
355} 367}
356 368
357struct net_device *softif_create(const char *name) 369struct net_device *batadv_softif_create(const char *name)
358{ 370{
359 struct net_device *soft_iface; 371 struct net_device *soft_iface;
360 struct bat_priv *bat_priv; 372 struct batadv_priv *bat_priv;
361 int ret; 373 int ret;
374 size_t cnt_len = sizeof(uint64_t) * BATADV_CNT_NUM;
362 375
363 soft_iface = alloc_netdev(sizeof(*bat_priv), name, interface_setup); 376 soft_iface = alloc_netdev(sizeof(*bat_priv), name,
377 batadv_interface_setup);
364 378
365 if (!soft_iface) 379 if (!soft_iface)
366 goto out; 380 goto out;
@@ -378,18 +392,18 @@ struct net_device *softif_create(const char *name)
378 atomic_set(&bat_priv->bonding, 0); 392 atomic_set(&bat_priv->bonding, 0);
379 atomic_set(&bat_priv->bridge_loop_avoidance, 0); 393 atomic_set(&bat_priv->bridge_loop_avoidance, 0);
380 atomic_set(&bat_priv->ap_isolation, 0); 394 atomic_set(&bat_priv->ap_isolation, 0);
381 atomic_set(&bat_priv->vis_mode, VIS_TYPE_CLIENT_UPDATE); 395 atomic_set(&bat_priv->vis_mode, BATADV_VIS_TYPE_CLIENT_UPDATE);
382 atomic_set(&bat_priv->gw_mode, GW_MODE_OFF); 396 atomic_set(&bat_priv->gw_mode, BATADV_GW_MODE_OFF);
383 atomic_set(&bat_priv->gw_sel_class, 20); 397 atomic_set(&bat_priv->gw_sel_class, 20);
384 atomic_set(&bat_priv->gw_bandwidth, 41); 398 atomic_set(&bat_priv->gw_bandwidth, 41);
385 atomic_set(&bat_priv->orig_interval, 1000); 399 atomic_set(&bat_priv->orig_interval, 1000);
386 atomic_set(&bat_priv->hop_penalty, 30); 400 atomic_set(&bat_priv->hop_penalty, 30);
387 atomic_set(&bat_priv->log_level, 0); 401 atomic_set(&bat_priv->log_level, 0);
388 atomic_set(&bat_priv->fragmentation, 1); 402 atomic_set(&bat_priv->fragmentation, 1);
389 atomic_set(&bat_priv->bcast_queue_left, BCAST_QUEUE_LEN); 403 atomic_set(&bat_priv->bcast_queue_left, BATADV_BCAST_QUEUE_LEN);
390 atomic_set(&bat_priv->batman_queue_left, BATMAN_QUEUE_LEN); 404 atomic_set(&bat_priv->batman_queue_left, BATADV_BATMAN_QUEUE_LEN);
391 405
392 atomic_set(&bat_priv->mesh_state, MESH_INACTIVE); 406 atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
393 atomic_set(&bat_priv->bcast_seqno, 1); 407 atomic_set(&bat_priv->bcast_seqno, 1);
394 atomic_set(&bat_priv->ttvn, 0); 408 atomic_set(&bat_priv->ttvn, 0);
395 atomic_set(&bat_priv->tt_local_changes, 0); 409 atomic_set(&bat_priv->tt_local_changes, 0);
@@ -403,28 +417,34 @@ struct net_device *softif_create(const char *name)
403 bat_priv->primary_if = NULL; 417 bat_priv->primary_if = NULL;
404 bat_priv->num_ifaces = 0; 418 bat_priv->num_ifaces = 0;
405 419
406 ret = bat_algo_select(bat_priv, bat_routing_algo); 420 bat_priv->bat_counters = __alloc_percpu(cnt_len, __alignof__(uint64_t));
407 if (ret < 0) 421 if (!bat_priv->bat_counters)
408 goto unreg_soft_iface; 422 goto unreg_soft_iface;
409 423
410 ret = sysfs_add_meshif(soft_iface); 424 ret = batadv_algo_select(bat_priv, batadv_routing_algo);
411 if (ret < 0) 425 if (ret < 0)
412 goto unreg_soft_iface; 426 goto free_bat_counters;
413 427
414 ret = debugfs_add_meshif(soft_iface); 428 ret = batadv_sysfs_add_meshif(soft_iface);
429 if (ret < 0)
430 goto free_bat_counters;
431
432 ret = batadv_debugfs_add_meshif(soft_iface);
415 if (ret < 0) 433 if (ret < 0)
416 goto unreg_sysfs; 434 goto unreg_sysfs;
417 435
418 ret = mesh_init(soft_iface); 436 ret = batadv_mesh_init(soft_iface);
419 if (ret < 0) 437 if (ret < 0)
420 goto unreg_debugfs; 438 goto unreg_debugfs;
421 439
422 return soft_iface; 440 return soft_iface;
423 441
424unreg_debugfs: 442unreg_debugfs:
425 debugfs_del_meshif(soft_iface); 443 batadv_debugfs_del_meshif(soft_iface);
426unreg_sysfs: 444unreg_sysfs:
427 sysfs_del_meshif(soft_iface); 445 batadv_sysfs_del_meshif(soft_iface);
446free_bat_counters:
447 free_percpu(bat_priv->bat_counters);
428unreg_soft_iface: 448unreg_soft_iface:
429 unregister_netdevice(soft_iface); 449 unregister_netdevice(soft_iface);
430 return NULL; 450 return NULL;
@@ -435,24 +455,24 @@ out:
435 return NULL; 455 return NULL;
436} 456}
437 457
438void softif_destroy(struct net_device *soft_iface) 458void batadv_softif_destroy(struct net_device *soft_iface)
439{ 459{
440 debugfs_del_meshif(soft_iface); 460 batadv_debugfs_del_meshif(soft_iface);
441 sysfs_del_meshif(soft_iface); 461 batadv_sysfs_del_meshif(soft_iface);
442 mesh_free(soft_iface); 462 batadv_mesh_free(soft_iface);
443 unregister_netdevice(soft_iface); 463 unregister_netdevice(soft_iface);
444} 464}
445 465
446int softif_is_valid(const struct net_device *net_dev) 466int batadv_softif_is_valid(const struct net_device *net_dev)
447{ 467{
448 if (net_dev->netdev_ops->ndo_start_xmit == interface_tx) 468 if (net_dev->netdev_ops->ndo_start_xmit == batadv_interface_tx)
449 return 1; 469 return 1;
450 470
451 return 0; 471 return 0;
452} 472}
453 473
454/* ethtool */ 474/* ethtool */
455static int bat_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 475static int batadv_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
456{ 476{
457 cmd->supported = 0; 477 cmd->supported = 0;
458 cmd->advertising = 0; 478 cmd->advertising = 0;
@@ -468,25 +488,73 @@ static int bat_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
468 return 0; 488 return 0;
469} 489}
470 490
471static void bat_get_drvinfo(struct net_device *dev, 491static void batadv_get_drvinfo(struct net_device *dev,
472 struct ethtool_drvinfo *info) 492 struct ethtool_drvinfo *info)
473{ 493{
474 strcpy(info->driver, "B.A.T.M.A.N. advanced"); 494 strcpy(info->driver, "B.A.T.M.A.N. advanced");
475 strcpy(info->version, SOURCE_VERSION); 495 strcpy(info->version, BATADV_SOURCE_VERSION);
476 strcpy(info->fw_version, "N/A"); 496 strcpy(info->fw_version, "N/A");
477 strcpy(info->bus_info, "batman"); 497 strcpy(info->bus_info, "batman");
478} 498}
479 499
480static u32 bat_get_msglevel(struct net_device *dev) 500static u32 batadv_get_msglevel(struct net_device *dev)
481{ 501{
482 return -EOPNOTSUPP; 502 return -EOPNOTSUPP;
483} 503}
484 504
485static void bat_set_msglevel(struct net_device *dev, u32 value) 505static void batadv_set_msglevel(struct net_device *dev, u32 value)
486{ 506{
487} 507}
488 508
489static u32 bat_get_link(struct net_device *dev) 509static u32 batadv_get_link(struct net_device *dev)
490{ 510{
491 return 1; 511 return 1;
492} 512}
513
514/* Inspired by drivers/net/ethernet/dlink/sundance.c:1702
515 * Declare each description string in struct.name[] to get fixed sized buffer
516 * and compile time checking for strings longer than ETH_GSTRING_LEN.
517 */
518static const struct {
519 const char name[ETH_GSTRING_LEN];
520} batadv_counters_strings[] = {
521 { "forward" },
522 { "forward_bytes" },
523 { "mgmt_tx" },
524 { "mgmt_tx_bytes" },
525 { "mgmt_rx" },
526 { "mgmt_rx_bytes" },
527 { "tt_request_tx" },
528 { "tt_request_rx" },
529 { "tt_response_tx" },
530 { "tt_response_rx" },
531 { "tt_roam_adv_tx" },
532 { "tt_roam_adv_rx" },
533};
534
535static void batadv_get_strings(struct net_device *dev, uint32_t stringset,
536 uint8_t *data)
537{
538 if (stringset == ETH_SS_STATS)
539 memcpy(data, batadv_counters_strings,
540 sizeof(batadv_counters_strings));
541}
542
543static void batadv_get_ethtool_stats(struct net_device *dev,
544 struct ethtool_stats *stats,
545 uint64_t *data)
546{
547 struct batadv_priv *bat_priv = netdev_priv(dev);
548 int i;
549
550 for (i = 0; i < BATADV_CNT_NUM; i++)
551 data[i] = batadv_sum_counter(bat_priv, i);
552}
553
554static int batadv_get_sset_count(struct net_device *dev, int stringset)
555{
556 if (stringset == ETH_SS_STATS)
557 return BATADV_CNT_NUM;
558
559 return -EOPNOTSUPP;
560}
diff --git a/net/batman-adv/soft-interface.h b/net/batman-adv/soft-interface.h
index 02030067388..852c683b06a 100644
--- a/net/batman-adv/soft-interface.h
+++ b/net/batman-adv/soft-interface.h
@@ -1,5 +1,4 @@
1/* 1/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
2 * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
3 * 2 *
4 * Marek Lindner 3 * Marek Lindner
5 * 4 *
@@ -16,18 +15,16 @@
16 * along with this program; if not, write to the Free Software 15 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA 17 * 02110-1301, USA
19 *
20 */ 18 */
21 19
22#ifndef _NET_BATMAN_ADV_SOFT_INTERFACE_H_ 20#ifndef _NET_BATMAN_ADV_SOFT_INTERFACE_H_
23#define _NET_BATMAN_ADV_SOFT_INTERFACE_H_ 21#define _NET_BATMAN_ADV_SOFT_INTERFACE_H_
24 22
25int my_skb_head_push(struct sk_buff *skb, unsigned int len); 23int batadv_skb_head_push(struct sk_buff *skb, unsigned int len);
26void interface_rx(struct net_device *soft_iface, 24void batadv_interface_rx(struct net_device *soft_iface, struct sk_buff *skb,
27 struct sk_buff *skb, struct hard_iface *recv_if, 25 struct batadv_hard_iface *recv_if, int hdr_size);
28 int hdr_size); 26struct net_device *batadv_softif_create(const char *name);
29struct net_device *softif_create(const char *name); 27void batadv_softif_destroy(struct net_device *soft_iface);
30void softif_destroy(struct net_device *soft_iface); 28int batadv_softif_is_valid(const struct net_device *net_dev);
31int softif_is_valid(const struct net_device *net_dev);
32 29
33#endif /* _NET_BATMAN_ADV_SOFT_INTERFACE_H_ */ 30#endif /* _NET_BATMAN_ADV_SOFT_INTERFACE_H_ */
diff --git a/net/batman-adv/sysfs.c b/net/batman-adv/sysfs.c
new file mode 100644
index 00000000000..66518c75c21
--- /dev/null
+++ b/net/batman-adv/sysfs.c
@@ -0,0 +1,787 @@
1/* Copyright (C) 2010-2012 B.A.T.M.A.N. contributors:
2 *
3 * Marek Lindner
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of version 2 of the GNU General Public
7 * License as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
17 * 02110-1301, USA
18 */
19
20#include "main.h"
21#include "sysfs.h"
22#include "translation-table.h"
23#include "originator.h"
24#include "hard-interface.h"
25#include "gateway_common.h"
26#include "gateway_client.h"
27#include "vis.h"
28
29static struct net_device *batadv_kobj_to_netdev(struct kobject *obj)
30{
31 struct device *dev = container_of(obj->parent, struct device, kobj);
32 return to_net_dev(dev);
33}
34
35static struct batadv_priv *batadv_kobj_to_batpriv(struct kobject *obj)
36{
37 struct net_device *net_dev = batadv_kobj_to_netdev(obj);
38 return netdev_priv(net_dev);
39}
40
41#define BATADV_UEV_TYPE_VAR "BATTYPE="
42#define BATADV_UEV_ACTION_VAR "BATACTION="
43#define BATADV_UEV_DATA_VAR "BATDATA="
44
45static char *batadv_uev_action_str[] = {
46 "add",
47 "del",
48 "change"
49};
50
51static char *batadv_uev_type_str[] = {
52 "gw"
53};
54
55/* Use this, if you have customized show and store functions */
56#define BATADV_ATTR(_name, _mode, _show, _store) \
57struct batadv_attribute batadv_attr_##_name = { \
58 .attr = {.name = __stringify(_name), \
59 .mode = _mode }, \
60 .show = _show, \
61 .store = _store, \
62};
63
64#define BATADV_ATTR_SIF_STORE_BOOL(_name, _post_func) \
65ssize_t batadv_store_##_name(struct kobject *kobj, \
66 struct attribute *attr, char *buff, \
67 size_t count) \
68{ \
69 struct net_device *net_dev = batadv_kobj_to_netdev(kobj); \
70 struct batadv_priv *bat_priv = netdev_priv(net_dev); \
71 return __batadv_store_bool_attr(buff, count, _post_func, attr, \
72 &bat_priv->_name, net_dev); \
73}
74
75#define BATADV_ATTR_SIF_SHOW_BOOL(_name) \
76ssize_t batadv_show_##_name(struct kobject *kobj, \
77 struct attribute *attr, char *buff) \
78{ \
79 struct batadv_priv *bat_priv = batadv_kobj_to_batpriv(kobj); \
80 return sprintf(buff, "%s\n", \
81 atomic_read(&bat_priv->_name) == 0 ? \
82 "disabled" : "enabled"); \
83} \
84
85/* Use this, if you are going to turn a [name] in the soft-interface
86 * (bat_priv) on or off
87 */
88#define BATADV_ATTR_SIF_BOOL(_name, _mode, _post_func) \
89 static BATADV_ATTR_SIF_STORE_BOOL(_name, _post_func) \
90 static BATADV_ATTR_SIF_SHOW_BOOL(_name) \
91 static BATADV_ATTR(_name, _mode, batadv_show_##_name, \
92 batadv_store_##_name)
93
94
95#define BATADV_ATTR_SIF_STORE_UINT(_name, _min, _max, _post_func) \
96ssize_t batadv_store_##_name(struct kobject *kobj, \
97 struct attribute *attr, char *buff, \
98 size_t count) \
99{ \
100 struct net_device *net_dev = batadv_kobj_to_netdev(kobj); \
101 struct batadv_priv *bat_priv = netdev_priv(net_dev); \
102 return __batadv_store_uint_attr(buff, count, _min, _max, \
103 _post_func, attr, \
104 &bat_priv->_name, net_dev); \
105}
106
107#define BATADV_ATTR_SIF_SHOW_UINT(_name) \
108ssize_t batadv_show_##_name(struct kobject *kobj, \
109 struct attribute *attr, char *buff) \
110{ \
111 struct batadv_priv *bat_priv = batadv_kobj_to_batpriv(kobj); \
112 return sprintf(buff, "%i\n", atomic_read(&bat_priv->_name)); \
113} \
114
115/* Use this, if you are going to set [name] in the soft-interface
116 * (bat_priv) to an unsigned integer value
117 */
118#define BATADV_ATTR_SIF_UINT(_name, _mode, _min, _max, _post_func) \
119 static BATADV_ATTR_SIF_STORE_UINT(_name, _min, _max, _post_func)\
120 static BATADV_ATTR_SIF_SHOW_UINT(_name) \
121 static BATADV_ATTR(_name, _mode, batadv_show_##_name, \
122 batadv_store_##_name)
123
124
125#define BATADV_ATTR_HIF_STORE_UINT(_name, _min, _max, _post_func) \
126ssize_t batadv_store_##_name(struct kobject *kobj, \
127 struct attribute *attr, char *buff, \
128 size_t count) \
129{ \
130 struct net_device *net_dev = batadv_kobj_to_netdev(kobj); \
131 struct batadv_hard_iface *hard_iface; \
132 ssize_t length; \
133 \
134 hard_iface = batadv_hardif_get_by_netdev(net_dev); \
135 if (!hard_iface) \
136 return 0; \
137 \
138 length = __batadv_store_uint_attr(buff, count, _min, _max, \
139 _post_func, attr, \
140 &hard_iface->_name, net_dev); \
141 \
142 batadv_hardif_free_ref(hard_iface); \
143 return length; \
144}
145
146#define BATADV_ATTR_HIF_SHOW_UINT(_name) \
147ssize_t batadv_show_##_name(struct kobject *kobj, \
148 struct attribute *attr, char *buff) \
149{ \
150 struct net_device *net_dev = batadv_kobj_to_netdev(kobj); \
151 struct batadv_hard_iface *hard_iface; \
152 ssize_t length; \
153 \
154 hard_iface = batadv_hardif_get_by_netdev(net_dev); \
155 if (!hard_iface) \
156 return 0; \
157 \
158 length = sprintf(buff, "%i\n", atomic_read(&hard_iface->_name));\
159 \
160 batadv_hardif_free_ref(hard_iface); \
161 return length; \
162}
163
164/* Use this, if you are going to set [name] in hard_iface to an
165 * unsigned integer value
166 */
167#define BATADV_ATTR_HIF_UINT(_name, _mode, _min, _max, _post_func) \
168 static BATADV_ATTR_HIF_STORE_UINT(_name, _min, _max, _post_func)\
169 static BATADV_ATTR_HIF_SHOW_UINT(_name) \
170 static BATADV_ATTR(_name, _mode, batadv_show_##_name, \
171 batadv_store_##_name)
172
173
174static int batadv_store_bool_attr(char *buff, size_t count,
175 struct net_device *net_dev,
176 const char *attr_name, atomic_t *attr)
177{
178 int enabled = -1;
179
180 if (buff[count - 1] == '\n')
181 buff[count - 1] = '\0';
182
183 if ((strncmp(buff, "1", 2) == 0) ||
184 (strncmp(buff, "enable", 7) == 0) ||
185 (strncmp(buff, "enabled", 8) == 0))
186 enabled = 1;
187
188 if ((strncmp(buff, "0", 2) == 0) ||
189 (strncmp(buff, "disable", 8) == 0) ||
190 (strncmp(buff, "disabled", 9) == 0))
191 enabled = 0;
192
193 if (enabled < 0) {
194 batadv_info(net_dev, "%s: Invalid parameter received: %s\n",
195 attr_name, buff);
196 return -EINVAL;
197 }
198
199 if (atomic_read(attr) == enabled)
200 return count;
201
202 batadv_info(net_dev, "%s: Changing from: %s to: %s\n", attr_name,
203 atomic_read(attr) == 1 ? "enabled" : "disabled",
204 enabled == 1 ? "enabled" : "disabled");
205
206 atomic_set(attr, (unsigned int)enabled);
207 return count;
208}
209
210static inline ssize_t
211__batadv_store_bool_attr(char *buff, size_t count,
212 void (*post_func)(struct net_device *),
213 struct attribute *attr,
214 atomic_t *attr_store, struct net_device *net_dev)
215{
216 int ret;
217
218 ret = batadv_store_bool_attr(buff, count, net_dev, attr->name,
219 attr_store);
220 if (post_func && ret)
221 post_func(net_dev);
222
223 return ret;
224}
225
226static int batadv_store_uint_attr(const char *buff, size_t count,
227 struct net_device *net_dev,
228 const char *attr_name,
229 unsigned int min, unsigned int max,
230 atomic_t *attr)
231{
232 unsigned long uint_val;
233 int ret;
234
235 ret = kstrtoul(buff, 10, &uint_val);
236 if (ret) {
237 batadv_info(net_dev, "%s: Invalid parameter received: %s\n",
238 attr_name, buff);
239 return -EINVAL;
240 }
241
242 if (uint_val < min) {
243 batadv_info(net_dev, "%s: Value is too small: %lu min: %u\n",
244 attr_name, uint_val, min);
245 return -EINVAL;
246 }
247
248 if (uint_val > max) {
249 batadv_info(net_dev, "%s: Value is too big: %lu max: %u\n",
250 attr_name, uint_val, max);
251 return -EINVAL;
252 }
253
254 if (atomic_read(attr) == uint_val)
255 return count;
256
257 batadv_info(net_dev, "%s: Changing from: %i to: %lu\n",
258 attr_name, atomic_read(attr), uint_val);
259
260 atomic_set(attr, uint_val);
261 return count;
262}
263
264static inline ssize_t
265__batadv_store_uint_attr(const char *buff, size_t count,
266 int min, int max,
267 void (*post_func)(struct net_device *),
268 const struct attribute *attr,
269 atomic_t *attr_store, struct net_device *net_dev)
270{
271 int ret;
272
273 ret = batadv_store_uint_attr(buff, count, net_dev, attr->name, min, max,
274 attr_store);
275 if (post_func && ret)
276 post_func(net_dev);
277
278 return ret;
279}
280
281static ssize_t batadv_show_vis_mode(struct kobject *kobj,
282 struct attribute *attr, char *buff)
283{
284 struct batadv_priv *bat_priv = batadv_kobj_to_batpriv(kobj);
285 int vis_mode = atomic_read(&bat_priv->vis_mode);
286 const char *mode;
287
288 if (vis_mode == BATADV_VIS_TYPE_CLIENT_UPDATE)
289 mode = "client";
290 else
291 mode = "server";
292
293 return sprintf(buff, "%s\n", mode);
294}
295
296static ssize_t batadv_store_vis_mode(struct kobject *kobj,
297 struct attribute *attr, char *buff,
298 size_t count)
299{
300 struct net_device *net_dev = batadv_kobj_to_netdev(kobj);
301 struct batadv_priv *bat_priv = netdev_priv(net_dev);
302 unsigned long val;
303 int ret, vis_mode_tmp = -1;
304 const char *old_mode, *new_mode;
305
306 ret = kstrtoul(buff, 10, &val);
307
308 if (((count == 2) && (!ret) &&
309 (val == BATADV_VIS_TYPE_CLIENT_UPDATE)) ||
310 (strncmp(buff, "client", 6) == 0) ||
311 (strncmp(buff, "off", 3) == 0))
312 vis_mode_tmp = BATADV_VIS_TYPE_CLIENT_UPDATE;
313
314 if (((count == 2) && (!ret) &&
315 (val == BATADV_VIS_TYPE_SERVER_SYNC)) ||
316 (strncmp(buff, "server", 6) == 0))
317 vis_mode_tmp = BATADV_VIS_TYPE_SERVER_SYNC;
318
319 if (vis_mode_tmp < 0) {
320 if (buff[count - 1] == '\n')
321 buff[count - 1] = '\0';
322
323 batadv_info(net_dev,
324 "Invalid parameter for 'vis mode' setting received: %s\n",
325 buff);
326 return -EINVAL;
327 }
328
329 if (atomic_read(&bat_priv->vis_mode) == vis_mode_tmp)
330 return count;
331
332 if (atomic_read(&bat_priv->vis_mode) == BATADV_VIS_TYPE_CLIENT_UPDATE)
333 old_mode = "client";
334 else
335 old_mode = "server";
336
337 if (vis_mode_tmp == BATADV_VIS_TYPE_CLIENT_UPDATE)
338 new_mode = "client";
339 else
340 new_mode = "server";
341
342 batadv_info(net_dev, "Changing vis mode from: %s to: %s\n", old_mode,
343 new_mode);
344
345 atomic_set(&bat_priv->vis_mode, (unsigned int)vis_mode_tmp);
346 return count;
347}
348
349static ssize_t batadv_show_bat_algo(struct kobject *kobj,
350 struct attribute *attr, char *buff)
351{
352 struct batadv_priv *bat_priv = batadv_kobj_to_batpriv(kobj);
353 return sprintf(buff, "%s\n", bat_priv->bat_algo_ops->name);
354}
355
356static void batadv_post_gw_deselect(struct net_device *net_dev)
357{
358 struct batadv_priv *bat_priv = netdev_priv(net_dev);
359 batadv_gw_deselect(bat_priv);
360}
361
362static ssize_t batadv_show_gw_mode(struct kobject *kobj, struct attribute *attr,
363 char *buff)
364{
365 struct batadv_priv *bat_priv = batadv_kobj_to_batpriv(kobj);
366 int bytes_written;
367
368 switch (atomic_read(&bat_priv->gw_mode)) {
369 case BATADV_GW_MODE_CLIENT:
370 bytes_written = sprintf(buff, "%s\n",
371 BATADV_GW_MODE_CLIENT_NAME);
372 break;
373 case BATADV_GW_MODE_SERVER:
374 bytes_written = sprintf(buff, "%s\n",
375 BATADV_GW_MODE_SERVER_NAME);
376 break;
377 default:
378 bytes_written = sprintf(buff, "%s\n",
379 BATADV_GW_MODE_OFF_NAME);
380 break;
381 }
382
383 return bytes_written;
384}
385
386static ssize_t batadv_store_gw_mode(struct kobject *kobj,
387 struct attribute *attr, char *buff,
388 size_t count)
389{
390 struct net_device *net_dev = batadv_kobj_to_netdev(kobj);
391 struct batadv_priv *bat_priv = netdev_priv(net_dev);
392 char *curr_gw_mode_str;
393 int gw_mode_tmp = -1;
394
395 if (buff[count - 1] == '\n')
396 buff[count - 1] = '\0';
397
398 if (strncmp(buff, BATADV_GW_MODE_OFF_NAME,
399 strlen(BATADV_GW_MODE_OFF_NAME)) == 0)
400 gw_mode_tmp = BATADV_GW_MODE_OFF;
401
402 if (strncmp(buff, BATADV_GW_MODE_CLIENT_NAME,
403 strlen(BATADV_GW_MODE_CLIENT_NAME)) == 0)
404 gw_mode_tmp = BATADV_GW_MODE_CLIENT;
405
406 if (strncmp(buff, BATADV_GW_MODE_SERVER_NAME,
407 strlen(BATADV_GW_MODE_SERVER_NAME)) == 0)
408 gw_mode_tmp = BATADV_GW_MODE_SERVER;
409
410 if (gw_mode_tmp < 0) {
411 batadv_info(net_dev,
412 "Invalid parameter for 'gw mode' setting received: %s\n",
413 buff);
414 return -EINVAL;
415 }
416
417 if (atomic_read(&bat_priv->gw_mode) == gw_mode_tmp)
418 return count;
419
420 switch (atomic_read(&bat_priv->gw_mode)) {
421 case BATADV_GW_MODE_CLIENT:
422 curr_gw_mode_str = BATADV_GW_MODE_CLIENT_NAME;
423 break;
424 case BATADV_GW_MODE_SERVER:
425 curr_gw_mode_str = BATADV_GW_MODE_SERVER_NAME;
426 break;
427 default:
428 curr_gw_mode_str = BATADV_GW_MODE_OFF_NAME;
429 break;
430 }
431
432 batadv_info(net_dev, "Changing gw mode from: %s to: %s\n",
433 curr_gw_mode_str, buff);
434
435 batadv_gw_deselect(bat_priv);
436 atomic_set(&bat_priv->gw_mode, (unsigned int)gw_mode_tmp);
437 return count;
438}
439
440static ssize_t batadv_show_gw_bwidth(struct kobject *kobj,
441 struct attribute *attr, char *buff)
442{
443 struct batadv_priv *bat_priv = batadv_kobj_to_batpriv(kobj);
444 int down, up;
445 int gw_bandwidth = atomic_read(&bat_priv->gw_bandwidth);
446
447 batadv_gw_bandwidth_to_kbit(gw_bandwidth, &down, &up);
448 return sprintf(buff, "%i%s/%i%s\n",
449 (down > 2048 ? down / 1024 : down),
450 (down > 2048 ? "MBit" : "KBit"),
451 (up > 2048 ? up / 1024 : up),
452 (up > 2048 ? "MBit" : "KBit"));
453}
454
455static ssize_t batadv_store_gw_bwidth(struct kobject *kobj,
456 struct attribute *attr, char *buff,
457 size_t count)
458{
459 struct net_device *net_dev = batadv_kobj_to_netdev(kobj);
460
461 if (buff[count - 1] == '\n')
462 buff[count - 1] = '\0';
463
464 return batadv_gw_bandwidth_set(net_dev, buff, count);
465}
466
/* mesh (soft-interface) tunables exposed under sysfs; boolean/uint helpers
 * generate show/store handlers, the last argument is an optional callback
 * invoked after the value changed
 */
BATADV_ATTR_SIF_BOOL(aggregated_ogms, S_IRUGO | S_IWUSR, NULL);
BATADV_ATTR_SIF_BOOL(bonding, S_IRUGO | S_IWUSR, NULL);
#ifdef CONFIG_BATMAN_ADV_BLA
BATADV_ATTR_SIF_BOOL(bridge_loop_avoidance, S_IRUGO | S_IWUSR, NULL);
#endif
/* changing fragmentation affects the maximum payload -> recompute MTU */
BATADV_ATTR_SIF_BOOL(fragmentation, S_IRUGO | S_IWUSR, batadv_update_min_mtu);
BATADV_ATTR_SIF_BOOL(ap_isolation, S_IRUGO | S_IWUSR, NULL);
/* attributes with hand-written show/store handlers */
static BATADV_ATTR(vis_mode, S_IRUGO | S_IWUSR, batadv_show_vis_mode,
		   batadv_store_vis_mode);
static BATADV_ATTR(routing_algo, S_IRUGO, batadv_show_bat_algo, NULL);
static BATADV_ATTR(gw_mode, S_IRUGO | S_IWUSR, batadv_show_gw_mode,
		   batadv_store_gw_mode);
/* bounded unsigned tunables: (name, mode, min, max, post-change callback) */
BATADV_ATTR_SIF_UINT(orig_interval, S_IRUGO | S_IWUSR, 2 * BATADV_JITTER,
		     INT_MAX, NULL);
BATADV_ATTR_SIF_UINT(hop_penalty, S_IRUGO | S_IWUSR, 0, BATADV_TQ_MAX_VALUE,
		     NULL);
/* a new selection class invalidates the currently chosen gateway */
BATADV_ATTR_SIF_UINT(gw_sel_class, S_IRUGO | S_IWUSR, 1, BATADV_TQ_MAX_VALUE,
		     batadv_post_gw_deselect);
static BATADV_ATTR(gw_bandwidth, S_IRUGO | S_IWUSR, batadv_show_gw_bwidth,
		   batadv_store_gw_bwidth);
#ifdef CONFIG_BATMAN_ADV_DEBUG
BATADV_ATTR_SIF_UINT(log_level, S_IRUGO | S_IWUSR, 0, BATADV_DBG_ALL, NULL);
#endif
490
/* NULL-terminated list of all per-mesh attributes; iterated by
 * batadv_sysfs_add_meshif()/batadv_sysfs_del_meshif()
 */
static struct batadv_attribute *batadv_mesh_attrs[] = {
	&batadv_attr_aggregated_ogms,
	&batadv_attr_bonding,
#ifdef CONFIG_BATMAN_ADV_BLA
	&batadv_attr_bridge_loop_avoidance,
#endif
	&batadv_attr_fragmentation,
	&batadv_attr_ap_isolation,
	&batadv_attr_vis_mode,
	&batadv_attr_routing_algo,
	&batadv_attr_gw_mode,
	&batadv_attr_orig_interval,
	&batadv_attr_hop_penalty,
	&batadv_attr_gw_sel_class,
	&batadv_attr_gw_bandwidth,
#ifdef CONFIG_BATMAN_ADV_DEBUG
	&batadv_attr_log_level,
#endif
	NULL,
};
511
512int batadv_sysfs_add_meshif(struct net_device *dev)
513{
514 struct kobject *batif_kobject = &dev->dev.kobj;
515 struct batadv_priv *bat_priv = netdev_priv(dev);
516 struct batadv_attribute **bat_attr;
517 int err;
518
519 bat_priv->mesh_obj = kobject_create_and_add(BATADV_SYSFS_IF_MESH_SUBDIR,
520 batif_kobject);
521 if (!bat_priv->mesh_obj) {
522 batadv_err(dev, "Can't add sysfs directory: %s/%s\n", dev->name,
523 BATADV_SYSFS_IF_MESH_SUBDIR);
524 goto out;
525 }
526
527 for (bat_attr = batadv_mesh_attrs; *bat_attr; ++bat_attr) {
528 err = sysfs_create_file(bat_priv->mesh_obj,
529 &((*bat_attr)->attr));
530 if (err) {
531 batadv_err(dev, "Can't add sysfs file: %s/%s/%s\n",
532 dev->name, BATADV_SYSFS_IF_MESH_SUBDIR,
533 ((*bat_attr)->attr).name);
534 goto rem_attr;
535 }
536 }
537
538 return 0;
539
540rem_attr:
541 for (bat_attr = batadv_mesh_attrs; *bat_attr; ++bat_attr)
542 sysfs_remove_file(bat_priv->mesh_obj, &((*bat_attr)->attr));
543
544 kobject_put(bat_priv->mesh_obj);
545 bat_priv->mesh_obj = NULL;
546out:
547 return -ENOMEM;
548}
549
550void batadv_sysfs_del_meshif(struct net_device *dev)
551{
552 struct batadv_priv *bat_priv = netdev_priv(dev);
553 struct batadv_attribute **bat_attr;
554
555 for (bat_attr = batadv_mesh_attrs; *bat_attr; ++bat_attr)
556 sysfs_remove_file(bat_priv->mesh_obj, &((*bat_attr)->attr));
557
558 kobject_put(bat_priv->mesh_obj);
559 bat_priv->mesh_obj = NULL;
560}
561
562static ssize_t batadv_show_mesh_iface(struct kobject *kobj,
563 struct attribute *attr, char *buff)
564{
565 struct net_device *net_dev = batadv_kobj_to_netdev(kobj);
566 struct batadv_hard_iface *hard_iface;
567 ssize_t length;
568 const char *ifname;
569
570 hard_iface = batadv_hardif_get_by_netdev(net_dev);
571 if (!hard_iface)
572 return 0;
573
574 if (hard_iface->if_status == BATADV_IF_NOT_IN_USE)
575 ifname = "none";
576 else
577 ifname = hard_iface->soft_iface->name;
578
579 length = sprintf(buff, "%s\n", ifname);
580
581 batadv_hardif_free_ref(hard_iface);
582
583 return length;
584}
585
/* attach a hard interface to (or detach it from) a mesh interface
 *
 * Writing an interface name enables the hard interface for that mesh;
 * writing "none" disables it. Returns @count on success or no-op,
 * -EINVAL for an over-long name, -ERESTARTSYS if the rtnl lock could not
 * be taken, or the batadv_hardif_enable_interface() result.
 */
static ssize_t batadv_store_mesh_iface(struct kobject *kobj,
				       struct attribute *attr, char *buff,
				       size_t count)
{
	struct net_device *net_dev = batadv_kobj_to_netdev(kobj);
	struct batadv_hard_iface *hard_iface;
	int status_tmp = -1;
	int ret = count;

	hard_iface = batadv_hardif_get_by_netdev(net_dev);
	if (!hard_iface)
		/* not a batman-adv hard interface: silently accept the write */
		return count;

	/* strip the trailing newline sysfs writes usually carry */
	if (buff[count - 1] == '\n')
		buff[count - 1] = '\0';

	if (strlen(buff) >= IFNAMSIZ) {
		pr_err("Invalid parameter for 'mesh_iface' setting received: interface name too long '%s'\n",
		       buff);
		batadv_hardif_free_ref(hard_iface);
		return -EINVAL;
	}

	/* map the requested string to the target interface status */
	if (strncmp(buff, "none", 4) == 0)
		status_tmp = BATADV_IF_NOT_IN_USE;
	else
		status_tmp = BATADV_IF_I_WANT_YOU;

	/* nothing to do if the interface already is in the requested state */
	if (hard_iface->if_status == status_tmp)
		goto out;

	/* already attached to the very soft interface that was requested */
	if ((hard_iface->soft_iface) &&
	    (strncmp(hard_iface->soft_iface->name, buff, IFNAMSIZ) == 0))
		goto out;

	/* trylock: a sysfs store must not sleep waiting for rtnl, the
	 * caller can simply retry
	 */
	if (!rtnl_trylock()) {
		ret = -ERESTARTSYS;
		goto out;
	}

	if (status_tmp == BATADV_IF_NOT_IN_USE) {
		batadv_hardif_disable_interface(hard_iface);
		goto unlock;
	}

	/* if the interface already is in use */
	if (hard_iface->if_status != BATADV_IF_NOT_IN_USE)
		batadv_hardif_disable_interface(hard_iface);

	ret = batadv_hardif_enable_interface(hard_iface, buff);

unlock:
	rtnl_unlock();
out:
	/* drop the reference taken by batadv_hardif_get_by_netdev() */
	batadv_hardif_free_ref(hard_iface);
	return ret;
}
643
644static ssize_t batadv_show_iface_status(struct kobject *kobj,
645 struct attribute *attr, char *buff)
646{
647 struct net_device *net_dev = batadv_kobj_to_netdev(kobj);
648 struct batadv_hard_iface *hard_iface;
649 ssize_t length;
650
651 hard_iface = batadv_hardif_get_by_netdev(net_dev);
652 if (!hard_iface)
653 return 0;
654
655 switch (hard_iface->if_status) {
656 case BATADV_IF_TO_BE_REMOVED:
657 length = sprintf(buff, "disabling\n");
658 break;
659 case BATADV_IF_INACTIVE:
660 length = sprintf(buff, "inactive\n");
661 break;
662 case BATADV_IF_ACTIVE:
663 length = sprintf(buff, "active\n");
664 break;
665 case BATADV_IF_TO_BE_ACTIVATED:
666 length = sprintf(buff, "enabling\n");
667 break;
668 case BATADV_IF_NOT_IN_USE:
669 default:
670 length = sprintf(buff, "not in use\n");
671 break;
672 }
673
674 batadv_hardif_free_ref(hard_iface);
675
676 return length;
677}
678
/* per-hard-interface attributes and their NULL-terminated list, used by
 * batadv_sysfs_add_hardif()
 */
static BATADV_ATTR(mesh_iface, S_IRUGO | S_IWUSR, batadv_show_mesh_iface,
		   batadv_store_mesh_iface);
static BATADV_ATTR(iface_status, S_IRUGO, batadv_show_iface_status, NULL);

static struct batadv_attribute *batadv_batman_attrs[] = {
	&batadv_attr_mesh_iface,
	&batadv_attr_iface_status,
	NULL,
};
688
689int batadv_sysfs_add_hardif(struct kobject **hardif_obj, struct net_device *dev)
690{
691 struct kobject *hardif_kobject = &dev->dev.kobj;
692 struct batadv_attribute **bat_attr;
693 int err;
694
695 *hardif_obj = kobject_create_and_add(BATADV_SYSFS_IF_BAT_SUBDIR,
696 hardif_kobject);
697
698 if (!*hardif_obj) {
699 batadv_err(dev, "Can't add sysfs directory: %s/%s\n", dev->name,
700 BATADV_SYSFS_IF_BAT_SUBDIR);
701 goto out;
702 }
703
704 for (bat_attr = batadv_batman_attrs; *bat_attr; ++bat_attr) {
705 err = sysfs_create_file(*hardif_obj, &((*bat_attr)->attr));
706 if (err) {
707 batadv_err(dev, "Can't add sysfs file: %s/%s/%s\n",
708 dev->name, BATADV_SYSFS_IF_BAT_SUBDIR,
709 ((*bat_attr)->attr).name);
710 goto rem_attr;
711 }
712 }
713
714 return 0;
715
716rem_attr:
717 for (bat_attr = batadv_batman_attrs; *bat_attr; ++bat_attr)
718 sysfs_remove_file(*hardif_obj, &((*bat_attr)->attr));
719out:
720 return -ENOMEM;
721}
722
723void batadv_sysfs_del_hardif(struct kobject **hardif_obj)
724{
725 kobject_put(*hardif_obj);
726 *hardif_obj = NULL;
727}
728
729int batadv_throw_uevent(struct batadv_priv *bat_priv, enum batadv_uev_type type,
730 enum batadv_uev_action action, const char *data)
731{
732 int ret = -ENOMEM;
733 struct batadv_hard_iface *primary_if = NULL;
734 struct kobject *bat_kobj;
735 char *uevent_env[4] = { NULL, NULL, NULL, NULL };
736
737 primary_if = batadv_primary_if_get_selected(bat_priv);
738 if (!primary_if)
739 goto out;
740
741 bat_kobj = &primary_if->soft_iface->dev.kobj;
742
743 uevent_env[0] = kmalloc(strlen(BATADV_UEV_TYPE_VAR) +
744 strlen(batadv_uev_type_str[type]) + 1,
745 GFP_ATOMIC);
746 if (!uevent_env[0])
747 goto out;
748
749 sprintf(uevent_env[0], "%s%s", BATADV_UEV_TYPE_VAR,
750 batadv_uev_type_str[type]);
751
752 uevent_env[1] = kmalloc(strlen(BATADV_UEV_ACTION_VAR) +
753 strlen(batadv_uev_action_str[action]) + 1,
754 GFP_ATOMIC);
755 if (!uevent_env[1])
756 goto out;
757
758 sprintf(uevent_env[1], "%s%s", BATADV_UEV_ACTION_VAR,
759 batadv_uev_action_str[action]);
760
761 /* If the event is DEL, ignore the data field */
762 if (action != BATADV_UEV_DEL) {
763 uevent_env[2] = kmalloc(strlen(BATADV_UEV_DATA_VAR) +
764 strlen(data) + 1, GFP_ATOMIC);
765 if (!uevent_env[2])
766 goto out;
767
768 sprintf(uevent_env[2], "%s%s", BATADV_UEV_DATA_VAR, data);
769 }
770
771 ret = kobject_uevent_env(bat_kobj, KOBJ_CHANGE, uevent_env);
772out:
773 kfree(uevent_env[0]);
774 kfree(uevent_env[1]);
775 kfree(uevent_env[2]);
776
777 if (primary_if)
778 batadv_hardif_free_ref(primary_if);
779
780 if (ret)
781 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
782 "Impossible to send uevent for (%s,%s,%s) event (err: %d)\n",
783 batadv_uev_type_str[type],
784 batadv_uev_action_str[action],
785 (action == BATADV_UEV_DEL ? "NULL" : data), ret);
786 return ret;
787}
diff --git a/net/batman-adv/bat_sysfs.h b/net/batman-adv/sysfs.h
index fece77ae586..3fd1412b062 100644
--- a/net/batman-adv/bat_sysfs.h
+++ b/net/batman-adv/sysfs.h
@@ -1,5 +1,4 @@
1/* 1/* Copyright (C) 2010-2012 B.A.T.M.A.N. contributors:
2 * Copyright (C) 2010-2012 B.A.T.M.A.N. contributors:
3 * 2 *
4 * Marek Lindner 3 * Marek Lindner
5 * 4 *
@@ -16,17 +15,15 @@
16 * along with this program; if not, write to the Free Software 15 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA 17 * 02110-1301, USA
19 *
20 */ 18 */
21 19
22
23#ifndef _NET_BATMAN_ADV_SYSFS_H_ 20#ifndef _NET_BATMAN_ADV_SYSFS_H_
24#define _NET_BATMAN_ADV_SYSFS_H_ 21#define _NET_BATMAN_ADV_SYSFS_H_
25 22
26#define SYSFS_IF_MESH_SUBDIR "mesh" 23#define BATADV_SYSFS_IF_MESH_SUBDIR "mesh"
27#define SYSFS_IF_BAT_SUBDIR "batman_adv" 24#define BATADV_SYSFS_IF_BAT_SUBDIR "batman_adv"
28 25
29struct bat_attribute { 26struct batadv_attribute {
30 struct attribute attr; 27 struct attribute attr;
31 ssize_t (*show)(struct kobject *kobj, struct attribute *attr, 28 ssize_t (*show)(struct kobject *kobj, struct attribute *attr,
32 char *buf); 29 char *buf);
@@ -34,11 +31,12 @@ struct bat_attribute {
34 char *buf, size_t count); 31 char *buf, size_t count);
35}; 32};
36 33
37int sysfs_add_meshif(struct net_device *dev); 34int batadv_sysfs_add_meshif(struct net_device *dev);
38void sysfs_del_meshif(struct net_device *dev); 35void batadv_sysfs_del_meshif(struct net_device *dev);
39int sysfs_add_hardif(struct kobject **hardif_obj, struct net_device *dev); 36int batadv_sysfs_add_hardif(struct kobject **hardif_obj,
40void sysfs_del_hardif(struct kobject **hardif_obj); 37 struct net_device *dev);
41int throw_uevent(struct bat_priv *bat_priv, enum uev_type type, 38void batadv_sysfs_del_hardif(struct kobject **hardif_obj);
42 enum uev_action action, const char *data); 39int batadv_throw_uevent(struct batadv_priv *bat_priv, enum batadv_uev_type type,
40 enum batadv_uev_action action, const char *data);
43 41
44#endif /* _NET_BATMAN_ADV_SYSFS_H_ */ 42#endif /* _NET_BATMAN_ADV_SYSFS_H_ */
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index 2ab83d7fb1f..a438f4b582f 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -1,5 +1,4 @@
1/* 1/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
2 * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
3 * 2 *
4 * Marek Lindner, Simon Wunderlich, Antonio Quartulli 3 * Marek Lindner, Simon Wunderlich, Antonio Quartulli
5 * 4 *
@@ -16,7 +15,6 @@
16 * along with this program; if not, write to the Free Software 15 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA 17 * 02110-1301, USA
19 *
20 */ 18 */
21 19
22#include "main.h" 20#include "main.h"
@@ -31,44 +29,46 @@
31 29
32#include <linux/crc16.h> 30#include <linux/crc16.h>
33 31
34static void send_roam_adv(struct bat_priv *bat_priv, uint8_t *client, 32static void batadv_send_roam_adv(struct batadv_priv *bat_priv, uint8_t *client,
35 struct orig_node *orig_node); 33 struct batadv_orig_node *orig_node);
36static void tt_purge(struct work_struct *work); 34static void batadv_tt_purge(struct work_struct *work);
37static void tt_global_del_orig_list(struct tt_global_entry *tt_global_entry); 35static void
36batadv_tt_global_del_orig_list(struct batadv_tt_global_entry *tt_global_entry);
38 37
39/* returns 1 if they are the same mac addr */ 38/* returns 1 if they are the same mac addr */
40static int compare_tt(const struct hlist_node *node, const void *data2) 39static int batadv_compare_tt(const struct hlist_node *node, const void *data2)
41{ 40{
42 const void *data1 = container_of(node, struct tt_common_entry, 41 const void *data1 = container_of(node, struct batadv_tt_common_entry,
43 hash_entry); 42 hash_entry);
44 43
45 return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0); 44 return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
46} 45}
47 46
48static void tt_start_timer(struct bat_priv *bat_priv) 47static void batadv_tt_start_timer(struct batadv_priv *bat_priv)
49{ 48{
50 INIT_DELAYED_WORK(&bat_priv->tt_work, tt_purge); 49 INIT_DELAYED_WORK(&bat_priv->tt_work, batadv_tt_purge);
51 queue_delayed_work(bat_event_workqueue, &bat_priv->tt_work, 50 queue_delayed_work(batadv_event_workqueue, &bat_priv->tt_work,
52 msecs_to_jiffies(5000)); 51 msecs_to_jiffies(5000));
53} 52}
54 53
55static struct tt_common_entry *tt_hash_find(struct hashtable_t *hash, 54static struct batadv_tt_common_entry *
56 const void *data) 55batadv_tt_hash_find(struct batadv_hashtable *hash, const void *data)
57{ 56{
58 struct hlist_head *head; 57 struct hlist_head *head;
59 struct hlist_node *node; 58 struct hlist_node *node;
60 struct tt_common_entry *tt_common_entry, *tt_common_entry_tmp = NULL; 59 struct batadv_tt_common_entry *tt_common_entry;
60 struct batadv_tt_common_entry *tt_common_entry_tmp = NULL;
61 uint32_t index; 61 uint32_t index;
62 62
63 if (!hash) 63 if (!hash)
64 return NULL; 64 return NULL;
65 65
66 index = choose_orig(data, hash->size); 66 index = batadv_choose_orig(data, hash->size);
67 head = &hash->table[index]; 67 head = &hash->table[index];
68 68
69 rcu_read_lock(); 69 rcu_read_lock();
70 hlist_for_each_entry_rcu(tt_common_entry, node, head, hash_entry) { 70 hlist_for_each_entry_rcu(tt_common_entry, node, head, hash_entry) {
71 if (!compare_eth(tt_common_entry, data)) 71 if (!batadv_compare_eth(tt_common_entry, data))
72 continue; 72 continue;
73 73
74 if (!atomic_inc_not_zero(&tt_common_entry->refcount)) 74 if (!atomic_inc_not_zero(&tt_common_entry->refcount))
@@ -82,80 +82,87 @@ static struct tt_common_entry *tt_hash_find(struct hashtable_t *hash,
82 return tt_common_entry_tmp; 82 return tt_common_entry_tmp;
83} 83}
84 84
85static struct tt_local_entry *tt_local_hash_find(struct bat_priv *bat_priv, 85static struct batadv_tt_local_entry *
86 const void *data) 86batadv_tt_local_hash_find(struct batadv_priv *bat_priv, const void *data)
87{ 87{
88 struct tt_common_entry *tt_common_entry; 88 struct batadv_tt_common_entry *tt_common_entry;
89 struct tt_local_entry *tt_local_entry = NULL; 89 struct batadv_tt_local_entry *tt_local_entry = NULL;
90 90
91 tt_common_entry = tt_hash_find(bat_priv->tt_local_hash, data); 91 tt_common_entry = batadv_tt_hash_find(bat_priv->tt_local_hash, data);
92 if (tt_common_entry) 92 if (tt_common_entry)
93 tt_local_entry = container_of(tt_common_entry, 93 tt_local_entry = container_of(tt_common_entry,
94 struct tt_local_entry, common); 94 struct batadv_tt_local_entry,
95 common);
95 return tt_local_entry; 96 return tt_local_entry;
96} 97}
97 98
98static struct tt_global_entry *tt_global_hash_find(struct bat_priv *bat_priv, 99static struct batadv_tt_global_entry *
99 const void *data) 100batadv_tt_global_hash_find(struct batadv_priv *bat_priv, const void *data)
100{ 101{
101 struct tt_common_entry *tt_common_entry; 102 struct batadv_tt_common_entry *tt_common_entry;
102 struct tt_global_entry *tt_global_entry = NULL; 103 struct batadv_tt_global_entry *tt_global_entry = NULL;
103 104
104 tt_common_entry = tt_hash_find(bat_priv->tt_global_hash, data); 105 tt_common_entry = batadv_tt_hash_find(bat_priv->tt_global_hash, data);
105 if (tt_common_entry) 106 if (tt_common_entry)
106 tt_global_entry = container_of(tt_common_entry, 107 tt_global_entry = container_of(tt_common_entry,
107 struct tt_global_entry, common); 108 struct batadv_tt_global_entry,
109 common);
108 return tt_global_entry; 110 return tt_global_entry;
109 111
110} 112}
111 113
112static void tt_local_entry_free_ref(struct tt_local_entry *tt_local_entry) 114static void
115batadv_tt_local_entry_free_ref(struct batadv_tt_local_entry *tt_local_entry)
113{ 116{
114 if (atomic_dec_and_test(&tt_local_entry->common.refcount)) 117 if (atomic_dec_and_test(&tt_local_entry->common.refcount))
115 kfree_rcu(tt_local_entry, common.rcu); 118 kfree_rcu(tt_local_entry, common.rcu);
116} 119}
117 120
118static void tt_global_entry_free_rcu(struct rcu_head *rcu) 121static void batadv_tt_global_entry_free_rcu(struct rcu_head *rcu)
119{ 122{
120 struct tt_common_entry *tt_common_entry; 123 struct batadv_tt_common_entry *tt_common_entry;
121 struct tt_global_entry *tt_global_entry; 124 struct batadv_tt_global_entry *tt_global_entry;
122 125
123 tt_common_entry = container_of(rcu, struct tt_common_entry, rcu); 126 tt_common_entry = container_of(rcu, struct batadv_tt_common_entry, rcu);
124 tt_global_entry = container_of(tt_common_entry, struct tt_global_entry, 127 tt_global_entry = container_of(tt_common_entry,
125 common); 128 struct batadv_tt_global_entry, common);
126 129
127 kfree(tt_global_entry); 130 kfree(tt_global_entry);
128} 131}
129 132
130static void tt_global_entry_free_ref(struct tt_global_entry *tt_global_entry) 133static void
134batadv_tt_global_entry_free_ref(struct batadv_tt_global_entry *tt_global_entry)
131{ 135{
132 if (atomic_dec_and_test(&tt_global_entry->common.refcount)) { 136 if (atomic_dec_and_test(&tt_global_entry->common.refcount)) {
133 tt_global_del_orig_list(tt_global_entry); 137 batadv_tt_global_del_orig_list(tt_global_entry);
134 call_rcu(&tt_global_entry->common.rcu, 138 call_rcu(&tt_global_entry->common.rcu,
135 tt_global_entry_free_rcu); 139 batadv_tt_global_entry_free_rcu);
136 } 140 }
137} 141}
138 142
139static void tt_orig_list_entry_free_rcu(struct rcu_head *rcu) 143static void batadv_tt_orig_list_entry_free_rcu(struct rcu_head *rcu)
140{ 144{
141 struct tt_orig_list_entry *orig_entry; 145 struct batadv_tt_orig_list_entry *orig_entry;
142 146
143 orig_entry = container_of(rcu, struct tt_orig_list_entry, rcu); 147 orig_entry = container_of(rcu, struct batadv_tt_orig_list_entry, rcu);
144 orig_node_free_ref(orig_entry->orig_node); 148 batadv_orig_node_free_ref(orig_entry->orig_node);
145 kfree(orig_entry); 149 kfree(orig_entry);
146} 150}
147 151
148static void tt_orig_list_entry_free_ref(struct tt_orig_list_entry *orig_entry) 152static void
153batadv_tt_orig_list_entry_free_ref(struct batadv_tt_orig_list_entry *orig_entry)
149{ 154{
150 /* to avoid race conditions, immediately decrease the tt counter */ 155 /* to avoid race conditions, immediately decrease the tt counter */
151 atomic_dec(&orig_entry->orig_node->tt_size); 156 atomic_dec(&orig_entry->orig_node->tt_size);
152 call_rcu(&orig_entry->rcu, tt_orig_list_entry_free_rcu); 157 call_rcu(&orig_entry->rcu, batadv_tt_orig_list_entry_free_rcu);
153} 158}
154 159
155static void tt_local_event(struct bat_priv *bat_priv, const uint8_t *addr, 160static void batadv_tt_local_event(struct batadv_priv *bat_priv,
156 uint8_t flags) 161 const uint8_t *addr, uint8_t flags)
157{ 162{
158 struct tt_change_node *tt_change_node; 163 struct batadv_tt_change_node *tt_change_node, *entry, *safe;
164 bool event_removed = false;
165 bool del_op_requested, del_op_entry;
159 166
160 tt_change_node = kmalloc(sizeof(*tt_change_node), GFP_ATOMIC); 167 tt_change_node = kmalloc(sizeof(*tt_change_node), GFP_ATOMIC);
161 168
@@ -165,50 +172,82 @@ static void tt_local_event(struct bat_priv *bat_priv, const uint8_t *addr,
165 tt_change_node->change.flags = flags; 172 tt_change_node->change.flags = flags;
166 memcpy(tt_change_node->change.addr, addr, ETH_ALEN); 173 memcpy(tt_change_node->change.addr, addr, ETH_ALEN);
167 174
175 del_op_requested = flags & BATADV_TT_CLIENT_DEL;
176
177 /* check for ADD+DEL or DEL+ADD events */
168 spin_lock_bh(&bat_priv->tt_changes_list_lock); 178 spin_lock_bh(&bat_priv->tt_changes_list_lock);
179 list_for_each_entry_safe(entry, safe, &bat_priv->tt_changes_list,
180 list) {
181 if (!batadv_compare_eth(entry->change.addr, addr))
182 continue;
183
184 /* DEL+ADD in the same orig interval have no effect and can be
185 * removed to avoid silly behaviour on the receiver side. The
186 * other way around (ADD+DEL) can happen in case of roaming of
187 * a client still in the NEW state. Roaming of NEW clients is
188 * now possible due to automatically recognition of "temporary"
189 * clients
190 */
191 del_op_entry = entry->change.flags & BATADV_TT_CLIENT_DEL;
192 if (!del_op_requested && del_op_entry)
193 goto del;
194 if (del_op_requested && !del_op_entry)
195 goto del;
196 continue;
197del:
198 list_del(&entry->list);
199 kfree(entry);
200 event_removed = true;
201 goto unlock;
202 }
203
169 /* track the change in the OGMinterval list */ 204 /* track the change in the OGMinterval list */
170 list_add_tail(&tt_change_node->list, &bat_priv->tt_changes_list); 205 list_add_tail(&tt_change_node->list, &bat_priv->tt_changes_list);
171 atomic_inc(&bat_priv->tt_local_changes); 206
207unlock:
172 spin_unlock_bh(&bat_priv->tt_changes_list_lock); 208 spin_unlock_bh(&bat_priv->tt_changes_list_lock);
173 209
174 atomic_set(&bat_priv->tt_ogm_append_cnt, 0); 210 if (event_removed)
211 atomic_dec(&bat_priv->tt_local_changes);
212 else
213 atomic_inc(&bat_priv->tt_local_changes);
175} 214}
176 215
177int tt_len(int changes_num) 216int batadv_tt_len(int changes_num)
178{ 217{
179 return changes_num * sizeof(struct tt_change); 218 return changes_num * sizeof(struct batadv_tt_change);
180} 219}
181 220
182static int tt_local_init(struct bat_priv *bat_priv) 221static int batadv_tt_local_init(struct batadv_priv *bat_priv)
183{ 222{
184 if (bat_priv->tt_local_hash) 223 if (bat_priv->tt_local_hash)
185 return 1; 224 return 0;
186 225
187 bat_priv->tt_local_hash = hash_new(1024); 226 bat_priv->tt_local_hash = batadv_hash_new(1024);
188 227
189 if (!bat_priv->tt_local_hash) 228 if (!bat_priv->tt_local_hash)
190 return 0; 229 return -ENOMEM;
191 230
192 return 1; 231 return 0;
193} 232}
194 233
195void tt_local_add(struct net_device *soft_iface, const uint8_t *addr, 234void batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
196 int ifindex) 235 int ifindex)
197{ 236{
198 struct bat_priv *bat_priv = netdev_priv(soft_iface); 237 struct batadv_priv *bat_priv = netdev_priv(soft_iface);
199 struct tt_local_entry *tt_local_entry = NULL; 238 struct batadv_tt_local_entry *tt_local_entry = NULL;
200 struct tt_global_entry *tt_global_entry = NULL; 239 struct batadv_tt_global_entry *tt_global_entry = NULL;
201 struct hlist_head *head; 240 struct hlist_head *head;
202 struct hlist_node *node; 241 struct hlist_node *node;
203 struct tt_orig_list_entry *orig_entry; 242 struct batadv_tt_orig_list_entry *orig_entry;
204 int hash_added; 243 int hash_added;
205 244
206 tt_local_entry = tt_local_hash_find(bat_priv, addr); 245 tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr);
207 246
208 if (tt_local_entry) { 247 if (tt_local_entry) {
209 tt_local_entry->last_seen = jiffies; 248 tt_local_entry->last_seen = jiffies;
210 /* possibly unset the TT_CLIENT_PENDING flag */ 249 /* possibly unset the BATADV_TT_CLIENT_PENDING flag */
211 tt_local_entry->common.flags &= ~TT_CLIENT_PENDING; 250 tt_local_entry->common.flags &= ~BATADV_TT_CLIENT_PENDING;
212 goto out; 251 goto out;
213 } 252 }
214 253
@@ -216,40 +255,42 @@ void tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
216 if (!tt_local_entry) 255 if (!tt_local_entry)
217 goto out; 256 goto out;
218 257
219 bat_dbg(DBG_TT, bat_priv, 258 batadv_dbg(BATADV_DBG_TT, bat_priv,
220 "Creating new local tt entry: %pM (ttvn: %d)\n", addr, 259 "Creating new local tt entry: %pM (ttvn: %d)\n", addr,
221 (uint8_t)atomic_read(&bat_priv->ttvn)); 260 (uint8_t)atomic_read(&bat_priv->ttvn));
222 261
223 memcpy(tt_local_entry->common.addr, addr, ETH_ALEN); 262 memcpy(tt_local_entry->common.addr, addr, ETH_ALEN);
224 tt_local_entry->common.flags = NO_FLAGS; 263 tt_local_entry->common.flags = BATADV_NO_FLAGS;
225 if (is_wifi_iface(ifindex)) 264 if (batadv_is_wifi_iface(ifindex))
226 tt_local_entry->common.flags |= TT_CLIENT_WIFI; 265 tt_local_entry->common.flags |= BATADV_TT_CLIENT_WIFI;
227 atomic_set(&tt_local_entry->common.refcount, 2); 266 atomic_set(&tt_local_entry->common.refcount, 2);
228 tt_local_entry->last_seen = jiffies; 267 tt_local_entry->last_seen = jiffies;
229 268
230 /* the batman interface mac address should never be purged */ 269 /* the batman interface mac address should never be purged */
231 if (compare_eth(addr, soft_iface->dev_addr)) 270 if (batadv_compare_eth(addr, soft_iface->dev_addr))
232 tt_local_entry->common.flags |= TT_CLIENT_NOPURGE; 271 tt_local_entry->common.flags |= BATADV_TT_CLIENT_NOPURGE;
233 272
234 /* The local entry has to be marked as NEW to avoid to send it in 273 /* The local entry has to be marked as NEW to avoid to send it in
235 * a full table response going out before the next ttvn increment 274 * a full table response going out before the next ttvn increment
236 * (consistency check) */ 275 * (consistency check)
237 tt_local_entry->common.flags |= TT_CLIENT_NEW; 276 */
277 tt_local_entry->common.flags |= BATADV_TT_CLIENT_NEW;
238 278
239 hash_added = hash_add(bat_priv->tt_local_hash, compare_tt, choose_orig, 279 hash_added = batadv_hash_add(bat_priv->tt_local_hash, batadv_compare_tt,
240 &tt_local_entry->common, 280 batadv_choose_orig,
241 &tt_local_entry->common.hash_entry); 281 &tt_local_entry->common,
282 &tt_local_entry->common.hash_entry);
242 283
243 if (unlikely(hash_added != 0)) { 284 if (unlikely(hash_added != 0)) {
244 /* remove the reference for the hash */ 285 /* remove the reference for the hash */
245 tt_local_entry_free_ref(tt_local_entry); 286 batadv_tt_local_entry_free_ref(tt_local_entry);
246 goto out; 287 goto out;
247 } 288 }
248 289
249 tt_local_event(bat_priv, addr, tt_local_entry->common.flags); 290 batadv_tt_local_event(bat_priv, addr, tt_local_entry->common.flags);
250 291
251 /* remove address from global hash if present */ 292 /* remove address from global hash if present */
252 tt_global_entry = tt_global_hash_find(bat_priv, addr); 293 tt_global_entry = batadv_tt_global_hash_find(bat_priv, addr);
253 294
254 /* Check whether it is a roaming! */ 295 /* Check whether it is a roaming! */
255 if (tt_global_entry) { 296 if (tt_global_entry) {
@@ -259,31 +300,85 @@ void tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
259 hlist_for_each_entry_rcu(orig_entry, node, head, list) { 300 hlist_for_each_entry_rcu(orig_entry, node, head, list) {
260 orig_entry->orig_node->tt_poss_change = true; 301 orig_entry->orig_node->tt_poss_change = true;
261 302
262 send_roam_adv(bat_priv, tt_global_entry->common.addr, 303 batadv_send_roam_adv(bat_priv,
263 orig_entry->orig_node); 304 tt_global_entry->common.addr,
305 orig_entry->orig_node);
264 } 306 }
265 rcu_read_unlock(); 307 rcu_read_unlock();
266 /* The global entry has to be marked as ROAMING and 308 /* The global entry has to be marked as ROAMING and
267 * has to be kept for consistency purpose 309 * has to be kept for consistency purpose
268 */ 310 */
269 tt_global_entry->common.flags |= TT_CLIENT_ROAM; 311 tt_global_entry->common.flags |= BATADV_TT_CLIENT_ROAM;
270 tt_global_entry->roam_at = jiffies; 312 tt_global_entry->roam_at = jiffies;
271 } 313 }
272out: 314out:
273 if (tt_local_entry) 315 if (tt_local_entry)
274 tt_local_entry_free_ref(tt_local_entry); 316 batadv_tt_local_entry_free_ref(tt_local_entry);
275 if (tt_global_entry) 317 if (tt_global_entry)
276 tt_global_entry_free_ref(tt_global_entry); 318 batadv_tt_global_entry_free_ref(tt_global_entry);
319}
320
321static void batadv_tt_realloc_packet_buff(unsigned char **packet_buff,
322 int *packet_buff_len,
323 int min_packet_len,
324 int new_packet_len)
325{
326 unsigned char *new_buff;
327
328 new_buff = kmalloc(new_packet_len, GFP_ATOMIC);
329
330 /* keep old buffer if kmalloc should fail */
331 if (new_buff) {
332 memcpy(new_buff, *packet_buff, min_packet_len);
333 kfree(*packet_buff);
334 *packet_buff = new_buff;
335 *packet_buff_len = new_packet_len;
336 }
337}
338
339static void batadv_tt_prepare_packet_buff(struct batadv_priv *bat_priv,
340 unsigned char **packet_buff,
341 int *packet_buff_len,
342 int min_packet_len)
343{
344 struct batadv_hard_iface *primary_if;
345 int req_len;
346
347 primary_if = batadv_primary_if_get_selected(bat_priv);
348
349 req_len = min_packet_len;
350 req_len += batadv_tt_len(atomic_read(&bat_priv->tt_local_changes));
351
352 /* if we have too many changes for one packet don't send any
353 * and wait for the tt table request which will be fragmented
354 */
355 if ((!primary_if) || (req_len > primary_if->soft_iface->mtu))
356 req_len = min_packet_len;
357
358 batadv_tt_realloc_packet_buff(packet_buff, packet_buff_len,
359 min_packet_len, req_len);
360
361 if (primary_if)
362 batadv_hardif_free_ref(primary_if);
277} 363}
278 364
279int tt_changes_fill_buffer(struct bat_priv *bat_priv, 365static int batadv_tt_changes_fill_buff(struct batadv_priv *bat_priv,
280 unsigned char *buff, int buff_len) 366 unsigned char **packet_buff,
367 int *packet_buff_len,
368 int min_packet_len)
281{ 369{
282 int count = 0, tot_changes = 0; 370 struct batadv_tt_change_node *entry, *safe;
283 struct tt_change_node *entry, *safe; 371 int count = 0, tot_changes = 0, new_len;
372 unsigned char *tt_buff;
284 373
285 if (buff_len > 0) 374 batadv_tt_prepare_packet_buff(bat_priv, packet_buff,
286 tot_changes = buff_len / tt_len(1); 375 packet_buff_len, min_packet_len);
376
377 new_len = *packet_buff_len - min_packet_len;
378 tt_buff = *packet_buff + min_packet_len;
379
380 if (new_len > 0)
381 tot_changes = new_len / batadv_tt_len(1);
287 382
288 spin_lock_bh(&bat_priv->tt_changes_list_lock); 383 spin_lock_bh(&bat_priv->tt_changes_list_lock);
289 atomic_set(&bat_priv->tt_local_changes, 0); 384 atomic_set(&bat_priv->tt_local_changes, 0);
@@ -291,8 +386,8 @@ int tt_changes_fill_buffer(struct bat_priv *bat_priv,
291 list_for_each_entry_safe(entry, safe, &bat_priv->tt_changes_list, 386 list_for_each_entry_safe(entry, safe, &bat_priv->tt_changes_list,
292 list) { 387 list) {
293 if (count < tot_changes) { 388 if (count < tot_changes) {
294 memcpy(buff + tt_len(count), 389 memcpy(tt_buff + batadv_tt_len(count),
295 &entry->change, sizeof(struct tt_change)); 390 &entry->change, sizeof(struct batadv_tt_change));
296 count++; 391 count++;
297 } 392 }
298 list_del(&entry->list); 393 list_del(&entry->list);
@@ -305,37 +400,35 @@ int tt_changes_fill_buffer(struct bat_priv *bat_priv,
305 kfree(bat_priv->tt_buff); 400 kfree(bat_priv->tt_buff);
306 bat_priv->tt_buff_len = 0; 401 bat_priv->tt_buff_len = 0;
307 bat_priv->tt_buff = NULL; 402 bat_priv->tt_buff = NULL;
308 /* We check whether this new OGM has no changes due to size 403 /* check whether this new OGM has no changes due to size problems */
309 * problems */ 404 if (new_len > 0) {
310 if (buff_len > 0) { 405 /* if kmalloc() fails we will reply with the full table
311 /**
312 * if kmalloc() fails we will reply with the full table
313 * instead of providing the diff 406 * instead of providing the diff
314 */ 407 */
315 bat_priv->tt_buff = kmalloc(buff_len, GFP_ATOMIC); 408 bat_priv->tt_buff = kmalloc(new_len, GFP_ATOMIC);
316 if (bat_priv->tt_buff) { 409 if (bat_priv->tt_buff) {
317 memcpy(bat_priv->tt_buff, buff, buff_len); 410 memcpy(bat_priv->tt_buff, tt_buff, new_len);
318 bat_priv->tt_buff_len = buff_len; 411 bat_priv->tt_buff_len = new_len;
319 } 412 }
320 } 413 }
321 spin_unlock_bh(&bat_priv->tt_buff_lock); 414 spin_unlock_bh(&bat_priv->tt_buff_lock);
322 415
323 return tot_changes; 416 return count;
324} 417}
325 418
326int tt_local_seq_print_text(struct seq_file *seq, void *offset) 419int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset)
327{ 420{
328 struct net_device *net_dev = (struct net_device *)seq->private; 421 struct net_device *net_dev = (struct net_device *)seq->private;
329 struct bat_priv *bat_priv = netdev_priv(net_dev); 422 struct batadv_priv *bat_priv = netdev_priv(net_dev);
330 struct hashtable_t *hash = bat_priv->tt_local_hash; 423 struct batadv_hashtable *hash = bat_priv->tt_local_hash;
331 struct tt_common_entry *tt_common_entry; 424 struct batadv_tt_common_entry *tt_common_entry;
332 struct hard_iface *primary_if; 425 struct batadv_hard_iface *primary_if;
333 struct hlist_node *node; 426 struct hlist_node *node;
334 struct hlist_head *head; 427 struct hlist_head *head;
335 uint32_t i; 428 uint32_t i;
336 int ret = 0; 429 int ret = 0;
337 430
338 primary_if = primary_if_get_selected(bat_priv); 431 primary_if = batadv_primary_if_get_selected(bat_priv);
339 if (!primary_if) { 432 if (!primary_if) {
340 ret = seq_printf(seq, 433 ret = seq_printf(seq,
341 "BATMAN mesh %s disabled - please specify interfaces to enable it\n", 434 "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
@@ -343,7 +436,7 @@ int tt_local_seq_print_text(struct seq_file *seq, void *offset)
343 goto out; 436 goto out;
344 } 437 }
345 438
346 if (primary_if->if_status != IF_ACTIVE) { 439 if (primary_if->if_status != BATADV_IF_ACTIVE) {
347 ret = seq_printf(seq, 440 ret = seq_printf(seq,
348 "BATMAN mesh %s disabled - primary interface not active\n", 441 "BATMAN mesh %s disabled - primary interface not active\n",
349 net_dev->name); 442 net_dev->name);
@@ -363,63 +456,94 @@ int tt_local_seq_print_text(struct seq_file *seq, void *offset)
363 seq_printf(seq, " * %pM [%c%c%c%c%c]\n", 456 seq_printf(seq, " * %pM [%c%c%c%c%c]\n",
364 tt_common_entry->addr, 457 tt_common_entry->addr,
365 (tt_common_entry->flags & 458 (tt_common_entry->flags &
366 TT_CLIENT_ROAM ? 'R' : '.'), 459 BATADV_TT_CLIENT_ROAM ? 'R' : '.'),
367 (tt_common_entry->flags & 460 (tt_common_entry->flags &
368 TT_CLIENT_NOPURGE ? 'P' : '.'), 461 BATADV_TT_CLIENT_NOPURGE ? 'P' : '.'),
369 (tt_common_entry->flags & 462 (tt_common_entry->flags &
370 TT_CLIENT_NEW ? 'N' : '.'), 463 BATADV_TT_CLIENT_NEW ? 'N' : '.'),
371 (tt_common_entry->flags & 464 (tt_common_entry->flags &
372 TT_CLIENT_PENDING ? 'X' : '.'), 465 BATADV_TT_CLIENT_PENDING ? 'X' : '.'),
373 (tt_common_entry->flags & 466 (tt_common_entry->flags &
374 TT_CLIENT_WIFI ? 'W' : '.')); 467 BATADV_TT_CLIENT_WIFI ? 'W' : '.'));
375 } 468 }
376 rcu_read_unlock(); 469 rcu_read_unlock();
377 } 470 }
378out: 471out:
379 if (primary_if) 472 if (primary_if)
380 hardif_free_ref(primary_if); 473 batadv_hardif_free_ref(primary_if);
381 return ret; 474 return ret;
382} 475}
383 476
384static void tt_local_set_pending(struct bat_priv *bat_priv, 477static void
385 struct tt_local_entry *tt_local_entry, 478batadv_tt_local_set_pending(struct batadv_priv *bat_priv,
386 uint16_t flags, const char *message) 479 struct batadv_tt_local_entry *tt_local_entry,
480 uint16_t flags, const char *message)
387{ 481{
388 tt_local_event(bat_priv, tt_local_entry->common.addr, 482 batadv_tt_local_event(bat_priv, tt_local_entry->common.addr,
389 tt_local_entry->common.flags | flags); 483 tt_local_entry->common.flags | flags);
390 484
391 /* The local client has to be marked as "pending to be removed" but has 485 /* The local client has to be marked as "pending to be removed" but has
392 * to be kept in the table in order to send it in a full table 486 * to be kept in the table in order to send it in a full table
393 * response issued before the net ttvn increment (consistency check) */ 487 * response issued before the net ttvn increment (consistency check)
394 tt_local_entry->common.flags |= TT_CLIENT_PENDING; 488 */
489 tt_local_entry->common.flags |= BATADV_TT_CLIENT_PENDING;
395 490
396 bat_dbg(DBG_TT, bat_priv, 491 batadv_dbg(BATADV_DBG_TT, bat_priv,
397 "Local tt entry (%pM) pending to be removed: %s\n", 492 "Local tt entry (%pM) pending to be removed: %s\n",
398 tt_local_entry->common.addr, message); 493 tt_local_entry->common.addr, message);
399} 494}
400 495
401void tt_local_remove(struct bat_priv *bat_priv, const uint8_t *addr, 496void batadv_tt_local_remove(struct batadv_priv *bat_priv, const uint8_t *addr,
402 const char *message, bool roaming) 497 const char *message, bool roaming)
403{ 498{
404 struct tt_local_entry *tt_local_entry = NULL; 499 struct batadv_tt_local_entry *tt_local_entry = NULL;
500 uint16_t flags;
405 501
406 tt_local_entry = tt_local_hash_find(bat_priv, addr); 502 tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr);
407 if (!tt_local_entry) 503 if (!tt_local_entry)
408 goto out; 504 goto out;
409 505
410 tt_local_set_pending(bat_priv, tt_local_entry, TT_CLIENT_DEL | 506 flags = BATADV_TT_CLIENT_DEL;
411 (roaming ? TT_CLIENT_ROAM : NO_FLAGS), message); 507 if (roaming)
508 flags |= BATADV_TT_CLIENT_ROAM;
509
510 batadv_tt_local_set_pending(bat_priv, tt_local_entry, flags, message);
412out: 511out:
413 if (tt_local_entry) 512 if (tt_local_entry)
414 tt_local_entry_free_ref(tt_local_entry); 513 batadv_tt_local_entry_free_ref(tt_local_entry);
415} 514}
416 515
417static void tt_local_purge(struct bat_priv *bat_priv) 516static void batadv_tt_local_purge_list(struct batadv_priv *bat_priv,
517 struct hlist_head *head)
418{ 518{
419 struct hashtable_t *hash = bat_priv->tt_local_hash; 519 struct batadv_tt_local_entry *tt_local_entry;
420 struct tt_local_entry *tt_local_entry; 520 struct batadv_tt_common_entry *tt_common_entry;
421 struct tt_common_entry *tt_common_entry;
422 struct hlist_node *node, *node_tmp; 521 struct hlist_node *node, *node_tmp;
522
523 hlist_for_each_entry_safe(tt_common_entry, node, node_tmp, head,
524 hash_entry) {
525 tt_local_entry = container_of(tt_common_entry,
526 struct batadv_tt_local_entry,
527 common);
528 if (tt_local_entry->common.flags & BATADV_TT_CLIENT_NOPURGE)
529 continue;
530
531 /* entry already marked for deletion */
532 if (tt_local_entry->common.flags & BATADV_TT_CLIENT_PENDING)
533 continue;
534
535 if (!batadv_has_timed_out(tt_local_entry->last_seen,
536 BATADV_TT_LOCAL_TIMEOUT))
537 continue;
538
539 batadv_tt_local_set_pending(bat_priv, tt_local_entry,
540 BATADV_TT_CLIENT_DEL, "timed out");
541 }
542}
543
544static void batadv_tt_local_purge(struct batadv_priv *bat_priv)
545{
546 struct batadv_hashtable *hash = bat_priv->tt_local_hash;
423 struct hlist_head *head; 547 struct hlist_head *head;
424 spinlock_t *list_lock; /* protects write access to the hash lists */ 548 spinlock_t *list_lock; /* protects write access to the hash lists */
425 uint32_t i; 549 uint32_t i;
@@ -429,36 +553,18 @@ static void tt_local_purge(struct bat_priv *bat_priv)
429 list_lock = &hash->list_locks[i]; 553 list_lock = &hash->list_locks[i];
430 554
431 spin_lock_bh(list_lock); 555 spin_lock_bh(list_lock);
432 hlist_for_each_entry_safe(tt_common_entry, node, node_tmp, 556 batadv_tt_local_purge_list(bat_priv, head);
433 head, hash_entry) {
434 tt_local_entry = container_of(tt_common_entry,
435 struct tt_local_entry,
436 common);
437 if (tt_local_entry->common.flags & TT_CLIENT_NOPURGE)
438 continue;
439
440 /* entry already marked for deletion */
441 if (tt_local_entry->common.flags & TT_CLIENT_PENDING)
442 continue;
443
444 if (!has_timed_out(tt_local_entry->last_seen,
445 TT_LOCAL_TIMEOUT))
446 continue;
447
448 tt_local_set_pending(bat_priv, tt_local_entry,
449 TT_CLIENT_DEL, "timed out");
450 }
451 spin_unlock_bh(list_lock); 557 spin_unlock_bh(list_lock);
452 } 558 }
453 559
454} 560}
455 561
456static void tt_local_table_free(struct bat_priv *bat_priv) 562static void batadv_tt_local_table_free(struct batadv_priv *bat_priv)
457{ 563{
458 struct hashtable_t *hash; 564 struct batadv_hashtable *hash;
459 spinlock_t *list_lock; /* protects write access to the hash lists */ 565 spinlock_t *list_lock; /* protects write access to the hash lists */
460 struct tt_common_entry *tt_common_entry; 566 struct batadv_tt_common_entry *tt_common_entry;
461 struct tt_local_entry *tt_local_entry; 567 struct batadv_tt_local_entry *tt_local;
462 struct hlist_node *node, *node_tmp; 568 struct hlist_node *node, *node_tmp;
463 struct hlist_head *head; 569 struct hlist_head *head;
464 uint32_t i; 570 uint32_t i;
@@ -476,35 +582,35 @@ static void tt_local_table_free(struct bat_priv *bat_priv)
476 hlist_for_each_entry_safe(tt_common_entry, node, node_tmp, 582 hlist_for_each_entry_safe(tt_common_entry, node, node_tmp,
477 head, hash_entry) { 583 head, hash_entry) {
478 hlist_del_rcu(node); 584 hlist_del_rcu(node);
479 tt_local_entry = container_of(tt_common_entry, 585 tt_local = container_of(tt_common_entry,
480 struct tt_local_entry, 586 struct batadv_tt_local_entry,
481 common); 587 common);
482 tt_local_entry_free_ref(tt_local_entry); 588 batadv_tt_local_entry_free_ref(tt_local);
483 } 589 }
484 spin_unlock_bh(list_lock); 590 spin_unlock_bh(list_lock);
485 } 591 }
486 592
487 hash_destroy(hash); 593 batadv_hash_destroy(hash);
488 594
489 bat_priv->tt_local_hash = NULL; 595 bat_priv->tt_local_hash = NULL;
490} 596}
491 597
492static int tt_global_init(struct bat_priv *bat_priv) 598static int batadv_tt_global_init(struct batadv_priv *bat_priv)
493{ 599{
494 if (bat_priv->tt_global_hash) 600 if (bat_priv->tt_global_hash)
495 return 1; 601 return 0;
496 602
497 bat_priv->tt_global_hash = hash_new(1024); 603 bat_priv->tt_global_hash = batadv_hash_new(1024);
498 604
499 if (!bat_priv->tt_global_hash) 605 if (!bat_priv->tt_global_hash)
500 return 0; 606 return -ENOMEM;
501 607
502 return 1; 608 return 0;
503} 609}
504 610
505static void tt_changes_list_free(struct bat_priv *bat_priv) 611static void batadv_tt_changes_list_free(struct batadv_priv *bat_priv)
506{ 612{
507 struct tt_change_node *entry, *safe; 613 struct batadv_tt_change_node *entry, *safe;
508 614
509 spin_lock_bh(&bat_priv->tt_changes_list_lock); 615 spin_lock_bh(&bat_priv->tt_changes_list_lock);
510 616
@@ -521,10 +627,11 @@ static void tt_changes_list_free(struct bat_priv *bat_priv)
521/* find out if an orig_node is already in the list of a tt_global_entry. 627/* find out if an orig_node is already in the list of a tt_global_entry.
522 * returns 1 if found, 0 otherwise 628 * returns 1 if found, 0 otherwise
523 */ 629 */
524static bool tt_global_entry_has_orig(const struct tt_global_entry *entry, 630static bool
525 const struct orig_node *orig_node) 631batadv_tt_global_entry_has_orig(const struct batadv_tt_global_entry *entry,
632 const struct batadv_orig_node *orig_node)
526{ 633{
527 struct tt_orig_list_entry *tmp_orig_entry; 634 struct batadv_tt_orig_list_entry *tmp_orig_entry;
528 const struct hlist_head *head; 635 const struct hlist_head *head;
529 struct hlist_node *node; 636 struct hlist_node *node;
530 bool found = false; 637 bool found = false;
@@ -541,11 +648,11 @@ static bool tt_global_entry_has_orig(const struct tt_global_entry *entry,
541 return found; 648 return found;
542} 649}
543 650
544static void tt_global_add_orig_entry(struct tt_global_entry *tt_global_entry, 651static void
545 struct orig_node *orig_node, 652batadv_tt_global_add_orig_entry(struct batadv_tt_global_entry *tt_global_entry,
546 int ttvn) 653 struct batadv_orig_node *orig_node, int ttvn)
547{ 654{
548 struct tt_orig_list_entry *orig_entry; 655 struct batadv_tt_orig_list_entry *orig_entry;
549 656
550 orig_entry = kzalloc(sizeof(*orig_entry), GFP_ATOMIC); 657 orig_entry = kzalloc(sizeof(*orig_entry), GFP_ATOMIC);
551 if (!orig_entry) 658 if (!orig_entry)
@@ -564,91 +671,95 @@ static void tt_global_add_orig_entry(struct tt_global_entry *tt_global_entry,
564} 671}
565 672
566/* caller must hold orig_node refcount */ 673/* caller must hold orig_node refcount */
567int tt_global_add(struct bat_priv *bat_priv, struct orig_node *orig_node, 674int batadv_tt_global_add(struct batadv_priv *bat_priv,
568 const unsigned char *tt_addr, uint8_t ttvn, bool roaming, 675 struct batadv_orig_node *orig_node,
569 bool wifi) 676 const unsigned char *tt_addr, uint8_t flags,
677 uint8_t ttvn)
570{ 678{
571 struct tt_global_entry *tt_global_entry = NULL; 679 struct batadv_tt_global_entry *tt_global_entry = NULL;
572 int ret = 0; 680 int ret = 0;
573 int hash_added; 681 int hash_added;
682 struct batadv_tt_common_entry *common;
574 683
575 tt_global_entry = tt_global_hash_find(bat_priv, tt_addr); 684 tt_global_entry = batadv_tt_global_hash_find(bat_priv, tt_addr);
576 685
577 if (!tt_global_entry) { 686 if (!tt_global_entry) {
578 tt_global_entry = kzalloc(sizeof(*tt_global_entry), 687 tt_global_entry = kzalloc(sizeof(*tt_global_entry), GFP_ATOMIC);
579 GFP_ATOMIC);
580 if (!tt_global_entry) 688 if (!tt_global_entry)
581 goto out; 689 goto out;
582 690
583 memcpy(tt_global_entry->common.addr, tt_addr, ETH_ALEN); 691 common = &tt_global_entry->common;
692 memcpy(common->addr, tt_addr, ETH_ALEN);
584 693
585 tt_global_entry->common.flags = NO_FLAGS; 694 common->flags = flags;
586 tt_global_entry->roam_at = 0; 695 tt_global_entry->roam_at = 0;
587 atomic_set(&tt_global_entry->common.refcount, 2); 696 atomic_set(&common->refcount, 2);
588 697
589 INIT_HLIST_HEAD(&tt_global_entry->orig_list); 698 INIT_HLIST_HEAD(&tt_global_entry->orig_list);
590 spin_lock_init(&tt_global_entry->list_lock); 699 spin_lock_init(&tt_global_entry->list_lock);
591 700
592 hash_added = hash_add(bat_priv->tt_global_hash, compare_tt, 701 hash_added = batadv_hash_add(bat_priv->tt_global_hash,
593 choose_orig, &tt_global_entry->common, 702 batadv_compare_tt,
594 &tt_global_entry->common.hash_entry); 703 batadv_choose_orig, common,
704 &common->hash_entry);
595 705
596 if (unlikely(hash_added != 0)) { 706 if (unlikely(hash_added != 0)) {
597 /* remove the reference for the hash */ 707 /* remove the reference for the hash */
598 tt_global_entry_free_ref(tt_global_entry); 708 batadv_tt_global_entry_free_ref(tt_global_entry);
599 goto out_remove; 709 goto out_remove;
600 } 710 }
601 711
602 tt_global_add_orig_entry(tt_global_entry, orig_node, ttvn); 712 batadv_tt_global_add_orig_entry(tt_global_entry, orig_node,
713 ttvn);
603 } else { 714 } else {
604 /* there is already a global entry, use this one. */ 715 /* there is already a global entry, use this one. */
605 716
606 /* If there is the TT_CLIENT_ROAM flag set, there is only one 717 /* If there is the BATADV_TT_CLIENT_ROAM flag set, there is only
607 * originator left in the list and we previously received a 718 * one originator left in the list and we previously received a
608 * delete + roaming change for this originator. 719 * delete + roaming change for this originator.
609 * 720 *
610 * We should first delete the old originator before adding the 721 * We should first delete the old originator before adding the
611 * new one. 722 * new one.
612 */ 723 */
613 if (tt_global_entry->common.flags & TT_CLIENT_ROAM) { 724 if (tt_global_entry->common.flags & BATADV_TT_CLIENT_ROAM) {
614 tt_global_del_orig_list(tt_global_entry); 725 batadv_tt_global_del_orig_list(tt_global_entry);
615 tt_global_entry->common.flags &= ~TT_CLIENT_ROAM; 726 tt_global_entry->common.flags &= ~BATADV_TT_CLIENT_ROAM;
616 tt_global_entry->roam_at = 0; 727 tt_global_entry->roam_at = 0;
617 } 728 }
618 729
619 if (!tt_global_entry_has_orig(tt_global_entry, orig_node)) 730 if (!batadv_tt_global_entry_has_orig(tt_global_entry,
620 tt_global_add_orig_entry(tt_global_entry, orig_node, 731 orig_node))
621 ttvn); 732 batadv_tt_global_add_orig_entry(tt_global_entry,
733 orig_node, ttvn);
622 } 734 }
623 735
624 if (wifi) 736 batadv_dbg(BATADV_DBG_TT, bat_priv,
625 tt_global_entry->common.flags |= TT_CLIENT_WIFI; 737 "Creating new global tt entry: %pM (via %pM)\n",
626 738 tt_global_entry->common.addr, orig_node->orig);
627 bat_dbg(DBG_TT, bat_priv,
628 "Creating new global tt entry: %pM (via %pM)\n",
629 tt_global_entry->common.addr, orig_node->orig);
630 739
631out_remove: 740out_remove:
632 /* remove address from local hash if present */ 741 /* remove address from local hash if present */
633 tt_local_remove(bat_priv, tt_global_entry->common.addr, 742 batadv_tt_local_remove(bat_priv, tt_global_entry->common.addr,
634 "global tt received", roaming); 743 "global tt received",
744 flags & BATADV_TT_CLIENT_ROAM);
635 ret = 1; 745 ret = 1;
636out: 746out:
637 if (tt_global_entry) 747 if (tt_global_entry)
638 tt_global_entry_free_ref(tt_global_entry); 748 batadv_tt_global_entry_free_ref(tt_global_entry);
639 return ret; 749 return ret;
640} 750}
641 751
642/* print all orig nodes who announce the address for this global entry. 752/* print all orig nodes who announce the address for this global entry.
643 * it is assumed that the caller holds rcu_read_lock(); 753 * it is assumed that the caller holds rcu_read_lock();
644 */ 754 */
645static void tt_global_print_entry(struct tt_global_entry *tt_global_entry, 755static void
646 struct seq_file *seq) 756batadv_tt_global_print_entry(struct batadv_tt_global_entry *tt_global_entry,
757 struct seq_file *seq)
647{ 758{
648 struct hlist_head *head; 759 struct hlist_head *head;
649 struct hlist_node *node; 760 struct hlist_node *node;
650 struct tt_orig_list_entry *orig_entry; 761 struct batadv_tt_orig_list_entry *orig_entry;
651 struct tt_common_entry *tt_common_entry; 762 struct batadv_tt_common_entry *tt_common_entry;
652 uint16_t flags; 763 uint16_t flags;
653 uint8_t last_ttvn; 764 uint8_t last_ttvn;
654 765
@@ -662,25 +773,25 @@ static void tt_global_print_entry(struct tt_global_entry *tt_global_entry,
662 seq_printf(seq, " * %pM (%3u) via %pM (%3u) [%c%c]\n", 773 seq_printf(seq, " * %pM (%3u) via %pM (%3u) [%c%c]\n",
663 tt_global_entry->common.addr, orig_entry->ttvn, 774 tt_global_entry->common.addr, orig_entry->ttvn,
664 orig_entry->orig_node->orig, last_ttvn, 775 orig_entry->orig_node->orig, last_ttvn,
665 (flags & TT_CLIENT_ROAM ? 'R' : '.'), 776 (flags & BATADV_TT_CLIENT_ROAM ? 'R' : '.'),
666 (flags & TT_CLIENT_WIFI ? 'W' : '.')); 777 (flags & BATADV_TT_CLIENT_WIFI ? 'W' : '.'));
667 } 778 }
668} 779}
669 780
670int tt_global_seq_print_text(struct seq_file *seq, void *offset) 781int batadv_tt_global_seq_print_text(struct seq_file *seq, void *offset)
671{ 782{
672 struct net_device *net_dev = (struct net_device *)seq->private; 783 struct net_device *net_dev = (struct net_device *)seq->private;
673 struct bat_priv *bat_priv = netdev_priv(net_dev); 784 struct batadv_priv *bat_priv = netdev_priv(net_dev);
674 struct hashtable_t *hash = bat_priv->tt_global_hash; 785 struct batadv_hashtable *hash = bat_priv->tt_global_hash;
675 struct tt_common_entry *tt_common_entry; 786 struct batadv_tt_common_entry *tt_common_entry;
676 struct tt_global_entry *tt_global_entry; 787 struct batadv_tt_global_entry *tt_global;
677 struct hard_iface *primary_if; 788 struct batadv_hard_iface *primary_if;
678 struct hlist_node *node; 789 struct hlist_node *node;
679 struct hlist_head *head; 790 struct hlist_head *head;
680 uint32_t i; 791 uint32_t i;
681 int ret = 0; 792 int ret = 0;
682 793
683 primary_if = primary_if_get_selected(bat_priv); 794 primary_if = batadv_primary_if_get_selected(bat_priv);
684 if (!primary_if) { 795 if (!primary_if) {
685 ret = seq_printf(seq, 796 ret = seq_printf(seq,
686 "BATMAN mesh %s disabled - please specify interfaces to enable it\n", 797 "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
@@ -688,7 +799,7 @@ int tt_global_seq_print_text(struct seq_file *seq, void *offset)
688 goto out; 799 goto out;
689 } 800 }
690 801
691 if (primary_if->if_status != IF_ACTIVE) { 802 if (primary_if->if_status != BATADV_IF_ACTIVE) {
692 ret = seq_printf(seq, 803 ret = seq_printf(seq,
693 "BATMAN mesh %s disabled - primary interface not active\n", 804 "BATMAN mesh %s disabled - primary interface not active\n",
694 net_dev->name); 805 net_dev->name);
@@ -707,87 +818,91 @@ int tt_global_seq_print_text(struct seq_file *seq, void *offset)
707 rcu_read_lock(); 818 rcu_read_lock();
708 hlist_for_each_entry_rcu(tt_common_entry, node, 819 hlist_for_each_entry_rcu(tt_common_entry, node,
709 head, hash_entry) { 820 head, hash_entry) {
710 tt_global_entry = container_of(tt_common_entry, 821 tt_global = container_of(tt_common_entry,
711 struct tt_global_entry, 822 struct batadv_tt_global_entry,
712 common); 823 common);
713 tt_global_print_entry(tt_global_entry, seq); 824 batadv_tt_global_print_entry(tt_global, seq);
714 } 825 }
715 rcu_read_unlock(); 826 rcu_read_unlock();
716 } 827 }
717out: 828out:
718 if (primary_if) 829 if (primary_if)
719 hardif_free_ref(primary_if); 830 batadv_hardif_free_ref(primary_if);
720 return ret; 831 return ret;
721} 832}
722 833
723/* deletes the orig list of a tt_global_entry */ 834/* deletes the orig list of a tt_global_entry */
724static void tt_global_del_orig_list(struct tt_global_entry *tt_global_entry) 835static void
836batadv_tt_global_del_orig_list(struct batadv_tt_global_entry *tt_global_entry)
725{ 837{
726 struct hlist_head *head; 838 struct hlist_head *head;
727 struct hlist_node *node, *safe; 839 struct hlist_node *node, *safe;
728 struct tt_orig_list_entry *orig_entry; 840 struct batadv_tt_orig_list_entry *orig_entry;
729 841
730 spin_lock_bh(&tt_global_entry->list_lock); 842 spin_lock_bh(&tt_global_entry->list_lock);
731 head = &tt_global_entry->orig_list; 843 head = &tt_global_entry->orig_list;
732 hlist_for_each_entry_safe(orig_entry, node, safe, head, list) { 844 hlist_for_each_entry_safe(orig_entry, node, safe, head, list) {
733 hlist_del_rcu(node); 845 hlist_del_rcu(node);
734 tt_orig_list_entry_free_ref(orig_entry); 846 batadv_tt_orig_list_entry_free_ref(orig_entry);
735 } 847 }
736 spin_unlock_bh(&tt_global_entry->list_lock); 848 spin_unlock_bh(&tt_global_entry->list_lock);
737 849
738} 850}
739 851
740static void tt_global_del_orig_entry(struct bat_priv *bat_priv, 852static void
741 struct tt_global_entry *tt_global_entry, 853batadv_tt_global_del_orig_entry(struct batadv_priv *bat_priv,
742 struct orig_node *orig_node, 854 struct batadv_tt_global_entry *tt_global_entry,
743 const char *message) 855 struct batadv_orig_node *orig_node,
856 const char *message)
744{ 857{
745 struct hlist_head *head; 858 struct hlist_head *head;
746 struct hlist_node *node, *safe; 859 struct hlist_node *node, *safe;
747 struct tt_orig_list_entry *orig_entry; 860 struct batadv_tt_orig_list_entry *orig_entry;
748 861
749 spin_lock_bh(&tt_global_entry->list_lock); 862 spin_lock_bh(&tt_global_entry->list_lock);
750 head = &tt_global_entry->orig_list; 863 head = &tt_global_entry->orig_list;
751 hlist_for_each_entry_safe(orig_entry, node, safe, head, list) { 864 hlist_for_each_entry_safe(orig_entry, node, safe, head, list) {
752 if (orig_entry->orig_node == orig_node) { 865 if (orig_entry->orig_node == orig_node) {
753 bat_dbg(DBG_TT, bat_priv, 866 batadv_dbg(BATADV_DBG_TT, bat_priv,
754 "Deleting %pM from global tt entry %pM: %s\n", 867 "Deleting %pM from global tt entry %pM: %s\n",
755 orig_node->orig, tt_global_entry->common.addr, 868 orig_node->orig,
756 message); 869 tt_global_entry->common.addr, message);
757 hlist_del_rcu(node); 870 hlist_del_rcu(node);
758 tt_orig_list_entry_free_ref(orig_entry); 871 batadv_tt_orig_list_entry_free_ref(orig_entry);
759 } 872 }
760 } 873 }
761 spin_unlock_bh(&tt_global_entry->list_lock); 874 spin_unlock_bh(&tt_global_entry->list_lock);
762} 875}
763 876
764static void tt_global_del_struct(struct bat_priv *bat_priv, 877static void
765 struct tt_global_entry *tt_global_entry, 878batadv_tt_global_del_struct(struct batadv_priv *bat_priv,
766 const char *message) 879 struct batadv_tt_global_entry *tt_global_entry,
880 const char *message)
767{ 881{
768 bat_dbg(DBG_TT, bat_priv, 882 batadv_dbg(BATADV_DBG_TT, bat_priv,
769 "Deleting global tt entry %pM: %s\n", 883 "Deleting global tt entry %pM: %s\n",
770 tt_global_entry->common.addr, message); 884 tt_global_entry->common.addr, message);
771 885
772 hash_remove(bat_priv->tt_global_hash, compare_tt, choose_orig, 886 batadv_hash_remove(bat_priv->tt_global_hash, batadv_compare_tt,
773 tt_global_entry->common.addr); 887 batadv_choose_orig, tt_global_entry->common.addr);
774 tt_global_entry_free_ref(tt_global_entry); 888 batadv_tt_global_entry_free_ref(tt_global_entry);
775 889
776} 890}
777 891
778/* If the client is to be deleted, we check if it is the last origantor entry 892/* If the client is to be deleted, we check if it is the last origantor entry
779 * within tt_global entry. If yes, we set the TT_CLIENT_ROAM flag and the timer, 893 * within tt_global entry. If yes, we set the BATADV_TT_CLIENT_ROAM flag and the
780 * otherwise we simply remove the originator scheduled for deletion. 894 * timer, otherwise we simply remove the originator scheduled for deletion.
781 */ 895 */
782static void tt_global_del_roaming(struct bat_priv *bat_priv, 896static void
783 struct tt_global_entry *tt_global_entry, 897batadv_tt_global_del_roaming(struct batadv_priv *bat_priv,
784 struct orig_node *orig_node, 898 struct batadv_tt_global_entry *tt_global_entry,
785 const char *message) 899 struct batadv_orig_node *orig_node,
900 const char *message)
786{ 901{
787 bool last_entry = true; 902 bool last_entry = true;
788 struct hlist_head *head; 903 struct hlist_head *head;
789 struct hlist_node *node; 904 struct hlist_node *node;
790 struct tt_orig_list_entry *orig_entry; 905 struct batadv_tt_orig_list_entry *orig_entry;
791 906
792 /* no local entry exists, case 1: 907 /* no local entry exists, case 1:
793 * Check if this is the last one or if other entries exist. 908 * Check if this is the last one or if other entries exist.
@@ -805,37 +920,37 @@ static void tt_global_del_roaming(struct bat_priv *bat_priv,
805 920
806 if (last_entry) { 921 if (last_entry) {
807 /* its the last one, mark for roaming. */ 922 /* its the last one, mark for roaming. */
808 tt_global_entry->common.flags |= TT_CLIENT_ROAM; 923 tt_global_entry->common.flags |= BATADV_TT_CLIENT_ROAM;
809 tt_global_entry->roam_at = jiffies; 924 tt_global_entry->roam_at = jiffies;
810 } else 925 } else
811 /* there is another entry, we can simply delete this 926 /* there is another entry, we can simply delete this
812 * one and can still use the other one. 927 * one and can still use the other one.
813 */ 928 */
814 tt_global_del_orig_entry(bat_priv, tt_global_entry, 929 batadv_tt_global_del_orig_entry(bat_priv, tt_global_entry,
815 orig_node, message); 930 orig_node, message);
816} 931}
817 932
818 933
819 934
820static void tt_global_del(struct bat_priv *bat_priv, 935static void batadv_tt_global_del(struct batadv_priv *bat_priv,
821 struct orig_node *orig_node, 936 struct batadv_orig_node *orig_node,
822 const unsigned char *addr, 937 const unsigned char *addr,
823 const char *message, bool roaming) 938 const char *message, bool roaming)
824{ 939{
825 struct tt_global_entry *tt_global_entry = NULL; 940 struct batadv_tt_global_entry *tt_global_entry = NULL;
826 struct tt_local_entry *tt_local_entry = NULL; 941 struct batadv_tt_local_entry *local_entry = NULL;
827 942
828 tt_global_entry = tt_global_hash_find(bat_priv, addr); 943 tt_global_entry = batadv_tt_global_hash_find(bat_priv, addr);
829 if (!tt_global_entry) 944 if (!tt_global_entry)
830 goto out; 945 goto out;
831 946
832 if (!roaming) { 947 if (!roaming) {
833 tt_global_del_orig_entry(bat_priv, tt_global_entry, orig_node, 948 batadv_tt_global_del_orig_entry(bat_priv, tt_global_entry,
834 message); 949 orig_node, message);
835 950
836 if (hlist_empty(&tt_global_entry->orig_list)) 951 if (hlist_empty(&tt_global_entry->orig_list))
837 tt_global_del_struct(bat_priv, tt_global_entry, 952 batadv_tt_global_del_struct(bat_priv, tt_global_entry,
838 message); 953 message);
839 954
840 goto out; 955 goto out;
841 } 956 }
@@ -844,41 +959,42 @@ static void tt_global_del(struct bat_priv *bat_priv,
844 * event, there are two possibilities: 959 * event, there are two possibilities:
845 * 1) the client roamed from node A to node B => if there 960 * 1) the client roamed from node A to node B => if there
846 * is only one originator left for this client, we mark 961 * is only one originator left for this client, we mark
847 * it with TT_CLIENT_ROAM, we start a timer and we 962 * it with BATADV_TT_CLIENT_ROAM, we start a timer and we
848 * wait for node B to claim it. In case of timeout 963 * wait for node B to claim it. In case of timeout
849 * the entry is purged. 964 * the entry is purged.
850 * 965 *
851 * If there are other originators left, we directly delete 966 * If there are other originators left, we directly delete
852 * the originator. 967 * the originator.
853 * 2) the client roamed to us => we can directly delete 968 * 2) the client roamed to us => we can directly delete
854 * the global entry, since it is useless now. */ 969 * the global entry, since it is useless now.
855 970 */
856 tt_local_entry = tt_local_hash_find(bat_priv, 971 local_entry = batadv_tt_local_hash_find(bat_priv,
857 tt_global_entry->common.addr); 972 tt_global_entry->common.addr);
858 if (tt_local_entry) { 973 if (local_entry) {
859 /* local entry exists, case 2: client roamed to us. */ 974 /* local entry exists, case 2: client roamed to us. */
860 tt_global_del_orig_list(tt_global_entry); 975 batadv_tt_global_del_orig_list(tt_global_entry);
861 tt_global_del_struct(bat_priv, tt_global_entry, message); 976 batadv_tt_global_del_struct(bat_priv, tt_global_entry, message);
862 } else 977 } else
863 /* no local entry exists, case 1: check for roaming */ 978 /* no local entry exists, case 1: check for roaming */
864 tt_global_del_roaming(bat_priv, tt_global_entry, orig_node, 979 batadv_tt_global_del_roaming(bat_priv, tt_global_entry,
865 message); 980 orig_node, message);
866 981
867 982
868out: 983out:
869 if (tt_global_entry) 984 if (tt_global_entry)
870 tt_global_entry_free_ref(tt_global_entry); 985 batadv_tt_global_entry_free_ref(tt_global_entry);
871 if (tt_local_entry) 986 if (local_entry)
872 tt_local_entry_free_ref(tt_local_entry); 987 batadv_tt_local_entry_free_ref(local_entry);
873} 988}
874 989
875void tt_global_del_orig(struct bat_priv *bat_priv, 990void batadv_tt_global_del_orig(struct batadv_priv *bat_priv,
876 struct orig_node *orig_node, const char *message) 991 struct batadv_orig_node *orig_node,
992 const char *message)
877{ 993{
878 struct tt_global_entry *tt_global_entry; 994 struct batadv_tt_global_entry *tt_global;
879 struct tt_common_entry *tt_common_entry; 995 struct batadv_tt_common_entry *tt_common_entry;
880 uint32_t i; 996 uint32_t i;
881 struct hashtable_t *hash = bat_priv->tt_global_hash; 997 struct batadv_hashtable *hash = bat_priv->tt_global_hash;
882 struct hlist_node *node, *safe; 998 struct hlist_node *node, *safe;
883 struct hlist_head *head; 999 struct hlist_head *head;
884 spinlock_t *list_lock; /* protects write access to the hash lists */ 1000 spinlock_t *list_lock; /* protects write access to the hash lists */
@@ -893,20 +1009,19 @@ void tt_global_del_orig(struct bat_priv *bat_priv,
893 spin_lock_bh(list_lock); 1009 spin_lock_bh(list_lock);
894 hlist_for_each_entry_safe(tt_common_entry, node, safe, 1010 hlist_for_each_entry_safe(tt_common_entry, node, safe,
895 head, hash_entry) { 1011 head, hash_entry) {
896 tt_global_entry = container_of(tt_common_entry, 1012 tt_global = container_of(tt_common_entry,
897 struct tt_global_entry, 1013 struct batadv_tt_global_entry,
898 common); 1014 common);
899 1015
900 tt_global_del_orig_entry(bat_priv, tt_global_entry, 1016 batadv_tt_global_del_orig_entry(bat_priv, tt_global,
901 orig_node, message); 1017 orig_node, message);
902 1018
903 if (hlist_empty(&tt_global_entry->orig_list)) { 1019 if (hlist_empty(&tt_global->orig_list)) {
904 bat_dbg(DBG_TT, bat_priv, 1020 batadv_dbg(BATADV_DBG_TT, bat_priv,
905 "Deleting global tt entry %pM: %s\n", 1021 "Deleting global tt entry %pM: %s\n",
906 tt_global_entry->common.addr, 1022 tt_global->common.addr, message);
907 message);
908 hlist_del_rcu(node); 1023 hlist_del_rcu(node);
909 tt_global_entry_free_ref(tt_global_entry); 1024 batadv_tt_global_entry_free_ref(tt_global);
910 } 1025 }
911 } 1026 }
912 spin_unlock_bh(list_lock); 1027 spin_unlock_bh(list_lock);
@@ -914,12 +1029,36 @@ void tt_global_del_orig(struct bat_priv *bat_priv,
914 orig_node->tt_initialised = false; 1029 orig_node->tt_initialised = false;
915} 1030}
916 1031
917static void tt_global_roam_purge(struct bat_priv *bat_priv) 1032static void batadv_tt_global_roam_purge_list(struct batadv_priv *bat_priv,
1033 struct hlist_head *head)
918{ 1034{
919 struct hashtable_t *hash = bat_priv->tt_global_hash; 1035 struct batadv_tt_common_entry *tt_common_entry;
920 struct tt_common_entry *tt_common_entry; 1036 struct batadv_tt_global_entry *tt_global_entry;
921 struct tt_global_entry *tt_global_entry;
922 struct hlist_node *node, *node_tmp; 1037 struct hlist_node *node, *node_tmp;
1038
1039 hlist_for_each_entry_safe(tt_common_entry, node, node_tmp, head,
1040 hash_entry) {
1041 tt_global_entry = container_of(tt_common_entry,
1042 struct batadv_tt_global_entry,
1043 common);
1044 if (!(tt_global_entry->common.flags & BATADV_TT_CLIENT_ROAM))
1045 continue;
1046 if (!batadv_has_timed_out(tt_global_entry->roam_at,
1047 BATADV_TT_CLIENT_ROAM_TIMEOUT))
1048 continue;
1049
1050 batadv_dbg(BATADV_DBG_TT, bat_priv,
1051 "Deleting global tt entry (%pM): Roaming timeout\n",
1052 tt_global_entry->common.addr);
1053
1054 hlist_del_rcu(node);
1055 batadv_tt_global_entry_free_ref(tt_global_entry);
1056 }
1057}
1058
1059static void batadv_tt_global_roam_purge(struct batadv_priv *bat_priv)
1060{
1061 struct batadv_hashtable *hash = bat_priv->tt_global_hash;
923 struct hlist_head *head; 1062 struct hlist_head *head;
924 spinlock_t *list_lock; /* protects write access to the hash lists */ 1063 spinlock_t *list_lock; /* protects write access to the hash lists */
925 uint32_t i; 1064 uint32_t i;
@@ -929,35 +1068,18 @@ static void tt_global_roam_purge(struct bat_priv *bat_priv)
929 list_lock = &hash->list_locks[i]; 1068 list_lock = &hash->list_locks[i];
930 1069
931 spin_lock_bh(list_lock); 1070 spin_lock_bh(list_lock);
932 hlist_for_each_entry_safe(tt_common_entry, node, node_tmp, 1071 batadv_tt_global_roam_purge_list(bat_priv, head);
933 head, hash_entry) {
934 tt_global_entry = container_of(tt_common_entry,
935 struct tt_global_entry,
936 common);
937 if (!(tt_global_entry->common.flags & TT_CLIENT_ROAM))
938 continue;
939 if (!has_timed_out(tt_global_entry->roam_at,
940 TT_CLIENT_ROAM_TIMEOUT))
941 continue;
942
943 bat_dbg(DBG_TT, bat_priv,
944 "Deleting global tt entry (%pM): Roaming timeout\n",
945 tt_global_entry->common.addr);
946
947 hlist_del_rcu(node);
948 tt_global_entry_free_ref(tt_global_entry);
949 }
950 spin_unlock_bh(list_lock); 1072 spin_unlock_bh(list_lock);
951 } 1073 }
952 1074
953} 1075}
954 1076
955static void tt_global_table_free(struct bat_priv *bat_priv) 1077static void batadv_tt_global_table_free(struct batadv_priv *bat_priv)
956{ 1078{
957 struct hashtable_t *hash; 1079 struct batadv_hashtable *hash;
958 spinlock_t *list_lock; /* protects write access to the hash lists */ 1080 spinlock_t *list_lock; /* protects write access to the hash lists */
959 struct tt_common_entry *tt_common_entry; 1081 struct batadv_tt_common_entry *tt_common_entry;
960 struct tt_global_entry *tt_global_entry; 1082 struct batadv_tt_global_entry *tt_global;
961 struct hlist_node *node, *node_tmp; 1083 struct hlist_node *node, *node_tmp;
962 struct hlist_head *head; 1084 struct hlist_head *head;
963 uint32_t i; 1085 uint32_t i;
@@ -975,56 +1097,60 @@ static void tt_global_table_free(struct bat_priv *bat_priv)
975 hlist_for_each_entry_safe(tt_common_entry, node, node_tmp, 1097 hlist_for_each_entry_safe(tt_common_entry, node, node_tmp,
976 head, hash_entry) { 1098 head, hash_entry) {
977 hlist_del_rcu(node); 1099 hlist_del_rcu(node);
978 tt_global_entry = container_of(tt_common_entry, 1100 tt_global = container_of(tt_common_entry,
979 struct tt_global_entry, 1101 struct batadv_tt_global_entry,
980 common); 1102 common);
981 tt_global_entry_free_ref(tt_global_entry); 1103 batadv_tt_global_entry_free_ref(tt_global);
982 } 1104 }
983 spin_unlock_bh(list_lock); 1105 spin_unlock_bh(list_lock);
984 } 1106 }
985 1107
986 hash_destroy(hash); 1108 batadv_hash_destroy(hash);
987 1109
988 bat_priv->tt_global_hash = NULL; 1110 bat_priv->tt_global_hash = NULL;
989} 1111}
990 1112
991static bool _is_ap_isolated(struct tt_local_entry *tt_local_entry, 1113static bool
992 struct tt_global_entry *tt_global_entry) 1114_batadv_is_ap_isolated(struct batadv_tt_local_entry *tt_local_entry,
1115 struct batadv_tt_global_entry *tt_global_entry)
993{ 1116{
994 bool ret = false; 1117 bool ret = false;
995 1118
996 if (tt_local_entry->common.flags & TT_CLIENT_WIFI && 1119 if (tt_local_entry->common.flags & BATADV_TT_CLIENT_WIFI &&
997 tt_global_entry->common.flags & TT_CLIENT_WIFI) 1120 tt_global_entry->common.flags & BATADV_TT_CLIENT_WIFI)
998 ret = true; 1121 ret = true;
999 1122
1000 return ret; 1123 return ret;
1001} 1124}
1002 1125
1003struct orig_node *transtable_search(struct bat_priv *bat_priv, 1126struct batadv_orig_node *batadv_transtable_search(struct batadv_priv *bat_priv,
1004 const uint8_t *src, const uint8_t *addr) 1127 const uint8_t *src,
1128 const uint8_t *addr)
1005{ 1129{
1006 struct tt_local_entry *tt_local_entry = NULL; 1130 struct batadv_tt_local_entry *tt_local_entry = NULL;
1007 struct tt_global_entry *tt_global_entry = NULL; 1131 struct batadv_tt_global_entry *tt_global_entry = NULL;
1008 struct orig_node *orig_node = NULL; 1132 struct batadv_orig_node *orig_node = NULL;
1009 struct neigh_node *router = NULL; 1133 struct batadv_neigh_node *router = NULL;
1010 struct hlist_head *head; 1134 struct hlist_head *head;
1011 struct hlist_node *node; 1135 struct hlist_node *node;
1012 struct tt_orig_list_entry *orig_entry; 1136 struct batadv_tt_orig_list_entry *orig_entry;
1013 int best_tq; 1137 int best_tq;
1014 1138
1015 if (src && atomic_read(&bat_priv->ap_isolation)) { 1139 if (src && atomic_read(&bat_priv->ap_isolation)) {
1016 tt_local_entry = tt_local_hash_find(bat_priv, src); 1140 tt_local_entry = batadv_tt_local_hash_find(bat_priv, src);
1017 if (!tt_local_entry) 1141 if (!tt_local_entry)
1018 goto out; 1142 goto out;
1019 } 1143 }
1020 1144
1021 tt_global_entry = tt_global_hash_find(bat_priv, addr); 1145 tt_global_entry = batadv_tt_global_hash_find(bat_priv, addr);
1022 if (!tt_global_entry) 1146 if (!tt_global_entry)
1023 goto out; 1147 goto out;
1024 1148
1025 /* check whether the clients should not communicate due to AP 1149 /* check whether the clients should not communicate due to AP
1026 * isolation */ 1150 * isolation
1027 if (tt_local_entry && _is_ap_isolated(tt_local_entry, tt_global_entry)) 1151 */
1152 if (tt_local_entry &&
1153 _batadv_is_ap_isolated(tt_local_entry, tt_global_entry))
1028 goto out; 1154 goto out;
1029 1155
1030 best_tq = 0; 1156 best_tq = 0;
@@ -1032,7 +1158,7 @@ struct orig_node *transtable_search(struct bat_priv *bat_priv,
1032 rcu_read_lock(); 1158 rcu_read_lock();
1033 head = &tt_global_entry->orig_list; 1159 head = &tt_global_entry->orig_list;
1034 hlist_for_each_entry_rcu(orig_entry, node, head, list) { 1160 hlist_for_each_entry_rcu(orig_entry, node, head, list) {
1035 router = orig_node_get_router(orig_entry->orig_node); 1161 router = batadv_orig_node_get_router(orig_entry->orig_node);
1036 if (!router) 1162 if (!router)
1037 continue; 1163 continue;
1038 1164
@@ -1040,7 +1166,7 @@ struct orig_node *transtable_search(struct bat_priv *bat_priv,
1040 orig_node = orig_entry->orig_node; 1166 orig_node = orig_entry->orig_node;
1041 best_tq = router->tq_avg; 1167 best_tq = router->tq_avg;
1042 } 1168 }
1043 neigh_node_free_ref(router); 1169 batadv_neigh_node_free_ref(router);
1044 } 1170 }
1045 /* found anything? */ 1171 /* found anything? */
1046 if (orig_node && !atomic_inc_not_zero(&orig_node->refcount)) 1172 if (orig_node && !atomic_inc_not_zero(&orig_node->refcount))
@@ -1048,21 +1174,21 @@ struct orig_node *transtable_search(struct bat_priv *bat_priv,
1048 rcu_read_unlock(); 1174 rcu_read_unlock();
1049out: 1175out:
1050 if (tt_global_entry) 1176 if (tt_global_entry)
1051 tt_global_entry_free_ref(tt_global_entry); 1177 batadv_tt_global_entry_free_ref(tt_global_entry);
1052 if (tt_local_entry) 1178 if (tt_local_entry)
1053 tt_local_entry_free_ref(tt_local_entry); 1179 batadv_tt_local_entry_free_ref(tt_local_entry);
1054 1180
1055 return orig_node; 1181 return orig_node;
1056} 1182}
1057 1183
1058/* Calculates the checksum of the local table of a given orig_node */ 1184/* Calculates the checksum of the local table of a given orig_node */
1059static uint16_t tt_global_crc(struct bat_priv *bat_priv, 1185static uint16_t batadv_tt_global_crc(struct batadv_priv *bat_priv,
1060 struct orig_node *orig_node) 1186 struct batadv_orig_node *orig_node)
1061{ 1187{
1062 uint16_t total = 0, total_one; 1188 uint16_t total = 0, total_one;
1063 struct hashtable_t *hash = bat_priv->tt_global_hash; 1189 struct batadv_hashtable *hash = bat_priv->tt_global_hash;
1064 struct tt_common_entry *tt_common_entry; 1190 struct batadv_tt_common_entry *tt_common;
1065 struct tt_global_entry *tt_global_entry; 1191 struct batadv_tt_global_entry *tt_global;
1066 struct hlist_node *node; 1192 struct hlist_node *node;
1067 struct hlist_head *head; 1193 struct hlist_head *head;
1068 uint32_t i; 1194 uint32_t i;
@@ -1072,30 +1198,29 @@ static uint16_t tt_global_crc(struct bat_priv *bat_priv,
1072 head = &hash->table[i]; 1198 head = &hash->table[i];
1073 1199
1074 rcu_read_lock(); 1200 rcu_read_lock();
1075 hlist_for_each_entry_rcu(tt_common_entry, node, 1201 hlist_for_each_entry_rcu(tt_common, node, head, hash_entry) {
1076 head, hash_entry) { 1202 tt_global = container_of(tt_common,
1077 tt_global_entry = container_of(tt_common_entry, 1203 struct batadv_tt_global_entry,
1078 struct tt_global_entry, 1204 common);
1079 common);
1080 /* Roaming clients are in the global table for 1205 /* Roaming clients are in the global table for
1081 * consistency only. They don't have to be 1206 * consistency only. They don't have to be
1082 * taken into account while computing the 1207 * taken into account while computing the
1083 * global crc 1208 * global crc
1084 */ 1209 */
1085 if (tt_global_entry->common.flags & TT_CLIENT_ROAM) 1210 if (tt_common->flags & BATADV_TT_CLIENT_ROAM)
1086 continue; 1211 continue;
1087 1212
1088 /* find out if this global entry is announced by this 1213 /* find out if this global entry is announced by this
1089 * originator 1214 * originator
1090 */ 1215 */
1091 if (!tt_global_entry_has_orig(tt_global_entry, 1216 if (!batadv_tt_global_entry_has_orig(tt_global,
1092 orig_node)) 1217 orig_node))
1093 continue; 1218 continue;
1094 1219
1095 total_one = 0; 1220 total_one = 0;
1096 for (j = 0; j < ETH_ALEN; j++) 1221 for (j = 0; j < ETH_ALEN; j++)
1097 total_one = crc16_byte(total_one, 1222 total_one = crc16_byte(total_one,
1098 tt_global_entry->common.addr[j]); 1223 tt_common->addr[j]);
1099 total ^= total_one; 1224 total ^= total_one;
1100 } 1225 }
1101 rcu_read_unlock(); 1226 rcu_read_unlock();
@@ -1105,11 +1230,11 @@ static uint16_t tt_global_crc(struct bat_priv *bat_priv,
1105} 1230}
1106 1231
1107/* Calculates the checksum of the local table */ 1232/* Calculates the checksum of the local table */
1108uint16_t tt_local_crc(struct bat_priv *bat_priv) 1233static uint16_t batadv_tt_local_crc(struct batadv_priv *bat_priv)
1109{ 1234{
1110 uint16_t total = 0, total_one; 1235 uint16_t total = 0, total_one;
1111 struct hashtable_t *hash = bat_priv->tt_local_hash; 1236 struct batadv_hashtable *hash = bat_priv->tt_local_hash;
1112 struct tt_common_entry *tt_common_entry; 1237 struct batadv_tt_common_entry *tt_common;
1113 struct hlist_node *node; 1238 struct hlist_node *node;
1114 struct hlist_head *head; 1239 struct hlist_head *head;
1115 uint32_t i; 1240 uint32_t i;
@@ -1119,16 +1244,16 @@ uint16_t tt_local_crc(struct bat_priv *bat_priv)
1119 head = &hash->table[i]; 1244 head = &hash->table[i];
1120 1245
1121 rcu_read_lock(); 1246 rcu_read_lock();
1122 hlist_for_each_entry_rcu(tt_common_entry, node, 1247 hlist_for_each_entry_rcu(tt_common, node, head, hash_entry) {
1123 head, hash_entry) {
1124 /* not yet committed clients have not to be taken into 1248 /* not yet committed clients have not to be taken into
1125 * account while computing the CRC */ 1249 * account while computing the CRC
1126 if (tt_common_entry->flags & TT_CLIENT_NEW) 1250 */
1251 if (tt_common->flags & BATADV_TT_CLIENT_NEW)
1127 continue; 1252 continue;
1128 total_one = 0; 1253 total_one = 0;
1129 for (j = 0; j < ETH_ALEN; j++) 1254 for (j = 0; j < ETH_ALEN; j++)
1130 total_one = crc16_byte(total_one, 1255 total_one = crc16_byte(total_one,
1131 tt_common_entry->addr[j]); 1256 tt_common->addr[j]);
1132 total ^= total_one; 1257 total ^= total_one;
1133 } 1258 }
1134 rcu_read_unlock(); 1259 rcu_read_unlock();
@@ -1137,9 +1262,9 @@ uint16_t tt_local_crc(struct bat_priv *bat_priv)
1137 return total; 1262 return total;
1138} 1263}
1139 1264
1140static void tt_req_list_free(struct bat_priv *bat_priv) 1265static void batadv_tt_req_list_free(struct batadv_priv *bat_priv)
1141{ 1266{
1142 struct tt_req_node *node, *safe; 1267 struct batadv_tt_req_node *node, *safe;
1143 1268
1144 spin_lock_bh(&bat_priv->tt_req_list_lock); 1269 spin_lock_bh(&bat_priv->tt_req_list_lock);
1145 1270
@@ -1151,15 +1276,16 @@ static void tt_req_list_free(struct bat_priv *bat_priv)
1151 spin_unlock_bh(&bat_priv->tt_req_list_lock); 1276 spin_unlock_bh(&bat_priv->tt_req_list_lock);
1152} 1277}
1153 1278
1154static void tt_save_orig_buffer(struct bat_priv *bat_priv, 1279static void batadv_tt_save_orig_buffer(struct batadv_priv *bat_priv,
1155 struct orig_node *orig_node, 1280 struct batadv_orig_node *orig_node,
1156 const unsigned char *tt_buff, 1281 const unsigned char *tt_buff,
1157 uint8_t tt_num_changes) 1282 uint8_t tt_num_changes)
1158{ 1283{
1159 uint16_t tt_buff_len = tt_len(tt_num_changes); 1284 uint16_t tt_buff_len = batadv_tt_len(tt_num_changes);
1160 1285
1161 /* Replace the old buffer only if I received something in the 1286 /* Replace the old buffer only if I received something in the
1162 * last OGM (the OGM could carry no changes) */ 1287 * last OGM (the OGM could carry no changes)
1288 */
1163 spin_lock_bh(&orig_node->tt_buff_lock); 1289 spin_lock_bh(&orig_node->tt_buff_lock);
1164 if (tt_buff_len > 0) { 1290 if (tt_buff_len > 0) {
1165 kfree(orig_node->tt_buff); 1291 kfree(orig_node->tt_buff);
@@ -1173,13 +1299,14 @@ static void tt_save_orig_buffer(struct bat_priv *bat_priv,
1173 spin_unlock_bh(&orig_node->tt_buff_lock); 1299 spin_unlock_bh(&orig_node->tt_buff_lock);
1174} 1300}
1175 1301
1176static void tt_req_purge(struct bat_priv *bat_priv) 1302static void batadv_tt_req_purge(struct batadv_priv *bat_priv)
1177{ 1303{
1178 struct tt_req_node *node, *safe; 1304 struct batadv_tt_req_node *node, *safe;
1179 1305
1180 spin_lock_bh(&bat_priv->tt_req_list_lock); 1306 spin_lock_bh(&bat_priv->tt_req_list_lock);
1181 list_for_each_entry_safe(node, safe, &bat_priv->tt_req_list, list) { 1307 list_for_each_entry_safe(node, safe, &bat_priv->tt_req_list, list) {
1182 if (has_timed_out(node->issued_at, TT_REQUEST_TIMEOUT)) { 1308 if (batadv_has_timed_out(node->issued_at,
1309 BATADV_TT_REQUEST_TIMEOUT)) {
1183 list_del(&node->list); 1310 list_del(&node->list);
1184 kfree(node); 1311 kfree(node);
1185 } 1312 }
@@ -1188,17 +1315,19 @@ static void tt_req_purge(struct bat_priv *bat_priv)
1188} 1315}
1189 1316
1190/* returns the pointer to the new tt_req_node struct if no request 1317/* returns the pointer to the new tt_req_node struct if no request
1191 * has already been issued for this orig_node, NULL otherwise */ 1318 * has already been issued for this orig_node, NULL otherwise
1192static struct tt_req_node *new_tt_req_node(struct bat_priv *bat_priv, 1319 */
1193 struct orig_node *orig_node) 1320static struct batadv_tt_req_node *
1321batadv_new_tt_req_node(struct batadv_priv *bat_priv,
1322 struct batadv_orig_node *orig_node)
1194{ 1323{
1195 struct tt_req_node *tt_req_node_tmp, *tt_req_node = NULL; 1324 struct batadv_tt_req_node *tt_req_node_tmp, *tt_req_node = NULL;
1196 1325
1197 spin_lock_bh(&bat_priv->tt_req_list_lock); 1326 spin_lock_bh(&bat_priv->tt_req_list_lock);
1198 list_for_each_entry(tt_req_node_tmp, &bat_priv->tt_req_list, list) { 1327 list_for_each_entry(tt_req_node_tmp, &bat_priv->tt_req_list, list) {
1199 if (compare_eth(tt_req_node_tmp, orig_node) && 1328 if (batadv_compare_eth(tt_req_node_tmp, orig_node) &&
1200 !has_timed_out(tt_req_node_tmp->issued_at, 1329 !batadv_has_timed_out(tt_req_node_tmp->issued_at,
1201 TT_REQUEST_TIMEOUT)) 1330 BATADV_TT_REQUEST_TIMEOUT))
1202 goto unlock; 1331 goto unlock;
1203 } 1332 }
1204 1333
@@ -1216,63 +1345,67 @@ unlock:
1216} 1345}
1217 1346
1218/* data_ptr is useless here, but has to be kept to respect the prototype */ 1347/* data_ptr is useless here, but has to be kept to respect the prototype */
1219static int tt_local_valid_entry(const void *entry_ptr, const void *data_ptr) 1348static int batadv_tt_local_valid_entry(const void *entry_ptr,
1349 const void *data_ptr)
1220{ 1350{
1221 const struct tt_common_entry *tt_common_entry = entry_ptr; 1351 const struct batadv_tt_common_entry *tt_common_entry = entry_ptr;
1222 1352
1223 if (tt_common_entry->flags & TT_CLIENT_NEW) 1353 if (tt_common_entry->flags & BATADV_TT_CLIENT_NEW)
1224 return 0; 1354 return 0;
1225 return 1; 1355 return 1;
1226} 1356}
1227 1357
1228static int tt_global_valid_entry(const void *entry_ptr, const void *data_ptr) 1358static int batadv_tt_global_valid(const void *entry_ptr,
1359 const void *data_ptr)
1229{ 1360{
1230 const struct tt_common_entry *tt_common_entry = entry_ptr; 1361 const struct batadv_tt_common_entry *tt_common_entry = entry_ptr;
1231 const struct tt_global_entry *tt_global_entry; 1362 const struct batadv_tt_global_entry *tt_global_entry;
1232 const struct orig_node *orig_node = data_ptr; 1363 const struct batadv_orig_node *orig_node = data_ptr;
1233 1364
1234 if (tt_common_entry->flags & TT_CLIENT_ROAM) 1365 if (tt_common_entry->flags & BATADV_TT_CLIENT_ROAM)
1235 return 0; 1366 return 0;
1236 1367
1237 tt_global_entry = container_of(tt_common_entry, struct tt_global_entry, 1368 tt_global_entry = container_of(tt_common_entry,
1369 struct batadv_tt_global_entry,
1238 common); 1370 common);
1239 1371
1240 return tt_global_entry_has_orig(tt_global_entry, orig_node); 1372 return batadv_tt_global_entry_has_orig(tt_global_entry, orig_node);
1241} 1373}
1242 1374
1243static struct sk_buff *tt_response_fill_table(uint16_t tt_len, uint8_t ttvn, 1375static struct sk_buff *
1244 struct hashtable_t *hash, 1376batadv_tt_response_fill_table(uint16_t tt_len, uint8_t ttvn,
1245 struct hard_iface *primary_if, 1377 struct batadv_hashtable *hash,
1246 int (*valid_cb)(const void *, 1378 struct batadv_hard_iface *primary_if,
1247 const void *), 1379 int (*valid_cb)(const void *, const void *),
1248 void *cb_data) 1380 void *cb_data)
1249{ 1381{
1250 struct tt_common_entry *tt_common_entry; 1382 struct batadv_tt_common_entry *tt_common_entry;
1251 struct tt_query_packet *tt_response; 1383 struct batadv_tt_query_packet *tt_response;
1252 struct tt_change *tt_change; 1384 struct batadv_tt_change *tt_change;
1253 struct hlist_node *node; 1385 struct hlist_node *node;
1254 struct hlist_head *head; 1386 struct hlist_head *head;
1255 struct sk_buff *skb = NULL; 1387 struct sk_buff *skb = NULL;
1256 uint16_t tt_tot, tt_count; 1388 uint16_t tt_tot, tt_count;
1257 ssize_t tt_query_size = sizeof(struct tt_query_packet); 1389 ssize_t tt_query_size = sizeof(struct batadv_tt_query_packet);
1258 uint32_t i; 1390 uint32_t i;
1391 size_t len;
1259 1392
1260 if (tt_query_size + tt_len > primary_if->soft_iface->mtu) { 1393 if (tt_query_size + tt_len > primary_if->soft_iface->mtu) {
1261 tt_len = primary_if->soft_iface->mtu - tt_query_size; 1394 tt_len = primary_if->soft_iface->mtu - tt_query_size;
1262 tt_len -= tt_len % sizeof(struct tt_change); 1395 tt_len -= tt_len % sizeof(struct batadv_tt_change);
1263 } 1396 }
1264 tt_tot = tt_len / sizeof(struct tt_change); 1397 tt_tot = tt_len / sizeof(struct batadv_tt_change);
1265 1398
1266 skb = dev_alloc_skb(tt_query_size + tt_len + ETH_HLEN); 1399 len = tt_query_size + tt_len;
1400 skb = dev_alloc_skb(len + ETH_HLEN);
1267 if (!skb) 1401 if (!skb)
1268 goto out; 1402 goto out;
1269 1403
1270 skb_reserve(skb, ETH_HLEN); 1404 skb_reserve(skb, ETH_HLEN);
1271 tt_response = (struct tt_query_packet *)skb_put(skb, 1405 tt_response = (struct batadv_tt_query_packet *)skb_put(skb, len);
1272 tt_query_size + tt_len);
1273 tt_response->ttvn = ttvn; 1406 tt_response->ttvn = ttvn;
1274 1407
1275 tt_change = (struct tt_change *)(skb->data + tt_query_size); 1408 tt_change = (struct batadv_tt_change *)(skb->data + tt_query_size);
1276 tt_count = 0; 1409 tt_count = 0;
1277 1410
1278 rcu_read_lock(); 1411 rcu_read_lock();
@@ -1289,7 +1422,7 @@ static struct sk_buff *tt_response_fill_table(uint16_t tt_len, uint8_t ttvn,
1289 1422
1290 memcpy(tt_change->addr, tt_common_entry->addr, 1423 memcpy(tt_change->addr, tt_common_entry->addr,
1291 ETH_ALEN); 1424 ETH_ALEN);
1292 tt_change->flags = NO_FLAGS; 1425 tt_change->flags = BATADV_NO_FLAGS;
1293 1426
1294 tt_count++; 1427 tt_count++;
1295 tt_change++; 1428 tt_change++;
@@ -1298,72 +1431,78 @@ static struct sk_buff *tt_response_fill_table(uint16_t tt_len, uint8_t ttvn,
1298 rcu_read_unlock(); 1431 rcu_read_unlock();
1299 1432
1300 /* store in the message the number of entries we have successfully 1433 /* store in the message the number of entries we have successfully
1301 * copied */ 1434 * copied
1435 */
1302 tt_response->tt_data = htons(tt_count); 1436 tt_response->tt_data = htons(tt_count);
1303 1437
1304out: 1438out:
1305 return skb; 1439 return skb;
1306} 1440}
1307 1441
1308static int send_tt_request(struct bat_priv *bat_priv, 1442static int batadv_send_tt_request(struct batadv_priv *bat_priv,
1309 struct orig_node *dst_orig_node, 1443 struct batadv_orig_node *dst_orig_node,
1310 uint8_t ttvn, uint16_t tt_crc, bool full_table) 1444 uint8_t ttvn, uint16_t tt_crc,
1445 bool full_table)
1311{ 1446{
1312 struct sk_buff *skb = NULL; 1447 struct sk_buff *skb = NULL;
1313 struct tt_query_packet *tt_request; 1448 struct batadv_tt_query_packet *tt_request;
1314 struct neigh_node *neigh_node = NULL; 1449 struct batadv_neigh_node *neigh_node = NULL;
1315 struct hard_iface *primary_if; 1450 struct batadv_hard_iface *primary_if;
1316 struct tt_req_node *tt_req_node = NULL; 1451 struct batadv_tt_req_node *tt_req_node = NULL;
1317 int ret = 1; 1452 int ret = 1;
1453 size_t tt_req_len;
1318 1454
1319 primary_if = primary_if_get_selected(bat_priv); 1455 primary_if = batadv_primary_if_get_selected(bat_priv);
1320 if (!primary_if) 1456 if (!primary_if)
1321 goto out; 1457 goto out;
1322 1458
1323 /* The new tt_req will be issued only if I'm not waiting for a 1459 /* The new tt_req will be issued only if I'm not waiting for a
1324 * reply from the same orig_node yet */ 1460 * reply from the same orig_node yet
1325 tt_req_node = new_tt_req_node(bat_priv, dst_orig_node); 1461 */
1462 tt_req_node = batadv_new_tt_req_node(bat_priv, dst_orig_node);
1326 if (!tt_req_node) 1463 if (!tt_req_node)
1327 goto out; 1464 goto out;
1328 1465
1329 skb = dev_alloc_skb(sizeof(struct tt_query_packet) + ETH_HLEN); 1466 skb = dev_alloc_skb(sizeof(*tt_request) + ETH_HLEN);
1330 if (!skb) 1467 if (!skb)
1331 goto out; 1468 goto out;
1332 1469
1333 skb_reserve(skb, ETH_HLEN); 1470 skb_reserve(skb, ETH_HLEN);
1334 1471
1335 tt_request = (struct tt_query_packet *)skb_put(skb, 1472 tt_req_len = sizeof(*tt_request);
1336 sizeof(struct tt_query_packet)); 1473 tt_request = (struct batadv_tt_query_packet *)skb_put(skb, tt_req_len);
1337 1474
1338 tt_request->header.packet_type = BAT_TT_QUERY; 1475 tt_request->header.packet_type = BATADV_TT_QUERY;
1339 tt_request->header.version = COMPAT_VERSION; 1476 tt_request->header.version = BATADV_COMPAT_VERSION;
1340 memcpy(tt_request->src, primary_if->net_dev->dev_addr, ETH_ALEN); 1477 memcpy(tt_request->src, primary_if->net_dev->dev_addr, ETH_ALEN);
1341 memcpy(tt_request->dst, dst_orig_node->orig, ETH_ALEN); 1478 memcpy(tt_request->dst, dst_orig_node->orig, ETH_ALEN);
1342 tt_request->header.ttl = TTL; 1479 tt_request->header.ttl = BATADV_TTL;
1343 tt_request->ttvn = ttvn; 1480 tt_request->ttvn = ttvn;
1344 tt_request->tt_data = htons(tt_crc); 1481 tt_request->tt_data = htons(tt_crc);
1345 tt_request->flags = TT_REQUEST; 1482 tt_request->flags = BATADV_TT_REQUEST;
1346 1483
1347 if (full_table) 1484 if (full_table)
1348 tt_request->flags |= TT_FULL_TABLE; 1485 tt_request->flags |= BATADV_TT_FULL_TABLE;
1349 1486
1350 neigh_node = orig_node_get_router(dst_orig_node); 1487 neigh_node = batadv_orig_node_get_router(dst_orig_node);
1351 if (!neigh_node) 1488 if (!neigh_node)
1352 goto out; 1489 goto out;
1353 1490
1354 bat_dbg(DBG_TT, bat_priv, 1491 batadv_dbg(BATADV_DBG_TT, bat_priv,
1355 "Sending TT_REQUEST to %pM via %pM [%c]\n", 1492 "Sending TT_REQUEST to %pM via %pM [%c]\n",
1356 dst_orig_node->orig, neigh_node->addr, 1493 dst_orig_node->orig, neigh_node->addr,
1357 (full_table ? 'F' : '.')); 1494 (full_table ? 'F' : '.'));
1495
1496 batadv_inc_counter(bat_priv, BATADV_CNT_TT_REQUEST_TX);
1358 1497
1359 send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr); 1498 batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
1360 ret = 0; 1499 ret = 0;
1361 1500
1362out: 1501out:
1363 if (neigh_node) 1502 if (neigh_node)
1364 neigh_node_free_ref(neigh_node); 1503 batadv_neigh_node_free_ref(neigh_node);
1365 if (primary_if) 1504 if (primary_if)
1366 hardif_free_ref(primary_if); 1505 batadv_hardif_free_ref(primary_if);
1367 if (ret) 1506 if (ret)
1368 kfree_skb(skb); 1507 kfree_skb(skb);
1369 if (ret && tt_req_node) { 1508 if (ret && tt_req_node) {
@@ -1375,39 +1514,42 @@ out:
1375 return ret; 1514 return ret;
1376} 1515}
1377 1516
1378static bool send_other_tt_response(struct bat_priv *bat_priv, 1517static bool
1379 struct tt_query_packet *tt_request) 1518batadv_send_other_tt_response(struct batadv_priv *bat_priv,
1519 struct batadv_tt_query_packet *tt_request)
1380{ 1520{
1381 struct orig_node *req_dst_orig_node = NULL, *res_dst_orig_node = NULL; 1521 struct batadv_orig_node *req_dst_orig_node = NULL;
1382 struct neigh_node *neigh_node = NULL; 1522 struct batadv_orig_node *res_dst_orig_node = NULL;
1383 struct hard_iface *primary_if = NULL; 1523 struct batadv_neigh_node *neigh_node = NULL;
1524 struct batadv_hard_iface *primary_if = NULL;
1384 uint8_t orig_ttvn, req_ttvn, ttvn; 1525 uint8_t orig_ttvn, req_ttvn, ttvn;
1385 int ret = false; 1526 int ret = false;
1386 unsigned char *tt_buff; 1527 unsigned char *tt_buff;
1387 bool full_table; 1528 bool full_table;
1388 uint16_t tt_len, tt_tot; 1529 uint16_t tt_len, tt_tot;
1389 struct sk_buff *skb = NULL; 1530 struct sk_buff *skb = NULL;
1390 struct tt_query_packet *tt_response; 1531 struct batadv_tt_query_packet *tt_response;
1532 size_t len;
1391 1533
1392 bat_dbg(DBG_TT, bat_priv, 1534 batadv_dbg(BATADV_DBG_TT, bat_priv,
1393 "Received TT_REQUEST from %pM for ttvn: %u (%pM) [%c]\n", 1535 "Received TT_REQUEST from %pM for ttvn: %u (%pM) [%c]\n",
1394 tt_request->src, tt_request->ttvn, tt_request->dst, 1536 tt_request->src, tt_request->ttvn, tt_request->dst,
1395 (tt_request->flags & TT_FULL_TABLE ? 'F' : '.')); 1537 (tt_request->flags & BATADV_TT_FULL_TABLE ? 'F' : '.'));
1396 1538
1397 /* Let's get the orig node of the REAL destination */ 1539 /* Let's get the orig node of the REAL destination */
1398 req_dst_orig_node = orig_hash_find(bat_priv, tt_request->dst); 1540 req_dst_orig_node = batadv_orig_hash_find(bat_priv, tt_request->dst);
1399 if (!req_dst_orig_node) 1541 if (!req_dst_orig_node)
1400 goto out; 1542 goto out;
1401 1543
1402 res_dst_orig_node = orig_hash_find(bat_priv, tt_request->src); 1544 res_dst_orig_node = batadv_orig_hash_find(bat_priv, tt_request->src);
1403 if (!res_dst_orig_node) 1545 if (!res_dst_orig_node)
1404 goto out; 1546 goto out;
1405 1547
1406 neigh_node = orig_node_get_router(res_dst_orig_node); 1548 neigh_node = batadv_orig_node_get_router(res_dst_orig_node);
1407 if (!neigh_node) 1549 if (!neigh_node)
1408 goto out; 1550 goto out;
1409 1551
1410 primary_if = primary_if_get_selected(bat_priv); 1552 primary_if = batadv_primary_if_get_selected(bat_priv);
1411 if (!primary_if) 1553 if (!primary_if)
1412 goto out; 1554 goto out;
1413 1555
@@ -1416,71 +1558,75 @@ static bool send_other_tt_response(struct bat_priv *bat_priv,
1416 1558
1417 /* I don't have the requested data */ 1559 /* I don't have the requested data */
1418 if (orig_ttvn != req_ttvn || 1560 if (orig_ttvn != req_ttvn ||
1419 tt_request->tt_data != req_dst_orig_node->tt_crc) 1561 tt_request->tt_data != htons(req_dst_orig_node->tt_crc))
1420 goto out; 1562 goto out;
1421 1563
1422 /* If the full table has been explicitly requested */ 1564 /* If the full table has been explicitly requested */
1423 if (tt_request->flags & TT_FULL_TABLE || 1565 if (tt_request->flags & BATADV_TT_FULL_TABLE ||
1424 !req_dst_orig_node->tt_buff) 1566 !req_dst_orig_node->tt_buff)
1425 full_table = true; 1567 full_table = true;
1426 else 1568 else
1427 full_table = false; 1569 full_table = false;
1428 1570
1429 /* In this version, fragmentation is not implemented, then 1571 /* In this version, fragmentation is not implemented, then
1430 * I'll send only one packet with as much TT entries as I can */ 1572 * I'll send only one packet with as much TT entries as I can
1573 */
1431 if (!full_table) { 1574 if (!full_table) {
1432 spin_lock_bh(&req_dst_orig_node->tt_buff_lock); 1575 spin_lock_bh(&req_dst_orig_node->tt_buff_lock);
1433 tt_len = req_dst_orig_node->tt_buff_len; 1576 tt_len = req_dst_orig_node->tt_buff_len;
1434 tt_tot = tt_len / sizeof(struct tt_change); 1577 tt_tot = tt_len / sizeof(struct batadv_tt_change);
1435 1578
1436 skb = dev_alloc_skb(sizeof(struct tt_query_packet) + 1579 len = sizeof(*tt_response) + tt_len;
1437 tt_len + ETH_HLEN); 1580 skb = dev_alloc_skb(len + ETH_HLEN);
1438 if (!skb) 1581 if (!skb)
1439 goto unlock; 1582 goto unlock;
1440 1583
1441 skb_reserve(skb, ETH_HLEN); 1584 skb_reserve(skb, ETH_HLEN);
1442 tt_response = (struct tt_query_packet *)skb_put(skb, 1585 tt_response = (struct batadv_tt_query_packet *)skb_put(skb,
1443 sizeof(struct tt_query_packet) + tt_len); 1586 len);
1444 tt_response->ttvn = req_ttvn; 1587 tt_response->ttvn = req_ttvn;
1445 tt_response->tt_data = htons(tt_tot); 1588 tt_response->tt_data = htons(tt_tot);
1446 1589
1447 tt_buff = skb->data + sizeof(struct tt_query_packet); 1590 tt_buff = skb->data + sizeof(*tt_response);
1448 /* Copy the last orig_node's OGM buffer */ 1591 /* Copy the last orig_node's OGM buffer */
1449 memcpy(tt_buff, req_dst_orig_node->tt_buff, 1592 memcpy(tt_buff, req_dst_orig_node->tt_buff,
1450 req_dst_orig_node->tt_buff_len); 1593 req_dst_orig_node->tt_buff_len);
1451 1594
1452 spin_unlock_bh(&req_dst_orig_node->tt_buff_lock); 1595 spin_unlock_bh(&req_dst_orig_node->tt_buff_lock);
1453 } else { 1596 } else {
1454 tt_len = (uint16_t)atomic_read(&req_dst_orig_node->tt_size) * 1597 tt_len = (uint16_t)atomic_read(&req_dst_orig_node->tt_size);
1455 sizeof(struct tt_change); 1598 tt_len *= sizeof(struct batadv_tt_change);
1456 ttvn = (uint8_t)atomic_read(&req_dst_orig_node->last_ttvn); 1599 ttvn = (uint8_t)atomic_read(&req_dst_orig_node->last_ttvn);
1457 1600
1458 skb = tt_response_fill_table(tt_len, ttvn, 1601 skb = batadv_tt_response_fill_table(tt_len, ttvn,
1459 bat_priv->tt_global_hash, 1602 bat_priv->tt_global_hash,
1460 primary_if, tt_global_valid_entry, 1603 primary_if,
1461 req_dst_orig_node); 1604 batadv_tt_global_valid,
1605 req_dst_orig_node);
1462 if (!skb) 1606 if (!skb)
1463 goto out; 1607 goto out;
1464 1608
1465 tt_response = (struct tt_query_packet *)skb->data; 1609 tt_response = (struct batadv_tt_query_packet *)skb->data;
1466 } 1610 }
1467 1611
1468 tt_response->header.packet_type = BAT_TT_QUERY; 1612 tt_response->header.packet_type = BATADV_TT_QUERY;
1469 tt_response->header.version = COMPAT_VERSION; 1613 tt_response->header.version = BATADV_COMPAT_VERSION;
1470 tt_response->header.ttl = TTL; 1614 tt_response->header.ttl = BATADV_TTL;
1471 memcpy(tt_response->src, req_dst_orig_node->orig, ETH_ALEN); 1615 memcpy(tt_response->src, req_dst_orig_node->orig, ETH_ALEN);
1472 memcpy(tt_response->dst, tt_request->src, ETH_ALEN); 1616 memcpy(tt_response->dst, tt_request->src, ETH_ALEN);
1473 tt_response->flags = TT_RESPONSE; 1617 tt_response->flags = BATADV_TT_RESPONSE;
1474 1618
1475 if (full_table) 1619 if (full_table)
1476 tt_response->flags |= TT_FULL_TABLE; 1620 tt_response->flags |= BATADV_TT_FULL_TABLE;
1477 1621
1478 bat_dbg(DBG_TT, bat_priv, 1622 batadv_dbg(BATADV_DBG_TT, bat_priv,
1479 "Sending TT_RESPONSE %pM via %pM for %pM (ttvn: %u)\n", 1623 "Sending TT_RESPONSE %pM via %pM for %pM (ttvn: %u)\n",
1480 res_dst_orig_node->orig, neigh_node->addr, 1624 res_dst_orig_node->orig, neigh_node->addr,
1481 req_dst_orig_node->orig, req_ttvn); 1625 req_dst_orig_node->orig, req_ttvn);
1482 1626
1483 send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr); 1627 batadv_inc_counter(bat_priv, BATADV_CNT_TT_RESPONSE_TX);
1628
1629 batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
1484 ret = true; 1630 ret = true;
1485 goto out; 1631 goto out;
1486 1632
@@ -1489,114 +1635,122 @@ unlock:
1489 1635
1490out: 1636out:
1491 if (res_dst_orig_node) 1637 if (res_dst_orig_node)
1492 orig_node_free_ref(res_dst_orig_node); 1638 batadv_orig_node_free_ref(res_dst_orig_node);
1493 if (req_dst_orig_node) 1639 if (req_dst_orig_node)
1494 orig_node_free_ref(req_dst_orig_node); 1640 batadv_orig_node_free_ref(req_dst_orig_node);
1495 if (neigh_node) 1641 if (neigh_node)
1496 neigh_node_free_ref(neigh_node); 1642 batadv_neigh_node_free_ref(neigh_node);
1497 if (primary_if) 1643 if (primary_if)
1498 hardif_free_ref(primary_if); 1644 batadv_hardif_free_ref(primary_if);
1499 if (!ret) 1645 if (!ret)
1500 kfree_skb(skb); 1646 kfree_skb(skb);
1501 return ret; 1647 return ret;
1502 1648
1503} 1649}
1504static bool send_my_tt_response(struct bat_priv *bat_priv, 1650
1505 struct tt_query_packet *tt_request) 1651static bool
1652batadv_send_my_tt_response(struct batadv_priv *bat_priv,
1653 struct batadv_tt_query_packet *tt_request)
1506{ 1654{
1507 struct orig_node *orig_node = NULL; 1655 struct batadv_orig_node *orig_node = NULL;
1508 struct neigh_node *neigh_node = NULL; 1656 struct batadv_neigh_node *neigh_node = NULL;
1509 struct hard_iface *primary_if = NULL; 1657 struct batadv_hard_iface *primary_if = NULL;
1510 uint8_t my_ttvn, req_ttvn, ttvn; 1658 uint8_t my_ttvn, req_ttvn, ttvn;
1511 int ret = false; 1659 int ret = false;
1512 unsigned char *tt_buff; 1660 unsigned char *tt_buff;
1513 bool full_table; 1661 bool full_table;
1514 uint16_t tt_len, tt_tot; 1662 uint16_t tt_len, tt_tot;
1515 struct sk_buff *skb = NULL; 1663 struct sk_buff *skb = NULL;
1516 struct tt_query_packet *tt_response; 1664 struct batadv_tt_query_packet *tt_response;
1665 size_t len;
1517 1666
1518 bat_dbg(DBG_TT, bat_priv, 1667 batadv_dbg(BATADV_DBG_TT, bat_priv,
1519 "Received TT_REQUEST from %pM for ttvn: %u (me) [%c]\n", 1668 "Received TT_REQUEST from %pM for ttvn: %u (me) [%c]\n",
1520 tt_request->src, tt_request->ttvn, 1669 tt_request->src, tt_request->ttvn,
1521 (tt_request->flags & TT_FULL_TABLE ? 'F' : '.')); 1670 (tt_request->flags & BATADV_TT_FULL_TABLE ? 'F' : '.'));
1522 1671
1523 1672
1524 my_ttvn = (uint8_t)atomic_read(&bat_priv->ttvn); 1673 my_ttvn = (uint8_t)atomic_read(&bat_priv->ttvn);
1525 req_ttvn = tt_request->ttvn; 1674 req_ttvn = tt_request->ttvn;
1526 1675
1527 orig_node = orig_hash_find(bat_priv, tt_request->src); 1676 orig_node = batadv_orig_hash_find(bat_priv, tt_request->src);
1528 if (!orig_node) 1677 if (!orig_node)
1529 goto out; 1678 goto out;
1530 1679
1531 neigh_node = orig_node_get_router(orig_node); 1680 neigh_node = batadv_orig_node_get_router(orig_node);
1532 if (!neigh_node) 1681 if (!neigh_node)
1533 goto out; 1682 goto out;
1534 1683
1535 primary_if = primary_if_get_selected(bat_priv); 1684 primary_if = batadv_primary_if_get_selected(bat_priv);
1536 if (!primary_if) 1685 if (!primary_if)
1537 goto out; 1686 goto out;
1538 1687
1539 /* If the full table has been explicitly requested or the gap 1688 /* If the full table has been explicitly requested or the gap
1540 * is too big send the whole local translation table */ 1689 * is too big send the whole local translation table
1541 if (tt_request->flags & TT_FULL_TABLE || my_ttvn != req_ttvn || 1690 */
1691 if (tt_request->flags & BATADV_TT_FULL_TABLE || my_ttvn != req_ttvn ||
1542 !bat_priv->tt_buff) 1692 !bat_priv->tt_buff)
1543 full_table = true; 1693 full_table = true;
1544 else 1694 else
1545 full_table = false; 1695 full_table = false;
1546 1696
1547 /* In this version, fragmentation is not implemented, then 1697 /* In this version, fragmentation is not implemented, then
1548 * I'll send only one packet with as much TT entries as I can */ 1698 * I'll send only one packet with as much TT entries as I can
1699 */
1549 if (!full_table) { 1700 if (!full_table) {
1550 spin_lock_bh(&bat_priv->tt_buff_lock); 1701 spin_lock_bh(&bat_priv->tt_buff_lock);
1551 tt_len = bat_priv->tt_buff_len; 1702 tt_len = bat_priv->tt_buff_len;
1552 tt_tot = tt_len / sizeof(struct tt_change); 1703 tt_tot = tt_len / sizeof(struct batadv_tt_change);
1553 1704
1554 skb = dev_alloc_skb(sizeof(struct tt_query_packet) + 1705 len = sizeof(*tt_response) + tt_len;
1555 tt_len + ETH_HLEN); 1706 skb = dev_alloc_skb(len + ETH_HLEN);
1556 if (!skb) 1707 if (!skb)
1557 goto unlock; 1708 goto unlock;
1558 1709
1559 skb_reserve(skb, ETH_HLEN); 1710 skb_reserve(skb, ETH_HLEN);
1560 tt_response = (struct tt_query_packet *)skb_put(skb, 1711 tt_response = (struct batadv_tt_query_packet *)skb_put(skb,
1561 sizeof(struct tt_query_packet) + tt_len); 1712 len);
1562 tt_response->ttvn = req_ttvn; 1713 tt_response->ttvn = req_ttvn;
1563 tt_response->tt_data = htons(tt_tot); 1714 tt_response->tt_data = htons(tt_tot);
1564 1715
1565 tt_buff = skb->data + sizeof(struct tt_query_packet); 1716 tt_buff = skb->data + sizeof(*tt_response);
1566 memcpy(tt_buff, bat_priv->tt_buff, 1717 memcpy(tt_buff, bat_priv->tt_buff,
1567 bat_priv->tt_buff_len); 1718 bat_priv->tt_buff_len);
1568 spin_unlock_bh(&bat_priv->tt_buff_lock); 1719 spin_unlock_bh(&bat_priv->tt_buff_lock);
1569 } else { 1720 } else {
1570 tt_len = (uint16_t)atomic_read(&bat_priv->num_local_tt) * 1721 tt_len = (uint16_t)atomic_read(&bat_priv->num_local_tt);
1571 sizeof(struct tt_change); 1722 tt_len *= sizeof(struct batadv_tt_change);
1572 ttvn = (uint8_t)atomic_read(&bat_priv->ttvn); 1723 ttvn = (uint8_t)atomic_read(&bat_priv->ttvn);
1573 1724
1574 skb = tt_response_fill_table(tt_len, ttvn, 1725 skb = batadv_tt_response_fill_table(tt_len, ttvn,
1575 bat_priv->tt_local_hash, 1726 bat_priv->tt_local_hash,
1576 primary_if, tt_local_valid_entry, 1727 primary_if,
1577 NULL); 1728 batadv_tt_local_valid_entry,
1729 NULL);
1578 if (!skb) 1730 if (!skb)
1579 goto out; 1731 goto out;
1580 1732
1581 tt_response = (struct tt_query_packet *)skb->data; 1733 tt_response = (struct batadv_tt_query_packet *)skb->data;
1582 } 1734 }
1583 1735
1584 tt_response->header.packet_type = BAT_TT_QUERY; 1736 tt_response->header.packet_type = BATADV_TT_QUERY;
1585 tt_response->header.version = COMPAT_VERSION; 1737 tt_response->header.version = BATADV_COMPAT_VERSION;
1586 tt_response->header.ttl = TTL; 1738 tt_response->header.ttl = BATADV_TTL;
1587 memcpy(tt_response->src, primary_if->net_dev->dev_addr, ETH_ALEN); 1739 memcpy(tt_response->src, primary_if->net_dev->dev_addr, ETH_ALEN);
1588 memcpy(tt_response->dst, tt_request->src, ETH_ALEN); 1740 memcpy(tt_response->dst, tt_request->src, ETH_ALEN);
1589 tt_response->flags = TT_RESPONSE; 1741 tt_response->flags = BATADV_TT_RESPONSE;
1590 1742
1591 if (full_table) 1743 if (full_table)
1592 tt_response->flags |= TT_FULL_TABLE; 1744 tt_response->flags |= BATADV_TT_FULL_TABLE;
1593 1745
1594 bat_dbg(DBG_TT, bat_priv, 1746 batadv_dbg(BATADV_DBG_TT, bat_priv,
1595 "Sending TT_RESPONSE to %pM via %pM [%c]\n", 1747 "Sending TT_RESPONSE to %pM via %pM [%c]\n",
1596 orig_node->orig, neigh_node->addr, 1748 orig_node->orig, neigh_node->addr,
1597 (tt_response->flags & TT_FULL_TABLE ? 'F' : '.')); 1749 (tt_response->flags & BATADV_TT_FULL_TABLE ? 'F' : '.'));
1598 1750
1599 send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr); 1751 batadv_inc_counter(bat_priv, BATADV_CNT_TT_RESPONSE_TX);
1752
1753 batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
1600 ret = true; 1754 ret = true;
1601 goto out; 1755 goto out;
1602 1756
@@ -1604,49 +1758,50 @@ unlock:
1604 spin_unlock_bh(&bat_priv->tt_buff_lock); 1758 spin_unlock_bh(&bat_priv->tt_buff_lock);
1605out: 1759out:
1606 if (orig_node) 1760 if (orig_node)
1607 orig_node_free_ref(orig_node); 1761 batadv_orig_node_free_ref(orig_node);
1608 if (neigh_node) 1762 if (neigh_node)
1609 neigh_node_free_ref(neigh_node); 1763 batadv_neigh_node_free_ref(neigh_node);
1610 if (primary_if) 1764 if (primary_if)
1611 hardif_free_ref(primary_if); 1765 batadv_hardif_free_ref(primary_if);
1612 if (!ret) 1766 if (!ret)
1613 kfree_skb(skb); 1767 kfree_skb(skb);
1614 /* This packet was for me, so it doesn't need to be re-routed */ 1768 /* This packet was for me, so it doesn't need to be re-routed */
1615 return true; 1769 return true;
1616} 1770}
1617 1771
1618bool send_tt_response(struct bat_priv *bat_priv, 1772bool batadv_send_tt_response(struct batadv_priv *bat_priv,
1619 struct tt_query_packet *tt_request) 1773 struct batadv_tt_query_packet *tt_request)
1620{ 1774{
1621 if (is_my_mac(tt_request->dst)) { 1775 if (batadv_is_my_mac(tt_request->dst)) {
1622 /* don't answer backbone gws! */ 1776 /* don't answer backbone gws! */
1623 if (bla_is_backbone_gw_orig(bat_priv, tt_request->src)) 1777 if (batadv_bla_is_backbone_gw_orig(bat_priv, tt_request->src))
1624 return true; 1778 return true;
1625 1779
1626 return send_my_tt_response(bat_priv, tt_request); 1780 return batadv_send_my_tt_response(bat_priv, tt_request);
1627 } else { 1781 } else {
1628 return send_other_tt_response(bat_priv, tt_request); 1782 return batadv_send_other_tt_response(bat_priv, tt_request);
1629 } 1783 }
1630} 1784}
1631 1785
1632static void _tt_update_changes(struct bat_priv *bat_priv, 1786static void _batadv_tt_update_changes(struct batadv_priv *bat_priv,
1633 struct orig_node *orig_node, 1787 struct batadv_orig_node *orig_node,
1634 struct tt_change *tt_change, 1788 struct batadv_tt_change *tt_change,
1635 uint16_t tt_num_changes, uint8_t ttvn) 1789 uint16_t tt_num_changes, uint8_t ttvn)
1636{ 1790{
1637 int i; 1791 int i;
1792 int roams;
1638 1793
1639 for (i = 0; i < tt_num_changes; i++) { 1794 for (i = 0; i < tt_num_changes; i++) {
1640 if ((tt_change + i)->flags & TT_CLIENT_DEL) 1795 if ((tt_change + i)->flags & BATADV_TT_CLIENT_DEL) {
1641 tt_global_del(bat_priv, orig_node, 1796 roams = (tt_change + i)->flags & BATADV_TT_CLIENT_ROAM;
1642 (tt_change + i)->addr, 1797 batadv_tt_global_del(bat_priv, orig_node,
1643 "tt removed by changes", 1798 (tt_change + i)->addr,
1644 (tt_change + i)->flags & TT_CLIENT_ROAM); 1799 "tt removed by changes",
1645 else 1800 roams);
1646 if (!tt_global_add(bat_priv, orig_node, 1801 } else {
1647 (tt_change + i)->addr, ttvn, false, 1802 if (!batadv_tt_global_add(bat_priv, orig_node,
1648 (tt_change + i)->flags & 1803 (tt_change + i)->addr,
1649 TT_CLIENT_WIFI)) 1804 (tt_change + i)->flags, ttvn))
1650 /* In case of problem while storing a 1805 /* In case of problem while storing a
1651 * global_entry, we stop the updating 1806 * global_entry, we stop the updating
1652 * procedure without committing the 1807 * procedure without committing the
@@ -1654,25 +1809,27 @@ static void _tt_update_changes(struct bat_priv *bat_priv,
1654 * corrupted data on tt_request 1809 * corrupted data on tt_request
1655 */ 1810 */
1656 return; 1811 return;
1812 }
1657 } 1813 }
1658 orig_node->tt_initialised = true; 1814 orig_node->tt_initialised = true;
1659} 1815}
1660 1816
1661static void tt_fill_gtable(struct bat_priv *bat_priv, 1817static void batadv_tt_fill_gtable(struct batadv_priv *bat_priv,
1662 struct tt_query_packet *tt_response) 1818 struct batadv_tt_query_packet *tt_response)
1663{ 1819{
1664 struct orig_node *orig_node = NULL; 1820 struct batadv_orig_node *orig_node = NULL;
1665 1821
1666 orig_node = orig_hash_find(bat_priv, tt_response->src); 1822 orig_node = batadv_orig_hash_find(bat_priv, tt_response->src);
1667 if (!orig_node) 1823 if (!orig_node)
1668 goto out; 1824 goto out;
1669 1825
1670 /* Purge the old table first.. */ 1826 /* Purge the old table first.. */
1671 tt_global_del_orig(bat_priv, orig_node, "Received full table"); 1827 batadv_tt_global_del_orig(bat_priv, orig_node, "Received full table");
1672 1828
1673 _tt_update_changes(bat_priv, orig_node, 1829 _batadv_tt_update_changes(bat_priv, orig_node,
1674 (struct tt_change *)(tt_response + 1), 1830 (struct batadv_tt_change *)(tt_response + 1),
1675 tt_response->tt_data, tt_response->ttvn); 1831 ntohs(tt_response->tt_data),
1832 tt_response->ttvn);
1676 1833
1677 spin_lock_bh(&orig_node->tt_buff_lock); 1834 spin_lock_bh(&orig_node->tt_buff_lock);
1678 kfree(orig_node->tt_buff); 1835 kfree(orig_node->tt_buff);
@@ -1684,71 +1841,76 @@ static void tt_fill_gtable(struct bat_priv *bat_priv,
1684 1841
1685out: 1842out:
1686 if (orig_node) 1843 if (orig_node)
1687 orig_node_free_ref(orig_node); 1844 batadv_orig_node_free_ref(orig_node);
1688} 1845}
1689 1846
1690static void tt_update_changes(struct bat_priv *bat_priv, 1847static void batadv_tt_update_changes(struct batadv_priv *bat_priv,
1691 struct orig_node *orig_node, 1848 struct batadv_orig_node *orig_node,
1692 uint16_t tt_num_changes, uint8_t ttvn, 1849 uint16_t tt_num_changes, uint8_t ttvn,
1693 struct tt_change *tt_change) 1850 struct batadv_tt_change *tt_change)
1694{ 1851{
1695 _tt_update_changes(bat_priv, orig_node, tt_change, tt_num_changes, 1852 _batadv_tt_update_changes(bat_priv, orig_node, tt_change,
1696 ttvn); 1853 tt_num_changes, ttvn);
1697 1854
1698 tt_save_orig_buffer(bat_priv, orig_node, (unsigned char *)tt_change, 1855 batadv_tt_save_orig_buffer(bat_priv, orig_node,
1699 tt_num_changes); 1856 (unsigned char *)tt_change, tt_num_changes);
1700 atomic_set(&orig_node->last_ttvn, ttvn); 1857 atomic_set(&orig_node->last_ttvn, ttvn);
1701} 1858}
1702 1859
1703bool is_my_client(struct bat_priv *bat_priv, const uint8_t *addr) 1860bool batadv_is_my_client(struct batadv_priv *bat_priv, const uint8_t *addr)
1704{ 1861{
1705 struct tt_local_entry *tt_local_entry = NULL; 1862 struct batadv_tt_local_entry *tt_local_entry = NULL;
1706 bool ret = false; 1863 bool ret = false;
1707 1864
1708 tt_local_entry = tt_local_hash_find(bat_priv, addr); 1865 tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr);
1709 if (!tt_local_entry) 1866 if (!tt_local_entry)
1710 goto out; 1867 goto out;
1711 /* Check if the client has been logically deleted (but is kept for 1868 /* Check if the client has been logically deleted (but is kept for
1712 * consistency purpose) */ 1869 * consistency purpose)
1713 if (tt_local_entry->common.flags & TT_CLIENT_PENDING) 1870 */
1871 if (tt_local_entry->common.flags & BATADV_TT_CLIENT_PENDING)
1714 goto out; 1872 goto out;
1715 ret = true; 1873 ret = true;
1716out: 1874out:
1717 if (tt_local_entry) 1875 if (tt_local_entry)
1718 tt_local_entry_free_ref(tt_local_entry); 1876 batadv_tt_local_entry_free_ref(tt_local_entry);
1719 return ret; 1877 return ret;
1720} 1878}
1721 1879
1722void handle_tt_response(struct bat_priv *bat_priv, 1880void batadv_handle_tt_response(struct batadv_priv *bat_priv,
1723 struct tt_query_packet *tt_response) 1881 struct batadv_tt_query_packet *tt_response)
1724{ 1882{
1725 struct tt_req_node *node, *safe; 1883 struct batadv_tt_req_node *node, *safe;
1726 struct orig_node *orig_node = NULL; 1884 struct batadv_orig_node *orig_node = NULL;
1885 struct batadv_tt_change *tt_change;
1727 1886
1728 bat_dbg(DBG_TT, bat_priv, 1887 batadv_dbg(BATADV_DBG_TT, bat_priv,
1729 "Received TT_RESPONSE from %pM for ttvn %d t_size: %d [%c]\n", 1888 "Received TT_RESPONSE from %pM for ttvn %d t_size: %d [%c]\n",
1730 tt_response->src, tt_response->ttvn, tt_response->tt_data, 1889 tt_response->src, tt_response->ttvn,
1731 (tt_response->flags & TT_FULL_TABLE ? 'F' : '.')); 1890 ntohs(tt_response->tt_data),
1891 (tt_response->flags & BATADV_TT_FULL_TABLE ? 'F' : '.'));
1732 1892
1733 /* we should have never asked a backbone gw */ 1893 /* we should have never asked a backbone gw */
1734 if (bla_is_backbone_gw_orig(bat_priv, tt_response->src)) 1894 if (batadv_bla_is_backbone_gw_orig(bat_priv, tt_response->src))
1735 goto out; 1895 goto out;
1736 1896
1737 orig_node = orig_hash_find(bat_priv, tt_response->src); 1897 orig_node = batadv_orig_hash_find(bat_priv, tt_response->src);
1738 if (!orig_node) 1898 if (!orig_node)
1739 goto out; 1899 goto out;
1740 1900
1741 if (tt_response->flags & TT_FULL_TABLE) 1901 if (tt_response->flags & BATADV_TT_FULL_TABLE) {
1742 tt_fill_gtable(bat_priv, tt_response); 1902 batadv_tt_fill_gtable(bat_priv, tt_response);
1743 else 1903 } else {
1744 tt_update_changes(bat_priv, orig_node, tt_response->tt_data, 1904 tt_change = (struct batadv_tt_change *)(tt_response + 1);
1745 tt_response->ttvn, 1905 batadv_tt_update_changes(bat_priv, orig_node,
1746 (struct tt_change *)(tt_response + 1)); 1906 ntohs(tt_response->tt_data),
1907 tt_response->ttvn, tt_change);
1908 }
1747 1909
1748 /* Delete the tt_req_node from pending tt_requests list */ 1910 /* Delete the tt_req_node from pending tt_requests list */
1749 spin_lock_bh(&bat_priv->tt_req_list_lock); 1911 spin_lock_bh(&bat_priv->tt_req_list_lock);
1750 list_for_each_entry_safe(node, safe, &bat_priv->tt_req_list, list) { 1912 list_for_each_entry_safe(node, safe, &bat_priv->tt_req_list, list) {
1751 if (!compare_eth(node->addr, tt_response->src)) 1913 if (!batadv_compare_eth(node->addr, tt_response->src))
1752 continue; 1914 continue;
1753 list_del(&node->list); 1915 list_del(&node->list);
1754 kfree(node); 1916 kfree(node);
@@ -1756,31 +1918,36 @@ void handle_tt_response(struct bat_priv *bat_priv,
1756 spin_unlock_bh(&bat_priv->tt_req_list_lock); 1918 spin_unlock_bh(&bat_priv->tt_req_list_lock);
1757 1919
1758 /* Recalculate the CRC for this orig_node and store it */ 1920 /* Recalculate the CRC for this orig_node and store it */
1759 orig_node->tt_crc = tt_global_crc(bat_priv, orig_node); 1921 orig_node->tt_crc = batadv_tt_global_crc(bat_priv, orig_node);
1760 /* Roaming phase is over: tables are in sync again. I can 1922 /* Roaming phase is over: tables are in sync again. I can
1761 * unset the flag */ 1923 * unset the flag
1924 */
1762 orig_node->tt_poss_change = false; 1925 orig_node->tt_poss_change = false;
1763out: 1926out:
1764 if (orig_node) 1927 if (orig_node)
1765 orig_node_free_ref(orig_node); 1928 batadv_orig_node_free_ref(orig_node);
1766} 1929}
1767 1930
1768int tt_init(struct bat_priv *bat_priv) 1931int batadv_tt_init(struct batadv_priv *bat_priv)
1769{ 1932{
1770 if (!tt_local_init(bat_priv)) 1933 int ret;
1771 return 0;
1772 1934
1773 if (!tt_global_init(bat_priv)) 1935 ret = batadv_tt_local_init(bat_priv);
1774 return 0; 1936 if (ret < 0)
1937 return ret;
1775 1938
1776 tt_start_timer(bat_priv); 1939 ret = batadv_tt_global_init(bat_priv);
1940 if (ret < 0)
1941 return ret;
1942
1943 batadv_tt_start_timer(bat_priv);
1777 1944
1778 return 1; 1945 return 1;
1779} 1946}
1780 1947
1781static void tt_roam_list_free(struct bat_priv *bat_priv) 1948static void batadv_tt_roam_list_free(struct batadv_priv *bat_priv)
1782{ 1949{
1783 struct tt_roam_node *node, *safe; 1950 struct batadv_tt_roam_node *node, *safe;
1784 1951
1785 spin_lock_bh(&bat_priv->tt_roam_list_lock); 1952 spin_lock_bh(&bat_priv->tt_roam_list_lock);
1786 1953
@@ -1792,13 +1959,14 @@ static void tt_roam_list_free(struct bat_priv *bat_priv)
1792 spin_unlock_bh(&bat_priv->tt_roam_list_lock); 1959 spin_unlock_bh(&bat_priv->tt_roam_list_lock);
1793} 1960}
1794 1961
1795static void tt_roam_purge(struct bat_priv *bat_priv) 1962static void batadv_tt_roam_purge(struct batadv_priv *bat_priv)
1796{ 1963{
1797 struct tt_roam_node *node, *safe; 1964 struct batadv_tt_roam_node *node, *safe;
1798 1965
1799 spin_lock_bh(&bat_priv->tt_roam_list_lock); 1966 spin_lock_bh(&bat_priv->tt_roam_list_lock);
1800 list_for_each_entry_safe(node, safe, &bat_priv->tt_roam_list, list) { 1967 list_for_each_entry_safe(node, safe, &bat_priv->tt_roam_list, list) {
1801 if (!has_timed_out(node->first_time, ROAMING_MAX_TIME)) 1968 if (!batadv_has_timed_out(node->first_time,
1969 BATADV_ROAMING_MAX_TIME))
1802 continue; 1970 continue;
1803 1971
1804 list_del(&node->list); 1972 list_del(&node->list);
@@ -1811,24 +1979,27 @@ static void tt_roam_purge(struct bat_priv *bat_priv)
1811 * maximum number of possible roaming phases. In this case the ROAMING_ADV 1979 * maximum number of possible roaming phases. In this case the ROAMING_ADV
1812 * will not be sent. 1980 * will not be sent.
1813 * 1981 *
1814 * returns true if the ROAMING_ADV can be sent, false otherwise */ 1982 * returns true if the ROAMING_ADV can be sent, false otherwise
1815static bool tt_check_roam_count(struct bat_priv *bat_priv, 1983 */
1816 uint8_t *client) 1984static bool batadv_tt_check_roam_count(struct batadv_priv *bat_priv,
1985 uint8_t *client)
1817{ 1986{
1818 struct tt_roam_node *tt_roam_node; 1987 struct batadv_tt_roam_node *tt_roam_node;
1819 bool ret = false; 1988 bool ret = false;
1820 1989
1821 spin_lock_bh(&bat_priv->tt_roam_list_lock); 1990 spin_lock_bh(&bat_priv->tt_roam_list_lock);
1822 /* The new tt_req will be issued only if I'm not waiting for a 1991 /* The new tt_req will be issued only if I'm not waiting for a
1823 * reply from the same orig_node yet */ 1992 * reply from the same orig_node yet
1993 */
1824 list_for_each_entry(tt_roam_node, &bat_priv->tt_roam_list, list) { 1994 list_for_each_entry(tt_roam_node, &bat_priv->tt_roam_list, list) {
1825 if (!compare_eth(tt_roam_node->addr, client)) 1995 if (!batadv_compare_eth(tt_roam_node->addr, client))
1826 continue; 1996 continue;
1827 1997
1828 if (has_timed_out(tt_roam_node->first_time, ROAMING_MAX_TIME)) 1998 if (batadv_has_timed_out(tt_roam_node->first_time,
1999 BATADV_ROAMING_MAX_TIME))
1829 continue; 2000 continue;
1830 2001
1831 if (!atomic_dec_not_zero(&tt_roam_node->counter)) 2002 if (!batadv_atomic_dec_not_zero(&tt_roam_node->counter))
1832 /* Sorry, you roamed too many times! */ 2003 /* Sorry, you roamed too many times! */
1833 goto unlock; 2004 goto unlock;
1834 ret = true; 2005 ret = true;
@@ -1841,7 +2012,8 @@ static bool tt_check_roam_count(struct bat_priv *bat_priv,
1841 goto unlock; 2012 goto unlock;
1842 2013
1843 tt_roam_node->first_time = jiffies; 2014 tt_roam_node->first_time = jiffies;
1844 atomic_set(&tt_roam_node->counter, ROAMING_MAX_COUNT - 1); 2015 atomic_set(&tt_roam_node->counter,
2016 BATADV_ROAMING_MAX_COUNT - 1);
1845 memcpy(tt_roam_node->addr, client, ETH_ALEN); 2017 memcpy(tt_roam_node->addr, client, ETH_ALEN);
1846 2018
1847 list_add(&tt_roam_node->list, &bat_priv->tt_roam_list); 2019 list_add(&tt_roam_node->list, &bat_priv->tt_roam_list);
@@ -1853,97 +2025,103 @@ unlock:
1853 return ret; 2025 return ret;
1854} 2026}
1855 2027
1856static void send_roam_adv(struct bat_priv *bat_priv, uint8_t *client, 2028static void batadv_send_roam_adv(struct batadv_priv *bat_priv, uint8_t *client,
1857 struct orig_node *orig_node) 2029 struct batadv_orig_node *orig_node)
1858{ 2030{
1859 struct neigh_node *neigh_node = NULL; 2031 struct batadv_neigh_node *neigh_node = NULL;
1860 struct sk_buff *skb = NULL; 2032 struct sk_buff *skb = NULL;
1861 struct roam_adv_packet *roam_adv_packet; 2033 struct batadv_roam_adv_packet *roam_adv_packet;
1862 int ret = 1; 2034 int ret = 1;
1863 struct hard_iface *primary_if; 2035 struct batadv_hard_iface *primary_if;
2036 size_t len = sizeof(*roam_adv_packet);
1864 2037
1865 /* before going on we have to check whether the client has 2038 /* before going on we have to check whether the client has
1866 * already roamed to us too many times */ 2039 * already roamed to us too many times
1867 if (!tt_check_roam_count(bat_priv, client)) 2040 */
2041 if (!batadv_tt_check_roam_count(bat_priv, client))
1868 goto out; 2042 goto out;
1869 2043
1870 skb = dev_alloc_skb(sizeof(struct roam_adv_packet) + ETH_HLEN); 2044 skb = dev_alloc_skb(sizeof(*roam_adv_packet) + ETH_HLEN);
1871 if (!skb) 2045 if (!skb)
1872 goto out; 2046 goto out;
1873 2047
1874 skb_reserve(skb, ETH_HLEN); 2048 skb_reserve(skb, ETH_HLEN);
1875 2049
1876 roam_adv_packet = (struct roam_adv_packet *)skb_put(skb, 2050 roam_adv_packet = (struct batadv_roam_adv_packet *)skb_put(skb, len);
1877 sizeof(struct roam_adv_packet));
1878 2051
1879 roam_adv_packet->header.packet_type = BAT_ROAM_ADV; 2052 roam_adv_packet->header.packet_type = BATADV_ROAM_ADV;
1880 roam_adv_packet->header.version = COMPAT_VERSION; 2053 roam_adv_packet->header.version = BATADV_COMPAT_VERSION;
1881 roam_adv_packet->header.ttl = TTL; 2054 roam_adv_packet->header.ttl = BATADV_TTL;
1882 primary_if = primary_if_get_selected(bat_priv); 2055 roam_adv_packet->reserved = 0;
2056 primary_if = batadv_primary_if_get_selected(bat_priv);
1883 if (!primary_if) 2057 if (!primary_if)
1884 goto out; 2058 goto out;
1885 memcpy(roam_adv_packet->src, primary_if->net_dev->dev_addr, ETH_ALEN); 2059 memcpy(roam_adv_packet->src, primary_if->net_dev->dev_addr, ETH_ALEN);
1886 hardif_free_ref(primary_if); 2060 batadv_hardif_free_ref(primary_if);
1887 memcpy(roam_adv_packet->dst, orig_node->orig, ETH_ALEN); 2061 memcpy(roam_adv_packet->dst, orig_node->orig, ETH_ALEN);
1888 memcpy(roam_adv_packet->client, client, ETH_ALEN); 2062 memcpy(roam_adv_packet->client, client, ETH_ALEN);
1889 2063
1890 neigh_node = orig_node_get_router(orig_node); 2064 neigh_node = batadv_orig_node_get_router(orig_node);
1891 if (!neigh_node) 2065 if (!neigh_node)
1892 goto out; 2066 goto out;
1893 2067
1894 bat_dbg(DBG_TT, bat_priv, 2068 batadv_dbg(BATADV_DBG_TT, bat_priv,
1895 "Sending ROAMING_ADV to %pM (client %pM) via %pM\n", 2069 "Sending ROAMING_ADV to %pM (client %pM) via %pM\n",
1896 orig_node->orig, client, neigh_node->addr); 2070 orig_node->orig, client, neigh_node->addr);
2071
2072 batadv_inc_counter(bat_priv, BATADV_CNT_TT_ROAM_ADV_TX);
1897 2073
1898 send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr); 2074 batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
1899 ret = 0; 2075 ret = 0;
1900 2076
1901out: 2077out:
1902 if (neigh_node) 2078 if (neigh_node)
1903 neigh_node_free_ref(neigh_node); 2079 batadv_neigh_node_free_ref(neigh_node);
1904 if (ret) 2080 if (ret)
1905 kfree_skb(skb); 2081 kfree_skb(skb);
1906 return; 2082 return;
1907} 2083}
1908 2084
1909static void tt_purge(struct work_struct *work) 2085static void batadv_tt_purge(struct work_struct *work)
1910{ 2086{
1911 struct delayed_work *delayed_work = 2087 struct delayed_work *delayed_work;
1912 container_of(work, struct delayed_work, work); 2088 struct batadv_priv *bat_priv;
1913 struct bat_priv *bat_priv =
1914 container_of(delayed_work, struct bat_priv, tt_work);
1915 2089
1916 tt_local_purge(bat_priv); 2090 delayed_work = container_of(work, struct delayed_work, work);
1917 tt_global_roam_purge(bat_priv); 2091 bat_priv = container_of(delayed_work, struct batadv_priv, tt_work);
1918 tt_req_purge(bat_priv);
1919 tt_roam_purge(bat_priv);
1920 2092
1921 tt_start_timer(bat_priv); 2093 batadv_tt_local_purge(bat_priv);
2094 batadv_tt_global_roam_purge(bat_priv);
2095 batadv_tt_req_purge(bat_priv);
2096 batadv_tt_roam_purge(bat_priv);
2097
2098 batadv_tt_start_timer(bat_priv);
1922} 2099}
1923 2100
1924void tt_free(struct bat_priv *bat_priv) 2101void batadv_tt_free(struct batadv_priv *bat_priv)
1925{ 2102{
1926 cancel_delayed_work_sync(&bat_priv->tt_work); 2103 cancel_delayed_work_sync(&bat_priv->tt_work);
1927 2104
1928 tt_local_table_free(bat_priv); 2105 batadv_tt_local_table_free(bat_priv);
1929 tt_global_table_free(bat_priv); 2106 batadv_tt_global_table_free(bat_priv);
1930 tt_req_list_free(bat_priv); 2107 batadv_tt_req_list_free(bat_priv);
1931 tt_changes_list_free(bat_priv); 2108 batadv_tt_changes_list_free(bat_priv);
1932 tt_roam_list_free(bat_priv); 2109 batadv_tt_roam_list_free(bat_priv);
1933 2110
1934 kfree(bat_priv->tt_buff); 2111 kfree(bat_priv->tt_buff);
1935} 2112}
1936 2113
1937/* This function will enable or disable the specified flags for all the entries 2114/* This function will enable or disable the specified flags for all the entries
1938 * in the given hash table and returns the number of modified entries */ 2115 * in the given hash table and returns the number of modified entries
1939static uint16_t tt_set_flags(struct hashtable_t *hash, uint16_t flags, 2116 */
1940 bool enable) 2117static uint16_t batadv_tt_set_flags(struct batadv_hashtable *hash,
2118 uint16_t flags, bool enable)
1941{ 2119{
1942 uint32_t i; 2120 uint32_t i;
1943 uint16_t changed_num = 0; 2121 uint16_t changed_num = 0;
1944 struct hlist_head *head; 2122 struct hlist_head *head;
1945 struct hlist_node *node; 2123 struct hlist_node *node;
1946 struct tt_common_entry *tt_common_entry; 2124 struct batadv_tt_common_entry *tt_common_entry;
1947 2125
1948 if (!hash) 2126 if (!hash)
1949 goto out; 2127 goto out;
@@ -1971,12 +2149,12 @@ out:
1971 return changed_num; 2149 return changed_num;
1972} 2150}
1973 2151
1974/* Purge out all the tt local entries marked with TT_CLIENT_PENDING */ 2152/* Purge out all the tt local entries marked with BATADV_TT_CLIENT_PENDING */
1975static void tt_local_purge_pending_clients(struct bat_priv *bat_priv) 2153static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv)
1976{ 2154{
1977 struct hashtable_t *hash = bat_priv->tt_local_hash; 2155 struct batadv_hashtable *hash = bat_priv->tt_local_hash;
1978 struct tt_common_entry *tt_common_entry; 2156 struct batadv_tt_common_entry *tt_common;
1979 struct tt_local_entry *tt_local_entry; 2157 struct batadv_tt_local_entry *tt_local;
1980 struct hlist_node *node, *node_tmp; 2158 struct hlist_node *node, *node_tmp;
1981 struct hlist_head *head; 2159 struct hlist_head *head;
1982 spinlock_t *list_lock; /* protects write access to the hash lists */ 2160 spinlock_t *list_lock; /* protects write access to the hash lists */
@@ -1990,103 +2168,149 @@ static void tt_local_purge_pending_clients(struct bat_priv *bat_priv)
1990 list_lock = &hash->list_locks[i]; 2168 list_lock = &hash->list_locks[i];
1991 2169
1992 spin_lock_bh(list_lock); 2170 spin_lock_bh(list_lock);
1993 hlist_for_each_entry_safe(tt_common_entry, node, node_tmp, 2171 hlist_for_each_entry_safe(tt_common, node, node_tmp, head,
1994 head, hash_entry) { 2172 hash_entry) {
1995 if (!(tt_common_entry->flags & TT_CLIENT_PENDING)) 2173 if (!(tt_common->flags & BATADV_TT_CLIENT_PENDING))
1996 continue; 2174 continue;
1997 2175
1998 bat_dbg(DBG_TT, bat_priv, 2176 batadv_dbg(BATADV_DBG_TT, bat_priv,
1999 "Deleting local tt entry (%pM): pending\n", 2177 "Deleting local tt entry (%pM): pending\n",
2000 tt_common_entry->addr); 2178 tt_common->addr);
2001 2179
2002 atomic_dec(&bat_priv->num_local_tt); 2180 atomic_dec(&bat_priv->num_local_tt);
2003 hlist_del_rcu(node); 2181 hlist_del_rcu(node);
2004 tt_local_entry = container_of(tt_common_entry, 2182 tt_local = container_of(tt_common,
2005 struct tt_local_entry, 2183 struct batadv_tt_local_entry,
2006 common); 2184 common);
2007 tt_local_entry_free_ref(tt_local_entry); 2185 batadv_tt_local_entry_free_ref(tt_local);
2008 } 2186 }
2009 spin_unlock_bh(list_lock); 2187 spin_unlock_bh(list_lock);
2010 } 2188 }
2011 2189
2012} 2190}
2013 2191
2014void tt_commit_changes(struct bat_priv *bat_priv) 2192static int batadv_tt_commit_changes(struct batadv_priv *bat_priv,
2193 unsigned char **packet_buff,
2194 int *packet_buff_len, int packet_min_len)
2015{ 2195{
2016 uint16_t changed_num = tt_set_flags(bat_priv->tt_local_hash, 2196 uint16_t changed_num = 0;
2017 TT_CLIENT_NEW, false); 2197
2018 /* all the reset entries have now to be effectively counted as local 2198 if (atomic_read(&bat_priv->tt_local_changes) < 1)
2019 * entries */ 2199 return -ENOENT;
2200
2201 changed_num = batadv_tt_set_flags(bat_priv->tt_local_hash,
2202 BATADV_TT_CLIENT_NEW, false);
2203
2204 /* all reset entries have to be counted as local entries */
2020 atomic_add(changed_num, &bat_priv->num_local_tt); 2205 atomic_add(changed_num, &bat_priv->num_local_tt);
2021 tt_local_purge_pending_clients(bat_priv); 2206 batadv_tt_local_purge_pending_clients(bat_priv);
2207 bat_priv->tt_crc = batadv_tt_local_crc(bat_priv);
2022 2208
2023 /* Increment the TTVN only once per OGM interval */ 2209 /* Increment the TTVN only once per OGM interval */
2024 atomic_inc(&bat_priv->ttvn); 2210 atomic_inc(&bat_priv->ttvn);
2025 bat_dbg(DBG_TT, bat_priv, "Local changes committed, updating to ttvn %u\n", 2211 batadv_dbg(BATADV_DBG_TT, bat_priv,
2026 (uint8_t)atomic_read(&bat_priv->ttvn)); 2212 "Local changes committed, updating to ttvn %u\n",
2213 (uint8_t)atomic_read(&bat_priv->ttvn));
2027 bat_priv->tt_poss_change = false; 2214 bat_priv->tt_poss_change = false;
2215
2216 /* reset the sending counter */
2217 atomic_set(&bat_priv->tt_ogm_append_cnt, BATADV_TT_OGM_APPEND_MAX);
2218
2219 return batadv_tt_changes_fill_buff(bat_priv, packet_buff,
2220 packet_buff_len, packet_min_len);
2221}
2222
2223/* when calling this function (hard_iface == primary_if) has to be true */
2224int batadv_tt_append_diff(struct batadv_priv *bat_priv,
2225 unsigned char **packet_buff, int *packet_buff_len,
2226 int packet_min_len)
2227{
2228 int tt_num_changes;
2229
2230 /* if at least one change happened */
2231 tt_num_changes = batadv_tt_commit_changes(bat_priv, packet_buff,
2232 packet_buff_len,
2233 packet_min_len);
2234
2235 /* if the changes have been sent often enough */
2236 if ((tt_num_changes < 0) &&
2237 (!batadv_atomic_dec_not_zero(&bat_priv->tt_ogm_append_cnt))) {
2238 batadv_tt_realloc_packet_buff(packet_buff, packet_buff_len,
2239 packet_min_len, packet_min_len);
2240 tt_num_changes = 0;
2241 }
2242
2243 return tt_num_changes;
2028} 2244}
2029 2245
2030bool is_ap_isolated(struct bat_priv *bat_priv, uint8_t *src, uint8_t *dst) 2246bool batadv_is_ap_isolated(struct batadv_priv *bat_priv, uint8_t *src,
2247 uint8_t *dst)
2031{ 2248{
2032 struct tt_local_entry *tt_local_entry = NULL; 2249 struct batadv_tt_local_entry *tt_local_entry = NULL;
2033 struct tt_global_entry *tt_global_entry = NULL; 2250 struct batadv_tt_global_entry *tt_global_entry = NULL;
2034 bool ret = false; 2251 bool ret = false;
2035 2252
2036 if (!atomic_read(&bat_priv->ap_isolation)) 2253 if (!atomic_read(&bat_priv->ap_isolation))
2037 goto out; 2254 goto out;
2038 2255
2039 tt_local_entry = tt_local_hash_find(bat_priv, dst); 2256 tt_local_entry = batadv_tt_local_hash_find(bat_priv, dst);
2040 if (!tt_local_entry) 2257 if (!tt_local_entry)
2041 goto out; 2258 goto out;
2042 2259
2043 tt_global_entry = tt_global_hash_find(bat_priv, src); 2260 tt_global_entry = batadv_tt_global_hash_find(bat_priv, src);
2044 if (!tt_global_entry) 2261 if (!tt_global_entry)
2045 goto out; 2262 goto out;
2046 2263
2047 if (!_is_ap_isolated(tt_local_entry, tt_global_entry)) 2264 if (!_batadv_is_ap_isolated(tt_local_entry, tt_global_entry))
2048 goto out; 2265 goto out;
2049 2266
2050 ret = true; 2267 ret = true;
2051 2268
2052out: 2269out:
2053 if (tt_global_entry) 2270 if (tt_global_entry)
2054 tt_global_entry_free_ref(tt_global_entry); 2271 batadv_tt_global_entry_free_ref(tt_global_entry);
2055 if (tt_local_entry) 2272 if (tt_local_entry)
2056 tt_local_entry_free_ref(tt_local_entry); 2273 batadv_tt_local_entry_free_ref(tt_local_entry);
2057 return ret; 2274 return ret;
2058} 2275}
2059 2276
2060void tt_update_orig(struct bat_priv *bat_priv, struct orig_node *orig_node, 2277void batadv_tt_update_orig(struct batadv_priv *bat_priv,
2061 const unsigned char *tt_buff, uint8_t tt_num_changes, 2278 struct batadv_orig_node *orig_node,
2062 uint8_t ttvn, uint16_t tt_crc) 2279 const unsigned char *tt_buff, uint8_t tt_num_changes,
2280 uint8_t ttvn, uint16_t tt_crc)
2063{ 2281{
2064 uint8_t orig_ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn); 2282 uint8_t orig_ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);
2065 bool full_table = true; 2283 bool full_table = true;
2284 struct batadv_tt_change *tt_change;
2066 2285
2067 /* don't care about a backbone gateways updates. */ 2286 /* don't care about a backbone gateways updates. */
2068 if (bla_is_backbone_gw_orig(bat_priv, orig_node->orig)) 2287 if (batadv_bla_is_backbone_gw_orig(bat_priv, orig_node->orig))
2069 return; 2288 return;
2070 2289
2071 /* orig table not initialised AND first diff is in the OGM OR the ttvn 2290 /* orig table not initialised AND first diff is in the OGM OR the ttvn
2072 * increased by one -> we can apply the attached changes */ 2291 * increased by one -> we can apply the attached changes
2292 */
2073 if ((!orig_node->tt_initialised && ttvn == 1) || 2293 if ((!orig_node->tt_initialised && ttvn == 1) ||
2074 ttvn - orig_ttvn == 1) { 2294 ttvn - orig_ttvn == 1) {
2075 /* the OGM could not contain the changes due to their size or 2295 /* the OGM could not contain the changes due to their size or
2076 * because they have already been sent TT_OGM_APPEND_MAX times. 2296 * because they have already been sent BATADV_TT_OGM_APPEND_MAX
2077 * In this case send a tt request */ 2297 * times.
2298 * In this case send a tt request
2299 */
2078 if (!tt_num_changes) { 2300 if (!tt_num_changes) {
2079 full_table = false; 2301 full_table = false;
2080 goto request_table; 2302 goto request_table;
2081 } 2303 }
2082 2304
2083 tt_update_changes(bat_priv, orig_node, tt_num_changes, ttvn, 2305 tt_change = (struct batadv_tt_change *)tt_buff;
2084 (struct tt_change *)tt_buff); 2306 batadv_tt_update_changes(bat_priv, orig_node, tt_num_changes,
2307 ttvn, tt_change);
2085 2308
2086 /* Even if we received the precomputed crc with the OGM, we 2309 /* Even if we received the precomputed crc with the OGM, we
2087 * prefer to recompute it to spot any possible inconsistency 2310 * prefer to recompute it to spot any possible inconsistency
2088 * in the global table */ 2311 * in the global table
2089 orig_node->tt_crc = tt_global_crc(bat_priv, orig_node); 2312 */
2313 orig_node->tt_crc = batadv_tt_global_crc(bat_priv, orig_node);
2090 2314
2091 /* The ttvn alone is not enough to guarantee consistency 2315 /* The ttvn alone is not enough to guarantee consistency
2092 * because a single value could represent different states 2316 * because a single value could represent different states
@@ -2095,26 +2319,28 @@ void tt_update_orig(struct bat_priv *bat_priv, struct orig_node *orig_node,
2095 * consistent or not. E.g. a node could disconnect while its 2319 * consistent or not. E.g. a node could disconnect while its
2096 * ttvn is X and reconnect on ttvn = X + TTVN_MAX: in this case 2320 * ttvn is X and reconnect on ttvn = X + TTVN_MAX: in this case
2097 * checking the CRC value is mandatory to detect the 2321 * checking the CRC value is mandatory to detect the
2098 * inconsistency */ 2322 * inconsistency
2323 */
2099 if (orig_node->tt_crc != tt_crc) 2324 if (orig_node->tt_crc != tt_crc)
2100 goto request_table; 2325 goto request_table;
2101 2326
2102 /* Roaming phase is over: tables are in sync again. I can 2327 /* Roaming phase is over: tables are in sync again. I can
2103 * unset the flag */ 2328 * unset the flag
2329 */
2104 orig_node->tt_poss_change = false; 2330 orig_node->tt_poss_change = false;
2105 } else { 2331 } else {
2106 /* if we missed more than one change or our tables are not 2332 /* if we missed more than one change or our tables are not
2107 * in sync anymore -> request fresh tt data */ 2333 * in sync anymore -> request fresh tt data
2108 2334 */
2109 if (!orig_node->tt_initialised || ttvn != orig_ttvn || 2335 if (!orig_node->tt_initialised || ttvn != orig_ttvn ||
2110 orig_node->tt_crc != tt_crc) { 2336 orig_node->tt_crc != tt_crc) {
2111request_table: 2337request_table:
2112 bat_dbg(DBG_TT, bat_priv, 2338 batadv_dbg(BATADV_DBG_TT, bat_priv,
2113 "TT inconsistency for %pM. Need to retrieve the correct information (ttvn: %u last_ttvn: %u crc: %u last_crc: %u num_changes: %u)\n", 2339 "TT inconsistency for %pM. Need to retrieve the correct information (ttvn: %u last_ttvn: %u crc: %u last_crc: %u num_changes: %u)\n",
2114 orig_node->orig, ttvn, orig_ttvn, tt_crc, 2340 orig_node->orig, ttvn, orig_ttvn, tt_crc,
2115 orig_node->tt_crc, tt_num_changes); 2341 orig_node->tt_crc, tt_num_changes);
2116 send_tt_request(bat_priv, orig_node, ttvn, tt_crc, 2342 batadv_send_tt_request(bat_priv, orig_node, ttvn,
2117 full_table); 2343 tt_crc, full_table);
2118 return; 2344 return;
2119 } 2345 }
2120 } 2346 }
@@ -2124,17 +2350,18 @@ request_table:
2124 * originator to another one. This entry is kept is still kept for consistency 2350 * originator to another one. This entry is kept is still kept for consistency
2125 * purposes 2351 * purposes
2126 */ 2352 */
2127bool tt_global_client_is_roaming(struct bat_priv *bat_priv, uint8_t *addr) 2353bool batadv_tt_global_client_is_roaming(struct batadv_priv *bat_priv,
2354 uint8_t *addr)
2128{ 2355{
2129 struct tt_global_entry *tt_global_entry; 2356 struct batadv_tt_global_entry *tt_global_entry;
2130 bool ret = false; 2357 bool ret = false;
2131 2358
2132 tt_global_entry = tt_global_hash_find(bat_priv, addr); 2359 tt_global_entry = batadv_tt_global_hash_find(bat_priv, addr);
2133 if (!tt_global_entry) 2360 if (!tt_global_entry)
2134 goto out; 2361 goto out;
2135 2362
2136 ret = tt_global_entry->common.flags & TT_CLIENT_ROAM; 2363 ret = tt_global_entry->common.flags & BATADV_TT_CLIENT_ROAM;
2137 tt_global_entry_free_ref(tt_global_entry); 2364 batadv_tt_global_entry_free_ref(tt_global_entry);
2138out: 2365out:
2139 return ret; 2366 return ret;
2140} 2367}
diff --git a/net/batman-adv/translation-table.h b/net/batman-adv/translation-table.h
index c43374dc364..ffa87355096 100644
--- a/net/batman-adv/translation-table.h
+++ b/net/batman-adv/translation-table.h
@@ -1,5 +1,4 @@
1/* 1/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
2 * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
3 * 2 *
4 * Marek Lindner, Simon Wunderlich, Antonio Quartulli 3 * Marek Lindner, Simon Wunderlich, Antonio Quartulli
5 * 4 *
@@ -16,44 +15,50 @@
16 * along with this program; if not, write to the Free Software 15 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA 17 * 02110-1301, USA
19 *
20 */ 18 */
21 19
22#ifndef _NET_BATMAN_ADV_TRANSLATION_TABLE_H_ 20#ifndef _NET_BATMAN_ADV_TRANSLATION_TABLE_H_
23#define _NET_BATMAN_ADV_TRANSLATION_TABLE_H_ 21#define _NET_BATMAN_ADV_TRANSLATION_TABLE_H_
24 22
25int tt_len(int changes_num); 23int batadv_tt_len(int changes_num);
26int tt_changes_fill_buffer(struct bat_priv *bat_priv, 24int batadv_tt_init(struct batadv_priv *bat_priv);
27 unsigned char *buff, int buff_len); 25void batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
28int tt_init(struct bat_priv *bat_priv); 26 int ifindex);
29void tt_local_add(struct net_device *soft_iface, const uint8_t *addr, 27void batadv_tt_local_remove(struct batadv_priv *bat_priv,
30 int ifindex); 28 const uint8_t *addr, const char *message,
31void tt_local_remove(struct bat_priv *bat_priv, 29 bool roaming);
32 const uint8_t *addr, const char *message, bool roaming); 30int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset);
33int tt_local_seq_print_text(struct seq_file *seq, void *offset); 31void batadv_tt_global_add_orig(struct batadv_priv *bat_priv,
34void tt_global_add_orig(struct bat_priv *bat_priv, struct orig_node *orig_node, 32 struct batadv_orig_node *orig_node,
35 const unsigned char *tt_buff, int tt_buff_len); 33 const unsigned char *tt_buff, int tt_buff_len);
36int tt_global_add(struct bat_priv *bat_priv, struct orig_node *orig_node, 34int batadv_tt_global_add(struct batadv_priv *bat_priv,
37 const unsigned char *addr, uint8_t ttvn, bool roaming, 35 struct batadv_orig_node *orig_node,
38 bool wifi); 36 const unsigned char *addr, uint8_t flags,
39int tt_global_seq_print_text(struct seq_file *seq, void *offset); 37 uint8_t ttvn);
40void tt_global_del_orig(struct bat_priv *bat_priv, 38int batadv_tt_global_seq_print_text(struct seq_file *seq, void *offset);
41 struct orig_node *orig_node, const char *message); 39void batadv_tt_global_del_orig(struct batadv_priv *bat_priv,
42struct orig_node *transtable_search(struct bat_priv *bat_priv, 40 struct batadv_orig_node *orig_node,
43 const uint8_t *src, const uint8_t *addr); 41 const char *message);
44uint16_t tt_local_crc(struct bat_priv *bat_priv); 42struct batadv_orig_node *batadv_transtable_search(struct batadv_priv *bat_priv,
45void tt_free(struct bat_priv *bat_priv); 43 const uint8_t *src,
46bool send_tt_response(struct bat_priv *bat_priv, 44 const uint8_t *addr);
47 struct tt_query_packet *tt_request); 45void batadv_tt_free(struct batadv_priv *bat_priv);
48bool is_my_client(struct bat_priv *bat_priv, const uint8_t *addr); 46bool batadv_send_tt_response(struct batadv_priv *bat_priv,
49void handle_tt_response(struct bat_priv *bat_priv, 47 struct batadv_tt_query_packet *tt_request);
50 struct tt_query_packet *tt_response); 48bool batadv_is_my_client(struct batadv_priv *bat_priv, const uint8_t *addr);
51void tt_commit_changes(struct bat_priv *bat_priv); 49void batadv_handle_tt_response(struct batadv_priv *bat_priv,
52bool is_ap_isolated(struct bat_priv *bat_priv, uint8_t *src, uint8_t *dst); 50 struct batadv_tt_query_packet *tt_response);
53void tt_update_orig(struct bat_priv *bat_priv, struct orig_node *orig_node, 51bool batadv_is_ap_isolated(struct batadv_priv *bat_priv, uint8_t *src,
54 const unsigned char *tt_buff, uint8_t tt_num_changes, 52 uint8_t *dst);
55 uint8_t ttvn, uint16_t tt_crc); 53void batadv_tt_update_orig(struct batadv_priv *bat_priv,
56bool tt_global_client_is_roaming(struct bat_priv *bat_priv, uint8_t *addr); 54 struct batadv_orig_node *orig_node,
55 const unsigned char *tt_buff, uint8_t tt_num_changes,
56 uint8_t ttvn, uint16_t tt_crc);
57int batadv_tt_append_diff(struct batadv_priv *bat_priv,
58 unsigned char **packet_buff, int *packet_buff_len,
59 int packet_min_len);
60bool batadv_tt_global_client_is_roaming(struct batadv_priv *bat_priv,
61 uint8_t *addr);
57 62
58 63
59#endif /* _NET_BATMAN_ADV_TRANSLATION_TABLE_H_ */ 64#endif /* _NET_BATMAN_ADV_TRANSLATION_TABLE_H_ */
diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
index 61308e8016f..12635fd2c3d 100644
--- a/net/batman-adv/types.h
+++ b/net/batman-adv/types.h
@@ -1,5 +1,4 @@
1/* 1/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
2 * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
3 * 2 *
4 * Marek Lindner, Simon Wunderlich 3 * Marek Lindner, Simon Wunderlich
5 * 4 *
@@ -16,24 +15,20 @@
16 * along with this program; if not, write to the Free Software 15 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA 17 * 02110-1301, USA
19 *
20 */ 18 */
21 19
22
23
24#ifndef _NET_BATMAN_ADV_TYPES_H_ 20#ifndef _NET_BATMAN_ADV_TYPES_H_
25#define _NET_BATMAN_ADV_TYPES_H_ 21#define _NET_BATMAN_ADV_TYPES_H_
26 22
27#include "packet.h" 23#include "packet.h"
28#include "bitarray.h" 24#include "bitarray.h"
25#include <linux/kernel.h>
29 26
30#define BAT_HEADER_LEN (ETH_HLEN + \ 27#define BATADV_HEADER_LEN \
31 ((sizeof(struct unicast_packet) > sizeof(struct bcast_packet) ? \ 28 (ETH_HLEN + max(sizeof(struct batadv_unicast_packet), \
32 sizeof(struct unicast_packet) : \ 29 sizeof(struct batadv_bcast_packet)))
33 sizeof(struct bcast_packet))))
34
35 30
36struct hard_iface { 31struct batadv_hard_iface {
37 struct list_head list; 32 struct list_head list;
38 int16_t if_num; 33 int16_t if_num;
39 char if_status; 34 char if_status;
@@ -50,7 +45,7 @@ struct hard_iface {
50}; 45};
51 46
52/** 47/**
53 * orig_node - structure for orig_list maintaining nodes of mesh 48 * struct batadv_orig_node - structure for orig_list maintaining nodes of mesh
54 * @primary_addr: hosts primary interface address 49 * @primary_addr: hosts primary interface address
55 * @last_seen: when last packet from this node was received 50 * @last_seen: when last packet from this node was received
56 * @bcast_seqno_reset: time when the broadcast seqno window was reset 51 * @bcast_seqno_reset: time when the broadcast seqno window was reset
@@ -64,10 +59,10 @@ struct hard_iface {
64 * @candidates: how many candidates are available 59 * @candidates: how many candidates are available
65 * @selected: next bonding candidate 60 * @selected: next bonding candidate
66 */ 61 */
67struct orig_node { 62struct batadv_orig_node {
68 uint8_t orig[ETH_ALEN]; 63 uint8_t orig[ETH_ALEN];
69 uint8_t primary_addr[ETH_ALEN]; 64 uint8_t primary_addr[ETH_ALEN];
70 struct neigh_node __rcu *router; /* rcu protected pointer */ 65 struct batadv_neigh_node __rcu *router; /* rcu protected pointer */
71 unsigned long *bcast_own; 66 unsigned long *bcast_own;
72 uint8_t *bcast_own_sum; 67 uint8_t *bcast_own_sum;
73 unsigned long last_seen; 68 unsigned long last_seen;
@@ -86,11 +81,12 @@ struct orig_node {
86 * If true, then I sent a Roaming_adv to this orig_node and I have to 81 * If true, then I sent a Roaming_adv to this orig_node and I have to
87 * inspect every packet directed to it to check whether it is still 82 * inspect every packet directed to it to check whether it is still
88 * the true destination or not. This flag will be reset to false as 83 * the true destination or not. This flag will be reset to false as
89 * soon as I receive a new TTVN from this orig_node */ 84 * soon as I receive a new TTVN from this orig_node
85 */
90 bool tt_poss_change; 86 bool tt_poss_change;
91 uint32_t last_real_seqno; 87 uint32_t last_real_seqno;
92 uint8_t last_ttl; 88 uint8_t last_ttl;
93 DECLARE_BITMAP(bcast_bits, TQ_LOCAL_WINDOW_SIZE); 89 DECLARE_BITMAP(bcast_bits, BATADV_TQ_LOCAL_WINDOW_SIZE);
94 uint32_t last_bcast_seqno; 90 uint32_t last_bcast_seqno;
95 struct hlist_head neigh_list; 91 struct hlist_head neigh_list;
96 struct list_head frag_list; 92 struct list_head frag_list;
@@ -98,10 +94,11 @@ struct orig_node {
98 atomic_t refcount; 94 atomic_t refcount;
99 struct rcu_head rcu; 95 struct rcu_head rcu;
100 struct hlist_node hash_entry; 96 struct hlist_node hash_entry;
101 struct bat_priv *bat_priv; 97 struct batadv_priv *bat_priv;
102 unsigned long last_frag_packet; 98 unsigned long last_frag_packet;
103 /* ogm_cnt_lock protects: bcast_own, bcast_own_sum, 99 /* ogm_cnt_lock protects: bcast_own, bcast_own_sum,
104 * neigh_node->real_bits, neigh_node->real_packet_count */ 100 * neigh_node->real_bits, neigh_node->real_packet_count
101 */
105 spinlock_t ogm_cnt_lock; 102 spinlock_t ogm_cnt_lock;
106 /* bcast_seqno_lock protects bcast_bits, last_bcast_seqno */ 103 /* bcast_seqno_lock protects bcast_bits, last_bcast_seqno */
107 spinlock_t bcast_seqno_lock; 104 spinlock_t bcast_seqno_lock;
@@ -110,47 +107,63 @@ struct orig_node {
110 struct list_head bond_list; 107 struct list_head bond_list;
111}; 108};
112 109
113struct gw_node { 110struct batadv_gw_node {
114 struct hlist_node list; 111 struct hlist_node list;
115 struct orig_node *orig_node; 112 struct batadv_orig_node *orig_node;
116 unsigned long deleted; 113 unsigned long deleted;
117 atomic_t refcount; 114 atomic_t refcount;
118 struct rcu_head rcu; 115 struct rcu_head rcu;
119}; 116};
120 117
121/** 118/* batadv_neigh_node
122 * neigh_node
123 * @last_seen: when last packet via this neighbor was received 119 * @last_seen: when last packet via this neighbor was received
124 */ 120 */
125struct neigh_node { 121struct batadv_neigh_node {
126 struct hlist_node list; 122 struct hlist_node list;
127 uint8_t addr[ETH_ALEN]; 123 uint8_t addr[ETH_ALEN];
128 uint8_t real_packet_count; 124 uint8_t real_packet_count;
129 uint8_t tq_recv[TQ_GLOBAL_WINDOW_SIZE]; 125 uint8_t tq_recv[BATADV_TQ_GLOBAL_WINDOW_SIZE];
130 uint8_t tq_index; 126 uint8_t tq_index;
131 uint8_t tq_avg; 127 uint8_t tq_avg;
132 uint8_t last_ttl; 128 uint8_t last_ttl;
133 struct list_head bonding_list; 129 struct list_head bonding_list;
134 unsigned long last_seen; 130 unsigned long last_seen;
135 DECLARE_BITMAP(real_bits, TQ_LOCAL_WINDOW_SIZE); 131 DECLARE_BITMAP(real_bits, BATADV_TQ_LOCAL_WINDOW_SIZE);
136 atomic_t refcount; 132 atomic_t refcount;
137 struct rcu_head rcu; 133 struct rcu_head rcu;
138 struct orig_node *orig_node; 134 struct batadv_orig_node *orig_node;
139 struct hard_iface *if_incoming; 135 struct batadv_hard_iface *if_incoming;
140 spinlock_t lq_update_lock; /* protects: tq_recv, tq_index */ 136 spinlock_t lq_update_lock; /* protects: tq_recv, tq_index */
141}; 137};
142 138
143#ifdef CONFIG_BATMAN_ADV_BLA 139#ifdef CONFIG_BATMAN_ADV_BLA
144struct bcast_duplist_entry { 140struct batadv_bcast_duplist_entry {
145 uint8_t orig[ETH_ALEN]; 141 uint8_t orig[ETH_ALEN];
146 uint16_t crc; 142 uint16_t crc;
147 unsigned long entrytime; 143 unsigned long entrytime;
148}; 144};
149#endif 145#endif
150 146
151struct bat_priv { 147enum batadv_counters {
148 BATADV_CNT_FORWARD,
149 BATADV_CNT_FORWARD_BYTES,
150 BATADV_CNT_MGMT_TX,
151 BATADV_CNT_MGMT_TX_BYTES,
152 BATADV_CNT_MGMT_RX,
153 BATADV_CNT_MGMT_RX_BYTES,
154 BATADV_CNT_TT_REQUEST_TX,
155 BATADV_CNT_TT_REQUEST_RX,
156 BATADV_CNT_TT_RESPONSE_TX,
157 BATADV_CNT_TT_RESPONSE_RX,
158 BATADV_CNT_TT_ROAM_ADV_TX,
159 BATADV_CNT_TT_ROAM_ADV_RX,
160 BATADV_CNT_NUM,
161};
162
163struct batadv_priv {
152 atomic_t mesh_state; 164 atomic_t mesh_state;
153 struct net_device_stats stats; 165 struct net_device_stats stats;
166 uint64_t __percpu *bat_counters; /* Per cpu counters */
154 atomic_t aggregated_ogms; /* boolean */ 167 atomic_t aggregated_ogms; /* boolean */
155 atomic_t bonding; /* boolean */ 168 atomic_t bonding; /* boolean */
156 atomic_t fragmentation; /* boolean */ 169 atomic_t fragmentation; /* boolean */
@@ -174,10 +187,11 @@ struct bat_priv {
174 * If true, then I received a Roaming_adv and I have to inspect every 187 * If true, then I received a Roaming_adv and I have to inspect every
175 * packet directed to me to check whether I am still the true 188 * packet directed to me to check whether I am still the true
176 * destination or not. This flag will be reset to false as soon as I 189 * destination or not. This flag will be reset to false as soon as I
177 * increase my TTVN */ 190 * increase my TTVN
191 */
178 bool tt_poss_change; 192 bool tt_poss_change;
179 char num_ifaces; 193 char num_ifaces;
180 struct debug_log *debug_log; 194 struct batadv_debug_log *debug_log;
181 struct kobject *mesh_obj; 195 struct kobject *mesh_obj;
182 struct dentry *debug_dir; 196 struct dentry *debug_dir;
183 struct hlist_head forw_bat_list; 197 struct hlist_head forw_bat_list;
@@ -185,20 +199,20 @@ struct bat_priv {
185 struct hlist_head gw_list; 199 struct hlist_head gw_list;
186 struct list_head tt_changes_list; /* tracks changes in a OGM int */ 200 struct list_head tt_changes_list; /* tracks changes in a OGM int */
187 struct list_head vis_send_list; 201 struct list_head vis_send_list;
188 struct hashtable_t *orig_hash; 202 struct batadv_hashtable *orig_hash;
189 struct hashtable_t *tt_local_hash; 203 struct batadv_hashtable *tt_local_hash;
190 struct hashtable_t *tt_global_hash; 204 struct batadv_hashtable *tt_global_hash;
191#ifdef CONFIG_BATMAN_ADV_BLA 205#ifdef CONFIG_BATMAN_ADV_BLA
192 struct hashtable_t *claim_hash; 206 struct batadv_hashtable *claim_hash;
193 struct hashtable_t *backbone_hash; 207 struct batadv_hashtable *backbone_hash;
194#endif 208#endif
195 struct list_head tt_req_list; /* list of pending tt_requests */ 209 struct list_head tt_req_list; /* list of pending tt_requests */
196 struct list_head tt_roam_list; 210 struct list_head tt_roam_list;
197 struct hashtable_t *vis_hash; 211 struct batadv_hashtable *vis_hash;
198#ifdef CONFIG_BATMAN_ADV_BLA 212#ifdef CONFIG_BATMAN_ADV_BLA
199 struct bcast_duplist_entry bcast_duplist[DUPLIST_SIZE]; 213 struct batadv_bcast_duplist_entry bcast_duplist[BATADV_DUPLIST_SIZE];
200 int bcast_duplist_curr; 214 int bcast_duplist_curr;
201 struct bla_claim_dst claim_dest; 215 struct batadv_bla_claim_dst claim_dest;
202#endif 216#endif
203 spinlock_t forw_bat_list_lock; /* protects forw_bat_list */ 217 spinlock_t forw_bat_list_lock; /* protects forw_bat_list */
204 spinlock_t forw_bcast_list_lock; /* protects */ 218 spinlock_t forw_bcast_list_lock; /* protects */
@@ -210,7 +224,7 @@ struct bat_priv {
210 spinlock_t vis_list_lock; /* protects vis_info::recv_list */ 224 spinlock_t vis_list_lock; /* protects vis_info::recv_list */
211 atomic_t num_local_tt; 225 atomic_t num_local_tt;
212 /* Checksum of the local table, recomputed before sending a new OGM */ 226 /* Checksum of the local table, recomputed before sending a new OGM */
213 atomic_t tt_crc; 227 uint16_t tt_crc;
214 unsigned char *tt_buff; 228 unsigned char *tt_buff;
215 int16_t tt_buff_len; 229 int16_t tt_buff_len;
216 spinlock_t tt_buff_lock; /* protects tt_buff */ 230 spinlock_t tt_buff_lock; /* protects tt_buff */
@@ -218,29 +232,29 @@ struct bat_priv {
218 struct delayed_work orig_work; 232 struct delayed_work orig_work;
219 struct delayed_work vis_work; 233 struct delayed_work vis_work;
220 struct delayed_work bla_work; 234 struct delayed_work bla_work;
221 struct gw_node __rcu *curr_gw; /* rcu protected pointer */ 235 struct batadv_gw_node __rcu *curr_gw; /* rcu protected pointer */
222 atomic_t gw_reselect; 236 atomic_t gw_reselect;
223 struct hard_iface __rcu *primary_if; /* rcu protected pointer */ 237 struct batadv_hard_iface __rcu *primary_if; /* rcu protected pointer */
224 struct vis_info *my_vis_info; 238 struct batadv_vis_info *my_vis_info;
225 struct bat_algo_ops *bat_algo_ops; 239 struct batadv_algo_ops *bat_algo_ops;
226}; 240};
227 241
228struct socket_client { 242struct batadv_socket_client {
229 struct list_head queue_list; 243 struct list_head queue_list;
230 unsigned int queue_len; 244 unsigned int queue_len;
231 unsigned char index; 245 unsigned char index;
232 spinlock_t lock; /* protects queue_list, queue_len, index */ 246 spinlock_t lock; /* protects queue_list, queue_len, index */
233 wait_queue_head_t queue_wait; 247 wait_queue_head_t queue_wait;
234 struct bat_priv *bat_priv; 248 struct batadv_priv *bat_priv;
235}; 249};
236 250
237struct socket_packet { 251struct batadv_socket_packet {
238 struct list_head list; 252 struct list_head list;
239 size_t icmp_len; 253 size_t icmp_len;
240 struct icmp_packet_rr icmp_packet; 254 struct batadv_icmp_packet_rr icmp_packet;
241}; 255};
242 256
243struct tt_common_entry { 257struct batadv_tt_common_entry {
244 uint8_t addr[ETH_ALEN]; 258 uint8_t addr[ETH_ALEN];
245 struct hlist_node hash_entry; 259 struct hlist_node hash_entry;
246 uint16_t flags; 260 uint16_t flags;
@@ -248,31 +262,31 @@ struct tt_common_entry {
248 struct rcu_head rcu; 262 struct rcu_head rcu;
249}; 263};
250 264
251struct tt_local_entry { 265struct batadv_tt_local_entry {
252 struct tt_common_entry common; 266 struct batadv_tt_common_entry common;
253 unsigned long last_seen; 267 unsigned long last_seen;
254}; 268};
255 269
256struct tt_global_entry { 270struct batadv_tt_global_entry {
257 struct tt_common_entry common; 271 struct batadv_tt_common_entry common;
258 struct hlist_head orig_list; 272 struct hlist_head orig_list;
259 spinlock_t list_lock; /* protects the list */ 273 spinlock_t list_lock; /* protects the list */
260 unsigned long roam_at; /* time at which TT_GLOBAL_ROAM was set */ 274 unsigned long roam_at; /* time at which TT_GLOBAL_ROAM was set */
261}; 275};
262 276
263struct tt_orig_list_entry { 277struct batadv_tt_orig_list_entry {
264 struct orig_node *orig_node; 278 struct batadv_orig_node *orig_node;
265 uint8_t ttvn; 279 uint8_t ttvn;
266 struct rcu_head rcu; 280 struct rcu_head rcu;
267 struct hlist_node list; 281 struct hlist_node list;
268}; 282};
269 283
270#ifdef CONFIG_BATMAN_ADV_BLA 284#ifdef CONFIG_BATMAN_ADV_BLA
271struct backbone_gw { 285struct batadv_backbone_gw {
272 uint8_t orig[ETH_ALEN]; 286 uint8_t orig[ETH_ALEN];
273 short vid; /* used VLAN ID */ 287 short vid; /* used VLAN ID */
274 struct hlist_node hash_entry; 288 struct hlist_node hash_entry;
275 struct bat_priv *bat_priv; 289 struct batadv_priv *bat_priv;
276 unsigned long lasttime; /* last time we heard of this backbone gw */ 290 unsigned long lasttime; /* last time we heard of this backbone gw */
277 atomic_t request_sent; 291 atomic_t request_sent;
278 atomic_t refcount; 292 atomic_t refcount;
@@ -280,10 +294,10 @@ struct backbone_gw {
280 uint16_t crc; /* crc checksum over all claims */ 294 uint16_t crc; /* crc checksum over all claims */
281}; 295};
282 296
283struct claim { 297struct batadv_claim {
284 uint8_t addr[ETH_ALEN]; 298 uint8_t addr[ETH_ALEN];
285 short vid; 299 short vid;
286 struct backbone_gw *backbone_gw; 300 struct batadv_backbone_gw *backbone_gw;
287 unsigned long lasttime; /* last time we heard of claim (locals only) */ 301 unsigned long lasttime; /* last time we heard of claim (locals only) */
288 struct rcu_head rcu; 302 struct rcu_head rcu;
289 atomic_t refcount; 303 atomic_t refcount;
@@ -291,29 +305,28 @@ struct claim {
291}; 305};
292#endif 306#endif
293 307
294struct tt_change_node { 308struct batadv_tt_change_node {
295 struct list_head list; 309 struct list_head list;
296 struct tt_change change; 310 struct batadv_tt_change change;
297}; 311};
298 312
299struct tt_req_node { 313struct batadv_tt_req_node {
300 uint8_t addr[ETH_ALEN]; 314 uint8_t addr[ETH_ALEN];
301 unsigned long issued_at; 315 unsigned long issued_at;
302 struct list_head list; 316 struct list_head list;
303}; 317};
304 318
305struct tt_roam_node { 319struct batadv_tt_roam_node {
306 uint8_t addr[ETH_ALEN]; 320 uint8_t addr[ETH_ALEN];
307 atomic_t counter; 321 atomic_t counter;
308 unsigned long first_time; 322 unsigned long first_time;
309 struct list_head list; 323 struct list_head list;
310}; 324};
311 325
312/** 326/* forw_packet - structure for forw_list maintaining packets to be
313 * forw_packet - structure for forw_list maintaining packets to be
314 * send/forwarded 327 * send/forwarded
315 */ 328 */
316struct forw_packet { 329struct batadv_forw_packet {
317 struct hlist_node list; 330 struct hlist_node list;
318 unsigned long send_time; 331 unsigned long send_time;
319 uint8_t own; 332 uint8_t own;
@@ -322,76 +335,76 @@ struct forw_packet {
322 uint32_t direct_link_flags; 335 uint32_t direct_link_flags;
323 uint8_t num_packets; 336 uint8_t num_packets;
324 struct delayed_work delayed_work; 337 struct delayed_work delayed_work;
325 struct hard_iface *if_incoming; 338 struct batadv_hard_iface *if_incoming;
326}; 339};
327 340
328/* While scanning for vis-entries of a particular vis-originator 341/* While scanning for vis-entries of a particular vis-originator
329 * this list collects its interfaces to create a subgraph/cluster 342 * this list collects its interfaces to create a subgraph/cluster
330 * out of them later 343 * out of them later
331 */ 344 */
332struct if_list_entry { 345struct batadv_if_list_entry {
333 uint8_t addr[ETH_ALEN]; 346 uint8_t addr[ETH_ALEN];
334 bool primary; 347 bool primary;
335 struct hlist_node list; 348 struct hlist_node list;
336}; 349};
337 350
338struct debug_log { 351struct batadv_debug_log {
339 char log_buff[LOG_BUF_LEN]; 352 char log_buff[BATADV_LOG_BUF_LEN];
340 unsigned long log_start; 353 unsigned long log_start;
341 unsigned long log_end; 354 unsigned long log_end;
342 spinlock_t lock; /* protects log_buff, log_start and log_end */ 355 spinlock_t lock; /* protects log_buff, log_start and log_end */
343 wait_queue_head_t queue_wait; 356 wait_queue_head_t queue_wait;
344}; 357};
345 358
346struct frag_packet_list_entry { 359struct batadv_frag_packet_list_entry {
347 struct list_head list; 360 struct list_head list;
348 uint16_t seqno; 361 uint16_t seqno;
349 struct sk_buff *skb; 362 struct sk_buff *skb;
350}; 363};
351 364
352struct vis_info { 365struct batadv_vis_info {
353 unsigned long first_seen; 366 unsigned long first_seen;
354 /* list of server-neighbors we received a vis-packet 367 /* list of server-neighbors we received a vis-packet
355 * from. we should not reply to them. */ 368 * from. we should not reply to them.
369 */
356 struct list_head recv_list; 370 struct list_head recv_list;
357 struct list_head send_list; 371 struct list_head send_list;
358 struct kref refcount; 372 struct kref refcount;
359 struct hlist_node hash_entry; 373 struct hlist_node hash_entry;
360 struct bat_priv *bat_priv; 374 struct batadv_priv *bat_priv;
361 /* this packet might be part of the vis send queue. */ 375 /* this packet might be part of the vis send queue. */
362 struct sk_buff *skb_packet; 376 struct sk_buff *skb_packet;
363 /* vis_info may follow here*/ 377 /* vis_info may follow here */
364} __packed; 378} __packed;
365 379
366struct vis_info_entry { 380struct batadv_vis_info_entry {
367 uint8_t src[ETH_ALEN]; 381 uint8_t src[ETH_ALEN];
368 uint8_t dest[ETH_ALEN]; 382 uint8_t dest[ETH_ALEN];
369 uint8_t quality; /* quality = 0 client */ 383 uint8_t quality; /* quality = 0 client */
370} __packed; 384} __packed;
371 385
372struct recvlist_node { 386struct batadv_recvlist_node {
373 struct list_head list; 387 struct list_head list;
374 uint8_t mac[ETH_ALEN]; 388 uint8_t mac[ETH_ALEN];
375}; 389};
376 390
377struct bat_algo_ops { 391struct batadv_algo_ops {
378 struct hlist_node list; 392 struct hlist_node list;
379 char *name; 393 char *name;
380 /* init routing info when hard-interface is enabled */ 394 /* init routing info when hard-interface is enabled */
381 int (*bat_iface_enable)(struct hard_iface *hard_iface); 395 int (*bat_iface_enable)(struct batadv_hard_iface *hard_iface);
382 /* de-init routing info when hard-interface is disabled */ 396 /* de-init routing info when hard-interface is disabled */
383 void (*bat_iface_disable)(struct hard_iface *hard_iface); 397 void (*bat_iface_disable)(struct batadv_hard_iface *hard_iface);
384 /* (re-)init mac addresses of the protocol information 398 /* (re-)init mac addresses of the protocol information
385 * belonging to this hard-interface 399 * belonging to this hard-interface
386 */ 400 */
387 void (*bat_iface_update_mac)(struct hard_iface *hard_iface); 401 void (*bat_iface_update_mac)(struct batadv_hard_iface *hard_iface);
388 /* called when primary interface is selected / changed */ 402 /* called when primary interface is selected / changed */
389 void (*bat_primary_iface_set)(struct hard_iface *hard_iface); 403 void (*bat_primary_iface_set)(struct batadv_hard_iface *hard_iface);
390 /* prepare a new outgoing OGM for the send queue */ 404 /* prepare a new outgoing OGM for the send queue */
391 void (*bat_ogm_schedule)(struct hard_iface *hard_iface, 405 void (*bat_ogm_schedule)(struct batadv_hard_iface *hard_iface);
392 int tt_num_changes);
393 /* send scheduled OGM */ 406 /* send scheduled OGM */
394 void (*bat_ogm_emit)(struct forw_packet *forw_packet); 407 void (*bat_ogm_emit)(struct batadv_forw_packet *forw_packet);
395}; 408};
396 409
397#endif /* _NET_BATMAN_ADV_TYPES_H_ */ 410#endif /* _NET_BATMAN_ADV_TYPES_H_ */
diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
index 74175c21085..00164645b3f 100644
--- a/net/batman-adv/unicast.c
+++ b/net/batman-adv/unicast.c
@@ -1,5 +1,4 @@
1/* 1/* Copyright (C) 2010-2012 B.A.T.M.A.N. contributors:
2 * Copyright (C) 2010-2012 B.A.T.M.A.N. contributors:
3 * 2 *
4 * Andreas Langer 3 * Andreas Langer
5 * 4 *
@@ -16,7 +15,6 @@
16 * along with this program; if not, write to the Free Software 15 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA 17 * 02110-1301, USA
19 *
20 */ 18 */
21 19
22#include "main.h" 20#include "main.h"
@@ -31,19 +29,20 @@
31#include "hard-interface.h" 29#include "hard-interface.h"
32 30
33 31
34static struct sk_buff *frag_merge_packet(struct list_head *head, 32static struct sk_buff *
35 struct frag_packet_list_entry *tfp, 33batadv_frag_merge_packet(struct list_head *head,
36 struct sk_buff *skb) 34 struct batadv_frag_packet_list_entry *tfp,
35 struct sk_buff *skb)
37{ 36{
38 struct unicast_frag_packet *up = 37 struct batadv_unicast_frag_packet *up;
39 (struct unicast_frag_packet *)skb->data;
40 struct sk_buff *tmp_skb; 38 struct sk_buff *tmp_skb;
41 struct unicast_packet *unicast_packet; 39 struct batadv_unicast_packet *unicast_packet;
42 int hdr_len = sizeof(*unicast_packet); 40 int hdr_len = sizeof(*unicast_packet);
43 int uni_diff = sizeof(*up) - hdr_len; 41 int uni_diff = sizeof(*up) - hdr_len;
44 42
43 up = (struct batadv_unicast_frag_packet *)skb->data;
45 /* set skb to the first part and tmp_skb to the second part */ 44 /* set skb to the first part and tmp_skb to the second part */
46 if (up->flags & UNI_FRAG_HEAD) { 45 if (up->flags & BATADV_UNI_FRAG_HEAD) {
47 tmp_skb = tfp->skb; 46 tmp_skb = tfp->skb;
48 } else { 47 } else {
49 tmp_skb = skb; 48 tmp_skb = skb;
@@ -66,8 +65,9 @@ static struct sk_buff *frag_merge_packet(struct list_head *head,
66 kfree_skb(tmp_skb); 65 kfree_skb(tmp_skb);
67 66
68 memmove(skb->data + uni_diff, skb->data, hdr_len); 67 memmove(skb->data + uni_diff, skb->data, hdr_len);
69 unicast_packet = (struct unicast_packet *)skb_pull(skb, uni_diff); 68 unicast_packet = (struct batadv_unicast_packet *)skb_pull(skb,
70 unicast_packet->header.packet_type = BAT_UNICAST; 69 uni_diff);
70 unicast_packet->header.packet_type = BATADV_UNICAST;
71 71
72 return skb; 72 return skb;
73 73
@@ -77,11 +77,13 @@ err:
77 return NULL; 77 return NULL;
78} 78}
79 79
80static void frag_create_entry(struct list_head *head, struct sk_buff *skb) 80static void batadv_frag_create_entry(struct list_head *head,
81 struct sk_buff *skb)
81{ 82{
82 struct frag_packet_list_entry *tfp; 83 struct batadv_frag_packet_list_entry *tfp;
83 struct unicast_frag_packet *up = 84 struct batadv_unicast_frag_packet *up;
84 (struct unicast_frag_packet *)skb->data; 85
86 up = (struct batadv_unicast_frag_packet *)skb->data;
85 87
86 /* free and oldest packets stand at the end */ 88 /* free and oldest packets stand at the end */
87 tfp = list_entry((head)->prev, typeof(*tfp), list); 89 tfp = list_entry((head)->prev, typeof(*tfp), list);
@@ -93,15 +95,15 @@ static void frag_create_entry(struct list_head *head, struct sk_buff *skb)
93 return; 95 return;
94} 96}
95 97
96static int frag_create_buffer(struct list_head *head) 98static int batadv_frag_create_buffer(struct list_head *head)
97{ 99{
98 int i; 100 int i;
99 struct frag_packet_list_entry *tfp; 101 struct batadv_frag_packet_list_entry *tfp;
100 102
101 for (i = 0; i < FRAG_BUFFER_SIZE; i++) { 103 for (i = 0; i < BATADV_FRAG_BUFFER_SIZE; i++) {
102 tfp = kmalloc(sizeof(*tfp), GFP_ATOMIC); 104 tfp = kmalloc(sizeof(*tfp), GFP_ATOMIC);
103 if (!tfp) { 105 if (!tfp) {
104 frag_list_free(head); 106 batadv_frag_list_free(head);
105 return -ENOMEM; 107 return -ENOMEM;
106 } 108 }
107 tfp->skb = NULL; 109 tfp->skb = NULL;
@@ -113,14 +115,15 @@ static int frag_create_buffer(struct list_head *head)
113 return 0; 115 return 0;
114} 116}
115 117
116static struct frag_packet_list_entry *frag_search_packet(struct list_head *head, 118static struct batadv_frag_packet_list_entry *
117 const struct unicast_frag_packet *up) 119batadv_frag_search_packet(struct list_head *head,
120 const struct batadv_unicast_frag_packet *up)
118{ 121{
119 struct frag_packet_list_entry *tfp; 122 struct batadv_frag_packet_list_entry *tfp;
120 struct unicast_frag_packet *tmp_up = NULL; 123 struct batadv_unicast_frag_packet *tmp_up = NULL;
121 uint16_t search_seqno; 124 uint16_t search_seqno;
122 125
123 if (up->flags & UNI_FRAG_HEAD) 126 if (up->flags & BATADV_UNI_FRAG_HEAD)
124 search_seqno = ntohs(up->seqno)+1; 127 search_seqno = ntohs(up->seqno)+1;
125 else 128 else
126 search_seqno = ntohs(up->seqno)-1; 129 search_seqno = ntohs(up->seqno)-1;
@@ -133,12 +136,12 @@ static struct frag_packet_list_entry *frag_search_packet(struct list_head *head,
133 if (tfp->seqno == ntohs(up->seqno)) 136 if (tfp->seqno == ntohs(up->seqno))
134 goto mov_tail; 137 goto mov_tail;
135 138
136 tmp_up = (struct unicast_frag_packet *)tfp->skb->data; 139 tmp_up = (struct batadv_unicast_frag_packet *)tfp->skb->data;
137 140
138 if (tfp->seqno == search_seqno) { 141 if (tfp->seqno == search_seqno) {
139 142
140 if ((tmp_up->flags & UNI_FRAG_HEAD) != 143 if ((tmp_up->flags & BATADV_UNI_FRAG_HEAD) !=
141 (up->flags & UNI_FRAG_HEAD)) 144 (up->flags & BATADV_UNI_FRAG_HEAD))
142 return tfp; 145 return tfp;
143 else 146 else
144 goto mov_tail; 147 goto mov_tail;
@@ -151,9 +154,9 @@ mov_tail:
151 return NULL; 154 return NULL;
152} 155}
153 156
154void frag_list_free(struct list_head *head) 157void batadv_frag_list_free(struct list_head *head)
155{ 158{
156 struct frag_packet_list_entry *pf, *tmp_pf; 159 struct batadv_frag_packet_list_entry *pf, *tmp_pf;
157 160
158 if (!list_empty(head)) { 161 if (!list_empty(head)) {
159 162
@@ -172,64 +175,66 @@ void frag_list_free(struct list_head *head)
172 * or the skb could be reassembled (skb_new will point to the new packet and 175 * or the skb could be reassembled (skb_new will point to the new packet and
173 * skb was freed) 176 * skb was freed)
174 */ 177 */
175int frag_reassemble_skb(struct sk_buff *skb, struct bat_priv *bat_priv, 178int batadv_frag_reassemble_skb(struct sk_buff *skb,
176 struct sk_buff **new_skb) 179 struct batadv_priv *bat_priv,
180 struct sk_buff **new_skb)
177{ 181{
178 struct orig_node *orig_node; 182 struct batadv_orig_node *orig_node;
179 struct frag_packet_list_entry *tmp_frag_entry; 183 struct batadv_frag_packet_list_entry *tmp_frag_entry;
180 int ret = NET_RX_DROP; 184 int ret = NET_RX_DROP;
181 struct unicast_frag_packet *unicast_packet = 185 struct batadv_unicast_frag_packet *unicast_packet;
182 (struct unicast_frag_packet *)skb->data;
183 186
187 unicast_packet = (struct batadv_unicast_frag_packet *)skb->data;
184 *new_skb = NULL; 188 *new_skb = NULL;
185 189
186 orig_node = orig_hash_find(bat_priv, unicast_packet->orig); 190 orig_node = batadv_orig_hash_find(bat_priv, unicast_packet->orig);
187 if (!orig_node) 191 if (!orig_node)
188 goto out; 192 goto out;
189 193
190 orig_node->last_frag_packet = jiffies; 194 orig_node->last_frag_packet = jiffies;
191 195
192 if (list_empty(&orig_node->frag_list) && 196 if (list_empty(&orig_node->frag_list) &&
193 frag_create_buffer(&orig_node->frag_list)) { 197 batadv_frag_create_buffer(&orig_node->frag_list)) {
194 pr_debug("couldn't create frag buffer\n"); 198 pr_debug("couldn't create frag buffer\n");
195 goto out; 199 goto out;
196 } 200 }
197 201
198 tmp_frag_entry = frag_search_packet(&orig_node->frag_list, 202 tmp_frag_entry = batadv_frag_search_packet(&orig_node->frag_list,
199 unicast_packet); 203 unicast_packet);
200 204
201 if (!tmp_frag_entry) { 205 if (!tmp_frag_entry) {
202 frag_create_entry(&orig_node->frag_list, skb); 206 batadv_frag_create_entry(&orig_node->frag_list, skb);
203 ret = NET_RX_SUCCESS; 207 ret = NET_RX_SUCCESS;
204 goto out; 208 goto out;
205 } 209 }
206 210
207 *new_skb = frag_merge_packet(&orig_node->frag_list, tmp_frag_entry, 211 *new_skb = batadv_frag_merge_packet(&orig_node->frag_list,
208 skb); 212 tmp_frag_entry, skb);
209 /* if not, merge failed */ 213 /* if not, merge failed */
210 if (*new_skb) 214 if (*new_skb)
211 ret = NET_RX_SUCCESS; 215 ret = NET_RX_SUCCESS;
212 216
213out: 217out:
214 if (orig_node) 218 if (orig_node)
215 orig_node_free_ref(orig_node); 219 batadv_orig_node_free_ref(orig_node);
216 return ret; 220 return ret;
217} 221}
218 222
219int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv, 223int batadv_frag_send_skb(struct sk_buff *skb, struct batadv_priv *bat_priv,
220 struct hard_iface *hard_iface, const uint8_t dstaddr[]) 224 struct batadv_hard_iface *hard_iface,
225 const uint8_t dstaddr[])
221{ 226{
222 struct unicast_packet tmp_uc, *unicast_packet; 227 struct batadv_unicast_packet tmp_uc, *unicast_packet;
223 struct hard_iface *primary_if; 228 struct batadv_hard_iface *primary_if;
224 struct sk_buff *frag_skb; 229 struct sk_buff *frag_skb;
225 struct unicast_frag_packet *frag1, *frag2; 230 struct batadv_unicast_frag_packet *frag1, *frag2;
226 int uc_hdr_len = sizeof(*unicast_packet); 231 int uc_hdr_len = sizeof(*unicast_packet);
227 int ucf_hdr_len = sizeof(*frag1); 232 int ucf_hdr_len = sizeof(*frag1);
228 int data_len = skb->len - uc_hdr_len; 233 int data_len = skb->len - uc_hdr_len;
229 int large_tail = 0, ret = NET_RX_DROP; 234 int large_tail = 0, ret = NET_RX_DROP;
230 uint16_t seqno; 235 uint16_t seqno;
231 236
232 primary_if = primary_if_get_selected(bat_priv); 237 primary_if = batadv_primary_if_get_selected(bat_priv);
233 if (!primary_if) 238 if (!primary_if)
234 goto dropped; 239 goto dropped;
235 240
@@ -238,38 +243,38 @@ int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
238 goto dropped; 243 goto dropped;
239 skb_reserve(frag_skb, ucf_hdr_len); 244 skb_reserve(frag_skb, ucf_hdr_len);
240 245
241 unicast_packet = (struct unicast_packet *)skb->data; 246 unicast_packet = (struct batadv_unicast_packet *)skb->data;
242 memcpy(&tmp_uc, unicast_packet, uc_hdr_len); 247 memcpy(&tmp_uc, unicast_packet, uc_hdr_len);
243 skb_split(skb, frag_skb, data_len / 2 + uc_hdr_len); 248 skb_split(skb, frag_skb, data_len / 2 + uc_hdr_len);
244 249
245 if (my_skb_head_push(skb, ucf_hdr_len - uc_hdr_len) < 0 || 250 if (batadv_skb_head_push(skb, ucf_hdr_len - uc_hdr_len) < 0 ||
246 my_skb_head_push(frag_skb, ucf_hdr_len) < 0) 251 batadv_skb_head_push(frag_skb, ucf_hdr_len) < 0)
247 goto drop_frag; 252 goto drop_frag;
248 253
249 frag1 = (struct unicast_frag_packet *)skb->data; 254 frag1 = (struct batadv_unicast_frag_packet *)skb->data;
250 frag2 = (struct unicast_frag_packet *)frag_skb->data; 255 frag2 = (struct batadv_unicast_frag_packet *)frag_skb->data;
251 256
252 memcpy(frag1, &tmp_uc, sizeof(tmp_uc)); 257 memcpy(frag1, &tmp_uc, sizeof(tmp_uc));
253 258
254 frag1->header.ttl--; 259 frag1->header.ttl--;
255 frag1->header.version = COMPAT_VERSION; 260 frag1->header.version = BATADV_COMPAT_VERSION;
256 frag1->header.packet_type = BAT_UNICAST_FRAG; 261 frag1->header.packet_type = BATADV_UNICAST_FRAG;
257 262
258 memcpy(frag1->orig, primary_if->net_dev->dev_addr, ETH_ALEN); 263 memcpy(frag1->orig, primary_if->net_dev->dev_addr, ETH_ALEN);
259 memcpy(frag2, frag1, sizeof(*frag2)); 264 memcpy(frag2, frag1, sizeof(*frag2));
260 265
261 if (data_len & 1) 266 if (data_len & 1)
262 large_tail = UNI_FRAG_LARGETAIL; 267 large_tail = BATADV_UNI_FRAG_LARGETAIL;
263 268
264 frag1->flags = UNI_FRAG_HEAD | large_tail; 269 frag1->flags = BATADV_UNI_FRAG_HEAD | large_tail;
265 frag2->flags = large_tail; 270 frag2->flags = large_tail;
266 271
267 seqno = atomic_add_return(2, &hard_iface->frag_seqno); 272 seqno = atomic_add_return(2, &hard_iface->frag_seqno);
268 frag1->seqno = htons(seqno - 1); 273 frag1->seqno = htons(seqno - 1);
269 frag2->seqno = htons(seqno); 274 frag2->seqno = htons(seqno);
270 275
271 send_skb_packet(skb, hard_iface, dstaddr); 276 batadv_send_skb_packet(skb, hard_iface, dstaddr);
272 send_skb_packet(frag_skb, hard_iface, dstaddr); 277 batadv_send_skb_packet(frag_skb, hard_iface, dstaddr);
273 ret = NET_RX_SUCCESS; 278 ret = NET_RX_SUCCESS;
274 goto out; 279 goto out;
275 280
@@ -279,52 +284,53 @@ dropped:
279 kfree_skb(skb); 284 kfree_skb(skb);
280out: 285out:
281 if (primary_if) 286 if (primary_if)
282 hardif_free_ref(primary_if); 287 batadv_hardif_free_ref(primary_if);
283 return ret; 288 return ret;
284} 289}
285 290
286int unicast_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv) 291int batadv_unicast_send_skb(struct sk_buff *skb, struct batadv_priv *bat_priv)
287{ 292{
288 struct ethhdr *ethhdr = (struct ethhdr *)skb->data; 293 struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
289 struct unicast_packet *unicast_packet; 294 struct batadv_unicast_packet *unicast_packet;
290 struct orig_node *orig_node; 295 struct batadv_orig_node *orig_node;
291 struct neigh_node *neigh_node; 296 struct batadv_neigh_node *neigh_node;
292 int data_len = skb->len; 297 int data_len = skb->len;
293 int ret = 1; 298 int ret = 1;
299 unsigned int dev_mtu;
294 300
295 /* get routing information */ 301 /* get routing information */
296 if (is_multicast_ether_addr(ethhdr->h_dest)) { 302 if (is_multicast_ether_addr(ethhdr->h_dest)) {
297 orig_node = gw_get_selected_orig(bat_priv); 303 orig_node = batadv_gw_get_selected_orig(bat_priv);
298 if (orig_node) 304 if (orig_node)
299 goto find_router; 305 goto find_router;
300 } 306 }
301 307
302 /* check for tt host - increases orig_node refcount. 308 /* check for tt host - increases orig_node refcount.
303 * returns NULL in case of AP isolation */ 309 * returns NULL in case of AP isolation
304 orig_node = transtable_search(bat_priv, ethhdr->h_source, 310 */
305 ethhdr->h_dest); 311 orig_node = batadv_transtable_search(bat_priv, ethhdr->h_source,
312 ethhdr->h_dest);
306 313
307find_router: 314find_router:
308 /** 315 /* find_router():
309 * find_router():
310 * - if orig_node is NULL it returns NULL 316 * - if orig_node is NULL it returns NULL
311 * - increases neigh_nodes refcount if found. 317 * - increases neigh_nodes refcount if found.
312 */ 318 */
313 neigh_node = find_router(bat_priv, orig_node, NULL); 319 neigh_node = batadv_find_router(bat_priv, orig_node, NULL);
314 320
315 if (!neigh_node) 321 if (!neigh_node)
316 goto out; 322 goto out;
317 323
318 if (my_skb_head_push(skb, sizeof(*unicast_packet)) < 0) 324 if (batadv_skb_head_push(skb, sizeof(*unicast_packet)) < 0)
319 goto out; 325 goto out;
320 326
321 unicast_packet = (struct unicast_packet *)skb->data; 327 unicast_packet = (struct batadv_unicast_packet *)skb->data;
322 328
323 unicast_packet->header.version = COMPAT_VERSION; 329 unicast_packet->header.version = BATADV_COMPAT_VERSION;
324 /* batman packet type: unicast */ 330 /* batman packet type: unicast */
325 unicast_packet->header.packet_type = BAT_UNICAST; 331 unicast_packet->header.packet_type = BATADV_UNICAST;
326 /* set unicast ttl */ 332 /* set unicast ttl */
327 unicast_packet->header.ttl = TTL; 333 unicast_packet->header.ttl = BATADV_TTL;
328 /* copy the destination for faster routing */ 334 /* copy the destination for faster routing */
329 memcpy(unicast_packet->dest, orig_node->orig, ETH_ALEN); 335 memcpy(unicast_packet->dest, orig_node->orig, ETH_ALEN);
330 /* set the destination tt version number */ 336 /* set the destination tt version number */
@@ -336,28 +342,29 @@ find_router:
336 * try to reroute it because the ttvn contained in the header is less 342 * try to reroute it because the ttvn contained in the header is less
337 * than the current one 343 * than the current one
338 */ 344 */
339 if (tt_global_client_is_roaming(bat_priv, ethhdr->h_dest)) 345 if (batadv_tt_global_client_is_roaming(bat_priv, ethhdr->h_dest))
340 unicast_packet->ttvn = unicast_packet->ttvn - 1; 346 unicast_packet->ttvn = unicast_packet->ttvn - 1;
341 347
348 dev_mtu = neigh_node->if_incoming->net_dev->mtu;
342 if (atomic_read(&bat_priv->fragmentation) && 349 if (atomic_read(&bat_priv->fragmentation) &&
343 data_len + sizeof(*unicast_packet) > 350 data_len + sizeof(*unicast_packet) > dev_mtu) {
344 neigh_node->if_incoming->net_dev->mtu) {
345 /* send frag skb decreases ttl */ 351 /* send frag skb decreases ttl */
346 unicast_packet->header.ttl++; 352 unicast_packet->header.ttl++;
347 ret = frag_send_skb(skb, bat_priv, 353 ret = batadv_frag_send_skb(skb, bat_priv,
348 neigh_node->if_incoming, neigh_node->addr); 354 neigh_node->if_incoming,
355 neigh_node->addr);
349 goto out; 356 goto out;
350 } 357 }
351 358
352 send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr); 359 batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
353 ret = 0; 360 ret = 0;
354 goto out; 361 goto out;
355 362
356out: 363out:
357 if (neigh_node) 364 if (neigh_node)
358 neigh_node_free_ref(neigh_node); 365 batadv_neigh_node_free_ref(neigh_node);
359 if (orig_node) 366 if (orig_node)
360 orig_node_free_ref(orig_node); 367 batadv_orig_node_free_ref(orig_node);
361 if (ret == 1) 368 if (ret == 1)
362 kfree_skb(skb); 369 kfree_skb(skb);
363 return ret; 370 return ret;
diff --git a/net/batman-adv/unicast.h b/net/batman-adv/unicast.h
index a9faf6b1db1..1c46e2eb1ef 100644
--- a/net/batman-adv/unicast.h
+++ b/net/batman-adv/unicast.h
@@ -1,5 +1,4 @@
1/* 1/* Copyright (C) 2010-2012 B.A.T.M.A.N. contributors:
2 * Copyright (C) 2010-2012 B.A.T.M.A.N. contributors:
3 * 2 *
4 * Andreas Langer 3 * Andreas Langer
5 * 4 *
@@ -16,7 +15,6 @@
16 * along with this program; if not, write to the Free Software 15 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA 17 * 02110-1301, USA
19 *
20 */ 18 */
21 19
22#ifndef _NET_BATMAN_ADV_UNICAST_H_ 20#ifndef _NET_BATMAN_ADV_UNICAST_H_
@@ -24,33 +22,35 @@
24 22
25#include "packet.h" 23#include "packet.h"
26 24
27#define FRAG_TIMEOUT 10000 /* purge frag list entries after time in ms */ 25#define BATADV_FRAG_TIMEOUT 10000 /* purge frag list entries after time in ms */
28#define FRAG_BUFFER_SIZE 6 /* number of list elements in buffer */ 26#define BATADV_FRAG_BUFFER_SIZE 6 /* number of list elements in buffer */
29 27
30int frag_reassemble_skb(struct sk_buff *skb, struct bat_priv *bat_priv, 28int batadv_frag_reassemble_skb(struct sk_buff *skb,
31 struct sk_buff **new_skb); 29 struct batadv_priv *bat_priv,
32void frag_list_free(struct list_head *head); 30 struct sk_buff **new_skb);
33int unicast_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv); 31void batadv_frag_list_free(struct list_head *head);
34int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv, 32int batadv_unicast_send_skb(struct sk_buff *skb, struct batadv_priv *bat_priv);
35 struct hard_iface *hard_iface, const uint8_t dstaddr[]); 33int batadv_frag_send_skb(struct sk_buff *skb, struct batadv_priv *bat_priv,
34 struct batadv_hard_iface *hard_iface,
35 const uint8_t dstaddr[]);
36 36
37static inline int frag_can_reassemble(const struct sk_buff *skb, int mtu) 37static inline int batadv_frag_can_reassemble(const struct sk_buff *skb, int mtu)
38{ 38{
39 const struct unicast_frag_packet *unicast_packet; 39 const struct batadv_unicast_frag_packet *unicast_packet;
40 int uneven_correction = 0; 40 int uneven_correction = 0;
41 unsigned int merged_size; 41 unsigned int merged_size;
42 42
43 unicast_packet = (struct unicast_frag_packet *)skb->data; 43 unicast_packet = (struct batadv_unicast_frag_packet *)skb->data;
44 44
45 if (unicast_packet->flags & UNI_FRAG_LARGETAIL) { 45 if (unicast_packet->flags & BATADV_UNI_FRAG_LARGETAIL) {
46 if (unicast_packet->flags & UNI_FRAG_HEAD) 46 if (unicast_packet->flags & BATADV_UNI_FRAG_HEAD)
47 uneven_correction = 1; 47 uneven_correction = 1;
48 else 48 else
49 uneven_correction = -1; 49 uneven_correction = -1;
50 } 50 }
51 51
52 merged_size = (skb->len - sizeof(*unicast_packet)) * 2; 52 merged_size = (skb->len - sizeof(*unicast_packet)) * 2;
53 merged_size += sizeof(struct unicast_packet) + uneven_correction; 53 merged_size += sizeof(struct batadv_unicast_packet) + uneven_correction;
54 54
55 return merged_size <= mtu; 55 return merged_size <= mtu;
56} 56}
diff --git a/net/batman-adv/vis.c b/net/batman-adv/vis.c
index cec216fb77c..2a2ea068146 100644
--- a/net/batman-adv/vis.c
+++ b/net/batman-adv/vis.c
@@ -1,5 +1,4 @@
1/* 1/* Copyright (C) 2008-2012 B.A.T.M.A.N. contributors:
2 * Copyright (C) 2008-2012 B.A.T.M.A.N. contributors:
3 * 2 *
4 * Simon Wunderlich 3 * Simon Wunderlich
5 * 4 *
@@ -16,7 +15,6 @@
16 * along with this program; if not, write to the Free Software 15 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA 17 * 02110-1301, USA
19 *
20 */ 18 */
21 19
22#include "main.h" 20#include "main.h"
@@ -28,16 +26,19 @@
28#include "hash.h" 26#include "hash.h"
29#include "originator.h" 27#include "originator.h"
30 28
31#define MAX_VIS_PACKET_SIZE 1000 29#define BATADV_MAX_VIS_PACKET_SIZE 1000
32 30
33static void start_vis_timer(struct bat_priv *bat_priv); 31static void batadv_start_vis_timer(struct batadv_priv *bat_priv);
34 32
35/* free the info */ 33/* free the info */
36static void free_info(struct kref *ref) 34static void batadv_free_info(struct kref *ref)
37{ 35{
38 struct vis_info *info = container_of(ref, struct vis_info, refcount); 36 struct batadv_vis_info *info;
39 struct bat_priv *bat_priv = info->bat_priv; 37 struct batadv_priv *bat_priv;
40 struct recvlist_node *entry, *tmp; 38 struct batadv_recvlist_node *entry, *tmp;
39
40 info = container_of(ref, struct batadv_vis_info, refcount);
41 bat_priv = info->bat_priv;
41 42
42 list_del_init(&info->send_list); 43 list_del_init(&info->send_list);
43 spin_lock_bh(&bat_priv->vis_list_lock); 44 spin_lock_bh(&bat_priv->vis_list_lock);
@@ -52,29 +53,30 @@ static void free_info(struct kref *ref)
52} 53}
53 54
54/* Compare two vis packets, used by the hashing algorithm */ 55/* Compare two vis packets, used by the hashing algorithm */
55static int vis_info_cmp(const struct hlist_node *node, const void *data2) 56static int batadv_vis_info_cmp(const struct hlist_node *node, const void *data2)
56{ 57{
57 const struct vis_info *d1, *d2; 58 const struct batadv_vis_info *d1, *d2;
58 const struct vis_packet *p1, *p2; 59 const struct batadv_vis_packet *p1, *p2;
59 60
60 d1 = container_of(node, struct vis_info, hash_entry); 61 d1 = container_of(node, struct batadv_vis_info, hash_entry);
61 d2 = data2; 62 d2 = data2;
62 p1 = (struct vis_packet *)d1->skb_packet->data; 63 p1 = (struct batadv_vis_packet *)d1->skb_packet->data;
63 p2 = (struct vis_packet *)d2->skb_packet->data; 64 p2 = (struct batadv_vis_packet *)d2->skb_packet->data;
64 return compare_eth(p1->vis_orig, p2->vis_orig); 65 return batadv_compare_eth(p1->vis_orig, p2->vis_orig);
65} 66}
66 67
67/* hash function to choose an entry in a hash table of given size */ 68/* hash function to choose an entry in a hash table of given size
68/* hash algorithm from http://en.wikipedia.org/wiki/Hash_table */ 69 * hash algorithm from http://en.wikipedia.org/wiki/Hash_table
69static uint32_t vis_info_choose(const void *data, uint32_t size) 70 */
71static uint32_t batadv_vis_info_choose(const void *data, uint32_t size)
70{ 72{
71 const struct vis_info *vis_info = data; 73 const struct batadv_vis_info *vis_info = data;
72 const struct vis_packet *packet; 74 const struct batadv_vis_packet *packet;
73 const unsigned char *key; 75 const unsigned char *key;
74 uint32_t hash = 0; 76 uint32_t hash = 0;
75 size_t i; 77 size_t i;
76 78
77 packet = (struct vis_packet *)vis_info->skb_packet->data; 79 packet = (struct batadv_vis_packet *)vis_info->skb_packet->data;
78 key = packet->vis_orig; 80 key = packet->vis_orig;
79 for (i = 0; i < ETH_ALEN; i++) { 81 for (i = 0; i < ETH_ALEN; i++) {
80 hash += key[i]; 82 hash += key[i];
@@ -89,24 +91,24 @@ static uint32_t vis_info_choose(const void *data, uint32_t size)
89 return hash % size; 91 return hash % size;
90} 92}
91 93
92static struct vis_info *vis_hash_find(struct bat_priv *bat_priv, 94static struct batadv_vis_info *
93 const void *data) 95batadv_vis_hash_find(struct batadv_priv *bat_priv, const void *data)
94{ 96{
95 struct hashtable_t *hash = bat_priv->vis_hash; 97 struct batadv_hashtable *hash = bat_priv->vis_hash;
96 struct hlist_head *head; 98 struct hlist_head *head;
97 struct hlist_node *node; 99 struct hlist_node *node;
98 struct vis_info *vis_info, *vis_info_tmp = NULL; 100 struct batadv_vis_info *vis_info, *vis_info_tmp = NULL;
99 uint32_t index; 101 uint32_t index;
100 102
101 if (!hash) 103 if (!hash)
102 return NULL; 104 return NULL;
103 105
104 index = vis_info_choose(data, hash->size); 106 index = batadv_vis_info_choose(data, hash->size);
105 head = &hash->table[index]; 107 head = &hash->table[index];
106 108
107 rcu_read_lock(); 109 rcu_read_lock();
108 hlist_for_each_entry_rcu(vis_info, node, head, hash_entry) { 110 hlist_for_each_entry_rcu(vis_info, node, head, hash_entry) {
109 if (!vis_info_cmp(node, data)) 111 if (!batadv_vis_info_cmp(node, data))
110 continue; 112 continue;
111 113
112 vis_info_tmp = vis_info; 114 vis_info_tmp = vis_info;
@@ -118,16 +120,17 @@ static struct vis_info *vis_hash_find(struct bat_priv *bat_priv,
118} 120}
119 121
120/* insert interface to the list of interfaces of one originator, if it 122/* insert interface to the list of interfaces of one originator, if it
121 * does not already exist in the list */ 123 * does not already exist in the list
122static void vis_data_insert_interface(const uint8_t *interface, 124 */
123 struct hlist_head *if_list, 125static void batadv_vis_data_insert_interface(const uint8_t *interface,
124 bool primary) 126 struct hlist_head *if_list,
127 bool primary)
125{ 128{
126 struct if_list_entry *entry; 129 struct batadv_if_list_entry *entry;
127 struct hlist_node *pos; 130 struct hlist_node *pos;
128 131
129 hlist_for_each_entry(entry, pos, if_list, list) { 132 hlist_for_each_entry(entry, pos, if_list, list) {
130 if (compare_eth(entry->addr, interface)) 133 if (batadv_compare_eth(entry->addr, interface))
131 return; 134 return;
132 } 135 }
133 136
@@ -140,195 +143,145 @@ static void vis_data_insert_interface(const uint8_t *interface,
140 hlist_add_head(&entry->list, if_list); 143 hlist_add_head(&entry->list, if_list);
141} 144}
142 145
143static ssize_t vis_data_read_prim_sec(char *buff, 146static void batadv_vis_data_read_prim_sec(struct seq_file *seq,
144 const struct hlist_head *if_list) 147 const struct hlist_head *if_list)
145{ 148{
146 struct if_list_entry *entry; 149 struct batadv_if_list_entry *entry;
147 struct hlist_node *pos; 150 struct hlist_node *pos;
148 size_t len = 0;
149 151
150 hlist_for_each_entry(entry, pos, if_list, list) { 152 hlist_for_each_entry(entry, pos, if_list, list) {
151 if (entry->primary) 153 if (entry->primary)
152 len += sprintf(buff + len, "PRIMARY, "); 154 seq_printf(seq, "PRIMARY, ");
153 else 155 else
154 len += sprintf(buff + len, "SEC %pM, ", entry->addr); 156 seq_printf(seq, "SEC %pM, ", entry->addr);
155 } 157 }
158}
156 159
157 return len; 160/* read an entry */
161static ssize_t
162batadv_vis_data_read_entry(struct seq_file *seq,
163 const struct batadv_vis_info_entry *entry,
164 const uint8_t *src, bool primary)
165{
166 if (primary && entry->quality == 0)
167 return seq_printf(seq, "TT %pM, ", entry->dest);
168 else if (batadv_compare_eth(entry->src, src))
169 return seq_printf(seq, "TQ %pM %d, ", entry->dest,
170 entry->quality);
171
172 return 0;
158} 173}
159 174
160static size_t vis_data_count_prim_sec(struct hlist_head *if_list) 175static void
176batadv_vis_data_insert_interfaces(struct hlist_head *list,
177 struct batadv_vis_packet *packet,
178 struct batadv_vis_info_entry *entries)
161{ 179{
162 struct if_list_entry *entry; 180 int i;
163 struct hlist_node *pos;
164 size_t count = 0;
165 181
166 hlist_for_each_entry(entry, pos, if_list, list) { 182 for (i = 0; i < packet->entries; i++) {
167 if (entry->primary) 183 if (entries[i].quality == 0)
168 count += 9; 184 continue;
169 else
170 count += 23;
171 }
172 185
173 return count; 186 if (batadv_compare_eth(entries[i].src, packet->vis_orig))
187 continue;
188
189 batadv_vis_data_insert_interface(entries[i].src, list, false);
190 }
174} 191}
175 192
176/* read an entry */ 193static void batadv_vis_data_read_entries(struct seq_file *seq,
177static ssize_t vis_data_read_entry(char *buff, 194 struct hlist_head *list,
178 const struct vis_info_entry *entry, 195 struct batadv_vis_packet *packet,
179 const uint8_t *src, bool primary) 196 struct batadv_vis_info_entry *entries)
180{ 197{
181 /* maximal length: max(4+17+2, 3+17+1+3+2) == 26 */ 198 int i;
182 if (primary && entry->quality == 0) 199 struct batadv_if_list_entry *entry;
183 return sprintf(buff, "TT %pM, ", entry->dest); 200 struct hlist_node *pos;
184 else if (compare_eth(entry->src, src))
185 return sprintf(buff, "TQ %pM %d, ", entry->dest,
186 entry->quality);
187 201
188 return 0; 202 hlist_for_each_entry(entry, pos, list, list) {
203 seq_printf(seq, "%pM,", entry->addr);
204
205 for (i = 0; i < packet->entries; i++)
206 batadv_vis_data_read_entry(seq, &entries[i],
207 entry->addr, entry->primary);
208
209 /* add primary/secondary records */
210 if (batadv_compare_eth(entry->addr, packet->vis_orig))
211 batadv_vis_data_read_prim_sec(seq, list);
212
213 seq_printf(seq, "\n");
214 }
189} 215}
190 216
191int vis_seq_print_text(struct seq_file *seq, void *offset) 217static void batadv_vis_seq_print_text_bucket(struct seq_file *seq,
218 const struct hlist_head *head)
192{ 219{
193 struct hard_iface *primary_if;
194 struct hlist_node *node; 220 struct hlist_node *node;
221 struct batadv_vis_info *info;
222 struct batadv_vis_packet *packet;
223 uint8_t *entries_pos;
224 struct batadv_vis_info_entry *entries;
225 struct batadv_if_list_entry *entry;
226 struct hlist_node *pos, *n;
227
228 HLIST_HEAD(vis_if_list);
229
230 hlist_for_each_entry_rcu(info, node, head, hash_entry) {
231 packet = (struct batadv_vis_packet *)info->skb_packet->data;
232 entries_pos = (uint8_t *)packet + sizeof(*packet);
233 entries = (struct batadv_vis_info_entry *)entries_pos;
234
235 batadv_vis_data_insert_interface(packet->vis_orig, &vis_if_list,
236 true);
237 batadv_vis_data_insert_interfaces(&vis_if_list, packet,
238 entries);
239 batadv_vis_data_read_entries(seq, &vis_if_list, packet,
240 entries);
241
242 hlist_for_each_entry_safe(entry, pos, n, &vis_if_list, list) {
243 hlist_del(&entry->list);
244 kfree(entry);
245 }
246 }
247}
248
249int batadv_vis_seq_print_text(struct seq_file *seq, void *offset)
250{
251 struct batadv_hard_iface *primary_if;
195 struct hlist_head *head; 252 struct hlist_head *head;
196 struct vis_info *info;
197 struct vis_packet *packet;
198 struct vis_info_entry *entries;
199 struct net_device *net_dev = (struct net_device *)seq->private; 253 struct net_device *net_dev = (struct net_device *)seq->private;
200 struct bat_priv *bat_priv = netdev_priv(net_dev); 254 struct batadv_priv *bat_priv = netdev_priv(net_dev);
201 struct hashtable_t *hash = bat_priv->vis_hash; 255 struct batadv_hashtable *hash = bat_priv->vis_hash;
202 HLIST_HEAD(vis_if_list);
203 struct if_list_entry *entry;
204 struct hlist_node *pos, *n;
205 uint32_t i; 256 uint32_t i;
206 int j, ret = 0; 257 int ret = 0;
207 int vis_server = atomic_read(&bat_priv->vis_mode); 258 int vis_server = atomic_read(&bat_priv->vis_mode);
208 size_t buff_pos, buf_size;
209 char *buff;
210 int compare;
211 259
212 primary_if = primary_if_get_selected(bat_priv); 260 primary_if = batadv_primary_if_get_selected(bat_priv);
213 if (!primary_if) 261 if (!primary_if)
214 goto out; 262 goto out;
215 263
216 if (vis_server == VIS_TYPE_CLIENT_UPDATE) 264 if (vis_server == BATADV_VIS_TYPE_CLIENT_UPDATE)
217 goto out; 265 goto out;
218 266
219 buf_size = 1;
220 /* Estimate length */
221 spin_lock_bh(&bat_priv->vis_hash_lock); 267 spin_lock_bh(&bat_priv->vis_hash_lock);
222 for (i = 0; i < hash->size; i++) { 268 for (i = 0; i < hash->size; i++) {
223 head = &hash->table[i]; 269 head = &hash->table[i];
224 270 batadv_vis_seq_print_text_bucket(seq, head);
225 rcu_read_lock();
226 hlist_for_each_entry_rcu(info, node, head, hash_entry) {
227 packet = (struct vis_packet *)info->skb_packet->data;
228 entries = (struct vis_info_entry *)
229 ((char *)packet + sizeof(*packet));
230
231 for (j = 0; j < packet->entries; j++) {
232 if (entries[j].quality == 0)
233 continue;
234 compare =
235 compare_eth(entries[j].src, packet->vis_orig);
236 vis_data_insert_interface(entries[j].src,
237 &vis_if_list,
238 compare);
239 }
240
241 hlist_for_each_entry(entry, pos, &vis_if_list, list) {
242 buf_size += 18 + 26 * packet->entries;
243
244 /* add primary/secondary records */
245 if (compare_eth(entry->addr, packet->vis_orig))
246 buf_size +=
247 vis_data_count_prim_sec(&vis_if_list);
248
249 buf_size += 1;
250 }
251
252 hlist_for_each_entry_safe(entry, pos, n, &vis_if_list,
253 list) {
254 hlist_del(&entry->list);
255 kfree(entry);
256 }
257 }
258 rcu_read_unlock();
259 }
260
261 buff = kmalloc(buf_size, GFP_ATOMIC);
262 if (!buff) {
263 spin_unlock_bh(&bat_priv->vis_hash_lock);
264 ret = -ENOMEM;
265 goto out;
266 }
267 buff[0] = '\0';
268 buff_pos = 0;
269
270 for (i = 0; i < hash->size; i++) {
271 head = &hash->table[i];
272
273 rcu_read_lock();
274 hlist_for_each_entry_rcu(info, node, head, hash_entry) {
275 packet = (struct vis_packet *)info->skb_packet->data;
276 entries = (struct vis_info_entry *)
277 ((char *)packet + sizeof(*packet));
278
279 for (j = 0; j < packet->entries; j++) {
280 if (entries[j].quality == 0)
281 continue;
282 compare =
283 compare_eth(entries[j].src, packet->vis_orig);
284 vis_data_insert_interface(entries[j].src,
285 &vis_if_list,
286 compare);
287 }
288
289 hlist_for_each_entry(entry, pos, &vis_if_list, list) {
290 buff_pos += sprintf(buff + buff_pos, "%pM,",
291 entry->addr);
292
293 for (j = 0; j < packet->entries; j++)
294 buff_pos += vis_data_read_entry(
295 buff + buff_pos,
296 &entries[j],
297 entry->addr,
298 entry->primary);
299
300 /* add primary/secondary records */
301 if (compare_eth(entry->addr, packet->vis_orig))
302 buff_pos +=
303 vis_data_read_prim_sec(buff + buff_pos,
304 &vis_if_list);
305
306 buff_pos += sprintf(buff + buff_pos, "\n");
307 }
308
309 hlist_for_each_entry_safe(entry, pos, n, &vis_if_list,
310 list) {
311 hlist_del(&entry->list);
312 kfree(entry);
313 }
314 }
315 rcu_read_unlock();
316 } 271 }
317
318 spin_unlock_bh(&bat_priv->vis_hash_lock); 272 spin_unlock_bh(&bat_priv->vis_hash_lock);
319 273
320 seq_printf(seq, "%s", buff);
321 kfree(buff);
322
323out: 274out:
324 if (primary_if) 275 if (primary_if)
325 hardif_free_ref(primary_if); 276 batadv_hardif_free_ref(primary_if);
326 return ret; 277 return ret;
327} 278}
328 279
329/* add the info packet to the send list, if it was not 280/* add the info packet to the send list, if it was not
330 * already linked in. */ 281 * already linked in.
331static void send_list_add(struct bat_priv *bat_priv, struct vis_info *info) 282 */
283static void batadv_send_list_add(struct batadv_priv *bat_priv,
284 struct batadv_vis_info *info)
332{ 285{
333 if (list_empty(&info->send_list)) { 286 if (list_empty(&info->send_list)) {
334 kref_get(&info->refcount); 287 kref_get(&info->refcount);
@@ -337,20 +290,21 @@ static void send_list_add(struct bat_priv *bat_priv, struct vis_info *info)
337} 290}
338 291
339/* delete the info packet from the send list, if it was 292/* delete the info packet from the send list, if it was
340 * linked in. */ 293 * linked in.
341static void send_list_del(struct vis_info *info) 294 */
295static void batadv_send_list_del(struct batadv_vis_info *info)
342{ 296{
343 if (!list_empty(&info->send_list)) { 297 if (!list_empty(&info->send_list)) {
344 list_del_init(&info->send_list); 298 list_del_init(&info->send_list);
345 kref_put(&info->refcount, free_info); 299 kref_put(&info->refcount, batadv_free_info);
346 } 300 }
347} 301}
348 302
349/* tries to add one entry to the receive list. */ 303/* tries to add one entry to the receive list. */
350static void recv_list_add(struct bat_priv *bat_priv, 304static void batadv_recv_list_add(struct batadv_priv *bat_priv,
351 struct list_head *recv_list, const char *mac) 305 struct list_head *recv_list, const char *mac)
352{ 306{
353 struct recvlist_node *entry; 307 struct batadv_recvlist_node *entry;
354 308
355 entry = kmalloc(sizeof(*entry), GFP_ATOMIC); 309 entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
356 if (!entry) 310 if (!entry)
@@ -363,14 +317,15 @@ static void recv_list_add(struct bat_priv *bat_priv,
363} 317}
364 318
365/* returns 1 if this mac is in the recv_list */ 319/* returns 1 if this mac is in the recv_list */
366static int recv_list_is_in(struct bat_priv *bat_priv, 320static int batadv_recv_list_is_in(struct batadv_priv *bat_priv,
367 const struct list_head *recv_list, const char *mac) 321 const struct list_head *recv_list,
322 const char *mac)
368{ 323{
369 const struct recvlist_node *entry; 324 const struct batadv_recvlist_node *entry;
370 325
371 spin_lock_bh(&bat_priv->vis_list_lock); 326 spin_lock_bh(&bat_priv->vis_list_lock);
372 list_for_each_entry(entry, recv_list, list) { 327 list_for_each_entry(entry, recv_list, list) {
373 if (compare_eth(entry->mac, mac)) { 328 if (batadv_compare_eth(entry->mac, mac)) {
374 spin_unlock_bh(&bat_priv->vis_list_lock); 329 spin_unlock_bh(&bat_priv->vis_list_lock);
375 return 1; 330 return 1;
376 } 331 }
@@ -381,17 +336,21 @@ static int recv_list_is_in(struct bat_priv *bat_priv,
381 336
382/* try to add the packet to the vis_hash. return NULL if invalid (e.g. too old, 337/* try to add the packet to the vis_hash. return NULL if invalid (e.g. too old,
383 * broken.. ). vis hash must be locked outside. is_new is set when the packet 338 * broken.. ). vis hash must be locked outside. is_new is set when the packet
384 * is newer than old entries in the hash. */ 339 * is newer than old entries in the hash.
385static struct vis_info *add_packet(struct bat_priv *bat_priv, 340 */
386 struct vis_packet *vis_packet, 341static struct batadv_vis_info *
387 int vis_info_len, int *is_new, 342batadv_add_packet(struct batadv_priv *bat_priv,
388 int make_broadcast) 343 struct batadv_vis_packet *vis_packet, int vis_info_len,
344 int *is_new, int make_broadcast)
389{ 345{
390 struct vis_info *info, *old_info; 346 struct batadv_vis_info *info, *old_info;
391 struct vis_packet *search_packet, *old_packet; 347 struct batadv_vis_packet *search_packet, *old_packet;
392 struct vis_info search_elem; 348 struct batadv_vis_info search_elem;
393 struct vis_packet *packet; 349 struct batadv_vis_packet *packet;
350 struct sk_buff *tmp_skb;
394 int hash_added; 351 int hash_added;
352 size_t len;
353 size_t max_entries;
395 354
396 *is_new = 0; 355 *is_new = 0;
397 /* sanity check */ 356 /* sanity check */
@@ -402,20 +361,23 @@ static struct vis_info *add_packet(struct bat_priv *bat_priv,
402 search_elem.skb_packet = dev_alloc_skb(sizeof(*search_packet)); 361 search_elem.skb_packet = dev_alloc_skb(sizeof(*search_packet));
403 if (!search_elem.skb_packet) 362 if (!search_elem.skb_packet)
404 return NULL; 363 return NULL;
405 search_packet = (struct vis_packet *)skb_put(search_elem.skb_packet, 364 len = sizeof(*search_packet);
406 sizeof(*search_packet)); 365 tmp_skb = search_elem.skb_packet;
366 search_packet = (struct batadv_vis_packet *)skb_put(tmp_skb, len);
407 367
408 memcpy(search_packet->vis_orig, vis_packet->vis_orig, ETH_ALEN); 368 memcpy(search_packet->vis_orig, vis_packet->vis_orig, ETH_ALEN);
409 old_info = vis_hash_find(bat_priv, &search_elem); 369 old_info = batadv_vis_hash_find(bat_priv, &search_elem);
410 kfree_skb(search_elem.skb_packet); 370 kfree_skb(search_elem.skb_packet);
411 371
412 if (old_info) { 372 if (old_info) {
413 old_packet = (struct vis_packet *)old_info->skb_packet->data; 373 tmp_skb = old_info->skb_packet;
414 if (!seq_after(ntohl(vis_packet->seqno), 374 old_packet = (struct batadv_vis_packet *)tmp_skb->data;
415 ntohl(old_packet->seqno))) { 375 if (!batadv_seq_after(ntohl(vis_packet->seqno),
376 ntohl(old_packet->seqno))) {
416 if (old_packet->seqno == vis_packet->seqno) { 377 if (old_packet->seqno == vis_packet->seqno) {
417 recv_list_add(bat_priv, &old_info->recv_list, 378 batadv_recv_list_add(bat_priv,
418 vis_packet->sender_orig); 379 &old_info->recv_list,
380 vis_packet->sender_orig);
419 return old_info; 381 return old_info;
420 } else { 382 } else {
421 /* newer packet is already in hash. */ 383 /* newer packet is already in hash. */
@@ -423,52 +385,53 @@ static struct vis_info *add_packet(struct bat_priv *bat_priv,
423 } 385 }
424 } 386 }
425 /* remove old entry */ 387 /* remove old entry */
426 hash_remove(bat_priv->vis_hash, vis_info_cmp, vis_info_choose, 388 batadv_hash_remove(bat_priv->vis_hash, batadv_vis_info_cmp,
427 old_info); 389 batadv_vis_info_choose, old_info);
428 send_list_del(old_info); 390 batadv_send_list_del(old_info);
429 kref_put(&old_info->refcount, free_info); 391 kref_put(&old_info->refcount, batadv_free_info);
430 } 392 }
431 393
432 info = kmalloc(sizeof(*info), GFP_ATOMIC); 394 info = kmalloc(sizeof(*info), GFP_ATOMIC);
433 if (!info) 395 if (!info)
434 return NULL; 396 return NULL;
435 397
436 info->skb_packet = dev_alloc_skb(sizeof(*packet) + vis_info_len + 398 len = sizeof(*packet) + vis_info_len;
437 ETH_HLEN); 399 info->skb_packet = dev_alloc_skb(len + ETH_HLEN);
438 if (!info->skb_packet) { 400 if (!info->skb_packet) {
439 kfree(info); 401 kfree(info);
440 return NULL; 402 return NULL;
441 } 403 }
442 skb_reserve(info->skb_packet, ETH_HLEN); 404 skb_reserve(info->skb_packet, ETH_HLEN);
443 packet = (struct vis_packet *)skb_put(info->skb_packet, sizeof(*packet) 405 packet = (struct batadv_vis_packet *)skb_put(info->skb_packet, len);
444 + vis_info_len);
445 406
446 kref_init(&info->refcount); 407 kref_init(&info->refcount);
447 INIT_LIST_HEAD(&info->send_list); 408 INIT_LIST_HEAD(&info->send_list);
448 INIT_LIST_HEAD(&info->recv_list); 409 INIT_LIST_HEAD(&info->recv_list);
449 info->first_seen = jiffies; 410 info->first_seen = jiffies;
450 info->bat_priv = bat_priv; 411 info->bat_priv = bat_priv;
451 memcpy(packet, vis_packet, sizeof(*packet) + vis_info_len); 412 memcpy(packet, vis_packet, len);
452 413
453 /* initialize and add new packet. */ 414 /* initialize and add new packet. */
454 *is_new = 1; 415 *is_new = 1;
455 416
456 /* Make it a broadcast packet, if required */ 417 /* Make it a broadcast packet, if required */
457 if (make_broadcast) 418 if (make_broadcast)
458 memcpy(packet->target_orig, broadcast_addr, ETH_ALEN); 419 memcpy(packet->target_orig, batadv_broadcast_addr, ETH_ALEN);
459 420
460 /* repair if entries is longer than packet. */ 421 /* repair if entries is longer than packet. */
461 if (packet->entries * sizeof(struct vis_info_entry) > vis_info_len) 422 max_entries = vis_info_len / sizeof(struct batadv_vis_info_entry);
462 packet->entries = vis_info_len / sizeof(struct vis_info_entry); 423 if (packet->entries > max_entries)
424 packet->entries = max_entries;
463 425
464 recv_list_add(bat_priv, &info->recv_list, packet->sender_orig); 426 batadv_recv_list_add(bat_priv, &info->recv_list, packet->sender_orig);
465 427
466 /* try to add it */ 428 /* try to add it */
467 hash_added = hash_add(bat_priv->vis_hash, vis_info_cmp, vis_info_choose, 429 hash_added = batadv_hash_add(bat_priv->vis_hash, batadv_vis_info_cmp,
468 info, &info->hash_entry); 430 batadv_vis_info_choose, info,
431 &info->hash_entry);
469 if (hash_added != 0) { 432 if (hash_added != 0) {
470 /* did not work (for some reason) */ 433 /* did not work (for some reason) */
471 kref_put(&info->refcount, free_info); 434 kref_put(&info->refcount, batadv_free_info);
472 info = NULL; 435 info = NULL;
473 } 436 }
474 437
@@ -476,37 +439,38 @@ static struct vis_info *add_packet(struct bat_priv *bat_priv,
476} 439}
477 440
478/* handle the server sync packet, forward if needed. */ 441/* handle the server sync packet, forward if needed. */
479void receive_server_sync_packet(struct bat_priv *bat_priv, 442void batadv_receive_server_sync_packet(struct batadv_priv *bat_priv,
480 struct vis_packet *vis_packet, 443 struct batadv_vis_packet *vis_packet,
481 int vis_info_len) 444 int vis_info_len)
482{ 445{
483 struct vis_info *info; 446 struct batadv_vis_info *info;
484 int is_new, make_broadcast; 447 int is_new, make_broadcast;
485 int vis_server = atomic_read(&bat_priv->vis_mode); 448 int vis_server = atomic_read(&bat_priv->vis_mode);
486 449
487 make_broadcast = (vis_server == VIS_TYPE_SERVER_SYNC); 450 make_broadcast = (vis_server == BATADV_VIS_TYPE_SERVER_SYNC);
488 451
489 spin_lock_bh(&bat_priv->vis_hash_lock); 452 spin_lock_bh(&bat_priv->vis_hash_lock);
490 info = add_packet(bat_priv, vis_packet, vis_info_len, 453 info = batadv_add_packet(bat_priv, vis_packet, vis_info_len,
491 &is_new, make_broadcast); 454 &is_new, make_broadcast);
492 if (!info) 455 if (!info)
493 goto end; 456 goto end;
494 457
495 /* only if we are server ourselves and packet is newer than the one in 458 /* only if we are server ourselves and packet is newer than the one in
496 * hash.*/ 459 * hash.
497 if (vis_server == VIS_TYPE_SERVER_SYNC && is_new) 460 */
498 send_list_add(bat_priv, info); 461 if (vis_server == BATADV_VIS_TYPE_SERVER_SYNC && is_new)
462 batadv_send_list_add(bat_priv, info);
499end: 463end:
500 spin_unlock_bh(&bat_priv->vis_hash_lock); 464 spin_unlock_bh(&bat_priv->vis_hash_lock);
501} 465}
502 466
503/* handle an incoming client update packet and schedule forward if needed. */ 467/* handle an incoming client update packet and schedule forward if needed. */
504void receive_client_update_packet(struct bat_priv *bat_priv, 468void batadv_receive_client_update_packet(struct batadv_priv *bat_priv,
505 struct vis_packet *vis_packet, 469 struct batadv_vis_packet *vis_packet,
506 int vis_info_len) 470 int vis_info_len)
507{ 471{
508 struct vis_info *info; 472 struct batadv_vis_info *info;
509 struct vis_packet *packet; 473 struct batadv_vis_packet *packet;
510 int is_new; 474 int is_new;
511 int vis_server = atomic_read(&bat_priv->vis_mode); 475 int vis_server = atomic_read(&bat_priv->vis_mode);
512 int are_target = 0; 476 int are_target = 0;
@@ -516,28 +480,28 @@ void receive_client_update_packet(struct bat_priv *bat_priv,
516 return; 480 return;
517 481
518 /* Are we the target for this VIS packet? */ 482 /* Are we the target for this VIS packet? */
519 if (vis_server == VIS_TYPE_SERVER_SYNC && 483 if (vis_server == BATADV_VIS_TYPE_SERVER_SYNC &&
520 is_my_mac(vis_packet->target_orig)) 484 batadv_is_my_mac(vis_packet->target_orig))
521 are_target = 1; 485 are_target = 1;
522 486
523 spin_lock_bh(&bat_priv->vis_hash_lock); 487 spin_lock_bh(&bat_priv->vis_hash_lock);
524 info = add_packet(bat_priv, vis_packet, vis_info_len, 488 info = batadv_add_packet(bat_priv, vis_packet, vis_info_len,
525 &is_new, are_target); 489 &is_new, are_target);
526 490
527 if (!info) 491 if (!info)
528 goto end; 492 goto end;
529 /* note that outdated packets will be dropped at this point. */ 493 /* note that outdated packets will be dropped at this point. */
530 494
531 packet = (struct vis_packet *)info->skb_packet->data; 495 packet = (struct batadv_vis_packet *)info->skb_packet->data;
532 496
533 /* send only if we're the target server or ... */ 497 /* send only if we're the target server or ... */
534 if (are_target && is_new) { 498 if (are_target && is_new) {
535 packet->vis_type = VIS_TYPE_SERVER_SYNC; /* upgrade! */ 499 packet->vis_type = BATADV_VIS_TYPE_SERVER_SYNC; /* upgrade! */
536 send_list_add(bat_priv, info); 500 batadv_send_list_add(bat_priv, info);
537 501
538 /* ... we're not the recipient (and thus need to forward). */ 502 /* ... we're not the recipient (and thus need to forward). */
539 } else if (!is_my_mac(packet->target_orig)) { 503 } else if (!batadv_is_my_mac(packet->target_orig)) {
540 send_list_add(bat_priv, info); 504 batadv_send_list_add(bat_priv, info);
541 } 505 }
542 506
543end: 507end:
@@ -547,37 +511,38 @@ end:
547/* Walk the originators and find the VIS server with the best tq. Set the packet 511/* Walk the originators and find the VIS server with the best tq. Set the packet
548 * address to its address and return the best_tq. 512 * address to its address and return the best_tq.
549 * 513 *
550 * Must be called with the originator hash locked */ 514 * Must be called with the originator hash locked
551static int find_best_vis_server(struct bat_priv *bat_priv, 515 */
552 struct vis_info *info) 516static int batadv_find_best_vis_server(struct batadv_priv *bat_priv,
517 struct batadv_vis_info *info)
553{ 518{
554 struct hashtable_t *hash = bat_priv->orig_hash; 519 struct batadv_hashtable *hash = bat_priv->orig_hash;
555 struct neigh_node *router; 520 struct batadv_neigh_node *router;
556 struct hlist_node *node; 521 struct hlist_node *node;
557 struct hlist_head *head; 522 struct hlist_head *head;
558 struct orig_node *orig_node; 523 struct batadv_orig_node *orig_node;
559 struct vis_packet *packet; 524 struct batadv_vis_packet *packet;
560 int best_tq = -1; 525 int best_tq = -1;
561 uint32_t i; 526 uint32_t i;
562 527
563 packet = (struct vis_packet *)info->skb_packet->data; 528 packet = (struct batadv_vis_packet *)info->skb_packet->data;
564 529
565 for (i = 0; i < hash->size; i++) { 530 for (i = 0; i < hash->size; i++) {
566 head = &hash->table[i]; 531 head = &hash->table[i];
567 532
568 rcu_read_lock(); 533 rcu_read_lock();
569 hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) { 534 hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
570 router = orig_node_get_router(orig_node); 535 router = batadv_orig_node_get_router(orig_node);
571 if (!router) 536 if (!router)
572 continue; 537 continue;
573 538
574 if ((orig_node->flags & VIS_SERVER) && 539 if ((orig_node->flags & BATADV_VIS_SERVER) &&
575 (router->tq_avg > best_tq)) { 540 (router->tq_avg > best_tq)) {
576 best_tq = router->tq_avg; 541 best_tq = router->tq_avg;
577 memcpy(packet->target_orig, orig_node->orig, 542 memcpy(packet->target_orig, orig_node->orig,
578 ETH_ALEN); 543 ETH_ALEN);
579 } 544 }
580 neigh_node_free_ref(router); 545 batadv_neigh_node_free_ref(router);
581 } 546 }
582 rcu_read_unlock(); 547 rcu_read_unlock();
583 } 548 }
@@ -586,47 +551,52 @@ static int find_best_vis_server(struct bat_priv *bat_priv,
586} 551}
587 552
588/* Return true if the vis packet is full. */ 553/* Return true if the vis packet is full. */
589static bool vis_packet_full(const struct vis_info *info) 554static bool batadv_vis_packet_full(const struct batadv_vis_info *info)
590{ 555{
591 const struct vis_packet *packet; 556 const struct batadv_vis_packet *packet;
592 packet = (struct vis_packet *)info->skb_packet->data; 557 size_t num;
558
559 packet = (struct batadv_vis_packet *)info->skb_packet->data;
560 num = BATADV_MAX_VIS_PACKET_SIZE / sizeof(struct batadv_vis_info_entry);
593 561
594 if (MAX_VIS_PACKET_SIZE / sizeof(struct vis_info_entry) 562 if (num < packet->entries + 1)
595 < packet->entries + 1)
596 return true; 563 return true;
597 return false; 564 return false;
598} 565}
599 566
600/* generates a packet of own vis data, 567/* generates a packet of own vis data,
601 * returns 0 on success, -1 if no packet could be generated */ 568 * returns 0 on success, -1 if no packet could be generated
602static int generate_vis_packet(struct bat_priv *bat_priv) 569 */
570static int batadv_generate_vis_packet(struct batadv_priv *bat_priv)
603{ 571{
604 struct hashtable_t *hash = bat_priv->orig_hash; 572 struct batadv_hashtable *hash = bat_priv->orig_hash;
605 struct hlist_node *node; 573 struct hlist_node *node;
606 struct hlist_head *head; 574 struct hlist_head *head;
607 struct orig_node *orig_node; 575 struct batadv_orig_node *orig_node;
608 struct neigh_node *router; 576 struct batadv_neigh_node *router;
609 struct vis_info *info = bat_priv->my_vis_info; 577 struct batadv_vis_info *info = bat_priv->my_vis_info;
610 struct vis_packet *packet = (struct vis_packet *)info->skb_packet->data; 578 struct batadv_vis_packet *packet;
611 struct vis_info_entry *entry; 579 struct batadv_vis_info_entry *entry;
612 struct tt_common_entry *tt_common_entry; 580 struct batadv_tt_common_entry *tt_common_entry;
613 int best_tq = -1; 581 int best_tq = -1;
614 uint32_t i; 582 uint32_t i;
615 583
616 info->first_seen = jiffies; 584 info->first_seen = jiffies;
585 packet = (struct batadv_vis_packet *)info->skb_packet->data;
617 packet->vis_type = atomic_read(&bat_priv->vis_mode); 586 packet->vis_type = atomic_read(&bat_priv->vis_mode);
618 587
619 memcpy(packet->target_orig, broadcast_addr, ETH_ALEN); 588 memcpy(packet->target_orig, batadv_broadcast_addr, ETH_ALEN);
620 packet->header.ttl = TTL; 589 packet->header.ttl = BATADV_TTL;
621 packet->seqno = htonl(ntohl(packet->seqno) + 1); 590 packet->seqno = htonl(ntohl(packet->seqno) + 1);
622 packet->entries = 0; 591 packet->entries = 0;
592 packet->reserved = 0;
623 skb_trim(info->skb_packet, sizeof(*packet)); 593 skb_trim(info->skb_packet, sizeof(*packet));
624 594
625 if (packet->vis_type == VIS_TYPE_CLIENT_UPDATE) { 595 if (packet->vis_type == BATADV_VIS_TYPE_CLIENT_UPDATE) {
626 best_tq = find_best_vis_server(bat_priv, info); 596 best_tq = batadv_find_best_vis_server(bat_priv, info);
627 597
628 if (best_tq < 0) 598 if (best_tq < 0)
629 return -1; 599 return best_tq;
630 } 600 }
631 601
632 for (i = 0; i < hash->size; i++) { 602 for (i = 0; i < hash->size; i++) {
@@ -634,21 +604,21 @@ static int generate_vis_packet(struct bat_priv *bat_priv)
634 604
635 rcu_read_lock(); 605 rcu_read_lock();
636 hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) { 606 hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
637 router = orig_node_get_router(orig_node); 607 router = batadv_orig_node_get_router(orig_node);
638 if (!router) 608 if (!router)
639 continue; 609 continue;
640 610
641 if (!compare_eth(router->addr, orig_node->orig)) 611 if (!batadv_compare_eth(router->addr, orig_node->orig))
642 goto next; 612 goto next;
643 613
644 if (router->if_incoming->if_status != IF_ACTIVE) 614 if (router->if_incoming->if_status != BATADV_IF_ACTIVE)
645 goto next; 615 goto next;
646 616
647 if (router->tq_avg < 1) 617 if (router->tq_avg < 1)
648 goto next; 618 goto next;
649 619
650 /* fill one entry into buffer. */ 620 /* fill one entry into buffer. */
651 entry = (struct vis_info_entry *) 621 entry = (struct batadv_vis_info_entry *)
652 skb_put(info->skb_packet, sizeof(*entry)); 622 skb_put(info->skb_packet, sizeof(*entry));
653 memcpy(entry->src, 623 memcpy(entry->src,
654 router->if_incoming->net_dev->dev_addr, 624 router->if_incoming->net_dev->dev_addr,
@@ -658,9 +628,9 @@ static int generate_vis_packet(struct bat_priv *bat_priv)
658 packet->entries++; 628 packet->entries++;
659 629
660next: 630next:
661 neigh_node_free_ref(router); 631 batadv_neigh_node_free_ref(router);
662 632
663 if (vis_packet_full(info)) 633 if (batadv_vis_packet_full(info))
664 goto unlock; 634 goto unlock;
665 } 635 }
666 rcu_read_unlock(); 636 rcu_read_unlock();
@@ -674,7 +644,7 @@ next:
674 rcu_read_lock(); 644 rcu_read_lock();
675 hlist_for_each_entry_rcu(tt_common_entry, node, head, 645 hlist_for_each_entry_rcu(tt_common_entry, node, head,
676 hash_entry) { 646 hash_entry) {
677 entry = (struct vis_info_entry *) 647 entry = (struct batadv_vis_info_entry *)
678 skb_put(info->skb_packet, 648 skb_put(info->skb_packet,
679 sizeof(*entry)); 649 sizeof(*entry));
680 memset(entry->src, 0, ETH_ALEN); 650 memset(entry->src, 0, ETH_ALEN);
@@ -682,7 +652,7 @@ next:
682 entry->quality = 0; /* 0 means TT */ 652 entry->quality = 0; /* 0 means TT */
683 packet->entries++; 653 packet->entries++;
684 654
685 if (vis_packet_full(info)) 655 if (batadv_vis_packet_full(info))
686 goto unlock; 656 goto unlock;
687 } 657 }
688 rcu_read_unlock(); 658 rcu_read_unlock();
@@ -696,14 +666,15 @@ unlock:
696} 666}
697 667
698/* free old vis packets. Must be called with this vis_hash_lock 668/* free old vis packets. Must be called with this vis_hash_lock
699 * held */ 669 * held
700static void purge_vis_packets(struct bat_priv *bat_priv) 670 */
671static void batadv_purge_vis_packets(struct batadv_priv *bat_priv)
701{ 672{
702 uint32_t i; 673 uint32_t i;
703 struct hashtable_t *hash = bat_priv->vis_hash; 674 struct batadv_hashtable *hash = bat_priv->vis_hash;
704 struct hlist_node *node, *node_tmp; 675 struct hlist_node *node, *node_tmp;
705 struct hlist_head *head; 676 struct hlist_head *head;
706 struct vis_info *info; 677 struct batadv_vis_info *info;
707 678
708 for (i = 0; i < hash->size; i++) { 679 for (i = 0; i < hash->size; i++) {
709 head = &hash->table[i]; 680 head = &hash->table[i];
@@ -714,31 +685,32 @@ static void purge_vis_packets(struct bat_priv *bat_priv)
714 if (info == bat_priv->my_vis_info) 685 if (info == bat_priv->my_vis_info)
715 continue; 686 continue;
716 687
717 if (has_timed_out(info->first_seen, VIS_TIMEOUT)) { 688 if (batadv_has_timed_out(info->first_seen,
689 BATADV_VIS_TIMEOUT)) {
718 hlist_del(node); 690 hlist_del(node);
719 send_list_del(info); 691 batadv_send_list_del(info);
720 kref_put(&info->refcount, free_info); 692 kref_put(&info->refcount, batadv_free_info);
721 } 693 }
722 } 694 }
723 } 695 }
724} 696}
725 697
726static void broadcast_vis_packet(struct bat_priv *bat_priv, 698static void batadv_broadcast_vis_packet(struct batadv_priv *bat_priv,
727 struct vis_info *info) 699 struct batadv_vis_info *info)
728{ 700{
729 struct neigh_node *router; 701 struct batadv_neigh_node *router;
730 struct hashtable_t *hash = bat_priv->orig_hash; 702 struct batadv_hashtable *hash = bat_priv->orig_hash;
731 struct hlist_node *node; 703 struct hlist_node *node;
732 struct hlist_head *head; 704 struct hlist_head *head;
733 struct orig_node *orig_node; 705 struct batadv_orig_node *orig_node;
734 struct vis_packet *packet; 706 struct batadv_vis_packet *packet;
735 struct sk_buff *skb; 707 struct sk_buff *skb;
736 struct hard_iface *hard_iface; 708 struct batadv_hard_iface *hard_iface;
737 uint8_t dstaddr[ETH_ALEN]; 709 uint8_t dstaddr[ETH_ALEN];
738 uint32_t i; 710 uint32_t i;
739 711
740 712
741 packet = (struct vis_packet *)info->skb_packet->data; 713 packet = (struct batadv_vis_packet *)info->skb_packet->data;
742 714
743 /* send to all routers in range. */ 715 /* send to all routers in range. */
744 for (i = 0; i < hash->size; i++) { 716 for (i = 0; i < hash->size; i++) {
@@ -747,18 +719,19 @@ static void broadcast_vis_packet(struct bat_priv *bat_priv,
747 rcu_read_lock(); 719 rcu_read_lock();
748 hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) { 720 hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
749 /* if it's a vis server and reachable, send it. */ 721 /* if it's a vis server and reachable, send it. */
750 if (!(orig_node->flags & VIS_SERVER)) 722 if (!(orig_node->flags & BATADV_VIS_SERVER))
751 continue; 723 continue;
752 724
753 router = orig_node_get_router(orig_node); 725 router = batadv_orig_node_get_router(orig_node);
754 if (!router) 726 if (!router)
755 continue; 727 continue;
756 728
757 /* don't send it if we already received the packet from 729 /* don't send it if we already received the packet from
758 * this node. */ 730 * this node.
759 if (recv_list_is_in(bat_priv, &info->recv_list, 731 */
760 orig_node->orig)) { 732 if (batadv_recv_list_is_in(bat_priv, &info->recv_list,
761 neigh_node_free_ref(router); 733 orig_node->orig)) {
734 batadv_neigh_node_free_ref(router);
762 continue; 735 continue;
763 } 736 }
764 737
@@ -766,57 +739,59 @@ static void broadcast_vis_packet(struct bat_priv *bat_priv,
766 hard_iface = router->if_incoming; 739 hard_iface = router->if_incoming;
767 memcpy(dstaddr, router->addr, ETH_ALEN); 740 memcpy(dstaddr, router->addr, ETH_ALEN);
768 741
769 neigh_node_free_ref(router); 742 batadv_neigh_node_free_ref(router);
770 743
771 skb = skb_clone(info->skb_packet, GFP_ATOMIC); 744 skb = skb_clone(info->skb_packet, GFP_ATOMIC);
772 if (skb) 745 if (skb)
773 send_skb_packet(skb, hard_iface, dstaddr); 746 batadv_send_skb_packet(skb, hard_iface,
747 dstaddr);
774 748
775 } 749 }
776 rcu_read_unlock(); 750 rcu_read_unlock();
777 } 751 }
778} 752}
779 753
780static void unicast_vis_packet(struct bat_priv *bat_priv, 754static void batadv_unicast_vis_packet(struct batadv_priv *bat_priv,
781 struct vis_info *info) 755 struct batadv_vis_info *info)
782{ 756{
783 struct orig_node *orig_node; 757 struct batadv_orig_node *orig_node;
784 struct neigh_node *router = NULL; 758 struct batadv_neigh_node *router = NULL;
785 struct sk_buff *skb; 759 struct sk_buff *skb;
786 struct vis_packet *packet; 760 struct batadv_vis_packet *packet;
787 761
788 packet = (struct vis_packet *)info->skb_packet->data; 762 packet = (struct batadv_vis_packet *)info->skb_packet->data;
789 763
790 orig_node = orig_hash_find(bat_priv, packet->target_orig); 764 orig_node = batadv_orig_hash_find(bat_priv, packet->target_orig);
791 if (!orig_node) 765 if (!orig_node)
792 goto out; 766 goto out;
793 767
794 router = orig_node_get_router(orig_node); 768 router = batadv_orig_node_get_router(orig_node);
795 if (!router) 769 if (!router)
796 goto out; 770 goto out;
797 771
798 skb = skb_clone(info->skb_packet, GFP_ATOMIC); 772 skb = skb_clone(info->skb_packet, GFP_ATOMIC);
799 if (skb) 773 if (skb)
800 send_skb_packet(skb, router->if_incoming, router->addr); 774 batadv_send_skb_packet(skb, router->if_incoming, router->addr);
801 775
802out: 776out:
803 if (router) 777 if (router)
804 neigh_node_free_ref(router); 778 batadv_neigh_node_free_ref(router);
805 if (orig_node) 779 if (orig_node)
806 orig_node_free_ref(orig_node); 780 batadv_orig_node_free_ref(orig_node);
807} 781}
808 782
809/* only send one vis packet. called from send_vis_packets() */ 783/* only send one vis packet. called from batadv_send_vis_packets() */
810static void send_vis_packet(struct bat_priv *bat_priv, struct vis_info *info) 784static void batadv_send_vis_packet(struct batadv_priv *bat_priv,
785 struct batadv_vis_info *info)
811{ 786{
812 struct hard_iface *primary_if; 787 struct batadv_hard_iface *primary_if;
813 struct vis_packet *packet; 788 struct batadv_vis_packet *packet;
814 789
815 primary_if = primary_if_get_selected(bat_priv); 790 primary_if = batadv_primary_if_get_selected(bat_priv);
816 if (!primary_if) 791 if (!primary_if)
817 goto out; 792 goto out;
818 793
819 packet = (struct vis_packet *)info->skb_packet->data; 794 packet = (struct batadv_vis_packet *)info->skb_packet->data;
820 if (packet->header.ttl < 2) { 795 if (packet->header.ttl < 2) {
821 pr_debug("Error - can't send vis packet: ttl exceeded\n"); 796 pr_debug("Error - can't send vis packet: ttl exceeded\n");
822 goto out; 797 goto out;
@@ -826,31 +801,31 @@ static void send_vis_packet(struct bat_priv *bat_priv, struct vis_info *info)
826 packet->header.ttl--; 801 packet->header.ttl--;
827 802
828 if (is_broadcast_ether_addr(packet->target_orig)) 803 if (is_broadcast_ether_addr(packet->target_orig))
829 broadcast_vis_packet(bat_priv, info); 804 batadv_broadcast_vis_packet(bat_priv, info);
830 else 805 else
831 unicast_vis_packet(bat_priv, info); 806 batadv_unicast_vis_packet(bat_priv, info);
832 packet->header.ttl++; /* restore TTL */ 807 packet->header.ttl++; /* restore TTL */
833 808
834out: 809out:
835 if (primary_if) 810 if (primary_if)
836 hardif_free_ref(primary_if); 811 batadv_hardif_free_ref(primary_if);
837} 812}
838 813
839/* called from timer; send (and maybe generate) vis packet. */ 814/* called from timer; send (and maybe generate) vis packet. */
840static void send_vis_packets(struct work_struct *work) 815static void batadv_send_vis_packets(struct work_struct *work)
841{ 816{
842 struct delayed_work *delayed_work = 817 struct delayed_work *delayed_work =
843 container_of(work, struct delayed_work, work); 818 container_of(work, struct delayed_work, work);
844 struct bat_priv *bat_priv = 819 struct batadv_priv *bat_priv;
845 container_of(delayed_work, struct bat_priv, vis_work); 820 struct batadv_vis_info *info;
846 struct vis_info *info;
847 821
822 bat_priv = container_of(delayed_work, struct batadv_priv, vis_work);
848 spin_lock_bh(&bat_priv->vis_hash_lock); 823 spin_lock_bh(&bat_priv->vis_hash_lock);
849 purge_vis_packets(bat_priv); 824 batadv_purge_vis_packets(bat_priv);
850 825
851 if (generate_vis_packet(bat_priv) == 0) { 826 if (batadv_generate_vis_packet(bat_priv) == 0) {
852 /* schedule if generation was successful */ 827 /* schedule if generation was successful */
853 send_list_add(bat_priv, bat_priv->my_vis_info); 828 batadv_send_list_add(bat_priv, bat_priv->my_vis_info);
854 } 829 }
855 830
856 while (!list_empty(&bat_priv->vis_send_list)) { 831 while (!list_empty(&bat_priv->vis_send_list)) {
@@ -860,98 +835,103 @@ static void send_vis_packets(struct work_struct *work)
860 kref_get(&info->refcount); 835 kref_get(&info->refcount);
861 spin_unlock_bh(&bat_priv->vis_hash_lock); 836 spin_unlock_bh(&bat_priv->vis_hash_lock);
862 837
863 send_vis_packet(bat_priv, info); 838 batadv_send_vis_packet(bat_priv, info);
864 839
865 spin_lock_bh(&bat_priv->vis_hash_lock); 840 spin_lock_bh(&bat_priv->vis_hash_lock);
866 send_list_del(info); 841 batadv_send_list_del(info);
867 kref_put(&info->refcount, free_info); 842 kref_put(&info->refcount, batadv_free_info);
868 } 843 }
869 spin_unlock_bh(&bat_priv->vis_hash_lock); 844 spin_unlock_bh(&bat_priv->vis_hash_lock);
870 start_vis_timer(bat_priv); 845 batadv_start_vis_timer(bat_priv);
871} 846}
872 847
873/* init the vis server. this may only be called when if_list is already 848/* init the vis server. this may only be called when if_list is already
874 * initialized (e.g. bat0 is initialized, interfaces have been added) */ 849 * initialized (e.g. bat0 is initialized, interfaces have been added)
875int vis_init(struct bat_priv *bat_priv) 850 */
851int batadv_vis_init(struct batadv_priv *bat_priv)
876{ 852{
877 struct vis_packet *packet; 853 struct batadv_vis_packet *packet;
878 int hash_added; 854 int hash_added;
855 unsigned int len;
856 unsigned long first_seen;
857 struct sk_buff *tmp_skb;
879 858
880 if (bat_priv->vis_hash) 859 if (bat_priv->vis_hash)
881 return 1; 860 return 0;
882 861
883 spin_lock_bh(&bat_priv->vis_hash_lock); 862 spin_lock_bh(&bat_priv->vis_hash_lock);
884 863
885 bat_priv->vis_hash = hash_new(256); 864 bat_priv->vis_hash = batadv_hash_new(256);
886 if (!bat_priv->vis_hash) { 865 if (!bat_priv->vis_hash) {
887 pr_err("Can't initialize vis_hash\n"); 866 pr_err("Can't initialize vis_hash\n");
888 goto err; 867 goto err;
889 } 868 }
890 869
891 bat_priv->my_vis_info = kmalloc(MAX_VIS_PACKET_SIZE, GFP_ATOMIC); 870 bat_priv->my_vis_info = kmalloc(BATADV_MAX_VIS_PACKET_SIZE, GFP_ATOMIC);
892 if (!bat_priv->my_vis_info) 871 if (!bat_priv->my_vis_info)
893 goto err; 872 goto err;
894 873
895 bat_priv->my_vis_info->skb_packet = dev_alloc_skb(sizeof(*packet) + 874 len = sizeof(*packet) + BATADV_MAX_VIS_PACKET_SIZE + ETH_HLEN;
896 MAX_VIS_PACKET_SIZE + 875 bat_priv->my_vis_info->skb_packet = dev_alloc_skb(len);
897 ETH_HLEN);
898 if (!bat_priv->my_vis_info->skb_packet) 876 if (!bat_priv->my_vis_info->skb_packet)
899 goto free_info; 877 goto free_info;
900 878
901 skb_reserve(bat_priv->my_vis_info->skb_packet, ETH_HLEN); 879 skb_reserve(bat_priv->my_vis_info->skb_packet, ETH_HLEN);
902 packet = (struct vis_packet *)skb_put(bat_priv->my_vis_info->skb_packet, 880 tmp_skb = bat_priv->my_vis_info->skb_packet;
903 sizeof(*packet)); 881 packet = (struct batadv_vis_packet *)skb_put(tmp_skb, sizeof(*packet));
904 882
905 /* prefill the vis info */ 883 /* prefill the vis info */
906 bat_priv->my_vis_info->first_seen = jiffies - 884 first_seen = jiffies - msecs_to_jiffies(BATADV_VIS_INTERVAL);
907 msecs_to_jiffies(VIS_INTERVAL); 885 bat_priv->my_vis_info->first_seen = first_seen;
908 INIT_LIST_HEAD(&bat_priv->my_vis_info->recv_list); 886 INIT_LIST_HEAD(&bat_priv->my_vis_info->recv_list);
909 INIT_LIST_HEAD(&bat_priv->my_vis_info->send_list); 887 INIT_LIST_HEAD(&bat_priv->my_vis_info->send_list);
910 kref_init(&bat_priv->my_vis_info->refcount); 888 kref_init(&bat_priv->my_vis_info->refcount);
911 bat_priv->my_vis_info->bat_priv = bat_priv; 889 bat_priv->my_vis_info->bat_priv = bat_priv;
912 packet->header.version = COMPAT_VERSION; 890 packet->header.version = BATADV_COMPAT_VERSION;
913 packet->header.packet_type = BAT_VIS; 891 packet->header.packet_type = BATADV_VIS;
914 packet->header.ttl = TTL; 892 packet->header.ttl = BATADV_TTL;
915 packet->seqno = 0; 893 packet->seqno = 0;
894 packet->reserved = 0;
916 packet->entries = 0; 895 packet->entries = 0;
917 896
918 INIT_LIST_HEAD(&bat_priv->vis_send_list); 897 INIT_LIST_HEAD(&bat_priv->vis_send_list);
919 898
920 hash_added = hash_add(bat_priv->vis_hash, vis_info_cmp, vis_info_choose, 899 hash_added = batadv_hash_add(bat_priv->vis_hash, batadv_vis_info_cmp,
921 bat_priv->my_vis_info, 900 batadv_vis_info_choose,
922 &bat_priv->my_vis_info->hash_entry); 901 bat_priv->my_vis_info,
902 &bat_priv->my_vis_info->hash_entry);
923 if (hash_added != 0) { 903 if (hash_added != 0) {
924 pr_err("Can't add own vis packet into hash\n"); 904 pr_err("Can't add own vis packet into hash\n");
925 /* not in hash, need to remove it manually. */ 905 /* not in hash, need to remove it manually. */
926 kref_put(&bat_priv->my_vis_info->refcount, free_info); 906 kref_put(&bat_priv->my_vis_info->refcount, batadv_free_info);
927 goto err; 907 goto err;
928 } 908 }
929 909
930 spin_unlock_bh(&bat_priv->vis_hash_lock); 910 spin_unlock_bh(&bat_priv->vis_hash_lock);
931 start_vis_timer(bat_priv); 911 batadv_start_vis_timer(bat_priv);
932 return 1; 912 return 0;
933 913
934free_info: 914free_info:
935 kfree(bat_priv->my_vis_info); 915 kfree(bat_priv->my_vis_info);
936 bat_priv->my_vis_info = NULL; 916 bat_priv->my_vis_info = NULL;
937err: 917err:
938 spin_unlock_bh(&bat_priv->vis_hash_lock); 918 spin_unlock_bh(&bat_priv->vis_hash_lock);
939 vis_quit(bat_priv); 919 batadv_vis_quit(bat_priv);
940 return 0; 920 return -ENOMEM;
941} 921}
942 922
943/* Decrease the reference count on a hash item info */ 923/* Decrease the reference count on a hash item info */
944static void free_info_ref(struct hlist_node *node, void *arg) 924static void batadv_free_info_ref(struct hlist_node *node, void *arg)
945{ 925{
946 struct vis_info *info; 926 struct batadv_vis_info *info;
947 927
948 info = container_of(node, struct vis_info, hash_entry); 928 info = container_of(node, struct batadv_vis_info, hash_entry);
949 send_list_del(info); 929 batadv_send_list_del(info);
950 kref_put(&info->refcount, free_info); 930 kref_put(&info->refcount, batadv_free_info);
951} 931}
952 932
953/* shutdown vis-server */ 933/* shutdown vis-server */
954void vis_quit(struct bat_priv *bat_priv) 934void batadv_vis_quit(struct batadv_priv *bat_priv)
955{ 935{
956 if (!bat_priv->vis_hash) 936 if (!bat_priv->vis_hash)
957 return; 937 return;
@@ -960,16 +940,16 @@ void vis_quit(struct bat_priv *bat_priv)
960 940
961 spin_lock_bh(&bat_priv->vis_hash_lock); 941 spin_lock_bh(&bat_priv->vis_hash_lock);
962 /* properly remove, kill timers ... */ 942 /* properly remove, kill timers ... */
963 hash_delete(bat_priv->vis_hash, free_info_ref, NULL); 943 batadv_hash_delete(bat_priv->vis_hash, batadv_free_info_ref, NULL);
964 bat_priv->vis_hash = NULL; 944 bat_priv->vis_hash = NULL;
965 bat_priv->my_vis_info = NULL; 945 bat_priv->my_vis_info = NULL;
966 spin_unlock_bh(&bat_priv->vis_hash_lock); 946 spin_unlock_bh(&bat_priv->vis_hash_lock);
967} 947}
968 948
969/* schedule packets for (re)transmission */ 949/* schedule packets for (re)transmission */
970static void start_vis_timer(struct bat_priv *bat_priv) 950static void batadv_start_vis_timer(struct batadv_priv *bat_priv)
971{ 951{
972 INIT_DELAYED_WORK(&bat_priv->vis_work, send_vis_packets); 952 INIT_DELAYED_WORK(&bat_priv->vis_work, batadv_send_vis_packets);
973 queue_delayed_work(bat_event_workqueue, &bat_priv->vis_work, 953 queue_delayed_work(batadv_event_workqueue, &bat_priv->vis_work,
974 msecs_to_jiffies(VIS_INTERVAL)); 954 msecs_to_jiffies(BATADV_VIS_INTERVAL));
975} 955}
diff --git a/net/batman-adv/vis.h b/net/batman-adv/vis.h
index ee2e46e5347..84e716ed896 100644
--- a/net/batman-adv/vis.h
+++ b/net/batman-adv/vis.h
@@ -1,5 +1,4 @@
1/* 1/* Copyright (C) 2008-2012 B.A.T.M.A.N. contributors:
2 * Copyright (C) 2008-2012 B.A.T.M.A.N. contributors:
3 * 2 *
4 * Simon Wunderlich, Marek Lindner 3 * Simon Wunderlich, Marek Lindner
5 * 4 *
@@ -16,23 +15,22 @@
16 * along with this program; if not, write to the Free Software 15 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA 17 * 02110-1301, USA
19 *
20 */ 18 */
21 19
22#ifndef _NET_BATMAN_ADV_VIS_H_ 20#ifndef _NET_BATMAN_ADV_VIS_H_
23#define _NET_BATMAN_ADV_VIS_H_ 21#define _NET_BATMAN_ADV_VIS_H_
24 22
25#define VIS_TIMEOUT 200000 /* timeout of vis packets 23/* timeout of vis packets in miliseconds */
26 * in miliseconds */ 24#define BATADV_VIS_TIMEOUT 200000
27 25
28int vis_seq_print_text(struct seq_file *seq, void *offset); 26int batadv_vis_seq_print_text(struct seq_file *seq, void *offset);
29void receive_server_sync_packet(struct bat_priv *bat_priv, 27void batadv_receive_server_sync_packet(struct batadv_priv *bat_priv,
30 struct vis_packet *vis_packet, 28 struct batadv_vis_packet *vis_packet,
31 int vis_info_len); 29 int vis_info_len);
32void receive_client_update_packet(struct bat_priv *bat_priv, 30void batadv_receive_client_update_packet(struct batadv_priv *bat_priv,
33 struct vis_packet *vis_packet, 31 struct batadv_vis_packet *vis_packet,
34 int vis_info_len); 32 int vis_info_len);
35int vis_init(struct bat_priv *bat_priv); 33int batadv_vis_init(struct batadv_priv *bat_priv);
36void vis_quit(struct bat_priv *bat_priv); 34void batadv_vis_quit(struct batadv_priv *bat_priv);
37 35
38#endif /* _NET_BATMAN_ADV_VIS_H_ */ 36#endif /* _NET_BATMAN_ADV_VIS_H_ */
diff --git a/net/bluetooth/Makefile b/net/bluetooth/Makefile
index 2dc5a5700f5..fa6d94a4602 100644
--- a/net/bluetooth/Makefile
+++ b/net/bluetooth/Makefile
@@ -9,4 +9,5 @@ obj-$(CONFIG_BT_CMTP) += cmtp/
9obj-$(CONFIG_BT_HIDP) += hidp/ 9obj-$(CONFIG_BT_HIDP) += hidp/
10 10
11bluetooth-y := af_bluetooth.o hci_core.o hci_conn.o hci_event.o mgmt.o \ 11bluetooth-y := af_bluetooth.o hci_core.o hci_conn.o hci_event.o mgmt.o \
12 hci_sock.o hci_sysfs.o l2cap_core.o l2cap_sock.o smp.o sco.o lib.o 12 hci_sock.o hci_sysfs.o l2cap_core.o l2cap_sock.o smp.o sco.o lib.o \
13 a2mp.o
diff --git a/net/bluetooth/a2mp.c b/net/bluetooth/a2mp.c
new file mode 100644
index 00000000000..fb93250b393
--- /dev/null
+++ b/net/bluetooth/a2mp.c
@@ -0,0 +1,568 @@
1/*
2 Copyright (c) 2010,2011 Code Aurora Forum. All rights reserved.
3 Copyright (c) 2011,2012 Intel Corp.
4
5 This program is free software; you can redistribute it and/or modify
6 it under the terms of the GNU General Public License version 2 and
7 only version 2 as published by the Free Software Foundation.
8
9 This program is distributed in the hope that it will be useful,
10 but WITHOUT ANY WARRANTY; without even the implied warranty of
11 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 GNU General Public License for more details.
13*/
14
15#include <net/bluetooth/bluetooth.h>
16#include <net/bluetooth/hci_core.h>
17#include <net/bluetooth/l2cap.h>
18#include <net/bluetooth/a2mp.h>
19
20/* A2MP build & send command helper functions */
21static struct a2mp_cmd *__a2mp_build(u8 code, u8 ident, u16 len, void *data)
22{
23 struct a2mp_cmd *cmd;
24 int plen;
25
26 plen = sizeof(*cmd) + len;
27 cmd = kzalloc(plen, GFP_KERNEL);
28 if (!cmd)
29 return NULL;
30
31 cmd->code = code;
32 cmd->ident = ident;
33 cmd->len = cpu_to_le16(len);
34
35 memcpy(cmd->data, data, len);
36
37 return cmd;
38}
39
40static void a2mp_send(struct amp_mgr *mgr, u8 code, u8 ident, u16 len,
41 void *data)
42{
43 struct l2cap_chan *chan = mgr->a2mp_chan;
44 struct a2mp_cmd *cmd;
45 u16 total_len = len + sizeof(*cmd);
46 struct kvec iv;
47 struct msghdr msg;
48
49 cmd = __a2mp_build(code, ident, len, data);
50 if (!cmd)
51 return;
52
53 iv.iov_base = cmd;
54 iv.iov_len = total_len;
55
56 memset(&msg, 0, sizeof(msg));
57
58 msg.msg_iov = (struct iovec *) &iv;
59 msg.msg_iovlen = 1;
60
61 l2cap_chan_send(chan, &msg, total_len, 0);
62
63 kfree(cmd);
64}
65
/* Fill in the controller-list entry describing the local BR/EDR
 * controller: id 0, type 0, status 1.
 * NOTE(review): the literal status 1 presumably maps to the A2MP
 * "Bluetooth only" controller status encoding - confirm against the
 * AMP spec before relying on it.
 */
static inline void __a2mp_cl_bredr(struct a2mp_cl *cl)
{
	cl->id = 0;
	cl->type = 0;
	cl->status = 1;
}
72
/* Build the controller list for a Discover Response.
 *
 * cl[0] always describes the local BR/EDR controller; every registered
 * AMP controller (any hdev whose id is not HCI_BREDR_ID) follows,
 * capped at num_ctrl entries total.  The caller sizes the cl array
 * from the same controller count under the same lock, so the bound
 * check below guards against list growth races.
 *
 * hci_dev_list shall be locked */
static void __a2mp_add_cl(struct amp_mgr *mgr, struct a2mp_cl *cl, u8 num_ctrl)
{
	int i = 0;
	struct hci_dev *hdev;

	__a2mp_cl_bredr(cl);

	list_for_each_entry(hdev, &hci_dev_list, list) {
		/* Iterate through AMP controllers */
		if (hdev->id == HCI_BREDR_ID)
			continue;

		/* Starting from second entry; pre-increment keeps slot 0
		 * reserved for BR/EDR and bounds i to num_ctrl - 1. */
		if (++i >= num_ctrl)
			return;

		cl[i].id = hdev->id;
		cl[i].type = hdev->amp_type;
		cl[i].status = hdev->amp_status;
	}
}
95
96/* Processing A2MP messages */
97static int a2mp_command_rej(struct amp_mgr *mgr, struct sk_buff *skb,
98 struct a2mp_cmd *hdr)
99{
100 struct a2mp_cmd_rej *rej = (void *) skb->data;
101
102 if (le16_to_cpu(hdr->len) < sizeof(*rej))
103 return -EINVAL;
104
105 BT_DBG("ident %d reason %d", hdr->ident, le16_to_cpu(rej->reason));
106
107 skb_pull(skb, sizeof(*rej));
108
109 return 0;
110}
111
/* A2MP Discover Request: the peer asks which controllers we expose.
 *
 * Skips any extended-feature words, then answers with an
 * A2MP_DISCOVER_RSP carrying one a2mp_cl entry per local controller.
 * Returns 0 on success, -EINVAL on malformed input, -ENOMEM if the
 * response buffer cannot be allocated.
 */
static int a2mp_discover_req(struct amp_mgr *mgr, struct sk_buff *skb,
			     struct a2mp_cmd *hdr)
{
	struct a2mp_discov_req *req = (void *) skb->data;
	u16 len = le16_to_cpu(hdr->len);
	struct a2mp_discov_rsp *rsp;
	u16 ext_feat;
	u8 num_ctrl;

	if (len < sizeof(*req))
		return -EINVAL;

	skb_pull(skb, sizeof(*req));

	ext_feat = le16_to_cpu(req->ext_feat);

	BT_DBG("mtu %d efm 0x%4.4x", le16_to_cpu(req->mtu), ext_feat);

	/* check that packet is not broken for now
	 * While the extension bit stays set, consume further 16-bit
	 * feature words.  NOTE(review): the bound checks 'len' (taken
	 * from the command header) but reads from skb->data - confirm
	 * skb->len can never be shorter than the advertised length here.
	 */
	while (ext_feat & A2MP_FEAT_EXT) {
		if (len < sizeof(ext_feat))
			return -EINVAL;

		ext_feat = get_unaligned_le16(skb->data);
		BT_DBG("efm 0x%4.4x", ext_feat);
		len -= sizeof(ext_feat);
		skb_pull(skb, sizeof(ext_feat));
	}

	/* Count controllers and fill the list under the same read lock,
	 * so the buffer sized from num_ctrl cannot be outgrown while it
	 * is being filled.
	 */
	read_lock(&hci_dev_list_lock);

	num_ctrl = __hci_num_ctrl();
	len = num_ctrl * sizeof(struct a2mp_cl) + sizeof(*rsp);
	rsp = kmalloc(len, GFP_ATOMIC);
	if (!rsp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	rsp->mtu = __constant_cpu_to_le16(L2CAP_A2MP_DEFAULT_MTU);
	rsp->ext_feat = 0;

	__a2mp_add_cl(mgr, rsp->cl, num_ctrl);

	read_unlock(&hci_dev_list_lock);

	a2mp_send(mgr, A2MP_DISCOVER_RSP, hdr->ident, len, rsp);

	kfree(rsp);
	return 0;
}
163
164static int a2mp_change_notify(struct amp_mgr *mgr, struct sk_buff *skb,
165 struct a2mp_cmd *hdr)
166{
167 struct a2mp_cl *cl = (void *) skb->data;
168
169 while (skb->len >= sizeof(*cl)) {
170 BT_DBG("Controller id %d type %d status %d", cl->id, cl->type,
171 cl->status);
172 cl = (struct a2mp_cl *) skb_pull(skb, sizeof(*cl));
173 }
174
175 /* TODO send A2MP_CHANGE_RSP */
176
177 return 0;
178}
179
180static int a2mp_getinfo_req(struct amp_mgr *mgr, struct sk_buff *skb,
181 struct a2mp_cmd *hdr)
182{
183 struct a2mp_info_req *req = (void *) skb->data;
184 struct a2mp_info_rsp rsp;
185 struct hci_dev *hdev;
186
187 if (le16_to_cpu(hdr->len) < sizeof(*req))
188 return -EINVAL;
189
190 BT_DBG("id %d", req->id);
191
192 rsp.id = req->id;
193 rsp.status = A2MP_STATUS_INVALID_CTRL_ID;
194
195 hdev = hci_dev_get(req->id);
196 if (hdev && hdev->amp_type != HCI_BREDR) {
197 rsp.status = 0;
198 rsp.total_bw = cpu_to_le32(hdev->amp_total_bw);
199 rsp.max_bw = cpu_to_le32(hdev->amp_max_bw);
200 rsp.min_latency = cpu_to_le32(hdev->amp_min_latency);
201 rsp.pal_cap = cpu_to_le16(hdev->amp_pal_cap);
202 rsp.assoc_size = cpu_to_le16(hdev->amp_assoc_size);
203 }
204
205 if (hdev)
206 hci_dev_put(hdev);
207
208 a2mp_send(mgr, A2MP_GETINFO_RSP, hdr->ident, sizeof(rsp), &rsp);
209
210 skb_pull(skb, sizeof(*req));
211 return 0;
212}
213
214static int a2mp_getampassoc_req(struct amp_mgr *mgr, struct sk_buff *skb,
215 struct a2mp_cmd *hdr)
216{
217 struct a2mp_amp_assoc_req *req = (void *) skb->data;
218 struct hci_dev *hdev;
219
220 if (le16_to_cpu(hdr->len) < sizeof(*req))
221 return -EINVAL;
222
223 BT_DBG("id %d", req->id);
224
225 hdev = hci_dev_get(req->id);
226 if (!hdev || hdev->amp_type == HCI_BREDR) {
227 struct a2mp_amp_assoc_rsp rsp;
228 rsp.id = req->id;
229 rsp.status = A2MP_STATUS_INVALID_CTRL_ID;
230
231 a2mp_send(mgr, A2MP_GETAMPASSOC_RSP, hdr->ident, sizeof(rsp),
232 &rsp);
233 goto clean;
234 }
235
236 /* Placeholder for HCI Read AMP Assoc */
237
238clean:
239 if (hdev)
240 hci_dev_put(hdev);
241
242 skb_pull(skb, sizeof(*req));
243 return 0;
244}
245
/* A2MP Create Physical Link Request.
 *
 * Validates that the addressed controller exists and is an AMP; the
 * link creation itself is not implemented yet, so a successful lookup
 * only answers A2MP_STATUS_SUCCESS.  Note the id swap in the response:
 * the peer's remote controller is our local one and vice versa.
 * NOTE(review): unlike the other handlers this pulls the full
 * hdr->len, presumably because the request may carry a trailing
 * AMP_Assoc blob after the fixed fields - confirm against the spec.
 */
static int a2mp_createphyslink_req(struct amp_mgr *mgr, struct sk_buff *skb,
				   struct a2mp_cmd *hdr)
{
	struct a2mp_physlink_req *req = (void *) skb->data;

	struct a2mp_physlink_rsp rsp;
	struct hci_dev *hdev;

	if (le16_to_cpu(hdr->len) < sizeof(*req))
		return -EINVAL;

	BT_DBG("local_id %d, remote_id %d", req->local_id, req->remote_id);

	rsp.local_id = req->remote_id;
	rsp.remote_id = req->local_id;

	hdev = hci_dev_get(req->remote_id);
	if (!hdev || hdev->amp_type != HCI_AMP) {
		rsp.status = A2MP_STATUS_INVALID_CTRL_ID;
		goto send_rsp;
	}

	/* TODO process physlink create */

	rsp.status = A2MP_STATUS_SUCCESS;

send_rsp:
	if (hdev)
		hci_dev_put(hdev);

	a2mp_send(mgr, A2MP_CREATEPHYSLINK_RSP, hdr->ident, sizeof(rsp),
		  &rsp);

	skb_pull(skb, le16_to_cpu(hdr->len));
	return 0;
}
282
283static int a2mp_discphyslink_req(struct amp_mgr *mgr, struct sk_buff *skb,
284 struct a2mp_cmd *hdr)
285{
286 struct a2mp_physlink_req *req = (void *) skb->data;
287 struct a2mp_physlink_rsp rsp;
288 struct hci_dev *hdev;
289
290 if (le16_to_cpu(hdr->len) < sizeof(*req))
291 return -EINVAL;
292
293 BT_DBG("local_id %d remote_id %d", req->local_id, req->remote_id);
294
295 rsp.local_id = req->remote_id;
296 rsp.remote_id = req->local_id;
297 rsp.status = A2MP_STATUS_SUCCESS;
298
299 hdev = hci_dev_get(req->local_id);
300 if (!hdev) {
301 rsp.status = A2MP_STATUS_INVALID_CTRL_ID;
302 goto send_rsp;
303 }
304
305 /* TODO Disconnect Phys Link here */
306
307 hci_dev_put(hdev);
308
309send_rsp:
310 a2mp_send(mgr, A2MP_DISCONNPHYSLINK_RSP, hdr->ident, sizeof(rsp), &rsp);
311
312 skb_pull(skb, sizeof(*req));
313 return 0;
314}
315
316static inline int a2mp_cmd_rsp(struct amp_mgr *mgr, struct sk_buff *skb,
317 struct a2mp_cmd *hdr)
318{
319 BT_DBG("ident %d code %d", hdr->ident, hdr->code);
320
321 skb_pull(skb, le16_to_cpu(hdr->len));
322 return 0;
323}
324
/* Handle A2MP signalling: L2CAP 'recv' callback for the A2MP channel.
 *
 * Walks every A2MP command packed into the skb, dispatching each to
 * its handler.  On any error a single Command Reject is sent back.
 * Always consumes the skb and returns 0 so L2CAP never tears the
 * channel down because of a bad A2MP PDU.
 */
static int a2mp_chan_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb)
{
	/* NOTE(review): this outer 'hdr' is shadowed inside the loop and
	 * keeps pointing at the FIRST command header in the buffer. */
	struct a2mp_cmd *hdr = (void *) skb->data;
	struct amp_mgr *mgr = chan->data;
	int err = 0;

	/* Hold the manager across dispatch; handlers may drop refs. */
	amp_mgr_get(mgr);

	while (skb->len >= sizeof(*hdr)) {
		struct a2mp_cmd *hdr = (void *) skb->data;
		u16 len = le16_to_cpu(hdr->len);

		BT_DBG("code 0x%02x id %d len %d", hdr->code, hdr->ident, len);

		skb_pull(skb, sizeof(*hdr));

		/* Advertised payload must fit, and ident 0 is reserved. */
		if (len > skb->len || !hdr->ident) {
			err = -EINVAL;
			break;
		}

		mgr->ident = hdr->ident;

		switch (hdr->code) {
		case A2MP_COMMAND_REJ:
			a2mp_command_rej(mgr, skb, hdr);
			break;

		case A2MP_DISCOVER_REQ:
			err = a2mp_discover_req(mgr, skb, hdr);
			break;

		case A2MP_CHANGE_NOTIFY:
			err = a2mp_change_notify(mgr, skb, hdr);
			break;

		case A2MP_GETINFO_REQ:
			err = a2mp_getinfo_req(mgr, skb, hdr);
			break;

		case A2MP_GETAMPASSOC_REQ:
			err = a2mp_getampassoc_req(mgr, skb, hdr);
			break;

		case A2MP_CREATEPHYSLINK_REQ:
			err = a2mp_createphyslink_req(mgr, skb, hdr);
			break;

		case A2MP_DISCONNPHYSLINK_REQ:
			err = a2mp_discphyslink_req(mgr, skb, hdr);
			break;

		case A2MP_CHANGE_RSP:
		case A2MP_DISCOVER_RSP:
		case A2MP_GETINFO_RSP:
		case A2MP_GETAMPASSOC_RSP:
		case A2MP_CREATEPHYSLINK_RSP:
		case A2MP_DISCONNPHYSLINK_RSP:
			err = a2mp_cmd_rsp(mgr, skb, hdr);
			break;

		default:
			BT_ERR("Unknown A2MP sig cmd 0x%2.2x", hdr->code);
			err = -EINVAL;
			break;
		}
	}

	if (err) {
		struct a2mp_cmd_rej rej;
		rej.reason = __constant_cpu_to_le16(0);

		/* NOTE(review): 'hdr' here is the outer pointer, so the
		 * reject echoes the first command's code/ident rather
		 * than the offending command's - confirm this is the
		 * intended behavior. */
		BT_DBG("Send A2MP Rej: cmd 0x%2.2x err %d", hdr->code, err);

		a2mp_send(mgr, A2MP_COMMAND_REJ, hdr->ident, sizeof(rej),
			  &rej);
	}

	/* Always free the skb and report success so L2CAP does not send
	 * a Disconnect over the A2MP channel in response to bad input. */
	kfree_skb(skb);

	amp_mgr_put(mgr);

	return 0;
}
412
/* L2CAP 'close' callback for the A2MP channel: the channel is not
 * reused after close, so simply destroy it.
 */
static void a2mp_chan_close_cb(struct l2cap_chan *chan)
{
	l2cap_chan_destroy(chan);
}
417
418static void a2mp_chan_state_change_cb(struct l2cap_chan *chan, int state)
419{
420 struct amp_mgr *mgr = chan->data;
421
422 if (!mgr)
423 return;
424
425 BT_DBG("chan %p state %s", chan, state_to_string(state));
426
427 chan->state = state;
428
429 switch (state) {
430 case BT_CLOSED:
431 if (mgr)
432 amp_mgr_put(mgr);
433 break;
434 }
435}
436
/* L2CAP 'alloc_skb' callback: plain GFP_KERNEL allocation; the nb
 * (number of buffers) hint is ignored for this channel.
 */
static struct sk_buff *a2mp_chan_alloc_skb_cb(struct l2cap_chan *chan,
					      unsigned long len, int nb)
{
	return bt_skb_alloc(len, GFP_KERNEL);
}
442
/* Callback table wiring the A2MP protocol into the generic L2CAP
 * channel machinery.  Connection-oriented hooks that do not apply to
 * a fixed channel are stubbed with the l2cap_chan_no_* helpers.
 */
static struct l2cap_ops a2mp_chan_ops = {
	.name = "L2CAP A2MP channel",
	.recv = a2mp_chan_recv_cb,
	.close = a2mp_chan_close_cb,
	.state_change = a2mp_chan_state_change_cb,
	.alloc_skb = a2mp_chan_alloc_skb_cb,

	/* Not implemented for A2MP */
	.new_connection = l2cap_chan_no_new_connection,
	.teardown = l2cap_chan_no_teardown,
	.ready = l2cap_chan_no_ready,
};
455
/* Create and configure the fixed A2MP L2CAP channel on conn.
 *
 * The channel runs in ERTM mode with default flush/retrans/monitor
 * timers, is attached to the connection, and is marked BT_CONNECTED
 * immediately (a fixed channel has no connect handshake).
 * Returns the new channel, or NULL on allocation or ERTM init failure.
 */
static struct l2cap_chan *a2mp_chan_open(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan;
	int err;

	chan = l2cap_chan_create();
	if (!chan)
		return NULL;

	BT_DBG("chan %p", chan);

	chan->chan_type = L2CAP_CHAN_CONN_FIX_A2MP;
	chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;

	chan->ops = &a2mp_chan_ops;

	/* Defaults first, then mirror our own tx limits as the remote's
	 * since no configuration exchange will negotiate them. */
	l2cap_chan_set_defaults(chan);
	chan->remote_max_tx = chan->max_tx;
	chan->remote_tx_win = chan->tx_win;

	chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
	chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;

	skb_queue_head_init(&chan->tx_q);

	chan->mode = L2CAP_MODE_ERTM;

	err = l2cap_ertm_init(chan);
	if (err < 0) {
		l2cap_chan_del(chan, 0);
		return NULL;
	}

	chan->conf_state = 0;

	l2cap_chan_add(conn, chan);

	/* MPS defaults to the outgoing MTU - presumably because a fixed
	 * channel never runs a config exchange; confirm. */
	chan->remote_mps = chan->omtu;
	chan->mps = chan->omtu;

	chan->state = BT_CONNECTED;

	return chan;
}
500
501/* AMP Manager functions */
/* AMP Manager functions */

/* Take a reference on the AMP manager (paired with amp_mgr_put). */
void amp_mgr_get(struct amp_mgr *mgr)
{
	BT_DBG("mgr %p", mgr);

	kref_get(&mgr->kref);
}
508
/* kref release callback: frees the manager once the last reference is
 * dropped via amp_mgr_put.
 */
static void amp_mgr_destroy(struct kref *kref)
{
	struct amp_mgr *mgr = container_of(kref, struct amp_mgr, kref);

	BT_DBG("mgr %p", mgr);

	kfree(mgr);
}
517
518int amp_mgr_put(struct amp_mgr *mgr)
519{
520 BT_DBG("mgr %p", mgr);
521
522 return kref_put(&mgr->kref, &amp_mgr_destroy);
523}
524
/* Allocate an AMP manager for conn and open its A2MP channel.
 *
 * On success the manager owns the new channel (mgr->a2mp_chan), the
 * channel's ->data points back at the manager, and the manager is
 * published on conn->hcon->amp_mgr.  Returns NULL on any failure
 * (manager allocation or channel creation).
 * NOTE(review): kref_init runs after the manager is already reachable
 * via conn->hcon->amp_mgr - confirm no other context can take a
 * reference in that window.
 */
static struct amp_mgr *amp_mgr_create(struct l2cap_conn *conn)
{
	struct amp_mgr *mgr;
	struct l2cap_chan *chan;

	mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
	if (!mgr)
		return NULL;

	BT_DBG("conn %p mgr %p", conn, mgr);

	mgr->l2cap_conn = conn;

	chan = a2mp_chan_open(conn);
	if (!chan) {
		kfree(mgr);
		return NULL;
	}

	mgr->a2mp_chan = chan;
	chan->data = mgr;

	conn->hcon->amp_mgr = mgr;

	kref_init(&mgr->kref);

	return mgr;
}
553
554struct l2cap_chan *a2mp_channel_create(struct l2cap_conn *conn,
555 struct sk_buff *skb)
556{
557 struct amp_mgr *mgr;
558
559 mgr = amp_mgr_create(conn);
560 if (!mgr) {
561 BT_ERR("Could not create AMP manager");
562 return NULL;
563 }
564
565 BT_DBG("mgr: %p chan %p", mgr, mgr->a2mp_chan);
566
567 return mgr->a2mp_chan;
568}
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index 3e18af4dadc..f7db5792ec6 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -25,18 +25,7 @@
25/* Bluetooth address family and sockets. */ 25/* Bluetooth address family and sockets. */
26 26
27#include <linux/module.h> 27#include <linux/module.h>
28
29#include <linux/types.h>
30#include <linux/list.h>
31#include <linux/errno.h>
32#include <linux/kernel.h>
33#include <linux/sched.h>
34#include <linux/skbuff.h>
35#include <linux/init.h>
36#include <linux/poll.h>
37#include <net/sock.h>
38#include <asm/ioctls.h> 28#include <asm/ioctls.h>
39#include <linux/kmod.h>
40 29
41#include <net/bluetooth/bluetooth.h> 30#include <net/bluetooth/bluetooth.h>
42 31
@@ -418,7 +407,8 @@ static inline unsigned int bt_accept_poll(struct sock *parent)
418 return 0; 407 return 0;
419} 408}
420 409
421unsigned int bt_sock_poll(struct file *file, struct socket *sock, poll_table *wait) 410unsigned int bt_sock_poll(struct file *file, struct socket *sock,
411 poll_table *wait)
422{ 412{
423 struct sock *sk = sock->sk; 413 struct sock *sk = sock->sk;
424 unsigned int mask = 0; 414 unsigned int mask = 0;
diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
index 031d7d65675..4a6620bc157 100644
--- a/net/bluetooth/bnep/core.c
+++ b/net/bluetooth/bnep/core.c
@@ -26,26 +26,9 @@
26*/ 26*/
27 27
28#include <linux/module.h> 28#include <linux/module.h>
29
30#include <linux/kernel.h>
31#include <linux/sched.h>
32#include <linux/signal.h>
33#include <linux/init.h>
34#include <linux/wait.h>
35#include <linux/freezer.h>
36#include <linux/errno.h>
37#include <linux/net.h>
38#include <linux/slab.h>
39#include <linux/kthread.h> 29#include <linux/kthread.h>
40#include <net/sock.h>
41
42#include <linux/socket.h>
43#include <linux/file.h> 30#include <linux/file.h>
44
45#include <linux/netdevice.h>
46#include <linux/etherdevice.h> 31#include <linux/etherdevice.h>
47#include <linux/skbuff.h>
48
49#include <asm/unaligned.h> 32#include <asm/unaligned.h>
50 33
51#include <net/bluetooth/bluetooth.h> 34#include <net/bluetooth/bluetooth.h>
@@ -306,7 +289,7 @@ static u8 __bnep_rx_hlen[] = {
306 ETH_ALEN + 2 /* BNEP_COMPRESSED_DST_ONLY */ 289 ETH_ALEN + 2 /* BNEP_COMPRESSED_DST_ONLY */
307}; 290};
308 291
309static inline int bnep_rx_frame(struct bnep_session *s, struct sk_buff *skb) 292static int bnep_rx_frame(struct bnep_session *s, struct sk_buff *skb)
310{ 293{
311 struct net_device *dev = s->dev; 294 struct net_device *dev = s->dev;
312 struct sk_buff *nskb; 295 struct sk_buff *nskb;
@@ -404,7 +387,7 @@ static u8 __bnep_tx_types[] = {
404 BNEP_COMPRESSED 387 BNEP_COMPRESSED
405}; 388};
406 389
407static inline int bnep_tx_frame(struct bnep_session *s, struct sk_buff *skb) 390static int bnep_tx_frame(struct bnep_session *s, struct sk_buff *skb)
408{ 391{
409 struct ethhdr *eh = (void *) skb->data; 392 struct ethhdr *eh = (void *) skb->data;
410 struct socket *sock = s->sock; 393 struct socket *sock = s->sock;
diff --git a/net/bluetooth/bnep/netdev.c b/net/bluetooth/bnep/netdev.c
index bc4086480d9..98f86f91d47 100644
--- a/net/bluetooth/bnep/netdev.c
+++ b/net/bluetooth/bnep/netdev.c
@@ -25,16 +25,8 @@
25 SOFTWARE IS DISCLAIMED. 25 SOFTWARE IS DISCLAIMED.
26*/ 26*/
27 27
28#include <linux/module.h> 28#include <linux/export.h>
29#include <linux/slab.h>
30
31#include <linux/socket.h>
32#include <linux/netdevice.h>
33#include <linux/etherdevice.h> 29#include <linux/etherdevice.h>
34#include <linux/skbuff.h>
35#include <linux/wait.h>
36
37#include <asm/unaligned.h>
38 30
39#include <net/bluetooth/bluetooth.h> 31#include <net/bluetooth/bluetooth.h>
40#include <net/bluetooth/hci_core.h> 32#include <net/bluetooth/hci_core.h>
@@ -128,7 +120,7 @@ static void bnep_net_timeout(struct net_device *dev)
128} 120}
129 121
130#ifdef CONFIG_BT_BNEP_MC_FILTER 122#ifdef CONFIG_BT_BNEP_MC_FILTER
131static inline int bnep_net_mc_filter(struct sk_buff *skb, struct bnep_session *s) 123static int bnep_net_mc_filter(struct sk_buff *skb, struct bnep_session *s)
132{ 124{
133 struct ethhdr *eh = (void *) skb->data; 125 struct ethhdr *eh = (void *) skb->data;
134 126
@@ -140,7 +132,7 @@ static inline int bnep_net_mc_filter(struct sk_buff *skb, struct bnep_session *s
140 132
141#ifdef CONFIG_BT_BNEP_PROTO_FILTER 133#ifdef CONFIG_BT_BNEP_PROTO_FILTER
142/* Determine ether protocol. Based on eth_type_trans. */ 134/* Determine ether protocol. Based on eth_type_trans. */
143static inline u16 bnep_net_eth_proto(struct sk_buff *skb) 135static u16 bnep_net_eth_proto(struct sk_buff *skb)
144{ 136{
145 struct ethhdr *eh = (void *) skb->data; 137 struct ethhdr *eh = (void *) skb->data;
146 u16 proto = ntohs(eh->h_proto); 138 u16 proto = ntohs(eh->h_proto);
@@ -154,7 +146,7 @@ static inline u16 bnep_net_eth_proto(struct sk_buff *skb)
154 return ETH_P_802_2; 146 return ETH_P_802_2;
155} 147}
156 148
157static inline int bnep_net_proto_filter(struct sk_buff *skb, struct bnep_session *s) 149static int bnep_net_proto_filter(struct sk_buff *skb, struct bnep_session *s)
158{ 150{
159 u16 proto = bnep_net_eth_proto(skb); 151 u16 proto = bnep_net_eth_proto(skb);
160 struct bnep_proto_filter *f = s->proto_filter; 152 struct bnep_proto_filter *f = s->proto_filter;
diff --git a/net/bluetooth/bnep/sock.c b/net/bluetooth/bnep/sock.c
index 180bfc45810..5e5f5b410e0 100644
--- a/net/bluetooth/bnep/sock.c
+++ b/net/bluetooth/bnep/sock.c
@@ -24,24 +24,8 @@
24 SOFTWARE IS DISCLAIMED. 24 SOFTWARE IS DISCLAIMED.
25*/ 25*/
26 26
27#include <linux/module.h> 27#include <linux/export.h>
28
29#include <linux/types.h>
30#include <linux/capability.h>
31#include <linux/errno.h>
32#include <linux/kernel.h>
33#include <linux/poll.h>
34#include <linux/fcntl.h>
35#include <linux/skbuff.h>
36#include <linux/socket.h>
37#include <linux/ioctl.h>
38#include <linux/file.h> 28#include <linux/file.h>
39#include <linux/init.h>
40#include <linux/compat.h>
41#include <linux/gfp.h>
42#include <linux/uaccess.h>
43#include <net/sock.h>
44
45 29
46#include "bnep.h" 30#include "bnep.h"
47 31
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index 3f18a6ed973..2fcced377e5 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -24,24 +24,11 @@
24 24
25/* Bluetooth HCI connection handling. */ 25/* Bluetooth HCI connection handling. */
26 26
27#include <linux/module.h> 27#include <linux/export.h>
28
29#include <linux/types.h>
30#include <linux/errno.h>
31#include <linux/kernel.h>
32#include <linux/slab.h>
33#include <linux/poll.h>
34#include <linux/fcntl.h>
35#include <linux/init.h>
36#include <linux/skbuff.h>
37#include <linux/interrupt.h>
38#include <net/sock.h>
39
40#include <linux/uaccess.h>
41#include <asm/unaligned.h>
42 28
43#include <net/bluetooth/bluetooth.h> 29#include <net/bluetooth/bluetooth.h>
44#include <net/bluetooth/hci_core.h> 30#include <net/bluetooth/hci_core.h>
31#include <net/bluetooth/a2mp.h>
45 32
46static void hci_le_connect(struct hci_conn *conn) 33static void hci_le_connect(struct hci_conn *conn)
47{ 34{
@@ -54,15 +41,15 @@ static void hci_le_connect(struct hci_conn *conn)
54 conn->sec_level = BT_SECURITY_LOW; 41 conn->sec_level = BT_SECURITY_LOW;
55 42
56 memset(&cp, 0, sizeof(cp)); 43 memset(&cp, 0, sizeof(cp));
57 cp.scan_interval = cpu_to_le16(0x0060); 44 cp.scan_interval = __constant_cpu_to_le16(0x0060);
58 cp.scan_window = cpu_to_le16(0x0030); 45 cp.scan_window = __constant_cpu_to_le16(0x0030);
59 bacpy(&cp.peer_addr, &conn->dst); 46 bacpy(&cp.peer_addr, &conn->dst);
60 cp.peer_addr_type = conn->dst_type; 47 cp.peer_addr_type = conn->dst_type;
61 cp.conn_interval_min = cpu_to_le16(0x0028); 48 cp.conn_interval_min = __constant_cpu_to_le16(0x0028);
62 cp.conn_interval_max = cpu_to_le16(0x0038); 49 cp.conn_interval_max = __constant_cpu_to_le16(0x0038);
63 cp.supervision_timeout = cpu_to_le16(0x002a); 50 cp.supervision_timeout = __constant_cpu_to_le16(0x002a);
64 cp.min_ce_len = cpu_to_le16(0x0000); 51 cp.min_ce_len = __constant_cpu_to_le16(0x0000);
65 cp.max_ce_len = cpu_to_le16(0x0000); 52 cp.max_ce_len = __constant_cpu_to_le16(0x0000);
66 53
67 hci_send_cmd(hdev, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp); 54 hci_send_cmd(hdev, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp);
68} 55}
@@ -99,7 +86,7 @@ void hci_acl_connect(struct hci_conn *conn)
99 cp.pscan_rep_mode = ie->data.pscan_rep_mode; 86 cp.pscan_rep_mode = ie->data.pscan_rep_mode;
100 cp.pscan_mode = ie->data.pscan_mode; 87 cp.pscan_mode = ie->data.pscan_mode;
101 cp.clock_offset = ie->data.clock_offset | 88 cp.clock_offset = ie->data.clock_offset |
102 cpu_to_le16(0x8000); 89 __constant_cpu_to_le16(0x8000);
103 } 90 }
104 91
105 memcpy(conn->dev_class, ie->data.dev_class, 3); 92 memcpy(conn->dev_class, ie->data.dev_class, 3);
@@ -175,9 +162,9 @@ void hci_setup_sync(struct hci_conn *conn, __u16 handle)
175 cp.handle = cpu_to_le16(handle); 162 cp.handle = cpu_to_le16(handle);
176 cp.pkt_type = cpu_to_le16(conn->pkt_type); 163 cp.pkt_type = cpu_to_le16(conn->pkt_type);
177 164
178 cp.tx_bandwidth = cpu_to_le32(0x00001f40); 165 cp.tx_bandwidth = __constant_cpu_to_le32(0x00001f40);
179 cp.rx_bandwidth = cpu_to_le32(0x00001f40); 166 cp.rx_bandwidth = __constant_cpu_to_le32(0x00001f40);
180 cp.max_latency = cpu_to_le16(0xffff); 167 cp.max_latency = __constant_cpu_to_le16(0xffff);
181 cp.voice_setting = cpu_to_le16(hdev->voice_setting); 168 cp.voice_setting = cpu_to_le16(hdev->voice_setting);
182 cp.retrans_effort = 0xff; 169 cp.retrans_effort = 0xff;
183 170
@@ -185,7 +172,7 @@ void hci_setup_sync(struct hci_conn *conn, __u16 handle)
185} 172}
186 173
187void hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max, 174void hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max,
188 u16 latency, u16 to_multiplier) 175 u16 latency, u16 to_multiplier)
189{ 176{
190 struct hci_cp_le_conn_update cp; 177 struct hci_cp_le_conn_update cp;
191 struct hci_dev *hdev = conn->hdev; 178 struct hci_dev *hdev = conn->hdev;
@@ -197,15 +184,14 @@ void hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max,
197 cp.conn_interval_max = cpu_to_le16(max); 184 cp.conn_interval_max = cpu_to_le16(max);
198 cp.conn_latency = cpu_to_le16(latency); 185 cp.conn_latency = cpu_to_le16(latency);
199 cp.supervision_timeout = cpu_to_le16(to_multiplier); 186 cp.supervision_timeout = cpu_to_le16(to_multiplier);
200 cp.min_ce_len = cpu_to_le16(0x0001); 187 cp.min_ce_len = __constant_cpu_to_le16(0x0001);
201 cp.max_ce_len = cpu_to_le16(0x0001); 188 cp.max_ce_len = __constant_cpu_to_le16(0x0001);
202 189
203 hci_send_cmd(hdev, HCI_OP_LE_CONN_UPDATE, sizeof(cp), &cp); 190 hci_send_cmd(hdev, HCI_OP_LE_CONN_UPDATE, sizeof(cp), &cp);
204} 191}
205EXPORT_SYMBOL(hci_le_conn_update);
206 192
207void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __u8 rand[8], 193void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __u8 rand[8],
208 __u8 ltk[16]) 194 __u8 ltk[16])
209{ 195{
210 struct hci_dev *hdev = conn->hdev; 196 struct hci_dev *hdev = conn->hdev;
211 struct hci_cp_le_start_enc cp; 197 struct hci_cp_le_start_enc cp;
@@ -221,7 +207,6 @@ void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __u8 rand[8],
221 207
222 hci_send_cmd(hdev, HCI_OP_LE_START_ENC, sizeof(cp), &cp); 208 hci_send_cmd(hdev, HCI_OP_LE_START_ENC, sizeof(cp), &cp);
223} 209}
224EXPORT_SYMBOL(hci_le_start_enc);
225 210
226/* Device _must_ be locked */ 211/* Device _must_ be locked */
227void hci_sco_setup(struct hci_conn *conn, __u8 status) 212void hci_sco_setup(struct hci_conn *conn, __u8 status)
@@ -247,7 +232,7 @@ void hci_sco_setup(struct hci_conn *conn, __u8 status)
247static void hci_conn_timeout(struct work_struct *work) 232static void hci_conn_timeout(struct work_struct *work)
248{ 233{
249 struct hci_conn *conn = container_of(work, struct hci_conn, 234 struct hci_conn *conn = container_of(work, struct hci_conn,
250 disc_work.work); 235 disc_work.work);
251 __u8 reason; 236 __u8 reason;
252 237
253 BT_DBG("conn %p state %s", conn, state_to_string(conn->state)); 238 BT_DBG("conn %p state %s", conn, state_to_string(conn->state));
@@ -295,9 +280,9 @@ static void hci_conn_enter_sniff_mode(struct hci_conn *conn)
295 if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) { 280 if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) {
296 struct hci_cp_sniff_subrate cp; 281 struct hci_cp_sniff_subrate cp;
297 cp.handle = cpu_to_le16(conn->handle); 282 cp.handle = cpu_to_le16(conn->handle);
298 cp.max_latency = cpu_to_le16(0); 283 cp.max_latency = __constant_cpu_to_le16(0);
299 cp.min_remote_timeout = cpu_to_le16(0); 284 cp.min_remote_timeout = __constant_cpu_to_le16(0);
300 cp.min_local_timeout = cpu_to_le16(0); 285 cp.min_local_timeout = __constant_cpu_to_le16(0);
301 hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp); 286 hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp);
302 } 287 }
303 288
@@ -306,8 +291,8 @@ static void hci_conn_enter_sniff_mode(struct hci_conn *conn)
306 cp.handle = cpu_to_le16(conn->handle); 291 cp.handle = cpu_to_le16(conn->handle);
307 cp.max_interval = cpu_to_le16(hdev->sniff_max_interval); 292 cp.max_interval = cpu_to_le16(hdev->sniff_max_interval);
308 cp.min_interval = cpu_to_le16(hdev->sniff_min_interval); 293 cp.min_interval = cpu_to_le16(hdev->sniff_min_interval);
309 cp.attempt = cpu_to_le16(4); 294 cp.attempt = __constant_cpu_to_le16(4);
310 cp.timeout = cpu_to_le16(1); 295 cp.timeout = __constant_cpu_to_le16(1);
311 hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp); 296 hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp);
312 } 297 }
313} 298}
@@ -327,7 +312,7 @@ static void hci_conn_auto_accept(unsigned long arg)
327 struct hci_dev *hdev = conn->hdev; 312 struct hci_dev *hdev = conn->hdev;
328 313
329 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY, sizeof(conn->dst), 314 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY, sizeof(conn->dst),
330 &conn->dst); 315 &conn->dst);
331} 316}
332 317
333struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst) 318struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
@@ -376,7 +361,7 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
376 INIT_DELAYED_WORK(&conn->disc_work, hci_conn_timeout); 361 INIT_DELAYED_WORK(&conn->disc_work, hci_conn_timeout);
377 setup_timer(&conn->idle_timer, hci_conn_idle, (unsigned long)conn); 362 setup_timer(&conn->idle_timer, hci_conn_idle, (unsigned long)conn);
378 setup_timer(&conn->auto_accept_timer, hci_conn_auto_accept, 363 setup_timer(&conn->auto_accept_timer, hci_conn_auto_accept,
379 (unsigned long) conn); 364 (unsigned long) conn);
380 365
381 atomic_set(&conn->refcnt, 0); 366 atomic_set(&conn->refcnt, 0);
382 367
@@ -425,9 +410,11 @@ int hci_conn_del(struct hci_conn *conn)
425 } 410 }
426 } 411 }
427 412
428
429 hci_chan_list_flush(conn); 413 hci_chan_list_flush(conn);
430 414
415 if (conn->amp_mgr)
416 amp_mgr_put(conn->amp_mgr);
417
431 hci_conn_hash_del(hdev, conn); 418 hci_conn_hash_del(hdev, conn);
432 if (hdev->notify) 419 if (hdev->notify)
433 hdev->notify(hdev, HCI_NOTIFY_CONN_DEL); 420 hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);
@@ -454,7 +441,8 @@ struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src)
454 read_lock(&hci_dev_list_lock); 441 read_lock(&hci_dev_list_lock);
455 442
456 list_for_each_entry(d, &hci_dev_list, list) { 443 list_for_each_entry(d, &hci_dev_list, list) {
457 if (!test_bit(HCI_UP, &d->flags) || test_bit(HCI_RAW, &d->flags)) 444 if (!test_bit(HCI_UP, &d->flags) ||
445 test_bit(HCI_RAW, &d->flags))
458 continue; 446 continue;
459 447
460 /* Simple routing: 448 /* Simple routing:
@@ -495,6 +483,11 @@ struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst,
495 if (type == LE_LINK) { 483 if (type == LE_LINK) {
496 le = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst); 484 le = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst);
497 if (!le) { 485 if (!le) {
486 le = hci_conn_hash_lookup_state(hdev, LE_LINK,
487 BT_CONNECT);
488 if (le)
489 return ERR_PTR(-EBUSY);
490
498 le = hci_conn_add(hdev, LE_LINK, dst); 491 le = hci_conn_add(hdev, LE_LINK, dst);
499 if (!le) 492 if (!le)
500 return ERR_PTR(-ENOMEM); 493 return ERR_PTR(-ENOMEM);
@@ -545,7 +538,7 @@ struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst,
545 hci_conn_hold(sco); 538 hci_conn_hold(sco);
546 539
547 if (acl->state == BT_CONNECTED && 540 if (acl->state == BT_CONNECTED &&
548 (sco->state == BT_OPEN || sco->state == BT_CLOSED)) { 541 (sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
549 set_bit(HCI_CONN_POWER_SAVE, &acl->flags); 542 set_bit(HCI_CONN_POWER_SAVE, &acl->flags);
550 hci_conn_enter_active_mode(acl, BT_POWER_FORCE_ACTIVE_ON); 543 hci_conn_enter_active_mode(acl, BT_POWER_FORCE_ACTIVE_ON);
551 544
@@ -560,7 +553,6 @@ struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst,
560 553
561 return sco; 554 return sco;
562} 555}
563EXPORT_SYMBOL(hci_connect);
564 556
565/* Check link security requirement */ 557/* Check link security requirement */
566int hci_conn_check_link_mode(struct hci_conn *conn) 558int hci_conn_check_link_mode(struct hci_conn *conn)
@@ -572,7 +564,6 @@ int hci_conn_check_link_mode(struct hci_conn *conn)
572 564
573 return 1; 565 return 1;
574} 566}
575EXPORT_SYMBOL(hci_conn_check_link_mode);
576 567
577/* Authenticate remote device */ 568/* Authenticate remote device */
578static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type) 569static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
@@ -600,7 +591,7 @@ static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
600 591
601 cp.handle = cpu_to_le16(conn->handle); 592 cp.handle = cpu_to_le16(conn->handle);
602 hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED, 593 hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
603 sizeof(cp), &cp); 594 sizeof(cp), &cp);
604 if (conn->key_type != 0xff) 595 if (conn->key_type != 0xff)
605 set_bit(HCI_CONN_REAUTH_PEND, &conn->flags); 596 set_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
606 } 597 }
@@ -618,7 +609,7 @@ static void hci_conn_encrypt(struct hci_conn *conn)
618 cp.handle = cpu_to_le16(conn->handle); 609 cp.handle = cpu_to_le16(conn->handle);
619 cp.encrypt = 0x01; 610 cp.encrypt = 0x01;
620 hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp), 611 hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
621 &cp); 612 &cp);
622 } 613 }
623} 614}
624 615
@@ -648,8 +639,7 @@ int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
648 /* An unauthenticated combination key has sufficient security for 639 /* An unauthenticated combination key has sufficient security for
649 security level 1 and 2. */ 640 security level 1 and 2. */
650 if (conn->key_type == HCI_LK_UNAUTH_COMBINATION && 641 if (conn->key_type == HCI_LK_UNAUTH_COMBINATION &&
651 (sec_level == BT_SECURITY_MEDIUM || 642 (sec_level == BT_SECURITY_MEDIUM || sec_level == BT_SECURITY_LOW))
652 sec_level == BT_SECURITY_LOW))
653 goto encrypt; 643 goto encrypt;
654 644
655 /* A combination key has always sufficient security for the security 645 /* A combination key has always sufficient security for the security
@@ -657,8 +647,7 @@ int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
657 is generated using maximum PIN code length (16). 647 is generated using maximum PIN code length (16).
658 For pre 2.1 units. */ 648 For pre 2.1 units. */
659 if (conn->key_type == HCI_LK_COMBINATION && 649 if (conn->key_type == HCI_LK_COMBINATION &&
660 (sec_level != BT_SECURITY_HIGH || 650 (sec_level != BT_SECURITY_HIGH || conn->pin_length == 16))
661 conn->pin_length == 16))
662 goto encrypt; 651 goto encrypt;
663 652
664auth: 653auth:
@@ -701,12 +690,11 @@ int hci_conn_change_link_key(struct hci_conn *conn)
701 struct hci_cp_change_conn_link_key cp; 690 struct hci_cp_change_conn_link_key cp;
702 cp.handle = cpu_to_le16(conn->handle); 691 cp.handle = cpu_to_le16(conn->handle);
703 hci_send_cmd(conn->hdev, HCI_OP_CHANGE_CONN_LINK_KEY, 692 hci_send_cmd(conn->hdev, HCI_OP_CHANGE_CONN_LINK_KEY,
704 sizeof(cp), &cp); 693 sizeof(cp), &cp);
705 } 694 }
706 695
707 return 0; 696 return 0;
708} 697}
709EXPORT_SYMBOL(hci_conn_change_link_key);
710 698
711/* Switch role */ 699/* Switch role */
712int hci_conn_switch_role(struct hci_conn *conn, __u8 role) 700int hci_conn_switch_role(struct hci_conn *conn, __u8 role)
@@ -752,7 +740,7 @@ void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active)
752timer: 740timer:
753 if (hdev->idle_timeout > 0) 741 if (hdev->idle_timeout > 0)
754 mod_timer(&conn->idle_timer, 742 mod_timer(&conn->idle_timer,
755 jiffies + msecs_to_jiffies(hdev->idle_timeout)); 743 jiffies + msecs_to_jiffies(hdev->idle_timeout));
756} 744}
757 745
758/* Drop all connection on the device */ 746/* Drop all connection on the device */
@@ -802,7 +790,7 @@ EXPORT_SYMBOL(hci_conn_put_device);
802 790
803int hci_get_conn_list(void __user *arg) 791int hci_get_conn_list(void __user *arg)
804{ 792{
805 register struct hci_conn *c; 793 struct hci_conn *c;
806 struct hci_conn_list_req req, *cl; 794 struct hci_conn_list_req req, *cl;
807 struct hci_conn_info *ci; 795 struct hci_conn_info *ci;
808 struct hci_dev *hdev; 796 struct hci_dev *hdev;
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 411ace8e647..08994ecc3b6 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -25,28 +25,10 @@
25 25
26/* Bluetooth HCI core. */ 26/* Bluetooth HCI core. */
27 27
28#include <linux/jiffies.h> 28#include <linux/export.h>
29#include <linux/module.h> 29#include <linux/idr.h>
30#include <linux/kmod.h>
31
32#include <linux/types.h>
33#include <linux/errno.h>
34#include <linux/kernel.h>
35#include <linux/sched.h>
36#include <linux/slab.h>
37#include <linux/poll.h>
38#include <linux/fcntl.h>
39#include <linux/init.h>
40#include <linux/skbuff.h>
41#include <linux/workqueue.h>
42#include <linux/interrupt.h>
43#include <linux/rfkill.h>
44#include <linux/timer.h>
45#include <linux/crypto.h>
46#include <net/sock.h>
47 30
48#include <linux/uaccess.h> 31#include <linux/rfkill.h>
49#include <asm/unaligned.h>
50 32
51#include <net/bluetooth/bluetooth.h> 33#include <net/bluetooth/bluetooth.h>
52#include <net/bluetooth/hci_core.h> 34#include <net/bluetooth/hci_core.h>
@@ -65,6 +47,9 @@ DEFINE_RWLOCK(hci_dev_list_lock);
65LIST_HEAD(hci_cb_list); 47LIST_HEAD(hci_cb_list);
66DEFINE_RWLOCK(hci_cb_list_lock); 48DEFINE_RWLOCK(hci_cb_list_lock);
67 49
50/* HCI ID Numbering */
51static DEFINE_IDA(hci_index_ida);
52
68/* ---- HCI notifications ---- */ 53/* ---- HCI notifications ---- */
69 54
70static void hci_notify(struct hci_dev *hdev, int event) 55static void hci_notify(struct hci_dev *hdev, int event)
@@ -124,8 +109,9 @@ static void hci_req_cancel(struct hci_dev *hdev, int err)
124} 109}
125 110
126/* Execute request and wait for completion. */ 111/* Execute request and wait for completion. */
127static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt), 112static int __hci_request(struct hci_dev *hdev,
128 unsigned long opt, __u32 timeout) 113 void (*req)(struct hci_dev *hdev, unsigned long opt),
114 unsigned long opt, __u32 timeout)
129{ 115{
130 DECLARE_WAITQUEUE(wait, current); 116 DECLARE_WAITQUEUE(wait, current);
131 int err = 0; 117 int err = 0;
@@ -166,8 +152,9 @@ static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev,
166 return err; 152 return err;
167} 153}
168 154
169static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt), 155static int hci_request(struct hci_dev *hdev,
170 unsigned long opt, __u32 timeout) 156 void (*req)(struct hci_dev *hdev, unsigned long opt),
157 unsigned long opt, __u32 timeout)
171{ 158{
172 int ret; 159 int ret;
173 160
@@ -202,7 +189,7 @@ static void bredr_init(struct hci_dev *hdev)
202 /* Mandatory initialization */ 189 /* Mandatory initialization */
203 190
204 /* Reset */ 191 /* Reset */
205 if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) { 192 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
206 set_bit(HCI_RESET, &hdev->flags); 193 set_bit(HCI_RESET, &hdev->flags);
207 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL); 194 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
208 } 195 }
@@ -235,7 +222,7 @@ static void bredr_init(struct hci_dev *hdev)
235 hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type); 222 hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
236 223
237 /* Connection accept timeout ~20 secs */ 224 /* Connection accept timeout ~20 secs */
238 param = cpu_to_le16(0x7d00); 225 param = __constant_cpu_to_le16(0x7d00);
239 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param); 226 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
240 227
241 bacpy(&cp.bdaddr, BDADDR_ANY); 228 bacpy(&cp.bdaddr, BDADDR_ANY);
@@ -417,7 +404,8 @@ static void inquiry_cache_flush(struct hci_dev *hdev)
417 INIT_LIST_HEAD(&cache->resolve); 404 INIT_LIST_HEAD(&cache->resolve);
418} 405}
419 406
420struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr) 407struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
408 bdaddr_t *bdaddr)
421{ 409{
422 struct discovery_state *cache = &hdev->discovery; 410 struct discovery_state *cache = &hdev->discovery;
423 struct inquiry_entry *e; 411 struct inquiry_entry *e;
@@ -478,7 +466,7 @@ void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
478 466
479 list_for_each_entry(p, &cache->resolve, list) { 467 list_for_each_entry(p, &cache->resolve, list) {
480 if (p->name_state != NAME_PENDING && 468 if (p->name_state != NAME_PENDING &&
481 abs(p->data.rssi) >= abs(ie->data.rssi)) 469 abs(p->data.rssi) >= abs(ie->data.rssi))
482 break; 470 break;
483 pos = &p->list; 471 pos = &p->list;
484 } 472 }
@@ -503,7 +491,7 @@ bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
503 *ssp = true; 491 *ssp = true;
504 492
505 if (ie->name_state == NAME_NEEDED && 493 if (ie->name_state == NAME_NEEDED &&
506 data->rssi != ie->data.rssi) { 494 data->rssi != ie->data.rssi) {
507 ie->data.rssi = data->rssi; 495 ie->data.rssi = data->rssi;
508 hci_inquiry_cache_update_resolve(hdev, ie); 496 hci_inquiry_cache_update_resolve(hdev, ie);
509 } 497 }
@@ -527,7 +515,7 @@ bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
527 515
528update: 516update:
529 if (name_known && ie->name_state != NAME_KNOWN && 517 if (name_known && ie->name_state != NAME_KNOWN &&
530 ie->name_state != NAME_PENDING) { 518 ie->name_state != NAME_PENDING) {
531 ie->name_state = NAME_KNOWN; 519 ie->name_state = NAME_KNOWN;
532 list_del(&ie->list); 520 list_del(&ie->list);
533 } 521 }
@@ -605,8 +593,7 @@ int hci_inquiry(void __user *arg)
605 593
606 hci_dev_lock(hdev); 594 hci_dev_lock(hdev);
607 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX || 595 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
608 inquiry_cache_empty(hdev) || 596 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
609 ir.flags & IREQ_CACHE_FLUSH) {
610 inquiry_cache_flush(hdev); 597 inquiry_cache_flush(hdev);
611 do_inquiry = 1; 598 do_inquiry = 1;
612 } 599 }
@@ -620,7 +607,9 @@ int hci_inquiry(void __user *arg)
620 goto done; 607 goto done;
621 } 608 }
622 609
623 /* for unlimited number of responses we will use buffer with 255 entries */ 610 /* for unlimited number of responses we will use buffer with
611 * 255 entries
612 */
624 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp; 613 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
625 614
626 /* cache_dump can't sleep. Therefore we allocate temp buffer and then 615 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
@@ -641,7 +630,7 @@ int hci_inquiry(void __user *arg)
641 if (!copy_to_user(ptr, &ir, sizeof(ir))) { 630 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
642 ptr += sizeof(ir); 631 ptr += sizeof(ir);
643 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) * 632 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
644 ir.num_rsp)) 633 ir.num_rsp))
645 err = -EFAULT; 634 err = -EFAULT;
646 } else 635 } else
647 err = -EFAULT; 636 err = -EFAULT;
@@ -702,11 +691,11 @@ int hci_dev_open(__u16 dev)
702 hdev->init_last_cmd = 0; 691 hdev->init_last_cmd = 0;
703 692
704 ret = __hci_request(hdev, hci_init_req, 0, 693 ret = __hci_request(hdev, hci_init_req, 0,
705 msecs_to_jiffies(HCI_INIT_TIMEOUT)); 694 msecs_to_jiffies(HCI_INIT_TIMEOUT));
706 695
707 if (lmp_host_le_capable(hdev)) 696 if (lmp_host_le_capable(hdev))
708 ret = __hci_request(hdev, hci_le_init_req, 0, 697 ret = __hci_request(hdev, hci_le_init_req, 0,
709 msecs_to_jiffies(HCI_INIT_TIMEOUT)); 698 msecs_to_jiffies(HCI_INIT_TIMEOUT));
710 699
711 clear_bit(HCI_INIT, &hdev->flags); 700 clear_bit(HCI_INIT, &hdev->flags);
712 } 701 }
@@ -791,10 +780,10 @@ static int hci_dev_do_close(struct hci_dev *hdev)
791 skb_queue_purge(&hdev->cmd_q); 780 skb_queue_purge(&hdev->cmd_q);
792 atomic_set(&hdev->cmd_cnt, 1); 781 atomic_set(&hdev->cmd_cnt, 1);
793 if (!test_bit(HCI_RAW, &hdev->flags) && 782 if (!test_bit(HCI_RAW, &hdev->flags) &&
794 test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) { 783 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
795 set_bit(HCI_INIT, &hdev->flags); 784 set_bit(HCI_INIT, &hdev->flags);
796 __hci_request(hdev, hci_reset_req, 0, 785 __hci_request(hdev, hci_reset_req, 0,
797 msecs_to_jiffies(250)); 786 msecs_to_jiffies(250));
798 clear_bit(HCI_INIT, &hdev->flags); 787 clear_bit(HCI_INIT, &hdev->flags);
799 } 788 }
800 789
@@ -884,7 +873,7 @@ int hci_dev_reset(__u16 dev)
884 873
885 if (!test_bit(HCI_RAW, &hdev->flags)) 874 if (!test_bit(HCI_RAW, &hdev->flags))
886 ret = __hci_request(hdev, hci_reset_req, 0, 875 ret = __hci_request(hdev, hci_reset_req, 0,
887 msecs_to_jiffies(HCI_INIT_TIMEOUT)); 876 msecs_to_jiffies(HCI_INIT_TIMEOUT));
888 877
889done: 878done:
890 hci_req_unlock(hdev); 879 hci_req_unlock(hdev);
@@ -924,7 +913,7 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg)
924 switch (cmd) { 913 switch (cmd) {
925 case HCISETAUTH: 914 case HCISETAUTH:
926 err = hci_request(hdev, hci_auth_req, dr.dev_opt, 915 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
927 msecs_to_jiffies(HCI_INIT_TIMEOUT)); 916 msecs_to_jiffies(HCI_INIT_TIMEOUT));
928 break; 917 break;
929 918
930 case HCISETENCRYPT: 919 case HCISETENCRYPT:
@@ -936,23 +925,23 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg)
936 if (!test_bit(HCI_AUTH, &hdev->flags)) { 925 if (!test_bit(HCI_AUTH, &hdev->flags)) {
937 /* Auth must be enabled first */ 926 /* Auth must be enabled first */
938 err = hci_request(hdev, hci_auth_req, dr.dev_opt, 927 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
939 msecs_to_jiffies(HCI_INIT_TIMEOUT)); 928 msecs_to_jiffies(HCI_INIT_TIMEOUT));
940 if (err) 929 if (err)
941 break; 930 break;
942 } 931 }
943 932
944 err = hci_request(hdev, hci_encrypt_req, dr.dev_opt, 933 err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
945 msecs_to_jiffies(HCI_INIT_TIMEOUT)); 934 msecs_to_jiffies(HCI_INIT_TIMEOUT));
946 break; 935 break;
947 936
948 case HCISETSCAN: 937 case HCISETSCAN:
949 err = hci_request(hdev, hci_scan_req, dr.dev_opt, 938 err = hci_request(hdev, hci_scan_req, dr.dev_opt,
950 msecs_to_jiffies(HCI_INIT_TIMEOUT)); 939 msecs_to_jiffies(HCI_INIT_TIMEOUT));
951 break; 940 break;
952 941
953 case HCISETLINKPOL: 942 case HCISETLINKPOL:
954 err = hci_request(hdev, hci_linkpol_req, dr.dev_opt, 943 err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
955 msecs_to_jiffies(HCI_INIT_TIMEOUT)); 944 msecs_to_jiffies(HCI_INIT_TIMEOUT));
956 break; 945 break;
957 946
958 case HCISETLINKMODE: 947 case HCISETLINKMODE:
@@ -1103,7 +1092,7 @@ static void hci_power_on(struct work_struct *work)
1103 1092
1104 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) 1093 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1105 schedule_delayed_work(&hdev->power_off, 1094 schedule_delayed_work(&hdev->power_off,
1106 msecs_to_jiffies(AUTO_OFF_TIMEOUT)); 1095 msecs_to_jiffies(AUTO_OFF_TIMEOUT));
1107 1096
1108 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) 1097 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
1109 mgmt_index_added(hdev); 1098 mgmt_index_added(hdev);
@@ -1112,7 +1101,7 @@ static void hci_power_on(struct work_struct *work)
1112static void hci_power_off(struct work_struct *work) 1101static void hci_power_off(struct work_struct *work)
1113{ 1102{
1114 struct hci_dev *hdev = container_of(work, struct hci_dev, 1103 struct hci_dev *hdev = container_of(work, struct hci_dev,
1115 power_off.work); 1104 power_off.work);
1116 1105
1117 BT_DBG("%s", hdev->name); 1106 BT_DBG("%s", hdev->name);
1118 1107
@@ -1193,7 +1182,7 @@ struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1193} 1182}
1194 1183
1195static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn, 1184static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1196 u8 key_type, u8 old_key_type) 1185 u8 key_type, u8 old_key_type)
1197{ 1186{
1198 /* Legacy key */ 1187 /* Legacy key */
1199 if (key_type < 0x03) 1188 if (key_type < 0x03)
@@ -1234,7 +1223,7 @@ struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
1234 1223
1235 list_for_each_entry(k, &hdev->long_term_keys, list) { 1224 list_for_each_entry(k, &hdev->long_term_keys, list) {
1236 if (k->ediv != ediv || 1225 if (k->ediv != ediv ||
1237 memcmp(rand, k->rand, sizeof(k->rand))) 1226 memcmp(rand, k->rand, sizeof(k->rand)))
1238 continue; 1227 continue;
1239 1228
1240 return k; 1229 return k;
@@ -1242,7 +1231,6 @@ struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
1242 1231
1243 return NULL; 1232 return NULL;
1244} 1233}
1245EXPORT_SYMBOL(hci_find_ltk);
1246 1234
1247struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr, 1235struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
1248 u8 addr_type) 1236 u8 addr_type)
@@ -1251,12 +1239,11 @@ struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
1251 1239
1252 list_for_each_entry(k, &hdev->long_term_keys, list) 1240 list_for_each_entry(k, &hdev->long_term_keys, list)
1253 if (addr_type == k->bdaddr_type && 1241 if (addr_type == k->bdaddr_type &&
1254 bacmp(bdaddr, &k->bdaddr) == 0) 1242 bacmp(bdaddr, &k->bdaddr) == 0)
1255 return k; 1243 return k;
1256 1244
1257 return NULL; 1245 return NULL;
1258} 1246}
1259EXPORT_SYMBOL(hci_find_ltk_by_addr);
1260 1247
1261int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key, 1248int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
1262 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len) 1249 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
@@ -1283,15 +1270,14 @@ int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
1283 * combination key for legacy pairing even when there's no 1270 * combination key for legacy pairing even when there's no
1284 * previous key */ 1271 * previous key */
1285 if (type == HCI_LK_CHANGED_COMBINATION && 1272 if (type == HCI_LK_CHANGED_COMBINATION &&
1286 (!conn || conn->remote_auth == 0xff) && 1273 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
1287 old_key_type == 0xff) {
1288 type = HCI_LK_COMBINATION; 1274 type = HCI_LK_COMBINATION;
1289 if (conn) 1275 if (conn)
1290 conn->key_type = type; 1276 conn->key_type = type;
1291 } 1277 }
1292 1278
1293 bacpy(&key->bdaddr, bdaddr); 1279 bacpy(&key->bdaddr, bdaddr);
1294 memcpy(key->val, val, 16); 1280 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
1295 key->pin_len = pin_len; 1281 key->pin_len = pin_len;
1296 1282
1297 if (type == HCI_LK_CHANGED_COMBINATION) 1283 if (type == HCI_LK_CHANGED_COMBINATION)
@@ -1540,6 +1526,7 @@ static void le_scan_enable_req(struct hci_dev *hdev, unsigned long opt)
1540 1526
1541 memset(&cp, 0, sizeof(cp)); 1527 memset(&cp, 0, sizeof(cp));
1542 cp.enable = 1; 1528 cp.enable = 1;
1529 cp.filter_dup = 1;
1543 1530
1544 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp); 1531 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1545} 1532}
@@ -1707,41 +1694,39 @@ EXPORT_SYMBOL(hci_free_dev);
1707/* Register HCI device */ 1694/* Register HCI device */
1708int hci_register_dev(struct hci_dev *hdev) 1695int hci_register_dev(struct hci_dev *hdev)
1709{ 1696{
1710 struct list_head *head, *p;
1711 int id, error; 1697 int id, error;
1712 1698
1713 if (!hdev->open || !hdev->close) 1699 if (!hdev->open || !hdev->close)
1714 return -EINVAL; 1700 return -EINVAL;
1715 1701
1716 write_lock(&hci_dev_list_lock);
1717
1718 /* Do not allow HCI_AMP devices to register at index 0, 1702 /* Do not allow HCI_AMP devices to register at index 0,
1719 * so the index can be used as the AMP controller ID. 1703 * so the index can be used as the AMP controller ID.
1720 */ 1704 */
1721 id = (hdev->dev_type == HCI_BREDR) ? 0 : 1; 1705 switch (hdev->dev_type) {
1722 head = &hci_dev_list; 1706 case HCI_BREDR:
1723 1707 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
1724 /* Find first available device id */ 1708 break;
1725 list_for_each(p, &hci_dev_list) { 1709 case HCI_AMP:
1726 int nid = list_entry(p, struct hci_dev, list)->id; 1710 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
1727 if (nid > id) 1711 break;
1728 break; 1712 default:
1729 if (nid == id) 1713 return -EINVAL;
1730 id++;
1731 head = p;
1732 } 1714 }
1733 1715
1716 if (id < 0)
1717 return id;
1718
1734 sprintf(hdev->name, "hci%d", id); 1719 sprintf(hdev->name, "hci%d", id);
1735 hdev->id = id; 1720 hdev->id = id;
1736 1721
1737 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus); 1722 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1738 1723
1739 list_add(&hdev->list, head); 1724 write_lock(&hci_dev_list_lock);
1740 1725 list_add(&hdev->list, &hci_dev_list);
1741 write_unlock(&hci_dev_list_lock); 1726 write_unlock(&hci_dev_list_lock);
1742 1727
1743 hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND | 1728 hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
1744 WQ_MEM_RECLAIM, 1); 1729 WQ_MEM_RECLAIM, 1);
1745 if (!hdev->workqueue) { 1730 if (!hdev->workqueue) {
1746 error = -ENOMEM; 1731 error = -ENOMEM;
1747 goto err; 1732 goto err;
@@ -1752,7 +1737,8 @@ int hci_register_dev(struct hci_dev *hdev)
1752 goto err_wqueue; 1737 goto err_wqueue;
1753 1738
1754 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev, 1739 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
1755 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev); 1740 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
1741 hdev);
1756 if (hdev->rfkill) { 1742 if (hdev->rfkill) {
1757 if (rfkill_register(hdev->rfkill) < 0) { 1743 if (rfkill_register(hdev->rfkill) < 0) {
1758 rfkill_destroy(hdev->rfkill); 1744 rfkill_destroy(hdev->rfkill);
@@ -1772,6 +1758,7 @@ int hci_register_dev(struct hci_dev *hdev)
1772err_wqueue: 1758err_wqueue:
1773 destroy_workqueue(hdev->workqueue); 1759 destroy_workqueue(hdev->workqueue);
1774err: 1760err:
1761 ida_simple_remove(&hci_index_ida, hdev->id);
1775 write_lock(&hci_dev_list_lock); 1762 write_lock(&hci_dev_list_lock);
1776 list_del(&hdev->list); 1763 list_del(&hdev->list);
1777 write_unlock(&hci_dev_list_lock); 1764 write_unlock(&hci_dev_list_lock);
@@ -1783,12 +1770,14 @@ EXPORT_SYMBOL(hci_register_dev);
1783/* Unregister HCI device */ 1770/* Unregister HCI device */
1784void hci_unregister_dev(struct hci_dev *hdev) 1771void hci_unregister_dev(struct hci_dev *hdev)
1785{ 1772{
1786 int i; 1773 int i, id;
1787 1774
1788 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus); 1775 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1789 1776
1790 set_bit(HCI_UNREGISTER, &hdev->dev_flags); 1777 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
1791 1778
1779 id = hdev->id;
1780
1792 write_lock(&hci_dev_list_lock); 1781 write_lock(&hci_dev_list_lock);
1793 list_del(&hdev->list); 1782 list_del(&hdev->list);
1794 write_unlock(&hci_dev_list_lock); 1783 write_unlock(&hci_dev_list_lock);
@@ -1799,7 +1788,7 @@ void hci_unregister_dev(struct hci_dev *hdev)
1799 kfree_skb(hdev->reassembly[i]); 1788 kfree_skb(hdev->reassembly[i]);
1800 1789
1801 if (!test_bit(HCI_INIT, &hdev->flags) && 1790 if (!test_bit(HCI_INIT, &hdev->flags) &&
1802 !test_bit(HCI_SETUP, &hdev->dev_flags)) { 1791 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
1803 hci_dev_lock(hdev); 1792 hci_dev_lock(hdev);
1804 mgmt_index_removed(hdev); 1793 mgmt_index_removed(hdev);
1805 hci_dev_unlock(hdev); 1794 hci_dev_unlock(hdev);
@@ -1829,6 +1818,8 @@ void hci_unregister_dev(struct hci_dev *hdev)
1829 hci_dev_unlock(hdev); 1818 hci_dev_unlock(hdev);
1830 1819
1831 hci_dev_put(hdev); 1820 hci_dev_put(hdev);
1821
1822 ida_simple_remove(&hci_index_ida, id);
1832} 1823}
1833EXPORT_SYMBOL(hci_unregister_dev); 1824EXPORT_SYMBOL(hci_unregister_dev);
1834 1825
@@ -1853,7 +1844,7 @@ int hci_recv_frame(struct sk_buff *skb)
1853{ 1844{
1854 struct hci_dev *hdev = (struct hci_dev *) skb->dev; 1845 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1855 if (!hdev || (!test_bit(HCI_UP, &hdev->flags) 1846 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
1856 && !test_bit(HCI_INIT, &hdev->flags))) { 1847 && !test_bit(HCI_INIT, &hdev->flags))) {
1857 kfree_skb(skb); 1848 kfree_skb(skb);
1858 return -ENXIO; 1849 return -ENXIO;
1859 } 1850 }
@@ -1872,7 +1863,7 @@ int hci_recv_frame(struct sk_buff *skb)
1872EXPORT_SYMBOL(hci_recv_frame); 1863EXPORT_SYMBOL(hci_recv_frame);
1873 1864
1874static int hci_reassembly(struct hci_dev *hdev, int type, void *data, 1865static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
1875 int count, __u8 index) 1866 int count, __u8 index)
1876{ 1867{
1877 int len = 0; 1868 int len = 0;
1878 int hlen = 0; 1869 int hlen = 0;
@@ -1881,7 +1872,7 @@ static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
1881 struct bt_skb_cb *scb; 1872 struct bt_skb_cb *scb;
1882 1873
1883 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) || 1874 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
1884 index >= NUM_REASSEMBLY) 1875 index >= NUM_REASSEMBLY)
1885 return -EILSEQ; 1876 return -EILSEQ;
1886 1877
1887 skb = hdev->reassembly[index]; 1878 skb = hdev->reassembly[index];
@@ -2023,7 +2014,7 @@ int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
2023 type = bt_cb(skb)->pkt_type; 2014 type = bt_cb(skb)->pkt_type;
2024 2015
2025 rem = hci_reassembly(hdev, type, data, count, 2016 rem = hci_reassembly(hdev, type, data, count,
2026 STREAM_REASSEMBLY); 2017 STREAM_REASSEMBLY);
2027 if (rem < 0) 2018 if (rem < 0)
2028 return rem; 2019 return rem;
2029 2020
@@ -2157,7 +2148,7 @@ static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2157} 2148}
2158 2149
2159static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue, 2150static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
2160 struct sk_buff *skb, __u16 flags) 2151 struct sk_buff *skb, __u16 flags)
2161{ 2152{
2162 struct hci_dev *hdev = conn->hdev; 2153 struct hci_dev *hdev = conn->hdev;
2163 struct sk_buff *list; 2154 struct sk_buff *list;
@@ -2216,7 +2207,6 @@ void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2216 2207
2217 queue_work(hdev->workqueue, &hdev->tx_work); 2208 queue_work(hdev->workqueue, &hdev->tx_work);
2218} 2209}
2219EXPORT_SYMBOL(hci_send_acl);
2220 2210
2221/* Send SCO data */ 2211/* Send SCO data */
2222void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb) 2212void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
@@ -2239,12 +2229,12 @@ void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
2239 skb_queue_tail(&conn->data_q, skb); 2229 skb_queue_tail(&conn->data_q, skb);
2240 queue_work(hdev->workqueue, &hdev->tx_work); 2230 queue_work(hdev->workqueue, &hdev->tx_work);
2241} 2231}
2242EXPORT_SYMBOL(hci_send_sco);
2243 2232
2244/* ---- HCI TX task (outgoing data) ---- */ 2233/* ---- HCI TX task (outgoing data) ---- */
2245 2234
2246/* HCI Connection scheduler */ 2235/* HCI Connection scheduler */
2247static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote) 2236static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
2237 int *quote)
2248{ 2238{
2249 struct hci_conn_hash *h = &hdev->conn_hash; 2239 struct hci_conn_hash *h = &hdev->conn_hash;
2250 struct hci_conn *conn = NULL, *c; 2240 struct hci_conn *conn = NULL, *c;
@@ -2303,7 +2293,7 @@ static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int
2303 return conn; 2293 return conn;
2304} 2294}
2305 2295
2306static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type) 2296static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
2307{ 2297{
2308 struct hci_conn_hash *h = &hdev->conn_hash; 2298 struct hci_conn_hash *h = &hdev->conn_hash;
2309 struct hci_conn *c; 2299 struct hci_conn *c;
@@ -2316,16 +2306,16 @@ static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
2316 list_for_each_entry_rcu(c, &h->list, list) { 2306 list_for_each_entry_rcu(c, &h->list, list) {
2317 if (c->type == type && c->sent) { 2307 if (c->type == type && c->sent) {
2318 BT_ERR("%s killing stalled connection %s", 2308 BT_ERR("%s killing stalled connection %s",
2319 hdev->name, batostr(&c->dst)); 2309 hdev->name, batostr(&c->dst));
2320 hci_acl_disconn(c, 0x13); 2310 hci_acl_disconn(c, HCI_ERROR_REMOTE_USER_TERM);
2321 } 2311 }
2322 } 2312 }
2323 2313
2324 rcu_read_unlock(); 2314 rcu_read_unlock();
2325} 2315}
2326 2316
2327static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type, 2317static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
2328 int *quote) 2318 int *quote)
2329{ 2319{
2330 struct hci_conn_hash *h = &hdev->conn_hash; 2320 struct hci_conn_hash *h = &hdev->conn_hash;
2331 struct hci_chan *chan = NULL; 2321 struct hci_chan *chan = NULL;
@@ -2442,7 +2432,7 @@ static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
2442 skb->priority = HCI_PRIO_MAX - 1; 2432 skb->priority = HCI_PRIO_MAX - 1;
2443 2433
2444 BT_DBG("chan %p skb %p promoted to %d", chan, skb, 2434 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
2445 skb->priority); 2435 skb->priority);
2446 } 2436 }
2447 2437
2448 if (hci_conn_num(hdev, type) == num) 2438 if (hci_conn_num(hdev, type) == num)
@@ -2459,18 +2449,18 @@ static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
2459 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len); 2449 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
2460} 2450}
2461 2451
2462static inline void __check_timeout(struct hci_dev *hdev, unsigned int cnt) 2452static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
2463{ 2453{
2464 if (!test_bit(HCI_RAW, &hdev->flags)) { 2454 if (!test_bit(HCI_RAW, &hdev->flags)) {
2465 /* ACL tx timeout must be longer than maximum 2455 /* ACL tx timeout must be longer than maximum
2466 * link supervision timeout (40.9 seconds) */ 2456 * link supervision timeout (40.9 seconds) */
2467 if (!cnt && time_after(jiffies, hdev->acl_last_tx + 2457 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
2468 msecs_to_jiffies(HCI_ACL_TX_TIMEOUT))) 2458 msecs_to_jiffies(HCI_ACL_TX_TIMEOUT)))
2469 hci_link_tx_to(hdev, ACL_LINK); 2459 hci_link_tx_to(hdev, ACL_LINK);
2470 } 2460 }
2471} 2461}
2472 2462
2473static inline void hci_sched_acl_pkt(struct hci_dev *hdev) 2463static void hci_sched_acl_pkt(struct hci_dev *hdev)
2474{ 2464{
2475 unsigned int cnt = hdev->acl_cnt; 2465 unsigned int cnt = hdev->acl_cnt;
2476 struct hci_chan *chan; 2466 struct hci_chan *chan;
@@ -2480,11 +2470,11 @@ static inline void hci_sched_acl_pkt(struct hci_dev *hdev)
2480 __check_timeout(hdev, cnt); 2470 __check_timeout(hdev, cnt);
2481 2471
2482 while (hdev->acl_cnt && 2472 while (hdev->acl_cnt &&
2483 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) { 2473 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
2484 u32 priority = (skb_peek(&chan->data_q))->priority; 2474 u32 priority = (skb_peek(&chan->data_q))->priority;
2485 while (quote-- && (skb = skb_peek(&chan->data_q))) { 2475 while (quote-- && (skb = skb_peek(&chan->data_q))) {
2486 BT_DBG("chan %p skb %p len %d priority %u", chan, skb, 2476 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2487 skb->len, skb->priority); 2477 skb->len, skb->priority);
2488 2478
2489 /* Stop if priority has changed */ 2479 /* Stop if priority has changed */
2490 if (skb->priority < priority) 2480 if (skb->priority < priority)
@@ -2508,7 +2498,7 @@ static inline void hci_sched_acl_pkt(struct hci_dev *hdev)
2508 hci_prio_recalculate(hdev, ACL_LINK); 2498 hci_prio_recalculate(hdev, ACL_LINK);
2509} 2499}
2510 2500
2511static inline void hci_sched_acl_blk(struct hci_dev *hdev) 2501static void hci_sched_acl_blk(struct hci_dev *hdev)
2512{ 2502{
2513 unsigned int cnt = hdev->block_cnt; 2503 unsigned int cnt = hdev->block_cnt;
2514 struct hci_chan *chan; 2504 struct hci_chan *chan;
@@ -2518,13 +2508,13 @@ static inline void hci_sched_acl_blk(struct hci_dev *hdev)
2518 __check_timeout(hdev, cnt); 2508 __check_timeout(hdev, cnt);
2519 2509
2520 while (hdev->block_cnt > 0 && 2510 while (hdev->block_cnt > 0 &&
2521 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) { 2511 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
2522 u32 priority = (skb_peek(&chan->data_q))->priority; 2512 u32 priority = (skb_peek(&chan->data_q))->priority;
2523 while (quote > 0 && (skb = skb_peek(&chan->data_q))) { 2513 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
2524 int blocks; 2514 int blocks;
2525 2515
2526 BT_DBG("chan %p skb %p len %d priority %u", chan, skb, 2516 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2527 skb->len, skb->priority); 2517 skb->len, skb->priority);
2528 2518
2529 /* Stop if priority has changed */ 2519 /* Stop if priority has changed */
2530 if (skb->priority < priority) 2520 if (skb->priority < priority)
@@ -2537,7 +2527,7 @@ static inline void hci_sched_acl_blk(struct hci_dev *hdev)
2537 return; 2527 return;
2538 2528
2539 hci_conn_enter_active_mode(chan->conn, 2529 hci_conn_enter_active_mode(chan->conn,
2540 bt_cb(skb)->force_active); 2530 bt_cb(skb)->force_active);
2541 2531
2542 hci_send_frame(skb); 2532 hci_send_frame(skb);
2543 hdev->acl_last_tx = jiffies; 2533 hdev->acl_last_tx = jiffies;
@@ -2554,7 +2544,7 @@ static inline void hci_sched_acl_blk(struct hci_dev *hdev)
2554 hci_prio_recalculate(hdev, ACL_LINK); 2544 hci_prio_recalculate(hdev, ACL_LINK);
2555} 2545}
2556 2546
2557static inline void hci_sched_acl(struct hci_dev *hdev) 2547static void hci_sched_acl(struct hci_dev *hdev)
2558{ 2548{
2559 BT_DBG("%s", hdev->name); 2549 BT_DBG("%s", hdev->name);
2560 2550
@@ -2573,7 +2563,7 @@ static inline void hci_sched_acl(struct hci_dev *hdev)
2573} 2563}
2574 2564
2575/* Schedule SCO */ 2565/* Schedule SCO */
2576static inline void hci_sched_sco(struct hci_dev *hdev) 2566static void hci_sched_sco(struct hci_dev *hdev)
2577{ 2567{
2578 struct hci_conn *conn; 2568 struct hci_conn *conn;
2579 struct sk_buff *skb; 2569 struct sk_buff *skb;
@@ -2596,7 +2586,7 @@ static inline void hci_sched_sco(struct hci_dev *hdev)
2596 } 2586 }
2597} 2587}
2598 2588
2599static inline void hci_sched_esco(struct hci_dev *hdev) 2589static void hci_sched_esco(struct hci_dev *hdev)
2600{ 2590{
2601 struct hci_conn *conn; 2591 struct hci_conn *conn;
2602 struct sk_buff *skb; 2592 struct sk_buff *skb;
@@ -2607,7 +2597,8 @@ static inline void hci_sched_esco(struct hci_dev *hdev)
2607 if (!hci_conn_num(hdev, ESCO_LINK)) 2597 if (!hci_conn_num(hdev, ESCO_LINK))
2608 return; 2598 return;
2609 2599
2610 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) { 2600 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
2601 &quote))) {
2611 while (quote-- && (skb = skb_dequeue(&conn->data_q))) { 2602 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2612 BT_DBG("skb %p len %d", skb, skb->len); 2603 BT_DBG("skb %p len %d", skb, skb->len);
2613 hci_send_frame(skb); 2604 hci_send_frame(skb);
@@ -2619,7 +2610,7 @@ static inline void hci_sched_esco(struct hci_dev *hdev)
2619 } 2610 }
2620} 2611}
2621 2612
2622static inline void hci_sched_le(struct hci_dev *hdev) 2613static void hci_sched_le(struct hci_dev *hdev)
2623{ 2614{
2624 struct hci_chan *chan; 2615 struct hci_chan *chan;
2625 struct sk_buff *skb; 2616 struct sk_buff *skb;
@@ -2634,7 +2625,7 @@ static inline void hci_sched_le(struct hci_dev *hdev)
2634 /* LE tx timeout must be longer than maximum 2625 /* LE tx timeout must be longer than maximum
2635 * link supervision timeout (40.9 seconds) */ 2626 * link supervision timeout (40.9 seconds) */
2636 if (!hdev->le_cnt && hdev->le_pkts && 2627 if (!hdev->le_cnt && hdev->le_pkts &&
2637 time_after(jiffies, hdev->le_last_tx + HZ * 45)) 2628 time_after(jiffies, hdev->le_last_tx + HZ * 45))
2638 hci_link_tx_to(hdev, LE_LINK); 2629 hci_link_tx_to(hdev, LE_LINK);
2639 } 2630 }
2640 2631
@@ -2644,7 +2635,7 @@ static inline void hci_sched_le(struct hci_dev *hdev)
2644 u32 priority = (skb_peek(&chan->data_q))->priority; 2635 u32 priority = (skb_peek(&chan->data_q))->priority;
2645 while (quote-- && (skb = skb_peek(&chan->data_q))) { 2636 while (quote-- && (skb = skb_peek(&chan->data_q))) {
2646 BT_DBG("chan %p skb %p len %d priority %u", chan, skb, 2637 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2647 skb->len, skb->priority); 2638 skb->len, skb->priority);
2648 2639
2649 /* Stop if priority has changed */ 2640 /* Stop if priority has changed */
2650 if (skb->priority < priority) 2641 if (skb->priority < priority)
@@ -2676,7 +2667,7 @@ static void hci_tx_work(struct work_struct *work)
2676 struct sk_buff *skb; 2667 struct sk_buff *skb;
2677 2668
2678 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt, 2669 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
2679 hdev->sco_cnt, hdev->le_cnt); 2670 hdev->sco_cnt, hdev->le_cnt);
2680 2671
2681 /* Schedule queues and send stuff to HCI driver */ 2672 /* Schedule queues and send stuff to HCI driver */
2682 2673
@@ -2696,7 +2687,7 @@ static void hci_tx_work(struct work_struct *work)
2696/* ----- HCI RX task (incoming data processing) ----- */ 2687/* ----- HCI RX task (incoming data processing) ----- */
2697 2688
2698/* ACL data packet */ 2689/* ACL data packet */
2699static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb) 2690static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2700{ 2691{
2701 struct hci_acl_hdr *hdr = (void *) skb->data; 2692 struct hci_acl_hdr *hdr = (void *) skb->data;
2702 struct hci_conn *conn; 2693 struct hci_conn *conn;
@@ -2708,7 +2699,8 @@ static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2708 flags = hci_flags(handle); 2699 flags = hci_flags(handle);
2709 handle = hci_handle(handle); 2700 handle = hci_handle(handle);
2710 2701
2711 BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags); 2702 BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len,
2703 handle, flags);
2712 2704
2713 hdev->stat.acl_rx++; 2705 hdev->stat.acl_rx++;
2714 2706
@@ -2732,14 +2724,14 @@ static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2732 return; 2724 return;
2733 } else { 2725 } else {
2734 BT_ERR("%s ACL packet for unknown connection handle %d", 2726 BT_ERR("%s ACL packet for unknown connection handle %d",
2735 hdev->name, handle); 2727 hdev->name, handle);
2736 } 2728 }
2737 2729
2738 kfree_skb(skb); 2730 kfree_skb(skb);
2739} 2731}
2740 2732
2741/* SCO data packet */ 2733/* SCO data packet */
2742static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb) 2734static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2743{ 2735{
2744 struct hci_sco_hdr *hdr = (void *) skb->data; 2736 struct hci_sco_hdr *hdr = (void *) skb->data;
2745 struct hci_conn *conn; 2737 struct hci_conn *conn;
@@ -2763,7 +2755,7 @@ static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2763 return; 2755 return;
2764 } else { 2756 } else {
2765 BT_ERR("%s SCO packet for unknown connection handle %d", 2757 BT_ERR("%s SCO packet for unknown connection handle %d",
2766 hdev->name, handle); 2758 hdev->name, handle);
2767 } 2759 }
2768 2760
2769 kfree_skb(skb); 2761 kfree_skb(skb);
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 94ad124a4ea..1ba929c05d0 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -24,20 +24,7 @@
24 24
25/* Bluetooth HCI event handling. */ 25/* Bluetooth HCI event handling. */
26 26
27#include <linux/module.h> 27#include <linux/export.h>
28
29#include <linux/types.h>
30#include <linux/errno.h>
31#include <linux/kernel.h>
32#include <linux/slab.h>
33#include <linux/poll.h>
34#include <linux/fcntl.h>
35#include <linux/init.h>
36#include <linux/skbuff.h>
37#include <linux/interrupt.h>
38#include <net/sock.h>
39
40#include <linux/uaccess.h>
41#include <asm/unaligned.h> 28#include <asm/unaligned.h>
42 29
43#include <net/bluetooth/bluetooth.h> 30#include <net/bluetooth/bluetooth.h>
@@ -95,7 +82,8 @@ static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
95 hci_conn_check_pending(hdev); 82 hci_conn_check_pending(hdev);
96} 83}
97 84
98static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev, struct sk_buff *skb) 85static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
86 struct sk_buff *skb)
99{ 87{
100 BT_DBG("%s", hdev->name); 88 BT_DBG("%s", hdev->name);
101} 89}
@@ -166,7 +154,8 @@ static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
166 hci_dev_unlock(hdev); 154 hci_dev_unlock(hdev);
167} 155}
168 156
169static void hci_cc_read_def_link_policy(struct hci_dev *hdev, struct sk_buff *skb) 157static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
158 struct sk_buff *skb)
170{ 159{
171 struct hci_rp_read_def_link_policy *rp = (void *) skb->data; 160 struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
172 161
@@ -178,7 +167,8 @@ static void hci_cc_read_def_link_policy(struct hci_dev *hdev, struct sk_buff *sk
178 hdev->link_policy = __le16_to_cpu(rp->policy); 167 hdev->link_policy = __le16_to_cpu(rp->policy);
179} 168}
180 169
181static void hci_cc_write_def_link_policy(struct hci_dev *hdev, struct sk_buff *skb) 170static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
171 struct sk_buff *skb)
182{ 172{
183 __u8 status = *((__u8 *) skb->data); 173 __u8 status = *((__u8 *) skb->data);
184 void *sent; 174 void *sent;
@@ -329,7 +319,7 @@ static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
329 if (hdev->discov_timeout > 0) { 319 if (hdev->discov_timeout > 0) {
330 int to = msecs_to_jiffies(hdev->discov_timeout * 1000); 320 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
331 queue_delayed_work(hdev->workqueue, &hdev->discov_off, 321 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
332 to); 322 to);
333 } 323 }
334 } else if (old_iscan) 324 } else if (old_iscan)
335 mgmt_discoverable(hdev, 0); 325 mgmt_discoverable(hdev, 0);
@@ -358,7 +348,7 @@ static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
358 memcpy(hdev->dev_class, rp->dev_class, 3); 348 memcpy(hdev->dev_class, rp->dev_class, 3);
359 349
360 BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name, 350 BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
361 hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]); 351 hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
362} 352}
363 353
364static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb) 354static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
@@ -406,7 +396,8 @@ static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
406 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING); 396 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
407} 397}
408 398
409static void hci_cc_write_voice_setting(struct hci_dev *hdev, struct sk_buff *skb) 399static void hci_cc_write_voice_setting(struct hci_dev *hdev,
400 struct sk_buff *skb)
410{ 401{
411 __u8 status = *((__u8 *) skb->data); 402 __u8 status = *((__u8 *) skb->data);
412 __u16 setting; 403 __u16 setting;
@@ -473,7 +464,7 @@ static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
473 return 1; 464 return 1;
474 465
475 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 && 466 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
476 hdev->lmp_subver == 0x0757) 467 hdev->lmp_subver == 0x0757)
477 return 1; 468 return 1;
478 469
479 if (hdev->manufacturer == 15) { 470 if (hdev->manufacturer == 15) {
@@ -486,7 +477,7 @@ static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
486 } 477 }
487 478
488 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 && 479 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
489 hdev->lmp_subver == 0x1805) 480 hdev->lmp_subver == 0x1805)
490 return 1; 481 return 1;
491 482
492 return 0; 483 return 0;
@@ -566,7 +557,7 @@ static void hci_setup(struct hci_dev *hdev)
566 if (hdev->hci_ver > BLUETOOTH_VER_1_1) 557 if (hdev->hci_ver > BLUETOOTH_VER_1_1)
567 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL); 558 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
568 559
569 if (hdev->features[6] & LMP_SIMPLE_PAIR) { 560 if (lmp_ssp_capable(hdev)) {
570 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) { 561 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
571 u8 mode = 0x01; 562 u8 mode = 0x01;
572 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 563 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE,
@@ -618,8 +609,7 @@ static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
618 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver); 609 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
619 610
620 BT_DBG("%s manufacturer %d hci ver %d:%d", hdev->name, 611 BT_DBG("%s manufacturer %d hci ver %d:%d", hdev->name,
621 hdev->manufacturer, 612 hdev->manufacturer, hdev->hci_ver, hdev->hci_rev);
622 hdev->hci_ver, hdev->hci_rev);
623 613
624 if (test_bit(HCI_INIT, &hdev->flags)) 614 if (test_bit(HCI_INIT, &hdev->flags))
625 hci_setup(hdev); 615 hci_setup(hdev);
@@ -646,7 +636,8 @@ static void hci_setup_link_policy(struct hci_dev *hdev)
646 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp); 636 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
647} 637}
648 638
649static void hci_cc_read_local_commands(struct hci_dev *hdev, struct sk_buff *skb) 639static void hci_cc_read_local_commands(struct hci_dev *hdev,
640 struct sk_buff *skb)
650{ 641{
651 struct hci_rp_read_local_commands *rp = (void *) skb->data; 642 struct hci_rp_read_local_commands *rp = (void *) skb->data;
652 643
@@ -664,7 +655,8 @@ done:
664 hci_req_complete(hdev, HCI_OP_READ_LOCAL_COMMANDS, rp->status); 655 hci_req_complete(hdev, HCI_OP_READ_LOCAL_COMMANDS, rp->status);
665} 656}
666 657
667static void hci_cc_read_local_features(struct hci_dev *hdev, struct sk_buff *skb) 658static void hci_cc_read_local_features(struct hci_dev *hdev,
659 struct sk_buff *skb)
668{ 660{
669 struct hci_rp_read_local_features *rp = (void *) skb->data; 661 struct hci_rp_read_local_features *rp = (void *) skb->data;
670 662
@@ -713,10 +705,10 @@ static void hci_cc_read_local_features(struct hci_dev *hdev, struct sk_buff *skb
713 hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5); 705 hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
714 706
715 BT_DBG("%s features 0x%.2x%.2x%.2x%.2x%.2x%.2x%.2x%.2x", hdev->name, 707 BT_DBG("%s features 0x%.2x%.2x%.2x%.2x%.2x%.2x%.2x%.2x", hdev->name,
716 hdev->features[0], hdev->features[1], 708 hdev->features[0], hdev->features[1],
717 hdev->features[2], hdev->features[3], 709 hdev->features[2], hdev->features[3],
718 hdev->features[4], hdev->features[5], 710 hdev->features[4], hdev->features[5],
719 hdev->features[6], hdev->features[7]); 711 hdev->features[6], hdev->features[7]);
720} 712}
721 713
722static void hci_set_le_support(struct hci_dev *hdev) 714static void hci_set_le_support(struct hci_dev *hdev)
@@ -736,7 +728,7 @@ static void hci_set_le_support(struct hci_dev *hdev)
736} 728}
737 729
738static void hci_cc_read_local_ext_features(struct hci_dev *hdev, 730static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
739 struct sk_buff *skb) 731 struct sk_buff *skb)
740{ 732{
741 struct hci_rp_read_local_ext_features *rp = (void *) skb->data; 733 struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
742 734
@@ -762,7 +754,7 @@ done:
762} 754}
763 755
764static void hci_cc_read_flow_control_mode(struct hci_dev *hdev, 756static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
765 struct sk_buff *skb) 757 struct sk_buff *skb)
766{ 758{
767 struct hci_rp_read_flow_control_mode *rp = (void *) skb->data; 759 struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
768 760
@@ -798,9 +790,8 @@ static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
798 hdev->acl_cnt = hdev->acl_pkts; 790 hdev->acl_cnt = hdev->acl_pkts;
799 hdev->sco_cnt = hdev->sco_pkts; 791 hdev->sco_cnt = hdev->sco_pkts;
800 792
801 BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, 793 BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
802 hdev->acl_mtu, hdev->acl_pkts, 794 hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
803 hdev->sco_mtu, hdev->sco_pkts);
804} 795}
805 796
806static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb) 797static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
@@ -816,7 +807,7 @@ static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
816} 807}
817 808
818static void hci_cc_read_data_block_size(struct hci_dev *hdev, 809static void hci_cc_read_data_block_size(struct hci_dev *hdev,
819 struct sk_buff *skb) 810 struct sk_buff *skb)
820{ 811{
821 struct hci_rp_read_data_block_size *rp = (void *) skb->data; 812 struct hci_rp_read_data_block_size *rp = (void *) skb->data;
822 813
@@ -832,7 +823,7 @@ static void hci_cc_read_data_block_size(struct hci_dev *hdev,
832 hdev->block_cnt = hdev->num_blocks; 823 hdev->block_cnt = hdev->num_blocks;
833 824
834 BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu, 825 BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
835 hdev->block_cnt, hdev->block_len); 826 hdev->block_cnt, hdev->block_len);
836 827
837 hci_req_complete(hdev, HCI_OP_READ_DATA_BLOCK_SIZE, rp->status); 828 hci_req_complete(hdev, HCI_OP_READ_DATA_BLOCK_SIZE, rp->status);
838} 829}
@@ -847,7 +838,7 @@ static void hci_cc_write_ca_timeout(struct hci_dev *hdev, struct sk_buff *skb)
847} 838}
848 839
849static void hci_cc_read_local_amp_info(struct hci_dev *hdev, 840static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
850 struct sk_buff *skb) 841 struct sk_buff *skb)
851{ 842{
852 struct hci_rp_read_local_amp_info *rp = (void *) skb->data; 843 struct hci_rp_read_local_amp_info *rp = (void *) skb->data;
853 844
@@ -871,7 +862,7 @@ static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
871} 862}
872 863
873static void hci_cc_delete_stored_link_key(struct hci_dev *hdev, 864static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
874 struct sk_buff *skb) 865 struct sk_buff *skb)
875{ 866{
876 __u8 status = *((__u8 *) skb->data); 867 __u8 status = *((__u8 *) skb->data);
877 868
@@ -890,7 +881,7 @@ static void hci_cc_set_event_mask(struct hci_dev *hdev, struct sk_buff *skb)
890} 881}
891 882
892static void hci_cc_write_inquiry_mode(struct hci_dev *hdev, 883static void hci_cc_write_inquiry_mode(struct hci_dev *hdev,
893 struct sk_buff *skb) 884 struct sk_buff *skb)
894{ 885{
895 __u8 status = *((__u8 *) skb->data); 886 __u8 status = *((__u8 *) skb->data);
896 887
@@ -900,7 +891,7 @@ static void hci_cc_write_inquiry_mode(struct hci_dev *hdev,
900} 891}
901 892
902static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev, 893static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
903 struct sk_buff *skb) 894 struct sk_buff *skb)
904{ 895{
905 struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data; 896 struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
906 897
@@ -959,7 +950,7 @@ static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
959 950
960 if (test_bit(HCI_MGMT, &hdev->dev_flags)) 951 if (test_bit(HCI_MGMT, &hdev->dev_flags))
961 mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr, 952 mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
962 rp->status); 953 rp->status);
963 954
964 hci_dev_unlock(hdev); 955 hci_dev_unlock(hdev);
965} 956}
@@ -1000,7 +991,7 @@ static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
1000} 991}
1001 992
1002static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev, 993static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
1003 struct sk_buff *skb) 994 struct sk_buff *skb)
1004{ 995{
1005 struct hci_rp_user_confirm_reply *rp = (void *) skb->data; 996 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1006 997
@@ -1031,7 +1022,7 @@ static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
1031} 1022}
1032 1023
1033static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev, 1024static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
1034 struct sk_buff *skb) 1025 struct sk_buff *skb)
1035{ 1026{
1036 struct hci_rp_user_confirm_reply *rp = (void *) skb->data; 1027 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1037 1028
@@ -1047,7 +1038,7 @@ static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
1047} 1038}
1048 1039
1049static void hci_cc_read_local_oob_data_reply(struct hci_dev *hdev, 1040static void hci_cc_read_local_oob_data_reply(struct hci_dev *hdev,
1050 struct sk_buff *skb) 1041 struct sk_buff *skb)
1051{ 1042{
1052 struct hci_rp_read_local_oob_data *rp = (void *) skb->data; 1043 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
1053 1044
@@ -1076,7 +1067,7 @@ static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
1076} 1067}
1077 1068
1078static void hci_cc_le_set_scan_enable(struct hci_dev *hdev, 1069static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
1079 struct sk_buff *skb) 1070 struct sk_buff *skb)
1080{ 1071{
1081 struct hci_cp_le_set_scan_enable *cp; 1072 struct hci_cp_le_set_scan_enable *cp;
1082 __u8 status = *((__u8 *) skb->data); 1073 __u8 status = *((__u8 *) skb->data);
@@ -1156,8 +1147,8 @@ static void hci_cc_le_ltk_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
1156 hci_req_complete(hdev, HCI_OP_LE_LTK_NEG_REPLY, rp->status); 1147 hci_req_complete(hdev, HCI_OP_LE_LTK_NEG_REPLY, rp->status);
1157} 1148}
1158 1149
1159static inline void hci_cc_write_le_host_supported(struct hci_dev *hdev, 1150static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
1160 struct sk_buff *skb) 1151 struct sk_buff *skb)
1161{ 1152{
1162 struct hci_cp_write_le_host_supported *sent; 1153 struct hci_cp_write_le_host_supported *sent;
1163 __u8 status = *((__u8 *) skb->data); 1154 __u8 status = *((__u8 *) skb->data);
@@ -1176,13 +1167,13 @@ static inline void hci_cc_write_le_host_supported(struct hci_dev *hdev,
1176 } 1167 }
1177 1168
1178 if (test_bit(HCI_MGMT, &hdev->dev_flags) && 1169 if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
1179 !test_bit(HCI_INIT, &hdev->flags)) 1170 !test_bit(HCI_INIT, &hdev->flags))
1180 mgmt_le_enable_complete(hdev, sent->le, status); 1171 mgmt_le_enable_complete(hdev, sent->le, status);
1181 1172
1182 hci_req_complete(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, status); 1173 hci_req_complete(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, status);
1183} 1174}
1184 1175
1185static inline void hci_cs_inquiry(struct hci_dev *hdev, __u8 status) 1176static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1186{ 1177{
1187 BT_DBG("%s status 0x%x", hdev->name, status); 1178 BT_DBG("%s status 0x%x", hdev->name, status);
1188 1179
@@ -1203,7 +1194,7 @@ static inline void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1203 hci_dev_unlock(hdev); 1194 hci_dev_unlock(hdev);
1204} 1195}
1205 1196
1206static inline void hci_cs_create_conn(struct hci_dev *hdev, __u8 status) 1197static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
1207{ 1198{
1208 struct hci_cp_create_conn *cp; 1199 struct hci_cp_create_conn *cp;
1209 struct hci_conn *conn; 1200 struct hci_conn *conn;
@@ -1333,7 +1324,7 @@ static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
1333} 1324}
1334 1325
1335static int hci_outgoing_auth_needed(struct hci_dev *hdev, 1326static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1336 struct hci_conn *conn) 1327 struct hci_conn *conn)
1337{ 1328{
1338 if (conn->state != BT_CONFIG || !conn->out) 1329 if (conn->state != BT_CONFIG || !conn->out)
1339 return 0; 1330 return 0;
@@ -1343,15 +1334,14 @@ static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1343 1334
1344 /* Only request authentication for SSP connections or non-SSP 1335 /* Only request authentication for SSP connections or non-SSP
1345 * devices with sec_level HIGH or if MITM protection is requested */ 1336 * devices with sec_level HIGH or if MITM protection is requested */
1346 if (!hci_conn_ssp_enabled(conn) && 1337 if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
1347 conn->pending_sec_level != BT_SECURITY_HIGH && 1338 conn->pending_sec_level != BT_SECURITY_HIGH)
1348 !(conn->auth_type & 0x01))
1349 return 0; 1339 return 0;
1350 1340
1351 return 1; 1341 return 1;
1352} 1342}
1353 1343
1354static inline int hci_resolve_name(struct hci_dev *hdev, 1344static int hci_resolve_name(struct hci_dev *hdev,
1355 struct inquiry_entry *e) 1345 struct inquiry_entry *e)
1356{ 1346{
1357 struct hci_cp_remote_name_req cp; 1347 struct hci_cp_remote_name_req cp;
@@ -1638,7 +1628,7 @@ static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status)
1638 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->peer_addr); 1628 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->peer_addr);
1639 1629
1640 BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&cp->peer_addr), 1630 BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&cp->peer_addr),
1641 conn); 1631 conn);
1642 1632
1643 if (status) { 1633 if (status) {
1644 if (conn && conn->state == BT_CONNECT) { 1634 if (conn && conn->state == BT_CONNECT) {
@@ -1668,7 +1658,7 @@ static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
1668 BT_DBG("%s status 0x%x", hdev->name, status); 1658 BT_DBG("%s status 0x%x", hdev->name, status);
1669} 1659}
1670 1660
1671static inline void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) 1661static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1672{ 1662{
1673 __u8 status = *((__u8 *) skb->data); 1663 __u8 status = *((__u8 *) skb->data);
1674 struct discovery_state *discov = &hdev->discovery; 1664 struct discovery_state *discov = &hdev->discovery;
@@ -1708,7 +1698,7 @@ unlock:
1708 hci_dev_unlock(hdev); 1698 hci_dev_unlock(hdev);
1709} 1699}
1710 1700
1711static inline void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb) 1701static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
1712{ 1702{
1713 struct inquiry_data data; 1703 struct inquiry_data data;
1714 struct inquiry_info *info = (void *) (skb->data + 1); 1704 struct inquiry_info *info = (void *) (skb->data + 1);
@@ -1745,7 +1735,7 @@ static inline void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *
1745 hci_dev_unlock(hdev); 1735 hci_dev_unlock(hdev);
1746} 1736}
1747 1737
1748static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) 1738static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1749{ 1739{
1750 struct hci_ev_conn_complete *ev = (void *) skb->data; 1740 struct hci_ev_conn_complete *ev = (void *) skb->data;
1751 struct hci_conn *conn; 1741 struct hci_conn *conn;
@@ -1823,18 +1813,18 @@ unlock:
1823 hci_conn_check_pending(hdev); 1813 hci_conn_check_pending(hdev);
1824} 1814}
1825 1815
1826static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb) 1816static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
1827{ 1817{
1828 struct hci_ev_conn_request *ev = (void *) skb->data; 1818 struct hci_ev_conn_request *ev = (void *) skb->data;
1829 int mask = hdev->link_mode; 1819 int mask = hdev->link_mode;
1830 1820
1831 BT_DBG("%s bdaddr %s type 0x%x", hdev->name, 1821 BT_DBG("%s bdaddr %s type 0x%x", hdev->name, batostr(&ev->bdaddr),
1832 batostr(&ev->bdaddr), ev->link_type); 1822 ev->link_type);
1833 1823
1834 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type); 1824 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type);
1835 1825
1836 if ((mask & HCI_LM_ACCEPT) && 1826 if ((mask & HCI_LM_ACCEPT) &&
1837 !hci_blacklist_lookup(hdev, &ev->bdaddr)) { 1827 !hci_blacklist_lookup(hdev, &ev->bdaddr)) {
1838 /* Connection accepted */ 1828 /* Connection accepted */
1839 struct inquiry_entry *ie; 1829 struct inquiry_entry *ie;
1840 struct hci_conn *conn; 1830 struct hci_conn *conn;
@@ -1845,7 +1835,8 @@ static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *sk
1845 if (ie) 1835 if (ie)
1846 memcpy(ie->data.dev_class, ev->dev_class, 3); 1836 memcpy(ie->data.dev_class, ev->dev_class, 3);
1847 1837
1848 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr); 1838 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
1839 &ev->bdaddr);
1849 if (!conn) { 1840 if (!conn) {
1850 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr); 1841 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr);
1851 if (!conn) { 1842 if (!conn) {
@@ -1878,9 +1869,9 @@ static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *sk
1878 bacpy(&cp.bdaddr, &ev->bdaddr); 1869 bacpy(&cp.bdaddr, &ev->bdaddr);
1879 cp.pkt_type = cpu_to_le16(conn->pkt_type); 1870 cp.pkt_type = cpu_to_le16(conn->pkt_type);
1880 1871
1881 cp.tx_bandwidth = cpu_to_le32(0x00001f40); 1872 cp.tx_bandwidth = __constant_cpu_to_le32(0x00001f40);
1882 cp.rx_bandwidth = cpu_to_le32(0x00001f40); 1873 cp.rx_bandwidth = __constant_cpu_to_le32(0x00001f40);
1883 cp.max_latency = cpu_to_le16(0xffff); 1874 cp.max_latency = __constant_cpu_to_le16(0xffff);
1884 cp.content_format = cpu_to_le16(hdev->voice_setting); 1875 cp.content_format = cpu_to_le16(hdev->voice_setting);
1885 cp.retrans_effort = 0xff; 1876 cp.retrans_effort = 0xff;
1886 1877
@@ -1897,7 +1888,7 @@ static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *sk
1897 } 1888 }
1898} 1889}
1899 1890
1900static inline void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) 1891static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1901{ 1892{
1902 struct hci_ev_disconn_complete *ev = (void *) skb->data; 1893 struct hci_ev_disconn_complete *ev = (void *) skb->data;
1903 struct hci_conn *conn; 1894 struct hci_conn *conn;
@@ -1914,10 +1905,10 @@ static inline void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff
1914 conn->state = BT_CLOSED; 1905 conn->state = BT_CLOSED;
1915 1906
1916 if (test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags) && 1907 if (test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags) &&
1917 (conn->type == ACL_LINK || conn->type == LE_LINK)) { 1908 (conn->type == ACL_LINK || conn->type == LE_LINK)) {
1918 if (ev->status != 0) 1909 if (ev->status != 0)
1919 mgmt_disconnect_failed(hdev, &conn->dst, conn->type, 1910 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1920 conn->dst_type, ev->status); 1911 conn->dst_type, ev->status);
1921 else 1912 else
1922 mgmt_device_disconnected(hdev, &conn->dst, conn->type, 1913 mgmt_device_disconnected(hdev, &conn->dst, conn->type,
1923 conn->dst_type); 1914 conn->dst_type);
@@ -1934,7 +1925,7 @@ unlock:
1934 hci_dev_unlock(hdev); 1925 hci_dev_unlock(hdev);
1935} 1926}
1936 1927
1937static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) 1928static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1938{ 1929{
1939 struct hci_ev_auth_complete *ev = (void *) skb->data; 1930 struct hci_ev_auth_complete *ev = (void *) skb->data;
1940 struct hci_conn *conn; 1931 struct hci_conn *conn;
@@ -1949,7 +1940,7 @@ static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *s
1949 1940
1950 if (!ev->status) { 1941 if (!ev->status) {
1951 if (!hci_conn_ssp_enabled(conn) && 1942 if (!hci_conn_ssp_enabled(conn) &&
1952 test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) { 1943 test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
1953 BT_INFO("re-auth of legacy device is not possible."); 1944 BT_INFO("re-auth of legacy device is not possible.");
1954 } else { 1945 } else {
1955 conn->link_mode |= HCI_LM_AUTH; 1946 conn->link_mode |= HCI_LM_AUTH;
@@ -1969,7 +1960,7 @@ static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *s
1969 cp.handle = ev->handle; 1960 cp.handle = ev->handle;
1970 cp.encrypt = 0x01; 1961 cp.encrypt = 0x01;
1971 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp), 1962 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
1972 &cp); 1963 &cp);
1973 } else { 1964 } else {
1974 conn->state = BT_CONNECTED; 1965 conn->state = BT_CONNECTED;
1975 hci_proto_connect_cfm(conn, ev->status); 1966 hci_proto_connect_cfm(conn, ev->status);
@@ -1989,7 +1980,7 @@ static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *s
1989 cp.handle = ev->handle; 1980 cp.handle = ev->handle;
1990 cp.encrypt = 0x01; 1981 cp.encrypt = 0x01;
1991 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp), 1982 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
1992 &cp); 1983 &cp);
1993 } else { 1984 } else {
1994 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags); 1985 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
1995 hci_encrypt_cfm(conn, ev->status, 0x00); 1986 hci_encrypt_cfm(conn, ev->status, 0x00);
@@ -2000,7 +1991,7 @@ unlock:
2000 hci_dev_unlock(hdev); 1991 hci_dev_unlock(hdev);
2001} 1992}
2002 1993
2003static inline void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb) 1994static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
2004{ 1995{
2005 struct hci_ev_remote_name *ev = (void *) skb->data; 1996 struct hci_ev_remote_name *ev = (void *) skb->data;
2006 struct hci_conn *conn; 1997 struct hci_conn *conn;
@@ -2039,7 +2030,7 @@ unlock:
2039 hci_dev_unlock(hdev); 2030 hci_dev_unlock(hdev);
2040} 2031}
2041 2032
2042static inline void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb) 2033static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2043{ 2034{
2044 struct hci_ev_encrypt_change *ev = (void *) skb->data; 2035 struct hci_ev_encrypt_change *ev = (void *) skb->data;
2045 struct hci_conn *conn; 2036 struct hci_conn *conn;
@@ -2082,7 +2073,8 @@ unlock:
2082 hci_dev_unlock(hdev); 2073 hci_dev_unlock(hdev);
2083} 2074}
2084 2075
2085static inline void hci_change_link_key_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) 2076static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
2077 struct sk_buff *skb)
2086{ 2078{
2087 struct hci_ev_change_link_key_complete *ev = (void *) skb->data; 2079 struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
2088 struct hci_conn *conn; 2080 struct hci_conn *conn;
@@ -2104,7 +2096,8 @@ static inline void hci_change_link_key_complete_evt(struct hci_dev *hdev, struct
2104 hci_dev_unlock(hdev); 2096 hci_dev_unlock(hdev);
2105} 2097}
2106 2098
2107static inline void hci_remote_features_evt(struct hci_dev *hdev, struct sk_buff *skb) 2099static void hci_remote_features_evt(struct hci_dev *hdev,
2100 struct sk_buff *skb)
2108{ 2101{
2109 struct hci_ev_remote_features *ev = (void *) skb->data; 2102 struct hci_ev_remote_features *ev = (void *) skb->data;
2110 struct hci_conn *conn; 2103 struct hci_conn *conn;
@@ -2128,7 +2121,7 @@ static inline void hci_remote_features_evt(struct hci_dev *hdev, struct sk_buff
2128 cp.handle = ev->handle; 2121 cp.handle = ev->handle;
2129 cp.page = 0x01; 2122 cp.page = 0x01;
2130 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES, 2123 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
2131 sizeof(cp), &cp); 2124 sizeof(cp), &cp);
2132 goto unlock; 2125 goto unlock;
2133 } 2126 }
2134 2127
@@ -2153,17 +2146,18 @@ unlock:
2153 hci_dev_unlock(hdev); 2146 hci_dev_unlock(hdev);
2154} 2147}
2155 2148
2156static inline void hci_remote_version_evt(struct hci_dev *hdev, struct sk_buff *skb) 2149static void hci_remote_version_evt(struct hci_dev *hdev, struct sk_buff *skb)
2157{ 2150{
2158 BT_DBG("%s", hdev->name); 2151 BT_DBG("%s", hdev->name);
2159} 2152}
2160 2153
2161static inline void hci_qos_setup_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) 2154static void hci_qos_setup_complete_evt(struct hci_dev *hdev,
2155 struct sk_buff *skb)
2162{ 2156{
2163 BT_DBG("%s", hdev->name); 2157 BT_DBG("%s", hdev->name);
2164} 2158}
2165 2159
2166static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) 2160static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2167{ 2161{
2168 struct hci_ev_cmd_complete *ev = (void *) skb->data; 2162 struct hci_ev_cmd_complete *ev = (void *) skb->data;
2169 __u16 opcode; 2163 __u16 opcode;
@@ -2384,7 +2378,7 @@ static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *sk
2384 } 2378 }
2385} 2379}
2386 2380
2387static inline void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb) 2381static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
2388{ 2382{
2389 struct hci_ev_cmd_status *ev = (void *) skb->data; 2383 struct hci_ev_cmd_status *ev = (void *) skb->data;
2390 __u16 opcode; 2384 __u16 opcode;
@@ -2465,7 +2459,7 @@ static inline void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
2465 } 2459 }
2466} 2460}
2467 2461
2468static inline void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb) 2462static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2469{ 2463{
2470 struct hci_ev_role_change *ev = (void *) skb->data; 2464 struct hci_ev_role_change *ev = (void *) skb->data;
2471 struct hci_conn *conn; 2465 struct hci_conn *conn;
@@ -2491,7 +2485,7 @@ static inline void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb
2491 hci_dev_unlock(hdev); 2485 hci_dev_unlock(hdev);
2492} 2486}
2493 2487
2494static inline void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb) 2488static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
2495{ 2489{
2496 struct hci_ev_num_comp_pkts *ev = (void *) skb->data; 2490 struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
2497 int i; 2491 int i;
@@ -2502,7 +2496,7 @@ static inline void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *s
2502 } 2496 }
2503 2497
2504 if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) + 2498 if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
2505 ev->num_hndl * sizeof(struct hci_comp_pkts_info)) { 2499 ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
2506 BT_DBG("%s bad parameters", hdev->name); 2500 BT_DBG("%s bad parameters", hdev->name);
2507 return; 2501 return;
2508 } 2502 }
@@ -2557,8 +2551,7 @@ static inline void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *s
2557 queue_work(hdev->workqueue, &hdev->tx_work); 2551 queue_work(hdev->workqueue, &hdev->tx_work);
2558} 2552}
2559 2553
2560static inline void hci_num_comp_blocks_evt(struct hci_dev *hdev, 2554static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
2561 struct sk_buff *skb)
2562{ 2555{
2563 struct hci_ev_num_comp_blocks *ev = (void *) skb->data; 2556 struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
2564 int i; 2557 int i;
@@ -2569,13 +2562,13 @@ static inline void hci_num_comp_blocks_evt(struct hci_dev *hdev,
2569 } 2562 }
2570 2563
2571 if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) + 2564 if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
2572 ev->num_hndl * sizeof(struct hci_comp_blocks_info)) { 2565 ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
2573 BT_DBG("%s bad parameters", hdev->name); 2566 BT_DBG("%s bad parameters", hdev->name);
2574 return; 2567 return;
2575 } 2568 }
2576 2569
2577 BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks, 2570 BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
2578 ev->num_hndl); 2571 ev->num_hndl);
2579 2572
2580 for (i = 0; i < ev->num_hndl; i++) { 2573 for (i = 0; i < ev->num_hndl; i++) {
2581 struct hci_comp_blocks_info *info = &ev->handles[i]; 2574 struct hci_comp_blocks_info *info = &ev->handles[i];
@@ -2607,7 +2600,7 @@ static inline void hci_num_comp_blocks_evt(struct hci_dev *hdev,
2607 queue_work(hdev->workqueue, &hdev->tx_work); 2600 queue_work(hdev->workqueue, &hdev->tx_work);
2608} 2601}
2609 2602
2610static inline void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb) 2603static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2611{ 2604{
2612 struct hci_ev_mode_change *ev = (void *) skb->data; 2605 struct hci_ev_mode_change *ev = (void *) skb->data;
2613 struct hci_conn *conn; 2606 struct hci_conn *conn;
@@ -2621,7 +2614,8 @@ static inline void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb
2621 conn->mode = ev->mode; 2614 conn->mode = ev->mode;
2622 conn->interval = __le16_to_cpu(ev->interval); 2615 conn->interval = __le16_to_cpu(ev->interval);
2623 2616
2624 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) { 2617 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
2618 &conn->flags)) {
2625 if (conn->mode == HCI_CM_ACTIVE) 2619 if (conn->mode == HCI_CM_ACTIVE)
2626 set_bit(HCI_CONN_POWER_SAVE, &conn->flags); 2620 set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
2627 else 2621 else
@@ -2635,7 +2629,7 @@ static inline void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb
2635 hci_dev_unlock(hdev); 2629 hci_dev_unlock(hdev);
2636} 2630}
2637 2631
2638static inline void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb) 2632static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2639{ 2633{
2640 struct hci_ev_pin_code_req *ev = (void *) skb->data; 2634 struct hci_ev_pin_code_req *ev = (void *) skb->data;
2641 struct hci_conn *conn; 2635 struct hci_conn *conn;
@@ -2656,7 +2650,7 @@ static inline void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff
2656 2650
2657 if (!test_bit(HCI_PAIRABLE, &hdev->dev_flags)) 2651 if (!test_bit(HCI_PAIRABLE, &hdev->dev_flags))
2658 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY, 2652 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2659 sizeof(ev->bdaddr), &ev->bdaddr); 2653 sizeof(ev->bdaddr), &ev->bdaddr);
2660 else if (test_bit(HCI_MGMT, &hdev->dev_flags)) { 2654 else if (test_bit(HCI_MGMT, &hdev->dev_flags)) {
2661 u8 secure; 2655 u8 secure;
2662 2656
@@ -2672,7 +2666,7 @@ unlock:
2672 hci_dev_unlock(hdev); 2666 hci_dev_unlock(hdev);
2673} 2667}
2674 2668
2675static inline void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb) 2669static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2676{ 2670{
2677 struct hci_ev_link_key_req *ev = (void *) skb->data; 2671 struct hci_ev_link_key_req *ev = (void *) skb->data;
2678 struct hci_cp_link_key_reply cp; 2672 struct hci_cp_link_key_reply cp;
@@ -2689,15 +2683,15 @@ static inline void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff
2689 key = hci_find_link_key(hdev, &ev->bdaddr); 2683 key = hci_find_link_key(hdev, &ev->bdaddr);
2690 if (!key) { 2684 if (!key) {
2691 BT_DBG("%s link key not found for %s", hdev->name, 2685 BT_DBG("%s link key not found for %s", hdev->name,
2692 batostr(&ev->bdaddr)); 2686 batostr(&ev->bdaddr));
2693 goto not_found; 2687 goto not_found;
2694 } 2688 }
2695 2689
2696 BT_DBG("%s found key type %u for %s", hdev->name, key->type, 2690 BT_DBG("%s found key type %u for %s", hdev->name, key->type,
2697 batostr(&ev->bdaddr)); 2691 batostr(&ev->bdaddr));
2698 2692
2699 if (!test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) && 2693 if (!test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) &&
2700 key->type == HCI_LK_DEBUG_COMBINATION) { 2694 key->type == HCI_LK_DEBUG_COMBINATION) {
2701 BT_DBG("%s ignoring debug key", hdev->name); 2695 BT_DBG("%s ignoring debug key", hdev->name);
2702 goto not_found; 2696 goto not_found;
2703 } 2697 }
@@ -2705,16 +2699,15 @@ static inline void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff
2705 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 2699 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2706 if (conn) { 2700 if (conn) {
2707 if (key->type == HCI_LK_UNAUTH_COMBINATION && 2701 if (key->type == HCI_LK_UNAUTH_COMBINATION &&
2708 conn->auth_type != 0xff && 2702 conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
2709 (conn->auth_type & 0x01)) {
2710 BT_DBG("%s ignoring unauthenticated key", hdev->name); 2703 BT_DBG("%s ignoring unauthenticated key", hdev->name);
2711 goto not_found; 2704 goto not_found;
2712 } 2705 }
2713 2706
2714 if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 && 2707 if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
2715 conn->pending_sec_level == BT_SECURITY_HIGH) { 2708 conn->pending_sec_level == BT_SECURITY_HIGH) {
2716 BT_DBG("%s ignoring key unauthenticated for high \ 2709 BT_DBG("%s ignoring key unauthenticated for high security",
2717 security", hdev->name); 2710 hdev->name);
2718 goto not_found; 2711 goto not_found;
2719 } 2712 }
2720 2713
@@ -2723,7 +2716,7 @@ static inline void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff
2723 } 2716 }
2724 2717
2725 bacpy(&cp.bdaddr, &ev->bdaddr); 2718 bacpy(&cp.bdaddr, &ev->bdaddr);
2726 memcpy(cp.link_key, key->val, 16); 2719 memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);
2727 2720
2728 hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp); 2721 hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
2729 2722
@@ -2736,7 +2729,7 @@ not_found:
2736 hci_dev_unlock(hdev); 2729 hci_dev_unlock(hdev);
2737} 2730}
2738 2731
2739static inline void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb) 2732static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
2740{ 2733{
2741 struct hci_ev_link_key_notify *ev = (void *) skb->data; 2734 struct hci_ev_link_key_notify *ev = (void *) skb->data;
2742 struct hci_conn *conn; 2735 struct hci_conn *conn;
@@ -2760,12 +2753,12 @@ static inline void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff
2760 2753
2761 if (test_bit(HCI_LINK_KEYS, &hdev->dev_flags)) 2754 if (test_bit(HCI_LINK_KEYS, &hdev->dev_flags))
2762 hci_add_link_key(hdev, conn, 1, &ev->bdaddr, ev->link_key, 2755 hci_add_link_key(hdev, conn, 1, &ev->bdaddr, ev->link_key,
2763 ev->key_type, pin_len); 2756 ev->key_type, pin_len);
2764 2757
2765 hci_dev_unlock(hdev); 2758 hci_dev_unlock(hdev);
2766} 2759}
2767 2760
2768static inline void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb) 2761static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
2769{ 2762{
2770 struct hci_ev_clock_offset *ev = (void *) skb->data; 2763 struct hci_ev_clock_offset *ev = (void *) skb->data;
2771 struct hci_conn *conn; 2764 struct hci_conn *conn;
@@ -2788,7 +2781,7 @@ static inline void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *sk
2788 hci_dev_unlock(hdev); 2781 hci_dev_unlock(hdev);
2789} 2782}
2790 2783
2791static inline void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb) 2784static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2792{ 2785{
2793 struct hci_ev_pkt_type_change *ev = (void *) skb->data; 2786 struct hci_ev_pkt_type_change *ev = (void *) skb->data;
2794 struct hci_conn *conn; 2787 struct hci_conn *conn;
@@ -2804,7 +2797,7 @@ static inline void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff
2804 hci_dev_unlock(hdev); 2797 hci_dev_unlock(hdev);
2805} 2798}
2806 2799
2807static inline void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb) 2800static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
2808{ 2801{
2809 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data; 2802 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
2810 struct inquiry_entry *ie; 2803 struct inquiry_entry *ie;
@@ -2822,7 +2815,8 @@ static inline void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *
2822 hci_dev_unlock(hdev); 2815 hci_dev_unlock(hdev);
2823} 2816}
2824 2817
2825static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct sk_buff *skb) 2818static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
2819 struct sk_buff *skb)
2826{ 2820{
2827 struct inquiry_data data; 2821 struct inquiry_data data;
2828 int num_rsp = *((__u8 *) skb->data); 2822 int num_rsp = *((__u8 *) skb->data);
@@ -2881,7 +2875,8 @@ static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct
2881 hci_dev_unlock(hdev); 2875 hci_dev_unlock(hdev);
2882} 2876}
2883 2877
2884static inline void hci_remote_ext_features_evt(struct hci_dev *hdev, struct sk_buff *skb) 2878static void hci_remote_ext_features_evt(struct hci_dev *hdev,
2879 struct sk_buff *skb)
2885{ 2880{
2886 struct hci_ev_remote_ext_features *ev = (void *) skb->data; 2881 struct hci_ev_remote_ext_features *ev = (void *) skb->data;
2887 struct hci_conn *conn; 2882 struct hci_conn *conn;
@@ -2929,7 +2924,8 @@ unlock:
2929 hci_dev_unlock(hdev); 2924 hci_dev_unlock(hdev);
2930} 2925}
2931 2926
2932static inline void hci_sync_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) 2927static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
2928 struct sk_buff *skb)
2933{ 2929{
2934 struct hci_ev_sync_conn_complete *ev = (void *) skb->data; 2930 struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
2935 struct hci_conn *conn; 2931 struct hci_conn *conn;
@@ -2984,19 +2980,20 @@ unlock:
2984 hci_dev_unlock(hdev); 2980 hci_dev_unlock(hdev);
2985} 2981}
2986 2982
2987static inline void hci_sync_conn_changed_evt(struct hci_dev *hdev, struct sk_buff *skb) 2983static void hci_sync_conn_changed_evt(struct hci_dev *hdev, struct sk_buff *skb)
2988{ 2984{
2989 BT_DBG("%s", hdev->name); 2985 BT_DBG("%s", hdev->name);
2990} 2986}
2991 2987
2992static inline void hci_sniff_subrate_evt(struct hci_dev *hdev, struct sk_buff *skb) 2988static void hci_sniff_subrate_evt(struct hci_dev *hdev, struct sk_buff *skb)
2993{ 2989{
2994 struct hci_ev_sniff_subrate *ev = (void *) skb->data; 2990 struct hci_ev_sniff_subrate *ev = (void *) skb->data;
2995 2991
2996 BT_DBG("%s status %d", hdev->name, ev->status); 2992 BT_DBG("%s status %d", hdev->name, ev->status);
2997} 2993}
2998 2994
2999static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb) 2995static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
2996 struct sk_buff *skb)
3000{ 2997{
3001 struct inquiry_data data; 2998 struct inquiry_data data;
3002 struct extended_inquiry_info *info = (void *) (skb->data + 1); 2999 struct extended_inquiry_info *info = (void *) (skb->data + 1);
@@ -3087,7 +3084,7 @@ unlock:
3087 hci_dev_unlock(hdev); 3084 hci_dev_unlock(hdev);
3088} 3085}
3089 3086
3090static inline u8 hci_get_auth_req(struct hci_conn *conn) 3087static u8 hci_get_auth_req(struct hci_conn *conn)
3091{ 3088{
3092 /* If remote requests dedicated bonding follow that lead */ 3089 /* If remote requests dedicated bonding follow that lead */
3093 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03) { 3090 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03) {
@@ -3106,7 +3103,7 @@ static inline u8 hci_get_auth_req(struct hci_conn *conn)
3106 return conn->auth_type; 3103 return conn->auth_type;
3107} 3104}
3108 3105
3109static inline void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb) 3106static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3110{ 3107{
3111 struct hci_ev_io_capa_request *ev = (void *) skb->data; 3108 struct hci_ev_io_capa_request *ev = (void *) skb->data;
3112 struct hci_conn *conn; 3109 struct hci_conn *conn;
@@ -3125,7 +3122,7 @@ static inline void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff
3125 goto unlock; 3122 goto unlock;
3126 3123
3127 if (test_bit(HCI_PAIRABLE, &hdev->dev_flags) || 3124 if (test_bit(HCI_PAIRABLE, &hdev->dev_flags) ||
3128 (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) { 3125 (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
3129 struct hci_cp_io_capability_reply cp; 3126 struct hci_cp_io_capability_reply cp;
3130 3127
3131 bacpy(&cp.bdaddr, &ev->bdaddr); 3128 bacpy(&cp.bdaddr, &ev->bdaddr);
@@ -3136,14 +3133,14 @@ static inline void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff
3136 conn->auth_type = hci_get_auth_req(conn); 3133 conn->auth_type = hci_get_auth_req(conn);
3137 cp.authentication = conn->auth_type; 3134 cp.authentication = conn->auth_type;
3138 3135
3139 if ((conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)) && 3136 if (hci_find_remote_oob_data(hdev, &conn->dst) &&
3140 hci_find_remote_oob_data(hdev, &conn->dst)) 3137 (conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)))
3141 cp.oob_data = 0x01; 3138 cp.oob_data = 0x01;
3142 else 3139 else
3143 cp.oob_data = 0x00; 3140 cp.oob_data = 0x00;
3144 3141
3145 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY, 3142 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
3146 sizeof(cp), &cp); 3143 sizeof(cp), &cp);
3147 } else { 3144 } else {
3148 struct hci_cp_io_capability_neg_reply cp; 3145 struct hci_cp_io_capability_neg_reply cp;
3149 3146
@@ -3151,14 +3148,14 @@ static inline void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff
3151 cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED; 3148 cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
3152 3149
3153 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY, 3150 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
3154 sizeof(cp), &cp); 3151 sizeof(cp), &cp);
3155 } 3152 }
3156 3153
3157unlock: 3154unlock:
3158 hci_dev_unlock(hdev); 3155 hci_dev_unlock(hdev);
3159} 3156}
3160 3157
3161static inline void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb) 3158static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
3162{ 3159{
3163 struct hci_ev_io_capa_reply *ev = (void *) skb->data; 3160 struct hci_ev_io_capa_reply *ev = (void *) skb->data;
3164 struct hci_conn *conn; 3161 struct hci_conn *conn;
@@ -3180,8 +3177,8 @@ unlock:
3180 hci_dev_unlock(hdev); 3177 hci_dev_unlock(hdev);
3181} 3178}
3182 3179
3183static inline void hci_user_confirm_request_evt(struct hci_dev *hdev, 3180static void hci_user_confirm_request_evt(struct hci_dev *hdev,
3184 struct sk_buff *skb) 3181 struct sk_buff *skb)
3185{ 3182{
3186 struct hci_ev_user_confirm_req *ev = (void *) skb->data; 3183 struct hci_ev_user_confirm_req *ev = (void *) skb->data;
3187 int loc_mitm, rem_mitm, confirm_hint = 0; 3184 int loc_mitm, rem_mitm, confirm_hint = 0;
@@ -3209,13 +3206,13 @@ static inline void hci_user_confirm_request_evt(struct hci_dev *hdev,
3209 if (!conn->connect_cfm_cb && loc_mitm && conn->remote_cap == 0x03) { 3206 if (!conn->connect_cfm_cb && loc_mitm && conn->remote_cap == 0x03) {
3210 BT_DBG("Rejecting request: remote device can't provide MITM"); 3207 BT_DBG("Rejecting request: remote device can't provide MITM");
3211 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY, 3208 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
3212 sizeof(ev->bdaddr), &ev->bdaddr); 3209 sizeof(ev->bdaddr), &ev->bdaddr);
3213 goto unlock; 3210 goto unlock;
3214 } 3211 }
3215 3212
3216 /* If no side requires MITM protection; auto-accept */ 3213 /* If no side requires MITM protection; auto-accept */
3217 if ((!loc_mitm || conn->remote_cap == 0x03) && 3214 if ((!loc_mitm || conn->remote_cap == 0x03) &&
3218 (!rem_mitm || conn->io_capability == 0x03)) { 3215 (!rem_mitm || conn->io_capability == 0x03)) {
3219 3216
3220 /* If we're not the initiators request authorization to 3217 /* If we're not the initiators request authorization to
3221 * proceed from user space (mgmt_user_confirm with 3218 * proceed from user space (mgmt_user_confirm with
@@ -3227,7 +3224,7 @@ static inline void hci_user_confirm_request_evt(struct hci_dev *hdev,
3227 } 3224 }
3228 3225
3229 BT_DBG("Auto-accept of user confirmation with %ums delay", 3226 BT_DBG("Auto-accept of user confirmation with %ums delay",
3230 hdev->auto_accept_delay); 3227 hdev->auto_accept_delay);
3231 3228
3232 if (hdev->auto_accept_delay > 0) { 3229 if (hdev->auto_accept_delay > 0) {
3233 int delay = msecs_to_jiffies(hdev->auto_accept_delay); 3230 int delay = msecs_to_jiffies(hdev->auto_accept_delay);
@@ -3236,7 +3233,7 @@ static inline void hci_user_confirm_request_evt(struct hci_dev *hdev,
3236 } 3233 }
3237 3234
3238 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY, 3235 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
3239 sizeof(ev->bdaddr), &ev->bdaddr); 3236 sizeof(ev->bdaddr), &ev->bdaddr);
3240 goto unlock; 3237 goto unlock;
3241 } 3238 }
3242 3239
@@ -3248,8 +3245,8 @@ unlock:
3248 hci_dev_unlock(hdev); 3245 hci_dev_unlock(hdev);
3249} 3246}
3250 3247
3251static inline void hci_user_passkey_request_evt(struct hci_dev *hdev, 3248static void hci_user_passkey_request_evt(struct hci_dev *hdev,
3252 struct sk_buff *skb) 3249 struct sk_buff *skb)
3253{ 3250{
3254 struct hci_ev_user_passkey_req *ev = (void *) skb->data; 3251 struct hci_ev_user_passkey_req *ev = (void *) skb->data;
3255 3252
@@ -3263,7 +3260,8 @@ static inline void hci_user_passkey_request_evt(struct hci_dev *hdev,
3263 hci_dev_unlock(hdev); 3260 hci_dev_unlock(hdev);
3264} 3261}
3265 3262
3266static inline void hci_simple_pair_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) 3263static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
3264 struct sk_buff *skb)
3267{ 3265{
3268 struct hci_ev_simple_pair_complete *ev = (void *) skb->data; 3266 struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
3269 struct hci_conn *conn; 3267 struct hci_conn *conn;
@@ -3291,7 +3289,8 @@ unlock:
3291 hci_dev_unlock(hdev); 3289 hci_dev_unlock(hdev);
3292} 3290}
3293 3291
3294static inline void hci_remote_host_features_evt(struct hci_dev *hdev, struct sk_buff *skb) 3292static void hci_remote_host_features_evt(struct hci_dev *hdev,
3293 struct sk_buff *skb)
3295{ 3294{
3296 struct hci_ev_remote_host_features *ev = (void *) skb->data; 3295 struct hci_ev_remote_host_features *ev = (void *) skb->data;
3297 struct inquiry_entry *ie; 3296 struct inquiry_entry *ie;
@@ -3307,8 +3306,8 @@ static inline void hci_remote_host_features_evt(struct hci_dev *hdev, struct sk_
3307 hci_dev_unlock(hdev); 3306 hci_dev_unlock(hdev);
3308} 3307}
3309 3308
3310static inline void hci_remote_oob_data_request_evt(struct hci_dev *hdev, 3309static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
3311 struct sk_buff *skb) 3310 struct sk_buff *skb)
3312{ 3311{
3313 struct hci_ev_remote_oob_data_request *ev = (void *) skb->data; 3312 struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
3314 struct oob_data *data; 3313 struct oob_data *data;
@@ -3329,20 +3328,20 @@ static inline void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
3329 memcpy(cp.randomizer, data->randomizer, sizeof(cp.randomizer)); 3328 memcpy(cp.randomizer, data->randomizer, sizeof(cp.randomizer));
3330 3329
3331 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY, sizeof(cp), 3330 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY, sizeof(cp),
3332 &cp); 3331 &cp);
3333 } else { 3332 } else {
3334 struct hci_cp_remote_oob_data_neg_reply cp; 3333 struct hci_cp_remote_oob_data_neg_reply cp;
3335 3334
3336 bacpy(&cp.bdaddr, &ev->bdaddr); 3335 bacpy(&cp.bdaddr, &ev->bdaddr);
3337 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY, sizeof(cp), 3336 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY, sizeof(cp),
3338 &cp); 3337 &cp);
3339 } 3338 }
3340 3339
3341unlock: 3340unlock:
3342 hci_dev_unlock(hdev); 3341 hci_dev_unlock(hdev);
3343} 3342}
3344 3343
3345static inline void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) 3344static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
3346{ 3345{
3347 struct hci_ev_le_conn_complete *ev = (void *) skb->data; 3346 struct hci_ev_le_conn_complete *ev = (void *) skb->data;
3348 struct hci_conn *conn; 3347 struct hci_conn *conn;
@@ -3351,6 +3350,19 @@ static inline void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff
3351 3350
3352 hci_dev_lock(hdev); 3351 hci_dev_lock(hdev);
3353 3352
3353 if (ev->status) {
3354 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
3355 if (!conn)
3356 goto unlock;
3357
3358 mgmt_connect_failed(hdev, &conn->dst, conn->type,
3359 conn->dst_type, ev->status);
3360 hci_proto_connect_cfm(conn, ev->status);
3361 conn->state = BT_CLOSED;
3362 hci_conn_del(conn);
3363 goto unlock;
3364 }
3365
3354 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &ev->bdaddr); 3366 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &ev->bdaddr);
3355 if (!conn) { 3367 if (!conn) {
3356 conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr); 3368 conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr);
@@ -3363,15 +3375,6 @@ static inline void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff
3363 conn->dst_type = ev->bdaddr_type; 3375 conn->dst_type = ev->bdaddr_type;
3364 } 3376 }
3365 3377
3366 if (ev->status) {
3367 mgmt_connect_failed(hdev, &ev->bdaddr, conn->type,
3368 conn->dst_type, ev->status);
3369 hci_proto_connect_cfm(conn, ev->status);
3370 conn->state = BT_CLOSED;
3371 hci_conn_del(conn);
3372 goto unlock;
3373 }
3374
3375 if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) 3378 if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
3376 mgmt_device_connected(hdev, &ev->bdaddr, conn->type, 3379 mgmt_device_connected(hdev, &ev->bdaddr, conn->type,
3377 conn->dst_type, 0, NULL, 0, NULL); 3380 conn->dst_type, 0, NULL, 0, NULL);
@@ -3389,8 +3392,7 @@ unlock:
3389 hci_dev_unlock(hdev); 3392 hci_dev_unlock(hdev);
3390} 3393}
3391 3394
3392static inline void hci_le_adv_report_evt(struct hci_dev *hdev, 3395static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
3393 struct sk_buff *skb)
3394{ 3396{
3395 u8 num_reports = skb->data[0]; 3397 u8 num_reports = skb->data[0];
3396 void *ptr = &skb->data[1]; 3398 void *ptr = &skb->data[1];
@@ -3411,8 +3413,7 @@ static inline void hci_le_adv_report_evt(struct hci_dev *hdev,
3411 hci_dev_unlock(hdev); 3413 hci_dev_unlock(hdev);
3412} 3414}
3413 3415
3414static inline void hci_le_ltk_request_evt(struct hci_dev *hdev, 3416static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3415 struct sk_buff *skb)
3416{ 3417{
3417 struct hci_ev_le_ltk_req *ev = (void *) skb->data; 3418 struct hci_ev_le_ltk_req *ev = (void *) skb->data;
3418 struct hci_cp_le_ltk_reply cp; 3419 struct hci_cp_le_ltk_reply cp;
@@ -3455,7 +3456,7 @@ not_found:
3455 hci_dev_unlock(hdev); 3456 hci_dev_unlock(hdev);
3456} 3457}
3457 3458
3458static inline void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb) 3459static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
3459{ 3460{
3460 struct hci_ev_le_meta *le_ev = (void *) skb->data; 3461 struct hci_ev_le_meta *le_ev = (void *) skb->data;
3461 3462
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index 5914623f426..a7f04de03d7 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -24,25 +24,7 @@
24 24
25/* Bluetooth HCI sockets. */ 25/* Bluetooth HCI sockets. */
26 26
27#include <linux/module.h> 27#include <linux/export.h>
28
29#include <linux/types.h>
30#include <linux/capability.h>
31#include <linux/errno.h>
32#include <linux/kernel.h>
33#include <linux/slab.h>
34#include <linux/poll.h>
35#include <linux/fcntl.h>
36#include <linux/init.h>
37#include <linux/skbuff.h>
38#include <linux/workqueue.h>
39#include <linux/interrupt.h>
40#include <linux/compat.h>
41#include <linux/socket.h>
42#include <linux/ioctl.h>
43#include <net/sock.h>
44
45#include <linux/uaccess.h>
46#include <asm/unaligned.h> 28#include <asm/unaligned.h>
47 29
48#include <net/bluetooth/bluetooth.h> 30#include <net/bluetooth/bluetooth.h>
@@ -113,11 +95,12 @@ void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
113 flt = &hci_pi(sk)->filter; 95 flt = &hci_pi(sk)->filter;
114 96
115 if (!test_bit((bt_cb(skb)->pkt_type == HCI_VENDOR_PKT) ? 97 if (!test_bit((bt_cb(skb)->pkt_type == HCI_VENDOR_PKT) ?
116 0 : (bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS), &flt->type_mask)) 98 0 : (bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS),
99 &flt->type_mask))
117 continue; 100 continue;
118 101
119 if (bt_cb(skb)->pkt_type == HCI_EVENT_PKT) { 102 if (bt_cb(skb)->pkt_type == HCI_EVENT_PKT) {
120 register int evt = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS); 103 int evt = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);
121 104
122 if (!hci_test_bit(evt, &flt->event_mask)) 105 if (!hci_test_bit(evt, &flt->event_mask))
123 continue; 106 continue;
@@ -240,7 +223,8 @@ void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
240 struct hci_mon_hdr *hdr; 223 struct hci_mon_hdr *hdr;
241 224
242 /* Create a private copy with headroom */ 225 /* Create a private copy with headroom */
243 skb_copy = __pskb_copy(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC); 226 skb_copy = __pskb_copy(skb, HCI_MON_HDR_SIZE,
227 GFP_ATOMIC);
244 if (!skb_copy) 228 if (!skb_copy)
245 continue; 229 continue;
246 230
@@ -495,7 +479,8 @@ static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
495} 479}
496 480
497/* Ioctls that require bound socket */ 481/* Ioctls that require bound socket */
498static inline int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd, unsigned long arg) 482static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
483 unsigned long arg)
499{ 484{
500 struct hci_dev *hdev = hci_pi(sk)->hdev; 485 struct hci_dev *hdev = hci_pi(sk)->hdev;
501 486
@@ -540,7 +525,8 @@ static inline int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd, unsign
540 } 525 }
541} 526}
542 527
543static int hci_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) 528static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
529 unsigned long arg)
544{ 530{
545 struct sock *sk = sock->sk; 531 struct sock *sk = sock->sk;
546 void __user *argp = (void __user *) arg; 532 void __user *argp = (void __user *) arg;
@@ -601,7 +587,8 @@ static int hci_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long a
601 } 587 }
602} 588}
603 589
604static int hci_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len) 590static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
591 int addr_len)
605{ 592{
606 struct sockaddr_hci haddr; 593 struct sockaddr_hci haddr;
607 struct sock *sk = sock->sk; 594 struct sock *sk = sock->sk;
@@ -690,7 +677,8 @@ done:
690 return err; 677 return err;
691} 678}
692 679
693static int hci_sock_getname(struct socket *sock, struct sockaddr *addr, int *addr_len, int peer) 680static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
681 int *addr_len, int peer)
694{ 682{
695 struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr; 683 struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr;
696 struct sock *sk = sock->sk; 684 struct sock *sk = sock->sk;
@@ -711,13 +699,15 @@ static int hci_sock_getname(struct socket *sock, struct sockaddr *addr, int *add
711 return 0; 699 return 0;
712} 700}
713 701
714static inline void hci_sock_cmsg(struct sock *sk, struct msghdr *msg, struct sk_buff *skb) 702static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
703 struct sk_buff *skb)
715{ 704{
716 __u32 mask = hci_pi(sk)->cmsg_mask; 705 __u32 mask = hci_pi(sk)->cmsg_mask;
717 706
718 if (mask & HCI_CMSG_DIR) { 707 if (mask & HCI_CMSG_DIR) {
719 int incoming = bt_cb(skb)->incoming; 708 int incoming = bt_cb(skb)->incoming;
720 put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming), &incoming); 709 put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
710 &incoming);
721 } 711 }
722 712
723 if (mask & HCI_CMSG_TSTAMP) { 713 if (mask & HCI_CMSG_TSTAMP) {
@@ -747,7 +737,7 @@ static inline void hci_sock_cmsg(struct sock *sk, struct msghdr *msg, struct sk_
747} 737}
748 738
749static int hci_sock_recvmsg(struct kiocb *iocb, struct socket *sock, 739static int hci_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
750 struct msghdr *msg, size_t len, int flags) 740 struct msghdr *msg, size_t len, int flags)
751{ 741{
752 int noblock = flags & MSG_DONTWAIT; 742 int noblock = flags & MSG_DONTWAIT;
753 struct sock *sk = sock->sk; 743 struct sock *sk = sock->sk;
@@ -857,8 +847,9 @@ static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
857 u16 ocf = hci_opcode_ocf(opcode); 847 u16 ocf = hci_opcode_ocf(opcode);
858 848
859 if (((ogf > HCI_SFLT_MAX_OGF) || 849 if (((ogf > HCI_SFLT_MAX_OGF) ||
860 !hci_test_bit(ocf & HCI_FLT_OCF_BITS, &hci_sec_filter.ocf_mask[ogf])) && 850 !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
861 !capable(CAP_NET_RAW)) { 851 &hci_sec_filter.ocf_mask[ogf])) &&
852 !capable(CAP_NET_RAW)) {
862 err = -EPERM; 853 err = -EPERM;
863 goto drop; 854 goto drop;
864 } 855 }
@@ -891,7 +882,8 @@ drop:
891 goto done; 882 goto done;
892} 883}
893 884
894static int hci_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int len) 885static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
886 char __user *optval, unsigned int len)
895{ 887{
896 struct hci_ufilter uf = { .opcode = 0 }; 888 struct hci_ufilter uf = { .opcode = 0 };
897 struct sock *sk = sock->sk; 889 struct sock *sk = sock->sk;
@@ -973,7 +965,8 @@ done:
973 return err; 965 return err;
974} 966}
975 967
976static int hci_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen) 968static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
969 char __user *optval, int __user *optlen)
977{ 970{
978 struct hci_ufilter uf; 971 struct hci_ufilter uf;
979 struct sock *sk = sock->sk; 972 struct sock *sk = sock->sk;
diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c
index 937f3187eaf..a20e61c3653 100644
--- a/net/bluetooth/hci_sysfs.c
+++ b/net/bluetooth/hci_sysfs.c
@@ -1,10 +1,6 @@
1/* Bluetooth HCI driver model support. */ 1/* Bluetooth HCI driver model support. */
2 2
3#include <linux/kernel.h>
4#include <linux/slab.h>
5#include <linux/init.h>
6#include <linux/debugfs.h> 3#include <linux/debugfs.h>
7#include <linux/seq_file.h>
8#include <linux/module.h> 4#include <linux/module.h>
9 5
10#include <net/bluetooth/bluetooth.h> 6#include <net/bluetooth/bluetooth.h>
@@ -31,27 +27,30 @@ static inline char *link_typetostr(int type)
31 } 27 }
32} 28}
33 29
34static ssize_t show_link_type(struct device *dev, struct device_attribute *attr, char *buf) 30static ssize_t show_link_type(struct device *dev,
31 struct device_attribute *attr, char *buf)
35{ 32{
36 struct hci_conn *conn = to_hci_conn(dev); 33 struct hci_conn *conn = to_hci_conn(dev);
37 return sprintf(buf, "%s\n", link_typetostr(conn->type)); 34 return sprintf(buf, "%s\n", link_typetostr(conn->type));
38} 35}
39 36
40static ssize_t show_link_address(struct device *dev, struct device_attribute *attr, char *buf) 37static ssize_t show_link_address(struct device *dev,
38 struct device_attribute *attr, char *buf)
41{ 39{
42 struct hci_conn *conn = to_hci_conn(dev); 40 struct hci_conn *conn = to_hci_conn(dev);
43 return sprintf(buf, "%s\n", batostr(&conn->dst)); 41 return sprintf(buf, "%s\n", batostr(&conn->dst));
44} 42}
45 43
46static ssize_t show_link_features(struct device *dev, struct device_attribute *attr, char *buf) 44static ssize_t show_link_features(struct device *dev,
45 struct device_attribute *attr, char *buf)
47{ 46{
48 struct hci_conn *conn = to_hci_conn(dev); 47 struct hci_conn *conn = to_hci_conn(dev);
49 48
50 return sprintf(buf, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n", 49 return sprintf(buf, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
51 conn->features[0], conn->features[1], 50 conn->features[0], conn->features[1],
52 conn->features[2], conn->features[3], 51 conn->features[2], conn->features[3],
53 conn->features[4], conn->features[5], 52 conn->features[4], conn->features[5],
54 conn->features[6], conn->features[7]); 53 conn->features[6], conn->features[7]);
55} 54}
56 55
57#define LINK_ATTR(_name, _mode, _show, _store) \ 56#define LINK_ATTR(_name, _mode, _show, _store) \
@@ -185,19 +184,22 @@ static inline char *host_typetostr(int type)
185 } 184 }
186} 185}
187 186
188static ssize_t show_bus(struct device *dev, struct device_attribute *attr, char *buf) 187static ssize_t show_bus(struct device *dev,
188 struct device_attribute *attr, char *buf)
189{ 189{
190 struct hci_dev *hdev = to_hci_dev(dev); 190 struct hci_dev *hdev = to_hci_dev(dev);
191 return sprintf(buf, "%s\n", host_bustostr(hdev->bus)); 191 return sprintf(buf, "%s\n", host_bustostr(hdev->bus));
192} 192}
193 193
194static ssize_t show_type(struct device *dev, struct device_attribute *attr, char *buf) 194static ssize_t show_type(struct device *dev,
195 struct device_attribute *attr, char *buf)
195{ 196{
196 struct hci_dev *hdev = to_hci_dev(dev); 197 struct hci_dev *hdev = to_hci_dev(dev);
197 return sprintf(buf, "%s\n", host_typetostr(hdev->dev_type)); 198 return sprintf(buf, "%s\n", host_typetostr(hdev->dev_type));
198} 199}
199 200
200static ssize_t show_name(struct device *dev, struct device_attribute *attr, char *buf) 201static ssize_t show_name(struct device *dev,
202 struct device_attribute *attr, char *buf)
201{ 203{
202 struct hci_dev *hdev = to_hci_dev(dev); 204 struct hci_dev *hdev = to_hci_dev(dev);
203 char name[HCI_MAX_NAME_LENGTH + 1]; 205 char name[HCI_MAX_NAME_LENGTH + 1];
@@ -210,55 +212,64 @@ static ssize_t show_name(struct device *dev, struct device_attribute *attr, char
210 return sprintf(buf, "%s\n", name); 212 return sprintf(buf, "%s\n", name);
211} 213}
212 214
213static ssize_t show_class(struct device *dev, struct device_attribute *attr, char *buf) 215static ssize_t show_class(struct device *dev,
216 struct device_attribute *attr, char *buf)
214{ 217{
215 struct hci_dev *hdev = to_hci_dev(dev); 218 struct hci_dev *hdev = to_hci_dev(dev);
216 return sprintf(buf, "0x%.2x%.2x%.2x\n", 219 return sprintf(buf, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
217 hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]); 220 hdev->dev_class[1], hdev->dev_class[0]);
218} 221}
219 222
220static ssize_t show_address(struct device *dev, struct device_attribute *attr, char *buf) 223static ssize_t show_address(struct device *dev,
224 struct device_attribute *attr, char *buf)
221{ 225{
222 struct hci_dev *hdev = to_hci_dev(dev); 226 struct hci_dev *hdev = to_hci_dev(dev);
223 return sprintf(buf, "%s\n", batostr(&hdev->bdaddr)); 227 return sprintf(buf, "%s\n", batostr(&hdev->bdaddr));
224} 228}
225 229
226static ssize_t show_features(struct device *dev, struct device_attribute *attr, char *buf) 230static ssize_t show_features(struct device *dev,
231 struct device_attribute *attr, char *buf)
227{ 232{
228 struct hci_dev *hdev = to_hci_dev(dev); 233 struct hci_dev *hdev = to_hci_dev(dev);
229 234
230 return sprintf(buf, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n", 235 return sprintf(buf, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
231 hdev->features[0], hdev->features[1], 236 hdev->features[0], hdev->features[1],
232 hdev->features[2], hdev->features[3], 237 hdev->features[2], hdev->features[3],
233 hdev->features[4], hdev->features[5], 238 hdev->features[4], hdev->features[5],
234 hdev->features[6], hdev->features[7]); 239 hdev->features[6], hdev->features[7]);
235} 240}
236 241
237static ssize_t show_manufacturer(struct device *dev, struct device_attribute *attr, char *buf) 242static ssize_t show_manufacturer(struct device *dev,
243 struct device_attribute *attr, char *buf)
238{ 244{
239 struct hci_dev *hdev = to_hci_dev(dev); 245 struct hci_dev *hdev = to_hci_dev(dev);
240 return sprintf(buf, "%d\n", hdev->manufacturer); 246 return sprintf(buf, "%d\n", hdev->manufacturer);
241} 247}
242 248
243static ssize_t show_hci_version(struct device *dev, struct device_attribute *attr, char *buf) 249static ssize_t show_hci_version(struct device *dev,
250 struct device_attribute *attr, char *buf)
244{ 251{
245 struct hci_dev *hdev = to_hci_dev(dev); 252 struct hci_dev *hdev = to_hci_dev(dev);
246 return sprintf(buf, "%d\n", hdev->hci_ver); 253 return sprintf(buf, "%d\n", hdev->hci_ver);
247} 254}
248 255
249static ssize_t show_hci_revision(struct device *dev, struct device_attribute *attr, char *buf) 256static ssize_t show_hci_revision(struct device *dev,
257 struct device_attribute *attr, char *buf)
250{ 258{
251 struct hci_dev *hdev = to_hci_dev(dev); 259 struct hci_dev *hdev = to_hci_dev(dev);
252 return sprintf(buf, "%d\n", hdev->hci_rev); 260 return sprintf(buf, "%d\n", hdev->hci_rev);
253} 261}
254 262
255static ssize_t show_idle_timeout(struct device *dev, struct device_attribute *attr, char *buf) 263static ssize_t show_idle_timeout(struct device *dev,
264 struct device_attribute *attr, char *buf)
256{ 265{
257 struct hci_dev *hdev = to_hci_dev(dev); 266 struct hci_dev *hdev = to_hci_dev(dev);
258 return sprintf(buf, "%d\n", hdev->idle_timeout); 267 return sprintf(buf, "%d\n", hdev->idle_timeout);
259} 268}
260 269
261static ssize_t store_idle_timeout(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) 270static ssize_t store_idle_timeout(struct device *dev,
271 struct device_attribute *attr,
272 const char *buf, size_t count)
262{ 273{
263 struct hci_dev *hdev = to_hci_dev(dev); 274 struct hci_dev *hdev = to_hci_dev(dev);
264 unsigned int val; 275 unsigned int val;
@@ -276,13 +287,16 @@ static ssize_t store_idle_timeout(struct device *dev, struct device_attribute *a
276 return count; 287 return count;
277} 288}
278 289
279static ssize_t show_sniff_max_interval(struct device *dev, struct device_attribute *attr, char *buf) 290static ssize_t show_sniff_max_interval(struct device *dev,
291 struct device_attribute *attr, char *buf)
280{ 292{
281 struct hci_dev *hdev = to_hci_dev(dev); 293 struct hci_dev *hdev = to_hci_dev(dev);
282 return sprintf(buf, "%d\n", hdev->sniff_max_interval); 294 return sprintf(buf, "%d\n", hdev->sniff_max_interval);
283} 295}
284 296
285static ssize_t store_sniff_max_interval(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) 297static ssize_t store_sniff_max_interval(struct device *dev,
298 struct device_attribute *attr,
299 const char *buf, size_t count)
286{ 300{
287 struct hci_dev *hdev = to_hci_dev(dev); 301 struct hci_dev *hdev = to_hci_dev(dev);
288 u16 val; 302 u16 val;
@@ -300,13 +314,16 @@ static ssize_t store_sniff_max_interval(struct device *dev, struct device_attrib
300 return count; 314 return count;
301} 315}
302 316
303static ssize_t show_sniff_min_interval(struct device *dev, struct device_attribute *attr, char *buf) 317static ssize_t show_sniff_min_interval(struct device *dev,
318 struct device_attribute *attr, char *buf)
304{ 319{
305 struct hci_dev *hdev = to_hci_dev(dev); 320 struct hci_dev *hdev = to_hci_dev(dev);
306 return sprintf(buf, "%d\n", hdev->sniff_min_interval); 321 return sprintf(buf, "%d\n", hdev->sniff_min_interval);
307} 322}
308 323
309static ssize_t store_sniff_min_interval(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) 324static ssize_t store_sniff_min_interval(struct device *dev,
325 struct device_attribute *attr,
326 const char *buf, size_t count)
310{ 327{
311 struct hci_dev *hdev = to_hci_dev(dev); 328 struct hci_dev *hdev = to_hci_dev(dev);
312 u16 val; 329 u16 val;
@@ -335,11 +352,11 @@ static DEVICE_ATTR(hci_version, S_IRUGO, show_hci_version, NULL);
335static DEVICE_ATTR(hci_revision, S_IRUGO, show_hci_revision, NULL); 352static DEVICE_ATTR(hci_revision, S_IRUGO, show_hci_revision, NULL);
336 353
337static DEVICE_ATTR(idle_timeout, S_IRUGO | S_IWUSR, 354static DEVICE_ATTR(idle_timeout, S_IRUGO | S_IWUSR,
338 show_idle_timeout, store_idle_timeout); 355 show_idle_timeout, store_idle_timeout);
339static DEVICE_ATTR(sniff_max_interval, S_IRUGO | S_IWUSR, 356static DEVICE_ATTR(sniff_max_interval, S_IRUGO | S_IWUSR,
340 show_sniff_max_interval, store_sniff_max_interval); 357 show_sniff_max_interval, store_sniff_max_interval);
341static DEVICE_ATTR(sniff_min_interval, S_IRUGO | S_IWUSR, 358static DEVICE_ATTR(sniff_min_interval, S_IRUGO | S_IWUSR,
342 show_sniff_min_interval, store_sniff_min_interval); 359 show_sniff_min_interval, store_sniff_min_interval);
343 360
344static struct attribute *bt_host_attrs[] = { 361static struct attribute *bt_host_attrs[] = {
345 &dev_attr_bus.attr, 362 &dev_attr_bus.attr,
@@ -455,8 +472,8 @@ static void print_bt_uuid(struct seq_file *f, u8 *uuid)
455 memcpy(&data5, &uuid[14], 2); 472 memcpy(&data5, &uuid[14], 2);
456 473
457 seq_printf(f, "%.8x-%.4x-%.4x-%.4x-%.8x%.4x\n", 474 seq_printf(f, "%.8x-%.4x-%.4x-%.4x-%.8x%.4x\n",
458 ntohl(data0), ntohs(data1), ntohs(data2), 475 ntohl(data0), ntohs(data1), ntohs(data2), ntohs(data3),
459 ntohs(data3), ntohl(data4), ntohs(data5)); 476 ntohl(data4), ntohs(data5));
460} 477}
461 478
462static int uuids_show(struct seq_file *f, void *p) 479static int uuids_show(struct seq_file *f, void *p)
@@ -513,7 +530,7 @@ static int auto_accept_delay_get(void *data, u64 *val)
513} 530}
514 531
515DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get, 532DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
516 auto_accept_delay_set, "%llu\n"); 533 auto_accept_delay_set, "%llu\n");
517 534
518void hci_init_sysfs(struct hci_dev *hdev) 535void hci_init_sysfs(struct hci_dev *hdev)
519{ 536{
@@ -547,15 +564,15 @@ int hci_add_sysfs(struct hci_dev *hdev)
547 return 0; 564 return 0;
548 565
549 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs, 566 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
550 hdev, &inquiry_cache_fops); 567 hdev, &inquiry_cache_fops);
551 568
552 debugfs_create_file("blacklist", 0444, hdev->debugfs, 569 debugfs_create_file("blacklist", 0444, hdev->debugfs,
553 hdev, &blacklist_fops); 570 hdev, &blacklist_fops);
554 571
555 debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops); 572 debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
556 573
557 debugfs_create_file("auto_accept_delay", 0444, hdev->debugfs, hdev, 574 debugfs_create_file("auto_accept_delay", 0444, hdev->debugfs, hdev,
558 &auto_accept_delay_fops); 575 &auto_accept_delay_fops);
559 return 0; 576 return 0;
560} 577}
561 578
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index 2c20d765b39..ccd985da651 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -21,27 +21,8 @@
21*/ 21*/
22 22
23#include <linux/module.h> 23#include <linux/module.h>
24
25#include <linux/types.h>
26#include <linux/errno.h>
27#include <linux/kernel.h>
28#include <linux/sched.h>
29#include <linux/slab.h>
30#include <linux/poll.h>
31#include <linux/freezer.h>
32#include <linux/fcntl.h>
33#include <linux/skbuff.h>
34#include <linux/socket.h>
35#include <linux/ioctl.h>
36#include <linux/file.h> 24#include <linux/file.h>
37#include <linux/init.h>
38#include <linux/wait.h>
39#include <linux/mutex.h>
40#include <linux/kthread.h> 25#include <linux/kthread.h>
41#include <net/sock.h>
42
43#include <linux/input.h>
44#include <linux/hid.h>
45#include <linux/hidraw.h> 26#include <linux/hidraw.h>
46 27
47#include <net/bluetooth/bluetooth.h> 28#include <net/bluetooth/bluetooth.h>
@@ -244,7 +225,8 @@ static void hidp_input_report(struct hidp_session *session, struct sk_buff *skb)
244} 225}
245 226
246static int __hidp_send_ctrl_message(struct hidp_session *session, 227static int __hidp_send_ctrl_message(struct hidp_session *session,
247 unsigned char hdr, unsigned char *data, int size) 228 unsigned char hdr, unsigned char *data,
229 int size)
248{ 230{
249 struct sk_buff *skb; 231 struct sk_buff *skb;
250 232
@@ -268,7 +250,7 @@ static int __hidp_send_ctrl_message(struct hidp_session *session,
268 return 0; 250 return 0;
269} 251}
270 252
271static inline int hidp_send_ctrl_message(struct hidp_session *session, 253static int hidp_send_ctrl_message(struct hidp_session *session,
272 unsigned char hdr, unsigned char *data, int size) 254 unsigned char hdr, unsigned char *data, int size)
273{ 255{
274 int err; 256 int err;
@@ -471,7 +453,7 @@ static void hidp_set_timer(struct hidp_session *session)
471 mod_timer(&session->timer, jiffies + HZ * session->idle_to); 453 mod_timer(&session->timer, jiffies + HZ * session->idle_to);
472} 454}
473 455
474static inline void hidp_del_timer(struct hidp_session *session) 456static void hidp_del_timer(struct hidp_session *session)
475{ 457{
476 if (session->idle_to > 0) 458 if (session->idle_to > 0)
477 del_timer(&session->timer); 459 del_timer(&session->timer);
diff --git a/net/bluetooth/hidp/sock.c b/net/bluetooth/hidp/sock.c
index 73a32d705c1..18b3f6892a3 100644
--- a/net/bluetooth/hidp/sock.c
+++ b/net/bluetooth/hidp/sock.c
@@ -20,22 +20,8 @@
20 SOFTWARE IS DISCLAIMED. 20 SOFTWARE IS DISCLAIMED.
21*/ 21*/
22 22
23#include <linux/module.h> 23#include <linux/export.h>
24
25#include <linux/types.h>
26#include <linux/capability.h>
27#include <linux/errno.h>
28#include <linux/kernel.h>
29#include <linux/poll.h>
30#include <linux/fcntl.h>
31#include <linux/skbuff.h>
32#include <linux/socket.h>
33#include <linux/ioctl.h>
34#include <linux/file.h> 24#include <linux/file.h>
35#include <linux/init.h>
36#include <linux/compat.h>
37#include <linux/gfp.h>
38#include <net/sock.h>
39 25
40#include "hidp.h" 26#include "hidp.h"
41 27
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 4554e80d16a..d42dfdc83eb 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -30,32 +30,14 @@
30 30
31#include <linux/module.h> 31#include <linux/module.h>
32 32
33#include <linux/types.h>
34#include <linux/capability.h>
35#include <linux/errno.h>
36#include <linux/kernel.h>
37#include <linux/sched.h>
38#include <linux/slab.h>
39#include <linux/poll.h>
40#include <linux/fcntl.h>
41#include <linux/init.h>
42#include <linux/interrupt.h>
43#include <linux/socket.h>
44#include <linux/skbuff.h>
45#include <linux/list.h>
46#include <linux/device.h>
47#include <linux/debugfs.h> 33#include <linux/debugfs.h>
48#include <linux/seq_file.h>
49#include <linux/uaccess.h>
50#include <linux/crc16.h> 34#include <linux/crc16.h>
51#include <net/sock.h>
52
53#include <asm/unaligned.h>
54 35
55#include <net/bluetooth/bluetooth.h> 36#include <net/bluetooth/bluetooth.h>
56#include <net/bluetooth/hci_core.h> 37#include <net/bluetooth/hci_core.h>
57#include <net/bluetooth/l2cap.h> 38#include <net/bluetooth/l2cap.h>
58#include <net/bluetooth/smp.h> 39#include <net/bluetooth/smp.h>
40#include <net/bluetooth/a2mp.h>
59 41
60bool disable_ertm; 42bool disable_ertm;
61 43
@@ -73,6 +55,9 @@ static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
73static void l2cap_send_disconn_req(struct l2cap_conn *conn, 55static void l2cap_send_disconn_req(struct l2cap_conn *conn,
74 struct l2cap_chan *chan, int err); 56 struct l2cap_chan *chan, int err);
75 57
58static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
59 struct sk_buff_head *skbs, u8 event);
60
76/* ---- L2CAP channels ---- */ 61/* ---- L2CAP channels ---- */
77 62
78static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid) 63static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
@@ -196,7 +181,7 @@ static void __l2cap_state_change(struct l2cap_chan *chan, int state)
196 state_to_string(state)); 181 state_to_string(state));
197 182
198 chan->state = state; 183 chan->state = state;
199 chan->ops->state_change(chan->data, state); 184 chan->ops->state_change(chan, state);
200} 185}
201 186
202static void l2cap_state_change(struct l2cap_chan *chan, int state) 187static void l2cap_state_change(struct l2cap_chan *chan, int state)
@@ -224,6 +209,37 @@ static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
224 release_sock(sk); 209 release_sock(sk);
225} 210}
226 211
212static void __set_retrans_timer(struct l2cap_chan *chan)
213{
214 if (!delayed_work_pending(&chan->monitor_timer) &&
215 chan->retrans_timeout) {
216 l2cap_set_timer(chan, &chan->retrans_timer,
217 msecs_to_jiffies(chan->retrans_timeout));
218 }
219}
220
221static void __set_monitor_timer(struct l2cap_chan *chan)
222{
223 __clear_retrans_timer(chan);
224 if (chan->monitor_timeout) {
225 l2cap_set_timer(chan, &chan->monitor_timer,
226 msecs_to_jiffies(chan->monitor_timeout));
227 }
228}
229
230static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
231 u16 seq)
232{
233 struct sk_buff *skb;
234
235 skb_queue_walk(head, skb) {
236 if (bt_cb(skb)->control.txseq == seq)
237 return skb;
238 }
239
240 return NULL;
241}
242
227/* ---- L2CAP sequence number lists ---- */ 243/* ---- L2CAP sequence number lists ---- */
228 244
229/* For ERTM, ordered lists of sequence numbers must be tracked for 245/* For ERTM, ordered lists of sequence numbers must be tracked for
@@ -366,7 +382,7 @@ static void l2cap_chan_timeout(struct work_struct *work)
366 382
367 l2cap_chan_unlock(chan); 383 l2cap_chan_unlock(chan);
368 384
369 chan->ops->close(chan->data); 385 chan->ops->close(chan);
370 mutex_unlock(&conn->chan_lock); 386 mutex_unlock(&conn->chan_lock);
371 387
372 l2cap_chan_put(chan); 388 l2cap_chan_put(chan);
@@ -392,6 +408,9 @@ struct l2cap_chan *l2cap_chan_create(void)
392 408
393 atomic_set(&chan->refcnt, 1); 409 atomic_set(&chan->refcnt, 1);
394 410
411 /* This flag is cleared in l2cap_chan_ready() */
412 set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
413
395 BT_DBG("chan %p", chan); 414 BT_DBG("chan %p", chan);
396 415
397 return chan; 416 return chan;
@@ -430,7 +449,7 @@ static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
430 case L2CAP_CHAN_CONN_ORIENTED: 449 case L2CAP_CHAN_CONN_ORIENTED:
431 if (conn->hcon->type == LE_LINK) { 450 if (conn->hcon->type == LE_LINK) {
432 /* LE connection */ 451 /* LE connection */
433 chan->omtu = L2CAP_LE_DEFAULT_MTU; 452 chan->omtu = L2CAP_DEFAULT_MTU;
434 chan->scid = L2CAP_CID_LE_DATA; 453 chan->scid = L2CAP_CID_LE_DATA;
435 chan->dcid = L2CAP_CID_LE_DATA; 454 chan->dcid = L2CAP_CID_LE_DATA;
436 } else { 455 } else {
@@ -447,6 +466,13 @@ static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
447 chan->omtu = L2CAP_DEFAULT_MTU; 466 chan->omtu = L2CAP_DEFAULT_MTU;
448 break; 467 break;
449 468
469 case L2CAP_CHAN_CONN_FIX_A2MP:
470 chan->scid = L2CAP_CID_A2MP;
471 chan->dcid = L2CAP_CID_A2MP;
472 chan->omtu = L2CAP_A2MP_DEFAULT_MTU;
473 chan->imtu = L2CAP_A2MP_DEFAULT_MTU;
474 break;
475
450 default: 476 default:
451 /* Raw socket can send/recv signalling messages only */ 477 /* Raw socket can send/recv signalling messages only */
452 chan->scid = L2CAP_CID_SIGNALING; 478 chan->scid = L2CAP_CID_SIGNALING;
@@ -466,18 +492,16 @@ static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
466 list_add(&chan->list, &conn->chan_l); 492 list_add(&chan->list, &conn->chan_l);
467} 493}
468 494
469static void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan) 495void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
470{ 496{
471 mutex_lock(&conn->chan_lock); 497 mutex_lock(&conn->chan_lock);
472 __l2cap_chan_add(conn, chan); 498 __l2cap_chan_add(conn, chan);
473 mutex_unlock(&conn->chan_lock); 499 mutex_unlock(&conn->chan_lock);
474} 500}
475 501
476static void l2cap_chan_del(struct l2cap_chan *chan, int err) 502void l2cap_chan_del(struct l2cap_chan *chan, int err)
477{ 503{
478 struct sock *sk = chan->sk;
479 struct l2cap_conn *conn = chan->conn; 504 struct l2cap_conn *conn = chan->conn;
480 struct sock *parent = bt_sk(sk)->parent;
481 505
482 __clear_chan_timer(chan); 506 __clear_chan_timer(chan);
483 507
@@ -490,34 +514,22 @@ static void l2cap_chan_del(struct l2cap_chan *chan, int err)
490 l2cap_chan_put(chan); 514 l2cap_chan_put(chan);
491 515
492 chan->conn = NULL; 516 chan->conn = NULL;
493 hci_conn_put(conn->hcon);
494 }
495
496 lock_sock(sk);
497
498 __l2cap_state_change(chan, BT_CLOSED);
499 sock_set_flag(sk, SOCK_ZAPPED);
500 517
501 if (err) 518 if (chan->chan_type != L2CAP_CHAN_CONN_FIX_A2MP)
502 __l2cap_chan_set_err(chan, err); 519 hci_conn_put(conn->hcon);
520 }
503 521
504 if (parent) { 522 if (chan->ops->teardown)
505 bt_accept_unlink(sk); 523 chan->ops->teardown(chan, err);
506 parent->sk_data_ready(parent, 0);
507 } else
508 sk->sk_state_change(sk);
509 524
510 release_sock(sk); 525 if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
511
512 if (!(test_bit(CONF_OUTPUT_DONE, &chan->conf_state) &&
513 test_bit(CONF_INPUT_DONE, &chan->conf_state)))
514 return; 526 return;
515 527
516 skb_queue_purge(&chan->tx_q); 528 switch(chan->mode) {
517 529 case L2CAP_MODE_BASIC:
518 if (chan->mode == L2CAP_MODE_ERTM) { 530 break;
519 struct srej_list *l, *tmp;
520 531
532 case L2CAP_MODE_ERTM:
521 __clear_retrans_timer(chan); 533 __clear_retrans_timer(chan);
522 __clear_monitor_timer(chan); 534 __clear_monitor_timer(chan);
523 __clear_ack_timer(chan); 535 __clear_ack_timer(chan);
@@ -526,30 +538,15 @@ static void l2cap_chan_del(struct l2cap_chan *chan, int err)
526 538
527 l2cap_seq_list_free(&chan->srej_list); 539 l2cap_seq_list_free(&chan->srej_list);
528 l2cap_seq_list_free(&chan->retrans_list); 540 l2cap_seq_list_free(&chan->retrans_list);
529 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
530 list_del(&l->list);
531 kfree(l);
532 }
533 }
534}
535
536static void l2cap_chan_cleanup_listen(struct sock *parent)
537{
538 struct sock *sk;
539
540 BT_DBG("parent %p", parent);
541 541
542 /* Close not yet accepted channels */ 542 /* fall through */
543 while ((sk = bt_accept_dequeue(parent, NULL))) {
544 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
545
546 l2cap_chan_lock(chan);
547 __clear_chan_timer(chan);
548 l2cap_chan_close(chan, ECONNRESET);
549 l2cap_chan_unlock(chan);
550 543
551 chan->ops->close(chan->data); 544 case L2CAP_MODE_STREAMING:
545 skb_queue_purge(&chan->tx_q);
546 break;
552 } 547 }
548
549 return;
553} 550}
554 551
555void l2cap_chan_close(struct l2cap_chan *chan, int reason) 552void l2cap_chan_close(struct l2cap_chan *chan, int reason)
@@ -562,12 +559,8 @@ void l2cap_chan_close(struct l2cap_chan *chan, int reason)
562 559
563 switch (chan->state) { 560 switch (chan->state) {
564 case BT_LISTEN: 561 case BT_LISTEN:
565 lock_sock(sk); 562 if (chan->ops->teardown)
566 l2cap_chan_cleanup_listen(sk); 563 chan->ops->teardown(chan, 0);
567
568 __l2cap_state_change(chan, BT_CLOSED);
569 sock_set_flag(sk, SOCK_ZAPPED);
570 release_sock(sk);
571 break; 564 break;
572 565
573 case BT_CONNECTED: 566 case BT_CONNECTED:
@@ -595,7 +588,7 @@ void l2cap_chan_close(struct l2cap_chan *chan, int reason)
595 rsp.scid = cpu_to_le16(chan->dcid); 588 rsp.scid = cpu_to_le16(chan->dcid);
596 rsp.dcid = cpu_to_le16(chan->scid); 589 rsp.dcid = cpu_to_le16(chan->scid);
597 rsp.result = cpu_to_le16(result); 590 rsp.result = cpu_to_le16(result);
598 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO); 591 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
599 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, 592 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
600 sizeof(rsp), &rsp); 593 sizeof(rsp), &rsp);
601 } 594 }
@@ -609,9 +602,8 @@ void l2cap_chan_close(struct l2cap_chan *chan, int reason)
609 break; 602 break;
610 603
611 default: 604 default:
612 lock_sock(sk); 605 if (chan->ops->teardown)
613 sock_set_flag(sk, SOCK_ZAPPED); 606 chan->ops->teardown(chan, 0);
614 release_sock(sk);
615 break; 607 break;
616 } 608 }
617} 609}
@@ -627,7 +619,7 @@ static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
627 default: 619 default:
628 return HCI_AT_NO_BONDING; 620 return HCI_AT_NO_BONDING;
629 } 621 }
630 } else if (chan->psm == cpu_to_le16(0x0001)) { 622 } else if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_SDP)) {
631 if (chan->sec_level == BT_SECURITY_LOW) 623 if (chan->sec_level == BT_SECURITY_LOW)
632 chan->sec_level = BT_SECURITY_SDP; 624 chan->sec_level = BT_SECURITY_SDP;
633 625
@@ -773,9 +765,11 @@ static inline void __unpack_control(struct l2cap_chan *chan,
773 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) { 765 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
774 __unpack_extended_control(get_unaligned_le32(skb->data), 766 __unpack_extended_control(get_unaligned_le32(skb->data),
775 &bt_cb(skb)->control); 767 &bt_cb(skb)->control);
768 skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
776 } else { 769 } else {
777 __unpack_enhanced_control(get_unaligned_le16(skb->data), 770 __unpack_enhanced_control(get_unaligned_le16(skb->data),
778 &bt_cb(skb)->control); 771 &bt_cb(skb)->control);
772 skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
779 } 773 }
780} 774}
781 775
@@ -830,66 +824,102 @@ static inline void __pack_control(struct l2cap_chan *chan,
830 } 824 }
831} 825}
832 826
833static inline void l2cap_send_sframe(struct l2cap_chan *chan, u32 control) 827static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
834{ 828{
835 struct sk_buff *skb;
836 struct l2cap_hdr *lh;
837 struct l2cap_conn *conn = chan->conn;
838 int count, hlen;
839
840 if (chan->state != BT_CONNECTED)
841 return;
842
843 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) 829 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
844 hlen = L2CAP_EXT_HDR_SIZE; 830 return L2CAP_EXT_HDR_SIZE;
845 else 831 else
846 hlen = L2CAP_ENH_HDR_SIZE; 832 return L2CAP_ENH_HDR_SIZE;
833}
834
835static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
836 u32 control)
837{
838 struct sk_buff *skb;
839 struct l2cap_hdr *lh;
840 int hlen = __ertm_hdr_size(chan);
847 841
848 if (chan->fcs == L2CAP_FCS_CRC16) 842 if (chan->fcs == L2CAP_FCS_CRC16)
849 hlen += L2CAP_FCS_SIZE; 843 hlen += L2CAP_FCS_SIZE;
850 844
851 BT_DBG("chan %p, control 0x%8.8x", chan, control); 845 skb = bt_skb_alloc(hlen, GFP_KERNEL);
852
853 count = min_t(unsigned int, conn->mtu, hlen);
854
855 control |= __set_sframe(chan);
856 846
857 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
858 control |= __set_ctrl_final(chan);
859
860 if (test_and_clear_bit(CONN_SEND_PBIT, &chan->conn_state))
861 control |= __set_ctrl_poll(chan);
862
863 skb = bt_skb_alloc(count, GFP_ATOMIC);
864 if (!skb) 847 if (!skb)
865 return; 848 return ERR_PTR(-ENOMEM);
866 849
867 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE); 850 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
868 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE); 851 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
869 lh->cid = cpu_to_le16(chan->dcid); 852 lh->cid = cpu_to_le16(chan->dcid);
870 853
871 __put_control(chan, control, skb_put(skb, __ctrl_size(chan))); 854 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
855 put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
856 else
857 put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
872 858
873 if (chan->fcs == L2CAP_FCS_CRC16) { 859 if (chan->fcs == L2CAP_FCS_CRC16) {
874 u16 fcs = crc16(0, (u8 *)lh, count - L2CAP_FCS_SIZE); 860 u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
875 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE)); 861 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
876 } 862 }
877 863
878 skb->priority = HCI_PRIO_MAX; 864 skb->priority = HCI_PRIO_MAX;
879 l2cap_do_send(chan, skb); 865 return skb;
880} 866}
881 867
882static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u32 control) 868static void l2cap_send_sframe(struct l2cap_chan *chan,
869 struct l2cap_ctrl *control)
883{ 870{
884 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) { 871 struct sk_buff *skb;
885 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR); 872 u32 control_field;
873
874 BT_DBG("chan %p, control %p", chan, control);
875
876 if (!control->sframe)
877 return;
878
879 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
880 !control->poll)
881 control->final = 1;
882
883 if (control->super == L2CAP_SUPER_RR)
884 clear_bit(CONN_RNR_SENT, &chan->conn_state);
885 else if (control->super == L2CAP_SUPER_RNR)
886 set_bit(CONN_RNR_SENT, &chan->conn_state); 886 set_bit(CONN_RNR_SENT, &chan->conn_state);
887 } else
888 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
889 887
890 control |= __set_reqseq(chan, chan->buffer_seq); 888 if (control->super != L2CAP_SUPER_SREJ) {
889 chan->last_acked_seq = control->reqseq;
890 __clear_ack_timer(chan);
891 }
891 892
892 l2cap_send_sframe(chan, control); 893 BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
894 control->final, control->poll, control->super);
895
896 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
897 control_field = __pack_extended_control(control);
898 else
899 control_field = __pack_enhanced_control(control);
900
901 skb = l2cap_create_sframe_pdu(chan, control_field);
902 if (!IS_ERR(skb))
903 l2cap_do_send(chan, skb);
904}
905
906static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
907{
908 struct l2cap_ctrl control;
909
910 BT_DBG("chan %p, poll %d", chan, poll);
911
912 memset(&control, 0, sizeof(control));
913 control.sframe = 1;
914 control.poll = poll;
915
916 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
917 control.super = L2CAP_SUPER_RNR;
918 else
919 control.super = L2CAP_SUPER_RR;
920
921 control.reqseq = chan->buffer_seq;
922 l2cap_send_sframe(chan, &control);
893} 923}
894 924
895static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan) 925static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
@@ -914,25 +944,13 @@ static void l2cap_send_conn_req(struct l2cap_chan *chan)
914 944
915static void l2cap_chan_ready(struct l2cap_chan *chan) 945static void l2cap_chan_ready(struct l2cap_chan *chan)
916{ 946{
917 struct sock *sk = chan->sk; 947 /* This clears all conf flags, including CONF_NOT_COMPLETE */
918 struct sock *parent;
919
920 lock_sock(sk);
921
922 parent = bt_sk(sk)->parent;
923
924 BT_DBG("sk %p, parent %p", sk, parent);
925
926 chan->conf_state = 0; 948 chan->conf_state = 0;
927 __clear_chan_timer(chan); 949 __clear_chan_timer(chan);
928 950
929 __l2cap_state_change(chan, BT_CONNECTED); 951 chan->state = BT_CONNECTED;
930 sk->sk_state_change(sk);
931
932 if (parent)
933 parent->sk_data_ready(parent, 0);
934 952
935 release_sock(sk); 953 chan->ops->ready(chan);
936} 954}
937 955
938static void l2cap_do_start(struct l2cap_chan *chan) 956static void l2cap_do_start(struct l2cap_chan *chan)
@@ -953,7 +971,7 @@ static void l2cap_do_start(struct l2cap_chan *chan)
953 l2cap_send_conn_req(chan); 971 l2cap_send_conn_req(chan);
954 } else { 972 } else {
955 struct l2cap_info_req req; 973 struct l2cap_info_req req;
956 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK); 974 req.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
957 975
958 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT; 976 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
959 conn->info_ident = l2cap_get_ident(conn); 977 conn->info_ident = l2cap_get_ident(conn);
@@ -995,6 +1013,11 @@ static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *c
995 __clear_ack_timer(chan); 1013 __clear_ack_timer(chan);
996 } 1014 }
997 1015
1016 if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
1017 __l2cap_state_change(chan, BT_DISCONN);
1018 return;
1019 }
1020
998 req.dcid = cpu_to_le16(chan->dcid); 1021 req.dcid = cpu_to_le16(chan->dcid);
999 req.scid = cpu_to_le16(chan->scid); 1022 req.scid = cpu_to_le16(chan->scid);
1000 l2cap_send_cmd(conn, l2cap_get_ident(conn), 1023 l2cap_send_cmd(conn, l2cap_get_ident(conn),
@@ -1053,20 +1076,20 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
1053 if (test_bit(BT_SK_DEFER_SETUP, 1076 if (test_bit(BT_SK_DEFER_SETUP,
1054 &bt_sk(sk)->flags)) { 1077 &bt_sk(sk)->flags)) {
1055 struct sock *parent = bt_sk(sk)->parent; 1078 struct sock *parent = bt_sk(sk)->parent;
1056 rsp.result = cpu_to_le16(L2CAP_CR_PEND); 1079 rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
1057 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND); 1080 rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
1058 if (parent) 1081 if (parent)
1059 parent->sk_data_ready(parent, 0); 1082 parent->sk_data_ready(parent, 0);
1060 1083
1061 } else { 1084 } else {
1062 __l2cap_state_change(chan, BT_CONFIG); 1085 __l2cap_state_change(chan, BT_CONFIG);
1063 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS); 1086 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
1064 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO); 1087 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
1065 } 1088 }
1066 release_sock(sk); 1089 release_sock(sk);
1067 } else { 1090 } else {
1068 rsp.result = cpu_to_le16(L2CAP_CR_PEND); 1091 rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
1069 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND); 1092 rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1070 } 1093 }
1071 1094
1072 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, 1095 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
@@ -1150,13 +1173,7 @@ static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1150 1173
1151 lock_sock(parent); 1174 lock_sock(parent);
1152 1175
1153 /* Check for backlog size */ 1176 chan = pchan->ops->new_connection(pchan);
1154 if (sk_acceptq_is_full(parent)) {
1155 BT_DBG("backlog full %d", parent->sk_ack_backlog);
1156 goto clean;
1157 }
1158
1159 chan = pchan->ops->new_connection(pchan->data);
1160 if (!chan) 1177 if (!chan)
1161 goto clean; 1178 goto clean;
1162 1179
@@ -1171,10 +1188,7 @@ static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1171 1188
1172 l2cap_chan_add(conn, chan); 1189 l2cap_chan_add(conn, chan);
1173 1190
1174 __set_chan_timer(chan, sk->sk_sndtimeo); 1191 l2cap_chan_ready(chan);
1175
1176 __l2cap_state_change(chan, BT_CONNECTED);
1177 parent->sk_data_ready(parent, 0);
1178 1192
1179clean: 1193clean:
1180 release_sock(parent); 1194 release_sock(parent);
@@ -1198,6 +1212,11 @@ static void l2cap_conn_ready(struct l2cap_conn *conn)
1198 1212
1199 l2cap_chan_lock(chan); 1213 l2cap_chan_lock(chan);
1200 1214
1215 if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
1216 l2cap_chan_unlock(chan);
1217 continue;
1218 }
1219
1201 if (conn->hcon->type == LE_LINK) { 1220 if (conn->hcon->type == LE_LINK) {
1202 if (smp_conn_security(conn, chan->sec_level)) 1221 if (smp_conn_security(conn, chan->sec_level))
1203 l2cap_chan_ready(chan); 1222 l2cap_chan_ready(chan);
@@ -1270,7 +1289,7 @@ static void l2cap_conn_del(struct hci_conn *hcon, int err)
1270 1289
1271 l2cap_chan_unlock(chan); 1290 l2cap_chan_unlock(chan);
1272 1291
1273 chan->ops->close(chan->data); 1292 chan->ops->close(chan);
1274 l2cap_chan_put(chan); 1293 l2cap_chan_put(chan);
1275 } 1294 }
1276 1295
@@ -1444,21 +1463,17 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
1444 goto done; 1463 goto done;
1445 } 1464 }
1446 1465
1447 lock_sock(sk); 1466 switch (chan->state) {
1448
1449 switch (sk->sk_state) {
1450 case BT_CONNECT: 1467 case BT_CONNECT:
1451 case BT_CONNECT2: 1468 case BT_CONNECT2:
1452 case BT_CONFIG: 1469 case BT_CONFIG:
1453 /* Already connecting */ 1470 /* Already connecting */
1454 err = 0; 1471 err = 0;
1455 release_sock(sk);
1456 goto done; 1472 goto done;
1457 1473
1458 case BT_CONNECTED: 1474 case BT_CONNECTED:
1459 /* Already connected */ 1475 /* Already connected */
1460 err = -EISCONN; 1476 err = -EISCONN;
1461 release_sock(sk);
1462 goto done; 1477 goto done;
1463 1478
1464 case BT_OPEN: 1479 case BT_OPEN:
@@ -1468,13 +1483,12 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
1468 1483
1469 default: 1484 default:
1470 err = -EBADFD; 1485 err = -EBADFD;
1471 release_sock(sk);
1472 goto done; 1486 goto done;
1473 } 1487 }
1474 1488
1475 /* Set destination address and psm */ 1489 /* Set destination address and psm */
1490 lock_sock(sk);
1476 bacpy(&bt_sk(sk)->dst, dst); 1491 bacpy(&bt_sk(sk)->dst, dst);
1477
1478 release_sock(sk); 1492 release_sock(sk);
1479 1493
1480 chan->psm = psm; 1494 chan->psm = psm;
@@ -1576,23 +1590,20 @@ int __l2cap_wait_ack(struct sock *sk)
1576static void l2cap_monitor_timeout(struct work_struct *work) 1590static void l2cap_monitor_timeout(struct work_struct *work)
1577{ 1591{
1578 struct l2cap_chan *chan = container_of(work, struct l2cap_chan, 1592 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1579 monitor_timer.work); 1593 monitor_timer.work);
1580 1594
1581 BT_DBG("chan %p", chan); 1595 BT_DBG("chan %p", chan);
1582 1596
1583 l2cap_chan_lock(chan); 1597 l2cap_chan_lock(chan);
1584 1598
1585 if (chan->retry_count >= chan->remote_max_tx) { 1599 if (!chan->conn) {
1586 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1587 l2cap_chan_unlock(chan); 1600 l2cap_chan_unlock(chan);
1588 l2cap_chan_put(chan); 1601 l2cap_chan_put(chan);
1589 return; 1602 return;
1590 } 1603 }
1591 1604
1592 chan->retry_count++; 1605 l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
1593 __set_monitor_timer(chan);
1594 1606
1595 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1596 l2cap_chan_unlock(chan); 1607 l2cap_chan_unlock(chan);
1597 l2cap_chan_put(chan); 1608 l2cap_chan_put(chan);
1598} 1609}
@@ -1600,234 +1611,293 @@ static void l2cap_monitor_timeout(struct work_struct *work)
1600static void l2cap_retrans_timeout(struct work_struct *work) 1611static void l2cap_retrans_timeout(struct work_struct *work)
1601{ 1612{
1602 struct l2cap_chan *chan = container_of(work, struct l2cap_chan, 1613 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1603 retrans_timer.work); 1614 retrans_timer.work);
1604 1615
1605 BT_DBG("chan %p", chan); 1616 BT_DBG("chan %p", chan);
1606 1617
1607 l2cap_chan_lock(chan); 1618 l2cap_chan_lock(chan);
1608 1619
1609 chan->retry_count = 1; 1620 if (!chan->conn) {
1610 __set_monitor_timer(chan); 1621 l2cap_chan_unlock(chan);
1611 1622 l2cap_chan_put(chan);
1612 set_bit(CONN_WAIT_F, &chan->conn_state); 1623 return;
1613 1624 }
1614 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1615 1625
1626 l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
1616 l2cap_chan_unlock(chan); 1627 l2cap_chan_unlock(chan);
1617 l2cap_chan_put(chan); 1628 l2cap_chan_put(chan);
1618} 1629}
1619 1630
1620static void l2cap_drop_acked_frames(struct l2cap_chan *chan) 1631static void l2cap_streaming_send(struct l2cap_chan *chan,
1632 struct sk_buff_head *skbs)
1621{ 1633{
1622 struct sk_buff *skb; 1634 struct sk_buff *skb;
1635 struct l2cap_ctrl *control;
1623 1636
1624 while ((skb = skb_peek(&chan->tx_q)) && 1637 BT_DBG("chan %p, skbs %p", chan, skbs);
1625 chan->unacked_frames) {
1626 if (bt_cb(skb)->control.txseq == chan->expected_ack_seq)
1627 break;
1628 1638
1629 skb = skb_dequeue(&chan->tx_q); 1639 skb_queue_splice_tail_init(skbs, &chan->tx_q);
1630 kfree_skb(skb);
1631 1640
1632 chan->unacked_frames--; 1641 while (!skb_queue_empty(&chan->tx_q)) {
1633 }
1634 1642
1635 if (!chan->unacked_frames) 1643 skb = skb_dequeue(&chan->tx_q);
1636 __clear_retrans_timer(chan);
1637}
1638 1644
1639static void l2cap_streaming_send(struct l2cap_chan *chan) 1645 bt_cb(skb)->control.retries = 1;
1640{ 1646 control = &bt_cb(skb)->control;
1641 struct sk_buff *skb;
1642 u32 control;
1643 u16 fcs;
1644 1647
1645 while ((skb = skb_dequeue(&chan->tx_q))) { 1648 control->reqseq = 0;
1646 control = __get_control(chan, skb->data + L2CAP_HDR_SIZE); 1649 control->txseq = chan->next_tx_seq;
1647 control |= __set_txseq(chan, chan->next_tx_seq); 1650
1648 control |= __set_ctrl_sar(chan, bt_cb(skb)->control.sar); 1651 __pack_control(chan, control, skb);
1649 __put_control(chan, control, skb->data + L2CAP_HDR_SIZE);
1650 1652
1651 if (chan->fcs == L2CAP_FCS_CRC16) { 1653 if (chan->fcs == L2CAP_FCS_CRC16) {
1652 fcs = crc16(0, (u8 *)skb->data, 1654 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1653 skb->len - L2CAP_FCS_SIZE); 1655 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1654 put_unaligned_le16(fcs,
1655 skb->data + skb->len - L2CAP_FCS_SIZE);
1656 } 1656 }
1657 1657
1658 l2cap_do_send(chan, skb); 1658 l2cap_do_send(chan, skb);
1659 1659
1660 BT_DBG("Sent txseq %d", (int)control->txseq);
1661
1660 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq); 1662 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1663 chan->frames_sent++;
1661 } 1664 }
1662} 1665}
1663 1666
1664static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u16 tx_seq) 1667static int l2cap_ertm_send(struct l2cap_chan *chan)
1665{ 1668{
1666 struct sk_buff *skb, *tx_skb; 1669 struct sk_buff *skb, *tx_skb;
1667 u16 fcs; 1670 struct l2cap_ctrl *control;
1668 u32 control; 1671 int sent = 0;
1669 1672
1670 skb = skb_peek(&chan->tx_q); 1673 BT_DBG("chan %p", chan);
1671 if (!skb)
1672 return;
1673 1674
1674 while (bt_cb(skb)->control.txseq != tx_seq) { 1675 if (chan->state != BT_CONNECTED)
1675 if (skb_queue_is_last(&chan->tx_q, skb)) 1676 return -ENOTCONN;
1676 return;
1677 1677
1678 skb = skb_queue_next(&chan->tx_q, skb); 1678 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1679 } 1679 return 0;
1680 1680
1681 if (bt_cb(skb)->control.retries == chan->remote_max_tx && 1681 while (chan->tx_send_head &&
1682 chan->remote_max_tx) { 1682 chan->unacked_frames < chan->remote_tx_win &&
1683 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED); 1683 chan->tx_state == L2CAP_TX_STATE_XMIT) {
1684 return;
1685 }
1686 1684
1687 tx_skb = skb_clone(skb, GFP_ATOMIC); 1685 skb = chan->tx_send_head;
1688 bt_cb(skb)->control.retries++;
1689 1686
1690 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE); 1687 bt_cb(skb)->control.retries = 1;
1691 control &= __get_sar_mask(chan); 1688 control = &bt_cb(skb)->control;
1692 1689
1693 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state)) 1690 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1694 control |= __set_ctrl_final(chan); 1691 control->final = 1;
1695 1692
1696 control |= __set_reqseq(chan, chan->buffer_seq); 1693 control->reqseq = chan->buffer_seq;
1697 control |= __set_txseq(chan, tx_seq); 1694 chan->last_acked_seq = chan->buffer_seq;
1695 control->txseq = chan->next_tx_seq;
1698 1696
1699 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE); 1697 __pack_control(chan, control, skb);
1700 1698
1701 if (chan->fcs == L2CAP_FCS_CRC16) { 1699 if (chan->fcs == L2CAP_FCS_CRC16) {
1702 fcs = crc16(0, (u8 *)tx_skb->data, 1700 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1703 tx_skb->len - L2CAP_FCS_SIZE); 1701 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1704 put_unaligned_le16(fcs, 1702 }
1705 tx_skb->data + tx_skb->len - L2CAP_FCS_SIZE); 1703
1704 /* Clone after data has been modified. Data is assumed to be
1705 read-only (for locking purposes) on cloned sk_buffs.
1706 */
1707 tx_skb = skb_clone(skb, GFP_KERNEL);
1708
1709 if (!tx_skb)
1710 break;
1711
1712 __set_retrans_timer(chan);
1713
1714 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1715 chan->unacked_frames++;
1716 chan->frames_sent++;
1717 sent++;
1718
1719 if (skb_queue_is_last(&chan->tx_q, skb))
1720 chan->tx_send_head = NULL;
1721 else
1722 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1723
1724 l2cap_do_send(chan, tx_skb);
1725 BT_DBG("Sent txseq %d", (int)control->txseq);
1706 } 1726 }
1707 1727
1708 l2cap_do_send(chan, tx_skb); 1728 BT_DBG("Sent %d, %d unacked, %d in ERTM queue", sent,
1729 (int) chan->unacked_frames, skb_queue_len(&chan->tx_q));
1730
1731 return sent;
1709} 1732}
1710 1733
1711static int l2cap_ertm_send(struct l2cap_chan *chan) 1734static void l2cap_ertm_resend(struct l2cap_chan *chan)
1712{ 1735{
1713 struct sk_buff *skb, *tx_skb; 1736 struct l2cap_ctrl control;
1714 u16 fcs; 1737 struct sk_buff *skb;
1715 u32 control; 1738 struct sk_buff *tx_skb;
1716 int nsent = 0; 1739 u16 seq;
1717 1740
1718 if (chan->state != BT_CONNECTED) 1741 BT_DBG("chan %p", chan);
1719 return -ENOTCONN;
1720 1742
1721 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) 1743 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1722 return 0; 1744 return;
1723 1745
1724 while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) { 1746 while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
1747 seq = l2cap_seq_list_pop(&chan->retrans_list);
1725 1748
1726 if (bt_cb(skb)->control.retries == chan->remote_max_tx && 1749 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
1727 chan->remote_max_tx) { 1750 if (!skb) {
1728 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED); 1751 BT_DBG("Error: Can't retransmit seq %d, frame missing",
1729 break; 1752 seq);
1753 continue;
1730 } 1754 }
1731 1755
1732 tx_skb = skb_clone(skb, GFP_ATOMIC);
1733
1734 bt_cb(skb)->control.retries++; 1756 bt_cb(skb)->control.retries++;
1757 control = bt_cb(skb)->control;
1735 1758
1736 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE); 1759 if (chan->max_tx != 0 &&
1737 control &= __get_sar_mask(chan); 1760 bt_cb(skb)->control.retries > chan->max_tx) {
1761 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
1762 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
1763 l2cap_seq_list_clear(&chan->retrans_list);
1764 break;
1765 }
1738 1766
1767 control.reqseq = chan->buffer_seq;
1739 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state)) 1768 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1740 control |= __set_ctrl_final(chan); 1769 control.final = 1;
1770 else
1771 control.final = 0;
1741 1772
1742 control |= __set_reqseq(chan, chan->buffer_seq); 1773 if (skb_cloned(skb)) {
1743 control |= __set_txseq(chan, chan->next_tx_seq); 1774 /* Cloned sk_buffs are read-only, so we need a
1744 control |= __set_ctrl_sar(chan, bt_cb(skb)->control.sar); 1775 * writeable copy
1776 */
1777 tx_skb = skb_copy(skb, GFP_ATOMIC);
1778 } else {
1779 tx_skb = skb_clone(skb, GFP_ATOMIC);
1780 }
1745 1781
1746 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE); 1782 if (!tx_skb) {
1783 l2cap_seq_list_clear(&chan->retrans_list);
1784 break;
1785 }
1786
1787 /* Update skb contents */
1788 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1789 put_unaligned_le32(__pack_extended_control(&control),
1790 tx_skb->data + L2CAP_HDR_SIZE);
1791 } else {
1792 put_unaligned_le16(__pack_enhanced_control(&control),
1793 tx_skb->data + L2CAP_HDR_SIZE);
1794 }
1747 1795
1748 if (chan->fcs == L2CAP_FCS_CRC16) { 1796 if (chan->fcs == L2CAP_FCS_CRC16) {
1749 fcs = crc16(0, (u8 *)skb->data, 1797 u16 fcs = crc16(0, (u8 *) tx_skb->data, tx_skb->len);
1750 tx_skb->len - L2CAP_FCS_SIZE); 1798 put_unaligned_le16(fcs, skb_put(tx_skb,
1751 put_unaligned_le16(fcs, skb->data + 1799 L2CAP_FCS_SIZE));
1752 tx_skb->len - L2CAP_FCS_SIZE);
1753 } 1800 }
1754 1801
1755 l2cap_do_send(chan, tx_skb); 1802 l2cap_do_send(chan, tx_skb);
1756 1803
1757 __set_retrans_timer(chan); 1804 BT_DBG("Resent txseq %d", control.txseq);
1758
1759 bt_cb(skb)->control.txseq = chan->next_tx_seq;
1760
1761 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1762
1763 if (bt_cb(skb)->control.retries == 1) {
1764 chan->unacked_frames++;
1765
1766 if (!nsent++)
1767 __clear_ack_timer(chan);
1768 }
1769
1770 chan->frames_sent++;
1771 1805
1772 if (skb_queue_is_last(&chan->tx_q, skb)) 1806 chan->last_acked_seq = chan->buffer_seq;
1773 chan->tx_send_head = NULL;
1774 else
1775 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1776 } 1807 }
1777
1778 return nsent;
1779} 1808}
1780 1809
1781static int l2cap_retransmit_frames(struct l2cap_chan *chan) 1810static void l2cap_retransmit(struct l2cap_chan *chan,
1811 struct l2cap_ctrl *control)
1782{ 1812{
1783 int ret; 1813 BT_DBG("chan %p, control %p", chan, control);
1784
1785 if (!skb_queue_empty(&chan->tx_q))
1786 chan->tx_send_head = chan->tx_q.next;
1787 1814
1788 chan->next_tx_seq = chan->expected_ack_seq; 1815 l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
1789 ret = l2cap_ertm_send(chan); 1816 l2cap_ertm_resend(chan);
1790 return ret;
1791} 1817}
1792 1818
1793static void __l2cap_send_ack(struct l2cap_chan *chan) 1819static void l2cap_retransmit_all(struct l2cap_chan *chan,
1820 struct l2cap_ctrl *control)
1794{ 1821{
1795 u32 control = 0; 1822 struct sk_buff *skb;
1796 1823
1797 control |= __set_reqseq(chan, chan->buffer_seq); 1824 BT_DBG("chan %p, control %p", chan, control);
1798 1825
1799 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) { 1826 if (control->poll)
1800 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR); 1827 set_bit(CONN_SEND_FBIT, &chan->conn_state);
1801 set_bit(CONN_RNR_SENT, &chan->conn_state);
1802 l2cap_send_sframe(chan, control);
1803 return;
1804 }
1805 1828
1806 if (l2cap_ertm_send(chan) > 0) 1829 l2cap_seq_list_clear(&chan->retrans_list);
1830
1831 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1807 return; 1832 return;
1808 1833
1809 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR); 1834 if (chan->unacked_frames) {
1810 l2cap_send_sframe(chan, control); 1835 skb_queue_walk(&chan->tx_q, skb) {
1836 if (bt_cb(skb)->control.txseq == control->reqseq ||
1837 skb == chan->tx_send_head)
1838 break;
1839 }
1840
1841 skb_queue_walk_from(&chan->tx_q, skb) {
1842 if (skb == chan->tx_send_head)
1843 break;
1844
1845 l2cap_seq_list_append(&chan->retrans_list,
1846 bt_cb(skb)->control.txseq);
1847 }
1848
1849 l2cap_ertm_resend(chan);
1850 }
1811} 1851}
1812 1852
1813static void l2cap_send_ack(struct l2cap_chan *chan) 1853static void l2cap_send_ack(struct l2cap_chan *chan)
1814{ 1854{
1815 __clear_ack_timer(chan); 1855 struct l2cap_ctrl control;
1816 __l2cap_send_ack(chan); 1856 u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
1817} 1857 chan->last_acked_seq);
1858 int threshold;
1818 1859
1819static void l2cap_send_srejtail(struct l2cap_chan *chan) 1860 BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
1820{ 1861 chan, chan->last_acked_seq, chan->buffer_seq);
1821 struct srej_list *tail;
1822 u32 control;
1823 1862
1824 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ); 1863 memset(&control, 0, sizeof(control));
1825 control |= __set_ctrl_final(chan); 1864 control.sframe = 1;
1826 1865
1827 tail = list_entry((&chan->srej_l)->prev, struct srej_list, list); 1866 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
1828 control |= __set_reqseq(chan, tail->tx_seq); 1867 chan->rx_state == L2CAP_RX_STATE_RECV) {
1868 __clear_ack_timer(chan);
1869 control.super = L2CAP_SUPER_RNR;
1870 control.reqseq = chan->buffer_seq;
1871 l2cap_send_sframe(chan, &control);
1872 } else {
1873 if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
1874 l2cap_ertm_send(chan);
1875 /* If any i-frames were sent, they included an ack */
1876 if (chan->buffer_seq == chan->last_acked_seq)
1877 frames_to_ack = 0;
1878 }
1879
1880 /* Ack now if the tx window is 3/4ths full.
1881 * Calculate without mul or div
1882 */
1883 threshold = chan->tx_win;
1884 threshold += threshold << 1;
1885 threshold >>= 2;
1886
1887 BT_DBG("frames_to_ack %d, threshold %d", (int)frames_to_ack,
1888 threshold);
1889
1890 if (frames_to_ack >= threshold) {
1891 __clear_ack_timer(chan);
1892 control.super = L2CAP_SUPER_RR;
1893 control.reqseq = chan->buffer_seq;
1894 l2cap_send_sframe(chan, &control);
1895 frames_to_ack = 0;
1896 }
1829 1897
1830 l2cap_send_sframe(chan, control); 1898 if (frames_to_ack)
1899 __set_ack_timer(chan);
1900 }
1831} 1901}
1832 1902
1833static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan, 1903static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
@@ -1956,10 +2026,7 @@ static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
1956 if (!conn) 2026 if (!conn)
1957 return ERR_PTR(-ENOTCONN); 2027 return ERR_PTR(-ENOTCONN);
1958 2028
1959 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) 2029 hlen = __ertm_hdr_size(chan);
1960 hlen = L2CAP_EXT_HDR_SIZE;
1961 else
1962 hlen = L2CAP_ENH_HDR_SIZE;
1963 2030
1964 if (sdulen) 2031 if (sdulen)
1965 hlen += L2CAP_SDULEN_SIZE; 2032 hlen += L2CAP_SDULEN_SIZE;
@@ -1979,7 +2046,11 @@ static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
1979 lh->cid = cpu_to_le16(chan->dcid); 2046 lh->cid = cpu_to_le16(chan->dcid);
1980 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE)); 2047 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1981 2048
1982 __put_control(chan, 0, skb_put(skb, __ctrl_size(chan))); 2049 /* Control header is populated later */
2050 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2051 put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
2052 else
2053 put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
1983 2054
1984 if (sdulen) 2055 if (sdulen)
1985 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE)); 2056 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
@@ -1990,9 +2061,7 @@ static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
1990 return ERR_PTR(err); 2061 return ERR_PTR(err);
1991 } 2062 }
1992 2063
1993 if (chan->fcs == L2CAP_FCS_CRC16) 2064 bt_cb(skb)->control.fcs = chan->fcs;
1994 put_unaligned_le16(0, skb_put(skb, L2CAP_FCS_SIZE));
1995
1996 bt_cb(skb)->control.retries = 0; 2065 bt_cb(skb)->control.retries = 0;
1997 return skb; 2066 return skb;
1998} 2067}
@@ -2004,7 +2073,6 @@ static int l2cap_segment_sdu(struct l2cap_chan *chan,
2004 struct sk_buff *skb; 2073 struct sk_buff *skb;
2005 u16 sdu_len; 2074 u16 sdu_len;
2006 size_t pdu_len; 2075 size_t pdu_len;
2007 int err = 0;
2008 u8 sar; 2076 u8 sar;
2009 2077
2010 BT_DBG("chan %p, msg %p, len %d", chan, msg, (int)len); 2078 BT_DBG("chan %p, msg %p, len %d", chan, msg, (int)len);
@@ -2020,7 +2088,10 @@ static int l2cap_segment_sdu(struct l2cap_chan *chan,
2020 pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD); 2088 pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
2021 2089
2022 /* Adjust for largest possible L2CAP overhead. */ 2090 /* Adjust for largest possible L2CAP overhead. */
2023 pdu_len -= L2CAP_EXT_HDR_SIZE + L2CAP_FCS_SIZE; 2091 if (chan->fcs)
2092 pdu_len -= L2CAP_FCS_SIZE;
2093
2094 pdu_len -= __ertm_hdr_size(chan);
2024 2095
2025 /* Remote device may have requested smaller PDUs */ 2096 /* Remote device may have requested smaller PDUs */
2026 pdu_len = min_t(size_t, pdu_len, chan->remote_mps); 2097 pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
@@ -2060,7 +2131,7 @@ static int l2cap_segment_sdu(struct l2cap_chan *chan,
2060 } 2131 }
2061 } 2132 }
2062 2133
2063 return err; 2134 return 0;
2064} 2135}
2065 2136
2066int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len, 2137int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
@@ -2122,17 +2193,12 @@ int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
2122 if (err) 2193 if (err)
2123 break; 2194 break;
2124 2195
2125 if (chan->mode == L2CAP_MODE_ERTM && chan->tx_send_head == NULL)
2126 chan->tx_send_head = seg_queue.next;
2127 skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);
2128
2129 if (chan->mode == L2CAP_MODE_ERTM) 2196 if (chan->mode == L2CAP_MODE_ERTM)
2130 err = l2cap_ertm_send(chan); 2197 l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
2131 else 2198 else
2132 l2cap_streaming_send(chan); 2199 l2cap_streaming_send(chan, &seg_queue);
2133 2200
2134 if (err >= 0) 2201 err = len;
2135 err = len;
2136 2202
2137 /* If the skbs were not queued for sending, they'll still be in 2203 /* If the skbs were not queued for sending, they'll still be in
2138 * seg_queue and need to be purged. 2204 * seg_queue and need to be purged.
@@ -2148,6 +2214,296 @@ int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
2148 return err; 2214 return err;
2149} 2215}
2150 2216
2217static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2218{
2219 struct l2cap_ctrl control;
2220 u16 seq;
2221
2222 BT_DBG("chan %p, txseq %d", chan, txseq);
2223
2224 memset(&control, 0, sizeof(control));
2225 control.sframe = 1;
2226 control.super = L2CAP_SUPER_SREJ;
2227
2228 for (seq = chan->expected_tx_seq; seq != txseq;
2229 seq = __next_seq(chan, seq)) {
2230 if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2231 control.reqseq = seq;
2232 l2cap_send_sframe(chan, &control);
2233 l2cap_seq_list_append(&chan->srej_list, seq);
2234 }
2235 }
2236
2237 chan->expected_tx_seq = __next_seq(chan, txseq);
2238}
2239
2240static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2241{
2242 struct l2cap_ctrl control;
2243
2244 BT_DBG("chan %p", chan);
2245
2246 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2247 return;
2248
2249 memset(&control, 0, sizeof(control));
2250 control.sframe = 1;
2251 control.super = L2CAP_SUPER_SREJ;
2252 control.reqseq = chan->srej_list.tail;
2253 l2cap_send_sframe(chan, &control);
2254}
2255
2256static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2257{
2258 struct l2cap_ctrl control;
2259 u16 initial_head;
2260 u16 seq;
2261
2262 BT_DBG("chan %p, txseq %d", chan, txseq);
2263
2264 memset(&control, 0, sizeof(control));
2265 control.sframe = 1;
2266 control.super = L2CAP_SUPER_SREJ;
2267
2268 /* Capture initial list head to allow only one pass through the list. */
2269 initial_head = chan->srej_list.head;
2270
2271 do {
2272 seq = l2cap_seq_list_pop(&chan->srej_list);
2273 if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2274 break;
2275
2276 control.reqseq = seq;
2277 l2cap_send_sframe(chan, &control);
2278 l2cap_seq_list_append(&chan->srej_list, seq);
2279 } while (chan->srej_list.head != initial_head);
2280}
2281
2282static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2283{
2284 struct sk_buff *acked_skb;
2285 u16 ackseq;
2286
2287 BT_DBG("chan %p, reqseq %d", chan, reqseq);
2288
2289 if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2290 return;
2291
2292 BT_DBG("expected_ack_seq %d, unacked_frames %d",
2293 chan->expected_ack_seq, chan->unacked_frames);
2294
2295 for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2296 ackseq = __next_seq(chan, ackseq)) {
2297
2298 acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2299 if (acked_skb) {
2300 skb_unlink(acked_skb, &chan->tx_q);
2301 kfree_skb(acked_skb);
2302 chan->unacked_frames--;
2303 }
2304 }
2305
2306 chan->expected_ack_seq = reqseq;
2307
2308 if (chan->unacked_frames == 0)
2309 __clear_retrans_timer(chan);
2310
2311 BT_DBG("unacked_frames %d", (int) chan->unacked_frames);
2312}
2313
2314static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2315{
2316 BT_DBG("chan %p", chan);
2317
2318 chan->expected_tx_seq = chan->buffer_seq;
2319 l2cap_seq_list_clear(&chan->srej_list);
2320 skb_queue_purge(&chan->srej_q);
2321 chan->rx_state = L2CAP_RX_STATE_RECV;
2322}
2323
2324static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
2325 struct l2cap_ctrl *control,
2326 struct sk_buff_head *skbs, u8 event)
2327{
2328 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2329 event);
2330
2331 switch (event) {
2332 case L2CAP_EV_DATA_REQUEST:
2333 if (chan->tx_send_head == NULL)
2334 chan->tx_send_head = skb_peek(skbs);
2335
2336 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2337 l2cap_ertm_send(chan);
2338 break;
2339 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2340 BT_DBG("Enter LOCAL_BUSY");
2341 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2342
2343 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2344 /* The SREJ_SENT state must be aborted if we are to
2345 * enter the LOCAL_BUSY state.
2346 */
2347 l2cap_abort_rx_srej_sent(chan);
2348 }
2349
2350 l2cap_send_ack(chan);
2351
2352 break;
2353 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2354 BT_DBG("Exit LOCAL_BUSY");
2355 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2356
2357 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2358 struct l2cap_ctrl local_control;
2359
2360 memset(&local_control, 0, sizeof(local_control));
2361 local_control.sframe = 1;
2362 local_control.super = L2CAP_SUPER_RR;
2363 local_control.poll = 1;
2364 local_control.reqseq = chan->buffer_seq;
2365 l2cap_send_sframe(chan, &local_control);
2366
2367 chan->retry_count = 1;
2368 __set_monitor_timer(chan);
2369 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2370 }
2371 break;
2372 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2373 l2cap_process_reqseq(chan, control->reqseq);
2374 break;
2375 case L2CAP_EV_EXPLICIT_POLL:
2376 l2cap_send_rr_or_rnr(chan, 1);
2377 chan->retry_count = 1;
2378 __set_monitor_timer(chan);
2379 __clear_ack_timer(chan);
2380 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2381 break;
2382 case L2CAP_EV_RETRANS_TO:
2383 l2cap_send_rr_or_rnr(chan, 1);
2384 chan->retry_count = 1;
2385 __set_monitor_timer(chan);
2386 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2387 break;
2388 case L2CAP_EV_RECV_FBIT:
2389 /* Nothing to process */
2390 break;
2391 default:
2392 break;
2393 }
2394}
2395
2396static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2397 struct l2cap_ctrl *control,
2398 struct sk_buff_head *skbs, u8 event)
2399{
2400 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2401 event);
2402
2403 switch (event) {
2404 case L2CAP_EV_DATA_REQUEST:
2405 if (chan->tx_send_head == NULL)
2406 chan->tx_send_head = skb_peek(skbs);
2407 /* Queue data, but don't send. */
2408 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2409 break;
2410 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2411 BT_DBG("Enter LOCAL_BUSY");
2412 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2413
2414 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2415 /* The SREJ_SENT state must be aborted if we are to
2416 * enter the LOCAL_BUSY state.
2417 */
2418 l2cap_abort_rx_srej_sent(chan);
2419 }
2420
2421 l2cap_send_ack(chan);
2422
2423 break;
2424 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2425 BT_DBG("Exit LOCAL_BUSY");
2426 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2427
2428 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2429 struct l2cap_ctrl local_control;
2430 memset(&local_control, 0, sizeof(local_control));
2431 local_control.sframe = 1;
2432 local_control.super = L2CAP_SUPER_RR;
2433 local_control.poll = 1;
2434 local_control.reqseq = chan->buffer_seq;
2435 l2cap_send_sframe(chan, &local_control);
2436
2437 chan->retry_count = 1;
2438 __set_monitor_timer(chan);
2439 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2440 }
2441 break;
2442 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2443 l2cap_process_reqseq(chan, control->reqseq);
2444
2445 /* Fall through */
2446
2447 case L2CAP_EV_RECV_FBIT:
2448 if (control && control->final) {
2449 __clear_monitor_timer(chan);
2450 if (chan->unacked_frames > 0)
2451 __set_retrans_timer(chan);
2452 chan->retry_count = 0;
2453 chan->tx_state = L2CAP_TX_STATE_XMIT;
2454 BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
2455 }
2456 break;
2457 case L2CAP_EV_EXPLICIT_POLL:
2458 /* Ignore */
2459 break;
2460 case L2CAP_EV_MONITOR_TO:
2461 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2462 l2cap_send_rr_or_rnr(chan, 1);
2463 __set_monitor_timer(chan);
2464 chan->retry_count++;
2465 } else {
2466 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
2467 }
2468 break;
2469 default:
2470 break;
2471 }
2472}
2473
2474static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2475 struct sk_buff_head *skbs, u8 event)
2476{
2477 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2478 chan, control, skbs, event, chan->tx_state);
2479
2480 switch (chan->tx_state) {
2481 case L2CAP_TX_STATE_XMIT:
2482 l2cap_tx_state_xmit(chan, control, skbs, event);
2483 break;
2484 case L2CAP_TX_STATE_WAIT_F:
2485 l2cap_tx_state_wait_f(chan, control, skbs, event);
2486 break;
2487 default:
2488 /* Ignore event */
2489 break;
2490 }
2491}
2492
2493static void l2cap_pass_to_tx(struct l2cap_chan *chan,
2494 struct l2cap_ctrl *control)
2495{
2496 BT_DBG("chan %p, control %p", chan, control);
2497 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
2498}
2499
2500static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
2501 struct l2cap_ctrl *control)
2502{
2503 BT_DBG("chan %p, control %p", chan, control);
2504 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
2505}
2506
2151/* Copy frame to all raw sockets on that connection */ 2507/* Copy frame to all raw sockets on that connection */
2152static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb) 2508static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2153{ 2509{
@@ -2170,7 +2526,7 @@ static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2170 if (!nskb) 2526 if (!nskb)
2171 continue; 2527 continue;
2172 2528
2173 if (chan->ops->recv(chan->data, nskb)) 2529 if (chan->ops->recv(chan, nskb))
2174 kfree_skb(nskb); 2530 kfree_skb(nskb);
2175 } 2531 }
2176 2532
@@ -2200,9 +2556,9 @@ static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
2200 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen); 2556 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2201 2557
2202 if (conn->hcon->type == LE_LINK) 2558 if (conn->hcon->type == LE_LINK)
2203 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING); 2559 lh->cid = __constant_cpu_to_le16(L2CAP_CID_LE_SIGNALING);
2204 else 2560 else
2205 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING); 2561 lh->cid = __constant_cpu_to_le16(L2CAP_CID_SIGNALING);
2206 2562
2207 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE); 2563 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2208 cmd->code = code; 2564 cmd->code = code;
@@ -2314,8 +2670,8 @@ static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
2314 efs.stype = chan->local_stype; 2670 efs.stype = chan->local_stype;
2315 efs.msdu = cpu_to_le16(chan->local_msdu); 2671 efs.msdu = cpu_to_le16(chan->local_msdu);
2316 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime); 2672 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2317 efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT); 2673 efs.acc_lat = __constant_cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
2318 efs.flush_to = cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO); 2674 efs.flush_to = __constant_cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO);
2319 break; 2675 break;
2320 2676
2321 case L2CAP_MODE_STREAMING: 2677 case L2CAP_MODE_STREAMING:
@@ -2338,20 +2694,24 @@ static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
2338static void l2cap_ack_timeout(struct work_struct *work) 2694static void l2cap_ack_timeout(struct work_struct *work)
2339{ 2695{
2340 struct l2cap_chan *chan = container_of(work, struct l2cap_chan, 2696 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2341 ack_timer.work); 2697 ack_timer.work);
2698 u16 frames_to_ack;
2342 2699
2343 BT_DBG("chan %p", chan); 2700 BT_DBG("chan %p", chan);
2344 2701
2345 l2cap_chan_lock(chan); 2702 l2cap_chan_lock(chan);
2346 2703
2347 __l2cap_send_ack(chan); 2704 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2705 chan->last_acked_seq);
2348 2706
2349 l2cap_chan_unlock(chan); 2707 if (frames_to_ack)
2708 l2cap_send_rr_or_rnr(chan, 0);
2350 2709
2710 l2cap_chan_unlock(chan);
2351 l2cap_chan_put(chan); 2711 l2cap_chan_put(chan);
2352} 2712}
2353 2713
2354static inline int l2cap_ertm_init(struct l2cap_chan *chan) 2714int l2cap_ertm_init(struct l2cap_chan *chan)
2355{ 2715{
2356 int err; 2716 int err;
2357 2717
@@ -2360,7 +2720,6 @@ static inline int l2cap_ertm_init(struct l2cap_chan *chan)
2360 chan->expected_ack_seq = 0; 2720 chan->expected_ack_seq = 0;
2361 chan->unacked_frames = 0; 2721 chan->unacked_frames = 0;
2362 chan->buffer_seq = 0; 2722 chan->buffer_seq = 0;
2363 chan->num_acked = 0;
2364 chan->frames_sent = 0; 2723 chan->frames_sent = 0;
2365 chan->last_acked_seq = 0; 2724 chan->last_acked_seq = 0;
2366 chan->sdu = NULL; 2725 chan->sdu = NULL;
@@ -2381,12 +2740,15 @@ static inline int l2cap_ertm_init(struct l2cap_chan *chan)
2381 2740
2382 skb_queue_head_init(&chan->srej_q); 2741 skb_queue_head_init(&chan->srej_q);
2383 2742
2384 INIT_LIST_HEAD(&chan->srej_l);
2385 err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win); 2743 err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
2386 if (err < 0) 2744 if (err < 0)
2387 return err; 2745 return err;
2388 2746
2389 return l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win); 2747 err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
2748 if (err < 0)
2749 l2cap_seq_list_free(&chan->srej_list);
2750
2751 return err;
2390} 2752}
2391 2753
2392static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask) 2754static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
@@ -2512,6 +2874,7 @@ done:
2512 break; 2874 break;
2513 2875
2514 case L2CAP_MODE_STREAMING: 2876 case L2CAP_MODE_STREAMING:
2877 l2cap_txwin_setup(chan);
2515 rfc.mode = L2CAP_MODE_STREAMING; 2878 rfc.mode = L2CAP_MODE_STREAMING;
2516 rfc.txwin_size = 0; 2879 rfc.txwin_size = 0;
2517 rfc.max_transmit = 0; 2880 rfc.max_transmit = 0;
@@ -2542,7 +2905,7 @@ done:
2542 } 2905 }
2543 2906
2544 req->dcid = cpu_to_le16(chan->dcid); 2907 req->dcid = cpu_to_le16(chan->dcid);
2545 req->flags = cpu_to_le16(0); 2908 req->flags = __constant_cpu_to_le16(0);
2546 2909
2547 return ptr - data; 2910 return ptr - data;
2548} 2911}
@@ -2762,7 +3125,7 @@ done:
2762 } 3125 }
2763 rsp->scid = cpu_to_le16(chan->dcid); 3126 rsp->scid = cpu_to_le16(chan->dcid);
2764 rsp->result = cpu_to_le16(result); 3127 rsp->result = cpu_to_le16(result);
2765 rsp->flags = cpu_to_le16(0x0000); 3128 rsp->flags = __constant_cpu_to_le16(0);
2766 3129
2767 return ptr - data; 3130 return ptr - data;
2768} 3131}
@@ -2861,7 +3224,7 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, voi
2861 } 3224 }
2862 3225
2863 req->dcid = cpu_to_le16(chan->dcid); 3226 req->dcid = cpu_to_le16(chan->dcid);
2864 req->flags = cpu_to_le16(0x0000); 3227 req->flags = __constant_cpu_to_le16(0);
2865 3228
2866 return ptr - data; 3229 return ptr - data;
2867} 3230}
@@ -2888,8 +3251,8 @@ void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
2888 3251
2889 rsp.scid = cpu_to_le16(chan->dcid); 3252 rsp.scid = cpu_to_le16(chan->dcid);
2890 rsp.dcid = cpu_to_le16(chan->scid); 3253 rsp.dcid = cpu_to_le16(chan->scid);
2891 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS); 3254 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
2892 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO); 3255 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
2893 l2cap_send_cmd(conn, chan->ident, 3256 l2cap_send_cmd(conn, chan->ident,
2894 L2CAP_CONN_RSP, sizeof(rsp), &rsp); 3257 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2895 3258
@@ -2929,8 +3292,8 @@ static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
2929 * did not send an RFC option. 3292 * did not send an RFC option.
2930 */ 3293 */
2931 rfc.mode = chan->mode; 3294 rfc.mode = chan->mode;
2932 rfc.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO); 3295 rfc.retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
2933 rfc.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO); 3296 rfc.monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
2934 rfc.max_pdu_size = cpu_to_le16(chan->imtu); 3297 rfc.max_pdu_size = cpu_to_le16(chan->imtu);
2935 3298
2936 BT_ERR("Expected RFC option was not found, using defaults"); 3299 BT_ERR("Expected RFC option was not found, using defaults");
@@ -2993,7 +3356,7 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd
2993 lock_sock(parent); 3356 lock_sock(parent);
2994 3357
2995 /* Check if the ACL is secure enough (if not SDP) */ 3358 /* Check if the ACL is secure enough (if not SDP) */
2996 if (psm != cpu_to_le16(0x0001) && 3359 if (psm != __constant_cpu_to_le16(L2CAP_PSM_SDP) &&
2997 !hci_conn_check_link_mode(conn->hcon)) { 3360 !hci_conn_check_link_mode(conn->hcon)) {
2998 conn->disc_reason = HCI_ERROR_AUTH_FAILURE; 3361 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
2999 result = L2CAP_CR_SEC_BLOCK; 3362 result = L2CAP_CR_SEC_BLOCK;
@@ -3002,25 +3365,16 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd
3002 3365
3003 result = L2CAP_CR_NO_MEM; 3366 result = L2CAP_CR_NO_MEM;
3004 3367
3005 /* Check for backlog size */ 3368 /* Check if we already have channel with that dcid */
3006 if (sk_acceptq_is_full(parent)) { 3369 if (__l2cap_get_chan_by_dcid(conn, scid))
3007 BT_DBG("backlog full %d", parent->sk_ack_backlog);
3008 goto response; 3370 goto response;
3009 }
3010 3371
3011 chan = pchan->ops->new_connection(pchan->data); 3372 chan = pchan->ops->new_connection(pchan);
3012 if (!chan) 3373 if (!chan)
3013 goto response; 3374 goto response;
3014 3375
3015 sk = chan->sk; 3376 sk = chan->sk;
3016 3377
3017 /* Check if we already have channel with that dcid */
3018 if (__l2cap_get_chan_by_dcid(conn, scid)) {
3019 sock_set_flag(sk, SOCK_ZAPPED);
3020 chan->ops->close(chan->data);
3021 goto response;
3022 }
3023
3024 hci_conn_hold(conn->hcon); 3378 hci_conn_hold(conn->hcon);
3025 3379
3026 bacpy(&bt_sk(sk)->src, conn->src); 3380 bacpy(&bt_sk(sk)->src, conn->src);
@@ -3074,7 +3428,7 @@ sendresp:
3074 3428
3075 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) { 3429 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
3076 struct l2cap_info_req info; 3430 struct l2cap_info_req info;
3077 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK); 3431 info.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
3078 3432
3079 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT; 3433 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
3080 conn->info_ident = l2cap_get_ident(conn); 3434 conn->info_ident = l2cap_get_ident(conn);
@@ -3196,7 +3550,7 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
3196 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) { 3550 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
3197 struct l2cap_cmd_rej_cid rej; 3551 struct l2cap_cmd_rej_cid rej;
3198 3552
3199 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID); 3553 rej.reason = __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID);
3200 rej.scid = cpu_to_le16(chan->scid); 3554 rej.scid = cpu_to_le16(chan->scid);
3201 rej.dcid = cpu_to_le16(chan->dcid); 3555 rej.dcid = cpu_to_le16(chan->dcid);
3202 3556
@@ -3218,11 +3572,11 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
3218 memcpy(chan->conf_req + chan->conf_len, req->data, len); 3572 memcpy(chan->conf_req + chan->conf_len, req->data, len);
3219 chan->conf_len += len; 3573 chan->conf_len += len;
3220 3574
3221 if (flags & 0x0001) { 3575 if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
3222 /* Incomplete config. Send empty response. */ 3576 /* Incomplete config. Send empty response. */
3223 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, 3577 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3224 l2cap_build_conf_rsp(chan, rsp, 3578 l2cap_build_conf_rsp(chan, rsp,
3225 L2CAP_CONF_SUCCESS, 0x0001), rsp); 3579 L2CAP_CONF_SUCCESS, flags), rsp);
3226 goto unlock; 3580 goto unlock;
3227 } 3581 }
3228 3582
@@ -3245,8 +3599,6 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
3245 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) { 3599 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
3246 set_default_fcs(chan); 3600 set_default_fcs(chan);
3247 3601
3248 l2cap_state_change(chan, BT_CONNECTED);
3249
3250 if (chan->mode == L2CAP_MODE_ERTM || 3602 if (chan->mode == L2CAP_MODE_ERTM ||
3251 chan->mode == L2CAP_MODE_STREAMING) 3603 chan->mode == L2CAP_MODE_STREAMING)
3252 err = l2cap_ertm_init(chan); 3604 err = l2cap_ertm_init(chan);
@@ -3278,7 +3630,7 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
3278 3630
3279 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, 3631 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3280 l2cap_build_conf_rsp(chan, rsp, 3632 l2cap_build_conf_rsp(chan, rsp,
3281 L2CAP_CONF_SUCCESS, 0x0000), rsp); 3633 L2CAP_CONF_SUCCESS, flags), rsp);
3282 } 3634 }
3283 3635
3284unlock: 3636unlock:
@@ -3369,7 +3721,7 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr
3369 goto done; 3721 goto done;
3370 } 3722 }
3371 3723
3372 if (flags & 0x01) 3724 if (flags & L2CAP_CONF_FLAG_CONTINUATION)
3373 goto done; 3725 goto done;
3374 3726
3375 set_bit(CONF_INPUT_DONE, &chan->conf_state); 3727 set_bit(CONF_INPUT_DONE, &chan->conf_state);
@@ -3377,7 +3729,6 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr
3377 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) { 3729 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
3378 set_default_fcs(chan); 3730 set_default_fcs(chan);
3379 3731
3380 l2cap_state_change(chan, BT_CONNECTED);
3381 if (chan->mode == L2CAP_MODE_ERTM || 3732 if (chan->mode == L2CAP_MODE_ERTM ||
3382 chan->mode == L2CAP_MODE_STREAMING) 3733 chan->mode == L2CAP_MODE_STREAMING)
3383 err = l2cap_ertm_init(chan); 3734 err = l2cap_ertm_init(chan);
@@ -3431,7 +3782,7 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd
3431 3782
3432 l2cap_chan_unlock(chan); 3783 l2cap_chan_unlock(chan);
3433 3784
3434 chan->ops->close(chan->data); 3785 chan->ops->close(chan);
3435 l2cap_chan_put(chan); 3786 l2cap_chan_put(chan);
3436 3787
3437 mutex_unlock(&conn->chan_lock); 3788 mutex_unlock(&conn->chan_lock);
@@ -3465,7 +3816,7 @@ static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd
3465 3816
3466 l2cap_chan_unlock(chan); 3817 l2cap_chan_unlock(chan);
3467 3818
3468 chan->ops->close(chan->data); 3819 chan->ops->close(chan);
3469 l2cap_chan_put(chan); 3820 l2cap_chan_put(chan);
3470 3821
3471 mutex_unlock(&conn->chan_lock); 3822 mutex_unlock(&conn->chan_lock);
@@ -3486,8 +3837,8 @@ static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cm
3486 u8 buf[8]; 3837 u8 buf[8];
3487 u32 feat_mask = l2cap_feat_mask; 3838 u32 feat_mask = l2cap_feat_mask;
3488 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf; 3839 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3489 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK); 3840 rsp->type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
3490 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS); 3841 rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
3491 if (!disable_ertm) 3842 if (!disable_ertm)
3492 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING 3843 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
3493 | L2CAP_FEAT_FCS; 3844 | L2CAP_FEAT_FCS;
@@ -3507,15 +3858,15 @@ static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cm
3507 else 3858 else
3508 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP; 3859 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
3509 3860
3510 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN); 3861 rsp->type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3511 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS); 3862 rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
3512 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan)); 3863 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
3513 l2cap_send_cmd(conn, cmd->ident, 3864 l2cap_send_cmd(conn, cmd->ident,
3514 L2CAP_INFO_RSP, sizeof(buf), buf); 3865 L2CAP_INFO_RSP, sizeof(buf), buf);
3515 } else { 3866 } else {
3516 struct l2cap_info_rsp rsp; 3867 struct l2cap_info_rsp rsp;
3517 rsp.type = cpu_to_le16(type); 3868 rsp.type = cpu_to_le16(type);
3518 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP); 3869 rsp.result = __constant_cpu_to_le16(L2CAP_IR_NOTSUPP);
3519 l2cap_send_cmd(conn, cmd->ident, 3870 l2cap_send_cmd(conn, cmd->ident,
3520 L2CAP_INFO_RSP, sizeof(rsp), &rsp); 3871 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
3521 } 3872 }
@@ -3555,7 +3906,7 @@ static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cm
3555 3906
3556 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) { 3907 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
3557 struct l2cap_info_req req; 3908 struct l2cap_info_req req;
3558 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN); 3909 req.type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3559 3910
3560 conn->info_ident = l2cap_get_ident(conn); 3911 conn->info_ident = l2cap_get_ident(conn);
3561 3912
@@ -3790,9 +4141,9 @@ static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
3790 4141
3791 err = l2cap_check_conn_param(min, max, latency, to_multiplier); 4142 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
3792 if (err) 4143 if (err)
3793 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED); 4144 rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
3794 else 4145 else
3795 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED); 4146 rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
3796 4147
3797 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP, 4148 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
3798 sizeof(rsp), &rsp); 4149 sizeof(rsp), &rsp);
@@ -3940,7 +4291,7 @@ static inline void l2cap_sig_channel(struct l2cap_conn *conn,
3940 BT_ERR("Wrong link type (%d)", err); 4291 BT_ERR("Wrong link type (%d)", err);
3941 4292
3942 /* FIXME: Map err to a valid reason */ 4293 /* FIXME: Map err to a valid reason */
3943 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD); 4294 rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
3944 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej); 4295 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
3945 } 4296 }
3946 4297
@@ -3972,65 +4323,38 @@ static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
3972 return 0; 4323 return 0;
3973} 4324}
3974 4325
3975static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan) 4326static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
3976{ 4327{
3977 u32 control = 0; 4328 struct l2cap_ctrl control;
3978 4329
3979 chan->frames_sent = 0; 4330 BT_DBG("chan %p", chan);
3980 4331
3981 control |= __set_reqseq(chan, chan->buffer_seq); 4332 memset(&control, 0, sizeof(control));
4333 control.sframe = 1;
4334 control.final = 1;
4335 control.reqseq = chan->buffer_seq;
4336 set_bit(CONN_SEND_FBIT, &chan->conn_state);
3982 4337
3983 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) { 4338 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3984 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR); 4339 control.super = L2CAP_SUPER_RNR;
3985 l2cap_send_sframe(chan, control); 4340 l2cap_send_sframe(chan, &control);
3986 set_bit(CONN_RNR_SENT, &chan->conn_state);
3987 } 4341 }
3988 4342
3989 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) 4343 if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
3990 l2cap_retransmit_frames(chan); 4344 chan->unacked_frames > 0)
4345 __set_retrans_timer(chan);
3991 4346
4347 /* Send pending iframes */
3992 l2cap_ertm_send(chan); 4348 l2cap_ertm_send(chan);
3993 4349
3994 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) && 4350 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
3995 chan->frames_sent == 0) { 4351 test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
3996 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR); 4352 /* F-bit wasn't sent in an s-frame or i-frame yet, so
3997 l2cap_send_sframe(chan, control); 4353 * send it now.
3998 } 4354 */
3999} 4355 control.super = L2CAP_SUPER_RR;
4000 4356 l2cap_send_sframe(chan, &control);
4001static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u16 tx_seq, u8 sar)
4002{
4003 struct sk_buff *next_skb;
4004 int tx_seq_offset, next_tx_seq_offset;
4005
4006 bt_cb(skb)->control.txseq = tx_seq;
4007 bt_cb(skb)->control.sar = sar;
4008
4009 next_skb = skb_peek(&chan->srej_q);
4010
4011 tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
4012
4013 while (next_skb) {
4014 if (bt_cb(next_skb)->control.txseq == tx_seq)
4015 return -EINVAL;
4016
4017 next_tx_seq_offset = __seq_offset(chan,
4018 bt_cb(next_skb)->control.txseq, chan->buffer_seq);
4019
4020 if (next_tx_seq_offset > tx_seq_offset) {
4021 __skb_queue_before(&chan->srej_q, next_skb, skb);
4022 return 0;
4023 }
4024
4025 if (skb_queue_is_last(&chan->srej_q, next_skb))
4026 next_skb = NULL;
4027 else
4028 next_skb = skb_queue_next(&chan->srej_q, next_skb);
4029 } 4357 }
4030
4031 __skb_queue_tail(&chan->srej_q, skb);
4032
4033 return 0;
4034} 4358}
4035 4359
4036static void append_skb_frag(struct sk_buff *skb, 4360static void append_skb_frag(struct sk_buff *skb,
@@ -4052,16 +4376,17 @@ static void append_skb_frag(struct sk_buff *skb,
4052 skb->truesize += new_frag->truesize; 4376 skb->truesize += new_frag->truesize;
4053} 4377}
4054 4378
4055static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u32 control) 4379static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
4380 struct l2cap_ctrl *control)
4056{ 4381{
4057 int err = -EINVAL; 4382 int err = -EINVAL;
4058 4383
4059 switch (__get_ctrl_sar(chan, control)) { 4384 switch (control->sar) {
4060 case L2CAP_SAR_UNSEGMENTED: 4385 case L2CAP_SAR_UNSEGMENTED:
4061 if (chan->sdu) 4386 if (chan->sdu)
4062 break; 4387 break;
4063 4388
4064 err = chan->ops->recv(chan->data, skb); 4389 err = chan->ops->recv(chan, skb);
4065 break; 4390 break;
4066 4391
4067 case L2CAP_SAR_START: 4392 case L2CAP_SAR_START:
@@ -4111,7 +4436,7 @@ static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u3
4111 if (chan->sdu->len != chan->sdu_len) 4436 if (chan->sdu->len != chan->sdu_len)
4112 break; 4437 break;
4113 4438
4114 err = chan->ops->recv(chan->data, chan->sdu); 4439 err = chan->ops->recv(chan, chan->sdu);
4115 4440
4116 if (!err) { 4441 if (!err) {
4117 /* Reassembly complete */ 4442 /* Reassembly complete */
@@ -4133,448 +4458,609 @@ static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u3
4133 return err; 4458 return err;
4134} 4459}
4135 4460
4136static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan) 4461void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
4137{ 4462{
4138 BT_DBG("chan %p, Enter local busy", chan); 4463 u8 event;
4139 4464
4140 set_bit(CONN_LOCAL_BUSY, &chan->conn_state); 4465 if (chan->mode != L2CAP_MODE_ERTM)
4141 l2cap_seq_list_clear(&chan->srej_list); 4466 return;
4142 4467
4143 __set_ack_timer(chan); 4468 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
4469 l2cap_tx(chan, NULL, NULL, event);
4144} 4470}
4145 4471
4146static void l2cap_ertm_exit_local_busy(struct l2cap_chan *chan) 4472static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
4147{ 4473{
4148 u32 control; 4474 int err = 0;
4149 4475 /* Pass sequential frames to l2cap_reassemble_sdu()
4150 if (!test_bit(CONN_RNR_SENT, &chan->conn_state)) 4476 * until a gap is encountered.
4151 goto done; 4477 */
4152 4478
4153 control = __set_reqseq(chan, chan->buffer_seq); 4479 BT_DBG("chan %p", chan);
4154 control |= __set_ctrl_poll(chan);
4155 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
4156 l2cap_send_sframe(chan, control);
4157 chan->retry_count = 1;
4158 4480
4159 __clear_retrans_timer(chan); 4481 while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4160 __set_monitor_timer(chan); 4482 struct sk_buff *skb;
4483 BT_DBG("Searching for skb with txseq %d (queue len %d)",
4484 chan->buffer_seq, skb_queue_len(&chan->srej_q));
4161 4485
4162 set_bit(CONN_WAIT_F, &chan->conn_state); 4486 skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);
4163 4487
4164done: 4488 if (!skb)
4165 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state); 4489 break;
4166 clear_bit(CONN_RNR_SENT, &chan->conn_state);
4167 4490
4168 BT_DBG("chan %p, Exit local busy", chan); 4491 skb_unlink(skb, &chan->srej_q);
4169} 4492 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
4493 err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
4494 if (err)
4495 break;
4496 }
4170 4497
4171void l2cap_chan_busy(struct l2cap_chan *chan, int busy) 4498 if (skb_queue_empty(&chan->srej_q)) {
4172{ 4499 chan->rx_state = L2CAP_RX_STATE_RECV;
4173 if (chan->mode == L2CAP_MODE_ERTM) { 4500 l2cap_send_ack(chan);
4174 if (busy)
4175 l2cap_ertm_enter_local_busy(chan);
4176 else
4177 l2cap_ertm_exit_local_busy(chan);
4178 } 4501 }
4502
4503 return err;
4179} 4504}
4180 4505
4181static void l2cap_check_srej_gap(struct l2cap_chan *chan, u16 tx_seq) 4506static void l2cap_handle_srej(struct l2cap_chan *chan,
4507 struct l2cap_ctrl *control)
4182{ 4508{
4183 struct sk_buff *skb; 4509 struct sk_buff *skb;
4184 u32 control;
4185 4510
4186 while ((skb = skb_peek(&chan->srej_q)) && 4511 BT_DBG("chan %p, control %p", chan, control);
4187 !test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4188 int err;
4189 4512
4190 if (bt_cb(skb)->control.txseq != tx_seq) 4513 if (control->reqseq == chan->next_tx_seq) {
4191 break; 4514 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
4515 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4516 return;
4517 }
4192 4518
4193 skb = skb_dequeue(&chan->srej_q); 4519 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
4194 control = __set_ctrl_sar(chan, bt_cb(skb)->control.sar);
4195 err = l2cap_reassemble_sdu(chan, skb, control);
4196 4520
4197 if (err < 0) { 4521 if (skb == NULL) {
4198 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); 4522 BT_DBG("Seq %d not available for retransmission",
4199 break; 4523 control->reqseq);
4200 } 4524 return;
4525 }
4201 4526
4202 chan->buffer_seq_srej = __next_seq(chan, chan->buffer_seq_srej); 4527 if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
4203 tx_seq = __next_seq(chan, tx_seq); 4528 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
4529 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4530 return;
4204 } 4531 }
4205}
4206 4532
4207static void l2cap_resend_srejframe(struct l2cap_chan *chan, u16 tx_seq) 4533 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4208{
4209 struct srej_list *l, *tmp;
4210 u32 control;
4211 4534
4212 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) { 4535 if (control->poll) {
4213 if (l->tx_seq == tx_seq) { 4536 l2cap_pass_to_tx(chan, control);
4214 list_del(&l->list); 4537
4215 kfree(l); 4538 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4216 return; 4539 l2cap_retransmit(chan, control);
4540 l2cap_ertm_send(chan);
4541
4542 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
4543 set_bit(CONN_SREJ_ACT, &chan->conn_state);
4544 chan->srej_save_reqseq = control->reqseq;
4545 }
4546 } else {
4547 l2cap_pass_to_tx_fbit(chan, control);
4548
4549 if (control->final) {
4550 if (chan->srej_save_reqseq != control->reqseq ||
4551 !test_and_clear_bit(CONN_SREJ_ACT,
4552 &chan->conn_state))
4553 l2cap_retransmit(chan, control);
4554 } else {
4555 l2cap_retransmit(chan, control);
4556 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
4557 set_bit(CONN_SREJ_ACT, &chan->conn_state);
4558 chan->srej_save_reqseq = control->reqseq;
4559 }
4217 } 4560 }
4218 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
4219 control |= __set_reqseq(chan, l->tx_seq);
4220 l2cap_send_sframe(chan, control);
4221 list_del(&l->list);
4222 list_add_tail(&l->list, &chan->srej_l);
4223 } 4561 }
4224} 4562}
4225 4563
4226static int l2cap_send_srejframe(struct l2cap_chan *chan, u16 tx_seq) 4564static void l2cap_handle_rej(struct l2cap_chan *chan,
4565 struct l2cap_ctrl *control)
4227{ 4566{
4228 struct srej_list *new; 4567 struct sk_buff *skb;
4229 u32 control;
4230
4231 while (tx_seq != chan->expected_tx_seq) {
4232 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
4233 control |= __set_reqseq(chan, chan->expected_tx_seq);
4234 l2cap_seq_list_append(&chan->srej_list, chan->expected_tx_seq);
4235 l2cap_send_sframe(chan, control);
4236 4568
4237 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC); 4569 BT_DBG("chan %p, control %p", chan, control);
4238 if (!new)
4239 return -ENOMEM;
4240 4570
4241 new->tx_seq = chan->expected_tx_seq; 4571 if (control->reqseq == chan->next_tx_seq) {
4572 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
4573 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4574 return;
4575 }
4242 4576
4243 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq); 4577 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
4244 4578
4245 list_add_tail(&new->list, &chan->srej_l); 4579 if (chan->max_tx && skb &&
4580 bt_cb(skb)->control.retries >= chan->max_tx) {
4581 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
4582 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4583 return;
4246 } 4584 }
4247 4585
4248 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq); 4586 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4249 4587
4250 return 0; 4588 l2cap_pass_to_tx(chan, control);
4589
4590 if (control->final) {
4591 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4592 l2cap_retransmit_all(chan, control);
4593 } else {
4594 l2cap_retransmit_all(chan, control);
4595 l2cap_ertm_send(chan);
4596 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
4597 set_bit(CONN_REJ_ACT, &chan->conn_state);
4598 }
4251} 4599}
4252 4600
4253static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb) 4601static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
4254{ 4602{
4255 u16 tx_seq = __get_txseq(chan, rx_control); 4603 BT_DBG("chan %p, txseq %d", chan, txseq);
4256 u16 req_seq = __get_reqseq(chan, rx_control);
4257 u8 sar = __get_ctrl_sar(chan, rx_control);
4258 int tx_seq_offset, expected_tx_seq_offset;
4259 int num_to_ack = (chan->tx_win/6) + 1;
4260 int err = 0;
4261 4604
4262 BT_DBG("chan %p len %d tx_seq %d rx_control 0x%8.8x", chan, skb->len, 4605 BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
4263 tx_seq, rx_control); 4606 chan->expected_tx_seq);
4264 4607
4265 if (__is_ctrl_final(chan, rx_control) && 4608 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
4266 test_bit(CONN_WAIT_F, &chan->conn_state)) { 4609 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
4267 __clear_monitor_timer(chan); 4610 chan->tx_win) {
4268 if (chan->unacked_frames > 0) 4611 /* See notes below regarding "double poll" and
4269 __set_retrans_timer(chan); 4612 * invalid packets.
4270 clear_bit(CONN_WAIT_F, &chan->conn_state); 4613 */
4271 } 4614 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
4615 BT_DBG("Invalid/Ignore - after SREJ");
4616 return L2CAP_TXSEQ_INVALID_IGNORE;
4617 } else {
4618 BT_DBG("Invalid - in window after SREJ sent");
4619 return L2CAP_TXSEQ_INVALID;
4620 }
4621 }
4272 4622
4273 chan->expected_ack_seq = req_seq; 4623 if (chan->srej_list.head == txseq) {
4274 l2cap_drop_acked_frames(chan); 4624 BT_DBG("Expected SREJ");
4625 return L2CAP_TXSEQ_EXPECTED_SREJ;
4626 }
4275 4627
4276 tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq); 4628 if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
4629 BT_DBG("Duplicate SREJ - txseq already stored");
4630 return L2CAP_TXSEQ_DUPLICATE_SREJ;
4631 }
4277 4632
4278 /* invalid tx_seq */ 4633 if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
4279 if (tx_seq_offset >= chan->tx_win) { 4634 BT_DBG("Unexpected SREJ - not requested");
4280 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); 4635 return L2CAP_TXSEQ_UNEXPECTED_SREJ;
4281 goto drop; 4636 }
4282 } 4637 }
4283 4638
4284 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) { 4639 if (chan->expected_tx_seq == txseq) {
4285 if (!test_bit(CONN_RNR_SENT, &chan->conn_state)) 4640 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
4286 l2cap_send_ack(chan); 4641 chan->tx_win) {
4287 goto drop; 4642 BT_DBG("Invalid - txseq outside tx window");
4643 return L2CAP_TXSEQ_INVALID;
4644 } else {
4645 BT_DBG("Expected");
4646 return L2CAP_TXSEQ_EXPECTED;
4647 }
4288 } 4648 }
4289 4649
4290 if (tx_seq == chan->expected_tx_seq) 4650 if (__seq_offset(chan, txseq, chan->last_acked_seq) <
4291 goto expected; 4651 __seq_offset(chan, chan->expected_tx_seq,
4652 chan->last_acked_seq)){
4653 BT_DBG("Duplicate - expected_tx_seq later than txseq");
4654 return L2CAP_TXSEQ_DUPLICATE;
4655 }
4656
4657 if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
4658 /* A source of invalid packets is a "double poll" condition,
4659 * where delays cause us to send multiple poll packets. If
4660 * the remote stack receives and processes both polls,
4661 * sequence numbers can wrap around in such a way that a
4662 * resent frame has a sequence number that looks like new data
4663 * with a sequence gap. This would trigger an erroneous SREJ
4664 * request.
4665 *
4666 * Fortunately, this is impossible with a tx window that's
4667 * less than half of the maximum sequence number, which allows
4668 * invalid frames to be safely ignored.
4669 *
4670 * With tx window sizes greater than half of the tx window
4671 * maximum, the frame is invalid and cannot be ignored. This
4672 * causes a disconnect.
4673 */
4292 4674
4293 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) { 4675 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
4294 struct srej_list *first; 4676 BT_DBG("Invalid/Ignore - txseq outside tx window");
4677 return L2CAP_TXSEQ_INVALID_IGNORE;
4678 } else {
4679 BT_DBG("Invalid - txseq outside tx window");
4680 return L2CAP_TXSEQ_INVALID;
4681 }
4682 } else {
4683 BT_DBG("Unexpected - txseq indicates missing frames");
4684 return L2CAP_TXSEQ_UNEXPECTED;
4685 }
4686}
4687
4688static int l2cap_rx_state_recv(struct l2cap_chan *chan,
4689 struct l2cap_ctrl *control,
4690 struct sk_buff *skb, u8 event)
4691{
4692 int err = 0;
4693 bool skb_in_use = 0;
4295 4694
4296 first = list_first_entry(&chan->srej_l, 4695 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
4297 struct srej_list, list); 4696 event);
4298 if (tx_seq == first->tx_seq) {
4299 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
4300 l2cap_check_srej_gap(chan, tx_seq);
4301 4697
4302 list_del(&first->list); 4698 switch (event) {
4303 kfree(first); 4699 case L2CAP_EV_RECV_IFRAME:
4700 switch (l2cap_classify_txseq(chan, control->txseq)) {
4701 case L2CAP_TXSEQ_EXPECTED:
4702 l2cap_pass_to_tx(chan, control);
4304 4703
4305 if (list_empty(&chan->srej_l)) { 4704 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4306 chan->buffer_seq = chan->buffer_seq_srej; 4705 BT_DBG("Busy, discarding expected seq %d",
4307 clear_bit(CONN_SREJ_SENT, &chan->conn_state); 4706 control->txseq);
4308 l2cap_send_ack(chan); 4707 break;
4309 BT_DBG("chan %p, Exit SREJ_SENT", chan);
4310 } 4708 }
4311 } else {
4312 struct srej_list *l;
4313 4709
4314 /* duplicated tx_seq */ 4710 chan->expected_tx_seq = __next_seq(chan,
4315 if (l2cap_add_to_srej_queue(chan, skb, tx_seq, sar) < 0) 4711 control->txseq);
4316 goto drop; 4712
4713 chan->buffer_seq = chan->expected_tx_seq;
4714 skb_in_use = 1;
4715
4716 err = l2cap_reassemble_sdu(chan, skb, control);
4717 if (err)
4718 break;
4317 4719
4318 list_for_each_entry(l, &chan->srej_l, list) { 4720 if (control->final) {
4319 if (l->tx_seq == tx_seq) { 4721 if (!test_and_clear_bit(CONN_REJ_ACT,
4320 l2cap_resend_srejframe(chan, tx_seq); 4722 &chan->conn_state)) {
4321 return 0; 4723 control->final = 0;
4724 l2cap_retransmit_all(chan, control);
4725 l2cap_ertm_send(chan);
4322 } 4726 }
4323 } 4727 }
4324 4728
4325 err = l2cap_send_srejframe(chan, tx_seq); 4729 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
4326 if (err < 0) { 4730 l2cap_send_ack(chan);
4327 l2cap_send_disconn_req(chan->conn, chan, -err); 4731 break;
4328 return err; 4732 case L2CAP_TXSEQ_UNEXPECTED:
4733 l2cap_pass_to_tx(chan, control);
4734
4735 /* Can't issue SREJ frames in the local busy state.
4736 * Drop this frame, it will be seen as missing
4737 * when local busy is exited.
4738 */
4739 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4740 BT_DBG("Busy, discarding unexpected seq %d",
4741 control->txseq);
4742 break;
4329 } 4743 }
4330 }
4331 } else {
4332 expected_tx_seq_offset = __seq_offset(chan,
4333 chan->expected_tx_seq, chan->buffer_seq);
4334 4744
4335 /* duplicated tx_seq */ 4745 /* There was a gap in the sequence, so an SREJ
4336 if (tx_seq_offset < expected_tx_seq_offset) 4746 * must be sent for each missing frame. The
4337 goto drop; 4747 * current frame is stored for later use.
4748 */
4749 skb_queue_tail(&chan->srej_q, skb);
4750 skb_in_use = 1;
4751 BT_DBG("Queued %p (queue len %d)", skb,
4752 skb_queue_len(&chan->srej_q));
4338 4753
4339 set_bit(CONN_SREJ_SENT, &chan->conn_state); 4754 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
4340 4755 l2cap_seq_list_clear(&chan->srej_list);
4341 BT_DBG("chan %p, Enter SREJ", chan); 4756 l2cap_send_srej(chan, control->txseq);
4342 4757
4343 INIT_LIST_HEAD(&chan->srej_l); 4758 chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
4344 chan->buffer_seq_srej = chan->buffer_seq; 4759 break;
4760 case L2CAP_TXSEQ_DUPLICATE:
4761 l2cap_pass_to_tx(chan, control);
4762 break;
4763 case L2CAP_TXSEQ_INVALID_IGNORE:
4764 break;
4765 case L2CAP_TXSEQ_INVALID:
4766 default:
4767 l2cap_send_disconn_req(chan->conn, chan,
4768 ECONNRESET);
4769 break;
4770 }
4771 break;
4772 case L2CAP_EV_RECV_RR:
4773 l2cap_pass_to_tx(chan, control);
4774 if (control->final) {
4775 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4345 4776
4346 __skb_queue_head_init(&chan->srej_q); 4777 if (!test_and_clear_bit(CONN_REJ_ACT,
4347 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar); 4778 &chan->conn_state)) {
4779 control->final = 0;
4780 l2cap_retransmit_all(chan, control);
4781 }
4348 4782
4349 /* Set P-bit only if there are some I-frames to ack. */ 4783 l2cap_ertm_send(chan);
4350 if (__clear_ack_timer(chan)) 4784 } else if (control->poll) {
4351 set_bit(CONN_SEND_PBIT, &chan->conn_state); 4785 l2cap_send_i_or_rr_or_rnr(chan);
4786 } else {
4787 if (test_and_clear_bit(CONN_REMOTE_BUSY,
4788 &chan->conn_state) &&
4789 chan->unacked_frames)
4790 __set_retrans_timer(chan);
4352 4791
4353 err = l2cap_send_srejframe(chan, tx_seq); 4792 l2cap_ertm_send(chan);
4354 if (err < 0) {
4355 l2cap_send_disconn_req(chan->conn, chan, -err);
4356 return err;
4357 } 4793 }
4794 break;
4795 case L2CAP_EV_RECV_RNR:
4796 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4797 l2cap_pass_to_tx(chan, control);
4798 if (control && control->poll) {
4799 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4800 l2cap_send_rr_or_rnr(chan, 0);
4801 }
4802 __clear_retrans_timer(chan);
4803 l2cap_seq_list_clear(&chan->retrans_list);
4804 break;
4805 case L2CAP_EV_RECV_REJ:
4806 l2cap_handle_rej(chan, control);
4807 break;
4808 case L2CAP_EV_RECV_SREJ:
4809 l2cap_handle_srej(chan, control);
4810 break;
4811 default:
4812 break;
4358 } 4813 }
4359 return 0;
4360
4361expected:
4362 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
4363
4364 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
4365 bt_cb(skb)->control.txseq = tx_seq;
4366 bt_cb(skb)->control.sar = sar;
4367 __skb_queue_tail(&chan->srej_q, skb);
4368 return 0;
4369 }
4370
4371 err = l2cap_reassemble_sdu(chan, skb, rx_control);
4372 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
4373 4814
4374 if (err < 0) { 4815 if (skb && !skb_in_use) {
4375 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); 4816 BT_DBG("Freeing %p", skb);
4376 return err; 4817 kfree_skb(skb);
4377 } 4818 }
4378 4819
4379 if (__is_ctrl_final(chan, rx_control)) { 4820 return err;
4380 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state)) 4821}
4381 l2cap_retransmit_frames(chan);
4382 }
4383 4822
4823static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
4824 struct l2cap_ctrl *control,
4825 struct sk_buff *skb, u8 event)
4826{
4827 int err = 0;
4828 u16 txseq = control->txseq;
4829 bool skb_in_use = 0;
4830
4831 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
4832 event);
4833
4834 switch (event) {
4835 case L2CAP_EV_RECV_IFRAME:
4836 switch (l2cap_classify_txseq(chan, txseq)) {
4837 case L2CAP_TXSEQ_EXPECTED:
4838 /* Keep frame for reassembly later */
4839 l2cap_pass_to_tx(chan, control);
4840 skb_queue_tail(&chan->srej_q, skb);
4841 skb_in_use = 1;
4842 BT_DBG("Queued %p (queue len %d)", skb,
4843 skb_queue_len(&chan->srej_q));
4844
4845 chan->expected_tx_seq = __next_seq(chan, txseq);
4846 break;
4847 case L2CAP_TXSEQ_EXPECTED_SREJ:
4848 l2cap_seq_list_pop(&chan->srej_list);
4384 4849
4385 chan->num_acked = (chan->num_acked + 1) % num_to_ack; 4850 l2cap_pass_to_tx(chan, control);
4386 if (chan->num_acked == num_to_ack - 1) 4851 skb_queue_tail(&chan->srej_q, skb);
4387 l2cap_send_ack(chan); 4852 skb_in_use = 1;
4388 else 4853 BT_DBG("Queued %p (queue len %d)", skb,
4389 __set_ack_timer(chan); 4854 skb_queue_len(&chan->srej_q));
4390 4855
4391 return 0; 4856 err = l2cap_rx_queued_iframes(chan);
4857 if (err)
4858 break;
4392 4859
4393drop: 4860 break;
4394 kfree_skb(skb); 4861 case L2CAP_TXSEQ_UNEXPECTED:
4395 return 0; 4862 /* Got a frame that can't be reassembled yet.
4396} 4863 * Save it for later, and send SREJs to cover
4864 * the missing frames.
4865 */
4866 skb_queue_tail(&chan->srej_q, skb);
4867 skb_in_use = 1;
4868 BT_DBG("Queued %p (queue len %d)", skb,
4869 skb_queue_len(&chan->srej_q));
4870
4871 l2cap_pass_to_tx(chan, control);
4872 l2cap_send_srej(chan, control->txseq);
4873 break;
4874 case L2CAP_TXSEQ_UNEXPECTED_SREJ:
4875 /* This frame was requested with an SREJ, but
4876 * some expected retransmitted frames are
4877 * missing. Request retransmission of missing
4878 * SREJ'd frames.
4879 */
4880 skb_queue_tail(&chan->srej_q, skb);
4881 skb_in_use = 1;
4882 BT_DBG("Queued %p (queue len %d)", skb,
4883 skb_queue_len(&chan->srej_q));
4884
4885 l2cap_pass_to_tx(chan, control);
4886 l2cap_send_srej_list(chan, control->txseq);
4887 break;
4888 case L2CAP_TXSEQ_DUPLICATE_SREJ:
4889 /* We've already queued this frame. Drop this copy. */
4890 l2cap_pass_to_tx(chan, control);
4891 break;
4892 case L2CAP_TXSEQ_DUPLICATE:
4893 /* Expecting a later sequence number, so this frame
4894 * was already received. Ignore it completely.
4895 */
4896 break;
4897 case L2CAP_TXSEQ_INVALID_IGNORE:
4898 break;
4899 case L2CAP_TXSEQ_INVALID:
4900 default:
4901 l2cap_send_disconn_req(chan->conn, chan,
4902 ECONNRESET);
4903 break;
4904 }
4905 break;
4906 case L2CAP_EV_RECV_RR:
4907 l2cap_pass_to_tx(chan, control);
4908 if (control->final) {
4909 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4397 4910
4398static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u32 rx_control) 4911 if (!test_and_clear_bit(CONN_REJ_ACT,
4399{ 4912 &chan->conn_state)) {
4400 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, 4913 control->final = 0;
4401 __get_reqseq(chan, rx_control), rx_control); 4914 l2cap_retransmit_all(chan, control);
4915 }
4402 4916
4403 chan->expected_ack_seq = __get_reqseq(chan, rx_control); 4917 l2cap_ertm_send(chan);
4404 l2cap_drop_acked_frames(chan); 4918 } else if (control->poll) {
4919 if (test_and_clear_bit(CONN_REMOTE_BUSY,
4920 &chan->conn_state) &&
4921 chan->unacked_frames) {
4922 __set_retrans_timer(chan);
4923 }
4405 4924
4406 if (__is_ctrl_poll(chan, rx_control)) { 4925 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4407 set_bit(CONN_SEND_FBIT, &chan->conn_state); 4926 l2cap_send_srej_tail(chan);
4408 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) { 4927 } else {
4409 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) && 4928 if (test_and_clear_bit(CONN_REMOTE_BUSY,
4410 (chan->unacked_frames > 0)) 4929 &chan->conn_state) &&
4930 chan->unacked_frames)
4411 __set_retrans_timer(chan); 4931 __set_retrans_timer(chan);
4412 4932
4413 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state); 4933 l2cap_send_ack(chan);
4414 l2cap_send_srejtail(chan); 4934 }
4935 break;
4936 case L2CAP_EV_RECV_RNR:
4937 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4938 l2cap_pass_to_tx(chan, control);
4939 if (control->poll) {
4940 l2cap_send_srej_tail(chan);
4415 } else { 4941 } else {
4416 l2cap_send_i_or_rr_or_rnr(chan); 4942 struct l2cap_ctrl rr_control;
4943 memset(&rr_control, 0, sizeof(rr_control));
4944 rr_control.sframe = 1;
4945 rr_control.super = L2CAP_SUPER_RR;
4946 rr_control.reqseq = chan->buffer_seq;
4947 l2cap_send_sframe(chan, &rr_control);
4417 } 4948 }
4418 4949
4419 } else if (__is_ctrl_final(chan, rx_control)) { 4950 break;
4420 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state); 4951 case L2CAP_EV_RECV_REJ:
4421 4952 l2cap_handle_rej(chan, control);
4422 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state)) 4953 break;
4423 l2cap_retransmit_frames(chan); 4954 case L2CAP_EV_RECV_SREJ:
4424 4955 l2cap_handle_srej(chan, control);
4425 } else { 4956 break;
4426 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) && 4957 }
4427 (chan->unacked_frames > 0))
4428 __set_retrans_timer(chan);
4429 4958
4430 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state); 4959 if (skb && !skb_in_use) {
4431 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) 4960 BT_DBG("Freeing %p", skb);
4432 l2cap_send_ack(chan); 4961 kfree_skb(skb);
4433 else
4434 l2cap_ertm_send(chan);
4435 } 4962 }
4963
4964 return err;
4436} 4965}
4437 4966
4438static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u32 rx_control) 4967static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
4439{ 4968{
4440 u16 tx_seq = __get_reqseq(chan, rx_control); 4969 /* Make sure reqseq is for a packet that has been sent but not acked */
4441 4970 u16 unacked;
4442 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4443
4444 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4445
4446 chan->expected_ack_seq = tx_seq;
4447 l2cap_drop_acked_frames(chan);
4448
4449 if (__is_ctrl_final(chan, rx_control)) {
4450 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4451 l2cap_retransmit_frames(chan);
4452 } else {
4453 l2cap_retransmit_frames(chan);
4454 4971
4455 if (test_bit(CONN_WAIT_F, &chan->conn_state)) 4972 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
4456 set_bit(CONN_REJ_ACT, &chan->conn_state); 4973 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
4457 }
4458} 4974}
4459static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u32 rx_control)
4460{
4461 u16 tx_seq = __get_reqseq(chan, rx_control);
4462
4463 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4464
4465 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4466 4975
4467 if (__is_ctrl_poll(chan, rx_control)) { 4976static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
4468 chan->expected_ack_seq = tx_seq; 4977 struct sk_buff *skb, u8 event)
4469 l2cap_drop_acked_frames(chan); 4978{
4470 4979 int err = 0;
4471 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4472 l2cap_retransmit_one_frame(chan, tx_seq);
4473 4980
4474 l2cap_ertm_send(chan); 4981 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
4982 control, skb, event, chan->rx_state);
4475 4983
4476 if (test_bit(CONN_WAIT_F, &chan->conn_state)) { 4984 if (__valid_reqseq(chan, control->reqseq)) {
4477 chan->srej_save_reqseq = tx_seq; 4985 switch (chan->rx_state) {
4478 set_bit(CONN_SREJ_ACT, &chan->conn_state); 4986 case L2CAP_RX_STATE_RECV:
4987 err = l2cap_rx_state_recv(chan, control, skb, event);
4988 break;
4989 case L2CAP_RX_STATE_SREJ_SENT:
4990 err = l2cap_rx_state_srej_sent(chan, control, skb,
4991 event);
4992 break;
4993 default:
4994 /* shut it down */
4995 break;
4479 } 4996 }
4480 } else if (__is_ctrl_final(chan, rx_control)) {
4481 if (test_bit(CONN_SREJ_ACT, &chan->conn_state) &&
4482 chan->srej_save_reqseq == tx_seq)
4483 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
4484 else
4485 l2cap_retransmit_one_frame(chan, tx_seq);
4486 } else { 4997 } else {
4487 l2cap_retransmit_one_frame(chan, tx_seq); 4998 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
4488 if (test_bit(CONN_WAIT_F, &chan->conn_state)) { 4999 control->reqseq, chan->next_tx_seq,
4489 chan->srej_save_reqseq = tx_seq; 5000 chan->expected_ack_seq);
4490 set_bit(CONN_SREJ_ACT, &chan->conn_state); 5001 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4491 }
4492 } 5002 }
5003
5004 return err;
4493} 5005}
4494 5006
4495static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u32 rx_control) 5007static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
5008 struct sk_buff *skb)
4496{ 5009{
4497 u16 tx_seq = __get_reqseq(chan, rx_control); 5010 int err = 0;
4498 5011
4499 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control); 5012 BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
5013 chan->rx_state);
4500 5014
4501 set_bit(CONN_REMOTE_BUSY, &chan->conn_state); 5015 if (l2cap_classify_txseq(chan, control->txseq) ==
4502 chan->expected_ack_seq = tx_seq; 5016 L2CAP_TXSEQ_EXPECTED) {
4503 l2cap_drop_acked_frames(chan); 5017 l2cap_pass_to_tx(chan, control);
4504 5018
4505 if (__is_ctrl_poll(chan, rx_control)) 5019 BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
4506 set_bit(CONN_SEND_FBIT, &chan->conn_state); 5020 __next_seq(chan, chan->buffer_seq));
4507 5021
4508 if (!test_bit(CONN_SREJ_SENT, &chan->conn_state)) { 5022 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
4509 __clear_retrans_timer(chan);
4510 if (__is_ctrl_poll(chan, rx_control))
4511 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
4512 return;
4513 }
4514 5023
4515 if (__is_ctrl_poll(chan, rx_control)) { 5024 l2cap_reassemble_sdu(chan, skb, control);
4516 l2cap_send_srejtail(chan);
4517 } else { 5025 } else {
4518 rx_control = __set_ctrl_super(chan, L2CAP_SUPER_RR); 5026 if (chan->sdu) {
4519 l2cap_send_sframe(chan, rx_control); 5027 kfree_skb(chan->sdu);
4520 } 5028 chan->sdu = NULL;
4521} 5029 }
4522 5030 chan->sdu_last_frag = NULL;
4523static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb) 5031 chan->sdu_len = 0;
4524{
4525 BT_DBG("chan %p rx_control 0x%8.8x len %d", chan, rx_control, skb->len);
4526 5032
4527 if (__is_ctrl_final(chan, rx_control) && 5033 if (skb) {
4528 test_bit(CONN_WAIT_F, &chan->conn_state)) { 5034 BT_DBG("Freeing %p", skb);
4529 __clear_monitor_timer(chan); 5035 kfree_skb(skb);
4530 if (chan->unacked_frames > 0) 5036 }
4531 __set_retrans_timer(chan);
4532 clear_bit(CONN_WAIT_F, &chan->conn_state);
4533 } 5037 }
4534 5038
4535 switch (__get_ctrl_super(chan, rx_control)) { 5039 chan->last_acked_seq = control->txseq;
4536 case L2CAP_SUPER_RR: 5040 chan->expected_tx_seq = __next_seq(chan, control->txseq);
4537 l2cap_data_channel_rrframe(chan, rx_control);
4538 break;
4539 5041
4540 case L2CAP_SUPER_REJ: 5042 return err;
4541 l2cap_data_channel_rejframe(chan, rx_control);
4542 break;
4543
4544 case L2CAP_SUPER_SREJ:
4545 l2cap_data_channel_srejframe(chan, rx_control);
4546 break;
4547
4548 case L2CAP_SUPER_RNR:
4549 l2cap_data_channel_rnrframe(chan, rx_control);
4550 break;
4551 }
4552
4553 kfree_skb(skb);
4554 return 0;
4555} 5043}
4556 5044
4557static int l2cap_ertm_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb) 5045static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
4558{ 5046{
4559 u32 control; 5047 struct l2cap_ctrl *control = &bt_cb(skb)->control;
4560 u16 req_seq; 5048 u16 len;
4561 int len, next_tx_seq_offset, req_seq_offset; 5049 u8 event;
4562 5050
4563 __unpack_control(chan, skb); 5051 __unpack_control(chan, skb);
4564 5052
4565 control = __get_control(chan, skb->data);
4566 skb_pull(skb, __ctrl_size(chan));
4567 len = skb->len; 5053 len = skb->len;
4568 5054
4569 /* 5055 /*
4570 * We can just drop the corrupted I-frame here. 5056 * We can just drop the corrupted I-frame here.
4571 * Receiver will miss it and start proper recovery 5057 * Receiver will miss it and start proper recovery
4572 * procedures and ask retransmission. 5058 * procedures and ask for retransmission.
4573 */ 5059 */
4574 if (l2cap_check_fcs(chan, skb)) 5060 if (l2cap_check_fcs(chan, skb))
4575 goto drop; 5061 goto drop;
4576 5062
4577 if (__is_sar_start(chan, control) && !__is_sframe(chan, control)) 5063 if (!control->sframe && control->sar == L2CAP_SAR_START)
4578 len -= L2CAP_SDULEN_SIZE; 5064 len -= L2CAP_SDULEN_SIZE;
4579 5065
4580 if (chan->fcs == L2CAP_FCS_CRC16) 5066 if (chan->fcs == L2CAP_FCS_CRC16)
@@ -4585,34 +5071,57 @@ static int l2cap_ertm_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
4585 goto drop; 5071 goto drop;
4586 } 5072 }
4587 5073
4588 req_seq = __get_reqseq(chan, control); 5074 if (!control->sframe) {
4589 5075 int err;
4590 req_seq_offset = __seq_offset(chan, req_seq, chan->expected_ack_seq);
4591
4592 next_tx_seq_offset = __seq_offset(chan, chan->next_tx_seq,
4593 chan->expected_ack_seq);
4594 5076
4595 /* check for invalid req-seq */ 5077 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
4596 if (req_seq_offset > next_tx_seq_offset) { 5078 control->sar, control->reqseq, control->final,
4597 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); 5079 control->txseq);
4598 goto drop;
4599 }
4600 5080
4601 if (!__is_sframe(chan, control)) { 5081 /* Validate F-bit - F=0 always valid, F=1 only
4602 if (len < 0) { 5082 * valid in TX WAIT_F
4603 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); 5083 */
5084 if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
4604 goto drop; 5085 goto drop;
5086
5087 if (chan->mode != L2CAP_MODE_STREAMING) {
5088 event = L2CAP_EV_RECV_IFRAME;
5089 err = l2cap_rx(chan, control, skb, event);
5090 } else {
5091 err = l2cap_stream_rx(chan, control, skb);
4605 } 5092 }
4606 5093
4607 l2cap_data_channel_iframe(chan, control, skb); 5094 if (err)
5095 l2cap_send_disconn_req(chan->conn, chan,
5096 ECONNRESET);
4608 } else { 5097 } else {
5098 const u8 rx_func_to_event[4] = {
5099 L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
5100 L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
5101 };
5102
5103 /* Only I-frames are expected in streaming mode */
5104 if (chan->mode == L2CAP_MODE_STREAMING)
5105 goto drop;
5106
5107 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
5108 control->reqseq, control->final, control->poll,
5109 control->super);
5110
4609 if (len != 0) { 5111 if (len != 0) {
4610 BT_ERR("%d", len); 5112 BT_ERR("%d", len);
4611 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); 5113 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4612 goto drop; 5114 goto drop;
4613 } 5115 }
4614 5116
4615 l2cap_data_channel_sframe(chan, control, skb); 5117 /* Validate F and P bits */
5118 if (control->final && (control->poll ||
5119 chan->tx_state != L2CAP_TX_STATE_WAIT_F))
5120 goto drop;
5121
5122 event = rx_func_to_event[control->super];
5123 if (l2cap_rx(chan, control, skb, event))
5124 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4616 } 5125 }
4617 5126
4618 return 0; 5127 return 0;
@@ -4622,19 +5131,27 @@ drop:
4622 return 0; 5131 return 0;
4623} 5132}
4624 5133
4625static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb) 5134static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
5135 struct sk_buff *skb)
4626{ 5136{
4627 struct l2cap_chan *chan; 5137 struct l2cap_chan *chan;
4628 u32 control;
4629 u16 tx_seq;
4630 int len;
4631 5138
4632 chan = l2cap_get_chan_by_scid(conn, cid); 5139 chan = l2cap_get_chan_by_scid(conn, cid);
4633 if (!chan) { 5140 if (!chan) {
4634 BT_DBG("unknown cid 0x%4.4x", cid); 5141 if (cid == L2CAP_CID_A2MP) {
4635 /* Drop packet and return */ 5142 chan = a2mp_channel_create(conn, skb);
4636 kfree_skb(skb); 5143 if (!chan) {
4637 return 0; 5144 kfree_skb(skb);
5145 return;
5146 }
5147
5148 l2cap_chan_lock(chan);
5149 } else {
5150 BT_DBG("unknown cid 0x%4.4x", cid);
5151 /* Drop packet and return */
5152 kfree_skb(skb);
5153 return;
5154 }
4638 } 5155 }
4639 5156
4640 BT_DBG("chan %p, len %d", chan, skb->len); 5157 BT_DBG("chan %p, len %d", chan, skb->len);
@@ -4652,49 +5169,13 @@ static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk
4652 if (chan->imtu < skb->len) 5169 if (chan->imtu < skb->len)
4653 goto drop; 5170 goto drop;
4654 5171
4655 if (!chan->ops->recv(chan->data, skb)) 5172 if (!chan->ops->recv(chan, skb))
4656 goto done; 5173 goto done;
4657 break; 5174 break;
4658 5175
4659 case L2CAP_MODE_ERTM: 5176 case L2CAP_MODE_ERTM:
4660 l2cap_ertm_data_rcv(chan, skb);
4661
4662 goto done;
4663
4664 case L2CAP_MODE_STREAMING: 5177 case L2CAP_MODE_STREAMING:
4665 control = __get_control(chan, skb->data); 5178 l2cap_data_rcv(chan, skb);
4666 skb_pull(skb, __ctrl_size(chan));
4667 len = skb->len;
4668
4669 if (l2cap_check_fcs(chan, skb))
4670 goto drop;
4671
4672 if (__is_sar_start(chan, control))
4673 len -= L2CAP_SDULEN_SIZE;
4674
4675 if (chan->fcs == L2CAP_FCS_CRC16)
4676 len -= L2CAP_FCS_SIZE;
4677
4678 if (len > chan->mps || len < 0 || __is_sframe(chan, control))
4679 goto drop;
4680
4681 tx_seq = __get_txseq(chan, control);
4682
4683 if (chan->expected_tx_seq != tx_seq) {
4684 /* Frame(s) missing - must discard partial SDU */
4685 kfree_skb(chan->sdu);
4686 chan->sdu = NULL;
4687 chan->sdu_last_frag = NULL;
4688 chan->sdu_len = 0;
4689
4690 /* TODO: Notify userland of missing data */
4691 }
4692
4693 chan->expected_tx_seq = __next_seq(chan, tx_seq);
4694
4695 if (l2cap_reassemble_sdu(chan, skb, control) == -EMSGSIZE)
4696 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4697
4698 goto done; 5179 goto done;
4699 5180
4700 default: 5181 default:
@@ -4707,11 +5188,10 @@ drop:
4707 5188
4708done: 5189done:
4709 l2cap_chan_unlock(chan); 5190 l2cap_chan_unlock(chan);
4710
4711 return 0;
4712} 5191}
4713 5192
4714static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb) 5193static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
5194 struct sk_buff *skb)
4715{ 5195{
4716 struct l2cap_chan *chan; 5196 struct l2cap_chan *chan;
4717 5197
@@ -4727,17 +5207,15 @@ static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, str
4727 if (chan->imtu < skb->len) 5207 if (chan->imtu < skb->len)
4728 goto drop; 5208 goto drop;
4729 5209
4730 if (!chan->ops->recv(chan->data, skb)) 5210 if (!chan->ops->recv(chan, skb))
4731 return 0; 5211 return;
4732 5212
4733drop: 5213drop:
4734 kfree_skb(skb); 5214 kfree_skb(skb);
4735
4736 return 0;
4737} 5215}
4738 5216
4739static inline int l2cap_att_channel(struct l2cap_conn *conn, u16 cid, 5217static void l2cap_att_channel(struct l2cap_conn *conn, u16 cid,
4740 struct sk_buff *skb) 5218 struct sk_buff *skb)
4741{ 5219{
4742 struct l2cap_chan *chan; 5220 struct l2cap_chan *chan;
4743 5221
@@ -4753,13 +5231,11 @@ static inline int l2cap_att_channel(struct l2cap_conn *conn, u16 cid,
4753 if (chan->imtu < skb->len) 5231 if (chan->imtu < skb->len)
4754 goto drop; 5232 goto drop;
4755 5233
4756 if (!chan->ops->recv(chan->data, skb)) 5234 if (!chan->ops->recv(chan, skb))
4757 return 0; 5235 return;
4758 5236
4759drop: 5237drop:
4760 kfree_skb(skb); 5238 kfree_skb(skb);
4761
4762 return 0;
4763} 5239}
4764 5240
4765static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb) 5241static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
@@ -4787,7 +5263,7 @@ static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
4787 5263
4788 case L2CAP_CID_CONN_LESS: 5264 case L2CAP_CID_CONN_LESS:
4789 psm = get_unaligned((__le16 *) skb->data); 5265 psm = get_unaligned((__le16 *) skb->data);
4790 skb_pull(skb, 2); 5266 skb_pull(skb, L2CAP_PSMLEN_SIZE);
4791 l2cap_conless_channel(conn, psm, skb); 5267 l2cap_conless_channel(conn, psm, skb);
4792 break; 5268 break;
4793 5269
@@ -4981,6 +5457,17 @@ int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4981 rsp.status = cpu_to_le16(stat); 5457 rsp.status = cpu_to_le16(stat);
4982 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, 5458 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
4983 sizeof(rsp), &rsp); 5459 sizeof(rsp), &rsp);
5460
5461 if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
5462 res == L2CAP_CR_SUCCESS) {
5463 char buf[128];
5464 set_bit(CONF_REQ_SENT, &chan->conf_state);
5465 l2cap_send_cmd(conn, l2cap_get_ident(conn),
5466 L2CAP_CONF_REQ,
5467 l2cap_build_conf_req(chan, buf),
5468 buf);
5469 chan->num_conf_req++;
5470 }
4984 } 5471 }
4985 5472
4986 l2cap_chan_unlock(chan); 5473 l2cap_chan_unlock(chan);
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index 3bb1611b9d4..a4bb27e8427 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -27,7 +27,6 @@
27 27
28/* Bluetooth L2CAP sockets. */ 28/* Bluetooth L2CAP sockets. */
29 29
30#include <linux/security.h>
31#include <linux/export.h> 30#include <linux/export.h>
32 31
33#include <net/bluetooth/bluetooth.h> 32#include <net/bluetooth/bluetooth.h>
@@ -89,8 +88,8 @@ static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
89 if (err < 0) 88 if (err < 0)
90 goto done; 89 goto done;
91 90
92 if (__le16_to_cpu(la.l2_psm) == 0x0001 || 91 if (__le16_to_cpu(la.l2_psm) == L2CAP_PSM_SDP ||
93 __le16_to_cpu(la.l2_psm) == 0x0003) 92 __le16_to_cpu(la.l2_psm) == L2CAP_PSM_RFCOMM)
94 chan->sec_level = BT_SECURITY_SDP; 93 chan->sec_level = BT_SECURITY_SDP;
95 94
96 bacpy(&bt_sk(sk)->src, &la.l2_bdaddr); 95 bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
@@ -446,6 +445,22 @@ static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, ch
446 return err; 445 return err;
447} 446}
448 447
448static bool l2cap_valid_mtu(struct l2cap_chan *chan, u16 mtu)
449{
450 switch (chan->scid) {
451 case L2CAP_CID_LE_DATA:
452 if (mtu < L2CAP_LE_MIN_MTU)
453 return false;
454 break;
455
456 default:
457 if (mtu < L2CAP_DEFAULT_MIN_MTU)
458 return false;
459 }
460
461 return true;
462}
463
449static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen) 464static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen)
450{ 465{
451 struct sock *sk = sock->sk; 466 struct sock *sk = sock->sk;
@@ -484,6 +499,11 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __us
484 break; 499 break;
485 } 500 }
486 501
502 if (!l2cap_valid_mtu(chan, opts.imtu)) {
503 err = -EINVAL;
504 break;
505 }
506
487 chan->mode = opts.mode; 507 chan->mode = opts.mode;
488 switch (chan->mode) { 508 switch (chan->mode) {
489 case L2CAP_MODE_BASIC: 509 case L2CAP_MODE_BASIC:
@@ -873,9 +893,34 @@ static int l2cap_sock_release(struct socket *sock)
873 return err; 893 return err;
874} 894}
875 895
876static struct l2cap_chan *l2cap_sock_new_connection_cb(void *data) 896static void l2cap_sock_cleanup_listen(struct sock *parent)
877{ 897{
878 struct sock *sk, *parent = data; 898 struct sock *sk;
899
900 BT_DBG("parent %p", parent);
901
902 /* Close not yet accepted channels */
903 while ((sk = bt_accept_dequeue(parent, NULL))) {
904 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
905
906 l2cap_chan_lock(chan);
907 __clear_chan_timer(chan);
908 l2cap_chan_close(chan, ECONNRESET);
909 l2cap_chan_unlock(chan);
910
911 l2cap_sock_kill(sk);
912 }
913}
914
915static struct l2cap_chan *l2cap_sock_new_connection_cb(struct l2cap_chan *chan)
916{
917 struct sock *sk, *parent = chan->data;
918
919 /* Check for backlog size */
920 if (sk_acceptq_is_full(parent)) {
921 BT_DBG("backlog full %d", parent->sk_ack_backlog);
922 return NULL;
923 }
879 924
880 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, 925 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP,
881 GFP_ATOMIC); 926 GFP_ATOMIC);
@@ -889,10 +934,10 @@ static struct l2cap_chan *l2cap_sock_new_connection_cb(void *data)
889 return l2cap_pi(sk)->chan; 934 return l2cap_pi(sk)->chan;
890} 935}
891 936
892static int l2cap_sock_recv_cb(void *data, struct sk_buff *skb) 937static int l2cap_sock_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb)
893{ 938{
894 int err; 939 int err;
895 struct sock *sk = data; 940 struct sock *sk = chan->data;
896 struct l2cap_pinfo *pi = l2cap_pi(sk); 941 struct l2cap_pinfo *pi = l2cap_pi(sk);
897 942
898 lock_sock(sk); 943 lock_sock(sk);
@@ -925,16 +970,57 @@ done:
925 return err; 970 return err;
926} 971}
927 972
928static void l2cap_sock_close_cb(void *data) 973static void l2cap_sock_close_cb(struct l2cap_chan *chan)
929{ 974{
930 struct sock *sk = data; 975 struct sock *sk = chan->data;
931 976
932 l2cap_sock_kill(sk); 977 l2cap_sock_kill(sk);
933} 978}
934 979
935static void l2cap_sock_state_change_cb(void *data, int state) 980static void l2cap_sock_teardown_cb(struct l2cap_chan *chan, int err)
936{ 981{
937 struct sock *sk = data; 982 struct sock *sk = chan->data;
983 struct sock *parent;
984
985 lock_sock(sk);
986
987 parent = bt_sk(sk)->parent;
988
989 sock_set_flag(sk, SOCK_ZAPPED);
990
991 switch (chan->state) {
992 case BT_OPEN:
993 case BT_BOUND:
994 case BT_CLOSED:
995 break;
996 case BT_LISTEN:
997 l2cap_sock_cleanup_listen(sk);
998 sk->sk_state = BT_CLOSED;
999 chan->state = BT_CLOSED;
1000
1001 break;
1002 default:
1003 sk->sk_state = BT_CLOSED;
1004 chan->state = BT_CLOSED;
1005
1006 sk->sk_err = err;
1007
1008 if (parent) {
1009 bt_accept_unlink(sk);
1010 parent->sk_data_ready(parent, 0);
1011 } else {
1012 sk->sk_state_change(sk);
1013 }
1014
1015 break;
1016 }
1017
1018 release_sock(sk);
1019}
1020
1021static void l2cap_sock_state_change_cb(struct l2cap_chan *chan, int state)
1022{
1023 struct sock *sk = chan->data;
938 1024
939 sk->sk_state = state; 1025 sk->sk_state = state;
940} 1026}
@@ -955,12 +1041,34 @@ static struct sk_buff *l2cap_sock_alloc_skb_cb(struct l2cap_chan *chan,
955 return skb; 1041 return skb;
956} 1042}
957 1043
1044static void l2cap_sock_ready_cb(struct l2cap_chan *chan)
1045{
1046 struct sock *sk = chan->data;
1047 struct sock *parent;
1048
1049 lock_sock(sk);
1050
1051 parent = bt_sk(sk)->parent;
1052
1053 BT_DBG("sk %p, parent %p", sk, parent);
1054
1055 sk->sk_state = BT_CONNECTED;
1056 sk->sk_state_change(sk);
1057
1058 if (parent)
1059 parent->sk_data_ready(parent, 0);
1060
1061 release_sock(sk);
1062}
1063
958static struct l2cap_ops l2cap_chan_ops = { 1064static struct l2cap_ops l2cap_chan_ops = {
959 .name = "L2CAP Socket Interface", 1065 .name = "L2CAP Socket Interface",
960 .new_connection = l2cap_sock_new_connection_cb, 1066 .new_connection = l2cap_sock_new_connection_cb,
961 .recv = l2cap_sock_recv_cb, 1067 .recv = l2cap_sock_recv_cb,
962 .close = l2cap_sock_close_cb, 1068 .close = l2cap_sock_close_cb,
1069 .teardown = l2cap_sock_teardown_cb,
963 .state_change = l2cap_sock_state_change_cb, 1070 .state_change = l2cap_sock_state_change_cb,
1071 .ready = l2cap_sock_ready_cb,
964 .alloc_skb = l2cap_sock_alloc_skb_cb, 1072 .alloc_skb = l2cap_sock_alloc_skb_cb,
965}; 1073};
966 1074
diff --git a/net/bluetooth/lib.c b/net/bluetooth/lib.c
index 506628876f3..e1c97527e16 100644
--- a/net/bluetooth/lib.c
+++ b/net/bluetooth/lib.c
@@ -26,12 +26,7 @@
26 26
27#define pr_fmt(fmt) "Bluetooth: " fmt 27#define pr_fmt(fmt) "Bluetooth: " fmt
28 28
29#include <linux/module.h> 29#include <linux/export.h>
30
31#include <linux/kernel.h>
32#include <linux/stddef.h>
33#include <linux/string.h>
34#include <asm/errno.h>
35 30
36#include <net/bluetooth/bluetooth.h> 31#include <net/bluetooth/bluetooth.h>
37 32
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index 3e5e3362ea0..a6e0f3d8da6 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -24,8 +24,6 @@
24 24
25/* Bluetooth HCI Management interface */ 25/* Bluetooth HCI Management interface */
26 26
27#include <linux/kernel.h>
28#include <linux/uaccess.h>
29#include <linux/module.h> 27#include <linux/module.h>
30#include <asm/unaligned.h> 28#include <asm/unaligned.h>
31 29
@@ -714,7 +712,8 @@ static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
714} 712}
715 713
716static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev, 714static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
717 void (*cb)(struct pending_cmd *cmd, void *data), 715 void (*cb)(struct pending_cmd *cmd,
716 void *data),
718 void *data) 717 void *data)
719{ 718{
720 struct list_head *p, *n; 719 struct list_head *p, *n;
@@ -871,7 +870,7 @@ static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
871 } 870 }
872 871
873 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) || 872 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
874 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) { 873 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
875 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE, 874 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
876 MGMT_STATUS_BUSY); 875 MGMT_STATUS_BUSY);
877 goto failed; 876 goto failed;
@@ -978,7 +977,7 @@ static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
978 } 977 }
979 978
980 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) || 979 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
981 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) { 980 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
982 err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE, 981 err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
983 MGMT_STATUS_BUSY); 982 MGMT_STATUS_BUSY);
984 goto failed; 983 goto failed;
@@ -1001,7 +1000,7 @@ static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1001 scan = 0; 1000 scan = 0;
1002 1001
1003 if (test_bit(HCI_ISCAN, &hdev->flags) && 1002 if (test_bit(HCI_ISCAN, &hdev->flags) &&
1004 hdev->discov_timeout > 0) 1003 hdev->discov_timeout > 0)
1005 cancel_delayed_work(&hdev->discov_off); 1004 cancel_delayed_work(&hdev->discov_off);
1006 } 1005 }
1007 1006
@@ -1056,7 +1055,7 @@ static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1056 bool changed = false; 1055 bool changed = false;
1057 1056
1058 if (!!cp->val != test_bit(HCI_LINK_SECURITY, 1057 if (!!cp->val != test_bit(HCI_LINK_SECURITY,
1059 &hdev->dev_flags)) { 1058 &hdev->dev_flags)) {
1060 change_bit(HCI_LINK_SECURITY, &hdev->dev_flags); 1059 change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
1061 changed = true; 1060 changed = true;
1062 } 1061 }
@@ -1317,7 +1316,7 @@ static bool enable_service_cache(struct hci_dev *hdev)
1317} 1316}
1318 1317
1319static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data, 1318static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
1320 u16 len) 1319 u16 len)
1321{ 1320{
1322 struct mgmt_cp_remove_uuid *cp = data; 1321 struct mgmt_cp_remove_uuid *cp = data;
1323 struct pending_cmd *cmd; 1322 struct pending_cmd *cmd;
@@ -1442,7 +1441,7 @@ unlock:
1442} 1441}
1443 1442
1444static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data, 1443static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
1445 u16 len) 1444 u16 len)
1446{ 1445{
1447 struct mgmt_cp_load_link_keys *cp = data; 1446 struct mgmt_cp_load_link_keys *cp = data;
1448 u16 key_count, expected_len; 1447 u16 key_count, expected_len;
@@ -1454,13 +1453,13 @@ static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
1454 sizeof(struct mgmt_link_key_info); 1453 sizeof(struct mgmt_link_key_info);
1455 if (expected_len != len) { 1454 if (expected_len != len) {
1456 BT_ERR("load_link_keys: expected %u bytes, got %u bytes", 1455 BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
1457 len, expected_len); 1456 len, expected_len);
1458 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 1457 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
1459 MGMT_STATUS_INVALID_PARAMS); 1458 MGMT_STATUS_INVALID_PARAMS);
1460 } 1459 }
1461 1460
1462 BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys, 1461 BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
1463 key_count); 1462 key_count);
1464 1463
1465 hci_dev_lock(hdev); 1464 hci_dev_lock(hdev);
1466 1465
@@ -1535,10 +1534,10 @@ static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
1535 if (cp->disconnect) { 1534 if (cp->disconnect) {
1536 if (cp->addr.type == BDADDR_BREDR) 1535 if (cp->addr.type == BDADDR_BREDR)
1537 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, 1536 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
1538 &cp->addr.bdaddr); 1537 &cp->addr.bdaddr);
1539 else 1538 else
1540 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, 1539 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
1541 &cp->addr.bdaddr); 1540 &cp->addr.bdaddr);
1542 } else { 1541 } else {
1543 conn = NULL; 1542 conn = NULL;
1544 } 1543 }
@@ -1594,7 +1593,8 @@ static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
1594 } 1593 }
1595 1594
1596 if (cp->addr.type == BDADDR_BREDR) 1595 if (cp->addr.type == BDADDR_BREDR)
1597 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr); 1596 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
1597 &cp->addr.bdaddr);
1598 else 1598 else
1599 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr); 1599 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
1600 1600
@@ -1813,7 +1813,7 @@ static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
1813 hdev->io_capability = cp->io_capability; 1813 hdev->io_capability = cp->io_capability;
1814 1814
1815 BT_DBG("%s IO capability set to 0x%02x", hdev->name, 1815 BT_DBG("%s IO capability set to 0x%02x", hdev->name,
1816 hdev->io_capability); 1816 hdev->io_capability);
1817 1817
1818 hci_dev_unlock(hdev); 1818 hci_dev_unlock(hdev);
1819 1819
@@ -1821,7 +1821,7 @@ static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
1821 0); 1821 0);
1822} 1822}
1823 1823
1824static inline struct pending_cmd *find_pairing(struct hci_conn *conn) 1824static struct pending_cmd *find_pairing(struct hci_conn *conn)
1825{ 1825{
1826 struct hci_dev *hdev = conn->hdev; 1826 struct hci_dev *hdev = conn->hdev;
1827 struct pending_cmd *cmd; 1827 struct pending_cmd *cmd;
@@ -1927,8 +1927,15 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
1927 rp.addr.type = cp->addr.type; 1927 rp.addr.type = cp->addr.type;
1928 1928
1929 if (IS_ERR(conn)) { 1929 if (IS_ERR(conn)) {
1930 int status;
1931
1932 if (PTR_ERR(conn) == -EBUSY)
1933 status = MGMT_STATUS_BUSY;
1934 else
1935 status = MGMT_STATUS_CONNECT_FAILED;
1936
1930 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE, 1937 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
1931 MGMT_STATUS_CONNECT_FAILED, &rp, 1938 status, &rp,
1932 sizeof(rp)); 1939 sizeof(rp));
1933 goto unlock; 1940 goto unlock;
1934 } 1941 }
@@ -1959,7 +1966,7 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
1959 cmd->user_data = conn; 1966 cmd->user_data = conn;
1960 1967
1961 if (conn->state == BT_CONNECTED && 1968 if (conn->state == BT_CONNECTED &&
1962 hci_conn_security(conn, sec_level, auth_type)) 1969 hci_conn_security(conn, sec_level, auth_type))
1963 pairing_complete(cmd, 0); 1970 pairing_complete(cmd, 0);
1964 1971
1965 err = 0; 1972 err = 0;
@@ -2256,7 +2263,7 @@ unlock:
2256} 2263}
2257 2264
2258static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev, 2265static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
2259 void *data, u16 len) 2266 void *data, u16 len)
2260{ 2267{
2261 struct mgmt_cp_remove_remote_oob_data *cp = data; 2268 struct mgmt_cp_remove_remote_oob_data *cp = data;
2262 u8 status; 2269 u8 status;
@@ -2425,7 +2432,7 @@ static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
2425 2432
2426 case DISCOVERY_RESOLVING: 2433 case DISCOVERY_RESOLVING:
2427 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, 2434 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
2428 NAME_PENDING); 2435 NAME_PENDING);
2429 if (!e) { 2436 if (!e) {
2430 mgmt_pending_remove(cmd); 2437 mgmt_pending_remove(cmd);
2431 err = cmd_complete(sk, hdev->id, 2438 err = cmd_complete(sk, hdev->id,
@@ -2647,7 +2654,7 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
2647 sizeof(struct mgmt_ltk_info); 2654 sizeof(struct mgmt_ltk_info);
2648 if (expected_len != len) { 2655 if (expected_len != len) {
2649 BT_ERR("load_keys: expected %u bytes, got %u bytes", 2656 BT_ERR("load_keys: expected %u bytes, got %u bytes",
2650 len, expected_len); 2657 len, expected_len);
2651 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 2658 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
2652 EINVAL); 2659 EINVAL);
2653 } 2660 }
@@ -2772,7 +2779,7 @@ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
2772 } 2779 }
2773 2780
2774 if (opcode >= ARRAY_SIZE(mgmt_handlers) || 2781 if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
2775 mgmt_handlers[opcode].func == NULL) { 2782 mgmt_handlers[opcode].func == NULL) {
2776 BT_DBG("Unknown op %u", opcode); 2783 BT_DBG("Unknown op %u", opcode);
2777 err = cmd_status(sk, index, opcode, 2784 err = cmd_status(sk, index, opcode,
2778 MGMT_STATUS_UNKNOWN_COMMAND); 2785 MGMT_STATUS_UNKNOWN_COMMAND);
@@ -2780,7 +2787,7 @@ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
2780 } 2787 }
2781 2788
2782 if ((hdev && opcode < MGMT_OP_READ_INFO) || 2789 if ((hdev && opcode < MGMT_OP_READ_INFO) ||
2783 (!hdev && opcode >= MGMT_OP_READ_INFO)) { 2790 (!hdev && opcode >= MGMT_OP_READ_INFO)) {
2784 err = cmd_status(sk, index, opcode, 2791 err = cmd_status(sk, index, opcode,
2785 MGMT_STATUS_INVALID_INDEX); 2792 MGMT_STATUS_INVALID_INDEX);
2786 goto done; 2793 goto done;
@@ -2789,7 +2796,7 @@ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
2789 handler = &mgmt_handlers[opcode]; 2796 handler = &mgmt_handlers[opcode];
2790 2797
2791 if ((handler->var_len && len < handler->data_len) || 2798 if ((handler->var_len && len < handler->data_len) ||
2792 (!handler->var_len && len != handler->data_len)) { 2799 (!handler->var_len && len != handler->data_len)) {
2793 err = cmd_status(sk, index, opcode, 2800 err = cmd_status(sk, index, opcode,
2794 MGMT_STATUS_INVALID_PARAMS); 2801 MGMT_STATUS_INVALID_PARAMS);
2795 goto done; 2802 goto done;
@@ -2973,7 +2980,7 @@ int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
2973 bacpy(&ev.key.addr.bdaddr, &key->bdaddr); 2980 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
2974 ev.key.addr.type = BDADDR_BREDR; 2981 ev.key.addr.type = BDADDR_BREDR;
2975 ev.key.type = key->type; 2982 ev.key.type = key->type;
2976 memcpy(ev.key.val, key->val, 16); 2983 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
2977 ev.key.pin_len = key->pin_len; 2984 ev.key.pin_len = key->pin_len;
2978 2985
2979 return mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL); 2986 return mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
@@ -3108,7 +3115,7 @@ int mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
3108 mgmt_pending_remove(cmd); 3115 mgmt_pending_remove(cmd);
3109 3116
3110 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp, 3117 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
3111 hdev); 3118 hdev);
3112 return err; 3119 return err;
3113} 3120}
3114 3121
@@ -3198,7 +3205,7 @@ int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
3198} 3205}
3199 3206
3200int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr, 3207int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
3201 u8 link_type, u8 addr_type) 3208 u8 link_type, u8 addr_type)
3202{ 3209{
3203 struct mgmt_ev_user_passkey_request ev; 3210 struct mgmt_ev_user_passkey_request ev;
3204 3211
@@ -3212,8 +3219,8 @@ int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
3212} 3219}
3213 3220
3214static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, 3221static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3215 u8 link_type, u8 addr_type, u8 status, 3222 u8 link_type, u8 addr_type, u8 status,
3216 u8 opcode) 3223 u8 opcode)
3217{ 3224{
3218 struct pending_cmd *cmd; 3225 struct pending_cmd *cmd;
3219 struct mgmt_rp_user_confirm_reply rp; 3226 struct mgmt_rp_user_confirm_reply rp;
@@ -3244,7 +3251,8 @@ int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3244 u8 link_type, u8 addr_type, u8 status) 3251 u8 link_type, u8 addr_type, u8 status)
3245{ 3252{
3246 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type, 3253 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
3247 status, MGMT_OP_USER_CONFIRM_NEG_REPLY); 3254 status,
3255 MGMT_OP_USER_CONFIRM_NEG_REPLY);
3248} 3256}
3249 3257
3250int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, 3258int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
@@ -3258,7 +3266,8 @@ int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3258 u8 link_type, u8 addr_type, u8 status) 3266 u8 link_type, u8 addr_type, u8 status)
3259{ 3267{
3260 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type, 3268 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
3261 status, MGMT_OP_USER_PASSKEY_NEG_REPLY); 3269 status,
3270 MGMT_OP_USER_PASSKEY_NEG_REPLY);
3262} 3271}
3263 3272
3264int mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, 3273int mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index 8a602388f1e..c75107ef892 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -26,22 +26,8 @@
26 */ 26 */
27 27
28#include <linux/module.h> 28#include <linux/module.h>
29#include <linux/errno.h>
30#include <linux/kernel.h>
31#include <linux/sched.h>
32#include <linux/signal.h>
33#include <linux/init.h>
34#include <linux/wait.h>
35#include <linux/device.h>
36#include <linux/debugfs.h> 29#include <linux/debugfs.h>
37#include <linux/seq_file.h>
38#include <linux/net.h>
39#include <linux/mutex.h>
40#include <linux/kthread.h> 30#include <linux/kthread.h>
41#include <linux/slab.h>
42
43#include <net/sock.h>
44#include <linux/uaccess.h>
45#include <asm/unaligned.h> 31#include <asm/unaligned.h>
46 32
47#include <net/bluetooth/bluetooth.h> 33#include <net/bluetooth/bluetooth.h>
@@ -115,14 +101,14 @@ static void rfcomm_session_del(struct rfcomm_session *s);
115#define __get_rpn_stop_bits(line) (((line) >> 2) & 0x1) 101#define __get_rpn_stop_bits(line) (((line) >> 2) & 0x1)
116#define __get_rpn_parity(line) (((line) >> 3) & 0x7) 102#define __get_rpn_parity(line) (((line) >> 3) & 0x7)
117 103
118static inline void rfcomm_schedule(void) 104static void rfcomm_schedule(void)
119{ 105{
120 if (!rfcomm_thread) 106 if (!rfcomm_thread)
121 return; 107 return;
122 wake_up_process(rfcomm_thread); 108 wake_up_process(rfcomm_thread);
123} 109}
124 110
125static inline void rfcomm_session_put(struct rfcomm_session *s) 111static void rfcomm_session_put(struct rfcomm_session *s)
126{ 112{
127 if (atomic_dec_and_test(&s->refcnt)) 113 if (atomic_dec_and_test(&s->refcnt))
128 rfcomm_session_del(s); 114 rfcomm_session_del(s);
@@ -227,7 +213,7 @@ static int rfcomm_l2sock_create(struct socket **sock)
227 return err; 213 return err;
228} 214}
229 215
230static inline int rfcomm_check_security(struct rfcomm_dlc *d) 216static int rfcomm_check_security(struct rfcomm_dlc *d)
231{ 217{
232 struct sock *sk = d->session->sock->sk; 218 struct sock *sk = d->session->sock->sk;
233 struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn; 219 struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn;
@@ -1750,7 +1736,7 @@ static void rfcomm_process_connect(struct rfcomm_session *s)
1750/* Send data queued for the DLC. 1736/* Send data queued for the DLC.
1751 * Return number of frames left in the queue. 1737 * Return number of frames left in the queue.
1752 */ 1738 */
1753static inline int rfcomm_process_tx(struct rfcomm_dlc *d) 1739static int rfcomm_process_tx(struct rfcomm_dlc *d)
1754{ 1740{
1755 struct sk_buff *skb; 1741 struct sk_buff *skb;
1756 int err; 1742 int err;
@@ -1798,7 +1784,7 @@ static inline int rfcomm_process_tx(struct rfcomm_dlc *d)
1798 return skb_queue_len(&d->tx_queue); 1784 return skb_queue_len(&d->tx_queue);
1799} 1785}
1800 1786
1801static inline void rfcomm_process_dlcs(struct rfcomm_session *s) 1787static void rfcomm_process_dlcs(struct rfcomm_session *s)
1802{ 1788{
1803 struct rfcomm_dlc *d; 1789 struct rfcomm_dlc *d;
1804 struct list_head *p, *n; 1790 struct list_head *p, *n;
@@ -1858,7 +1844,7 @@ static inline void rfcomm_process_dlcs(struct rfcomm_session *s)
1858 } 1844 }
1859} 1845}
1860 1846
1861static inline void rfcomm_process_rx(struct rfcomm_session *s) 1847static void rfcomm_process_rx(struct rfcomm_session *s)
1862{ 1848{
1863 struct socket *sock = s->sock; 1849 struct socket *sock = s->sock;
1864 struct sock *sk = sock->sk; 1850 struct sock *sk = sock->sk;
@@ -1883,7 +1869,7 @@ static inline void rfcomm_process_rx(struct rfcomm_session *s)
1883 } 1869 }
1884} 1870}
1885 1871
1886static inline void rfcomm_accept_connection(struct rfcomm_session *s) 1872static void rfcomm_accept_connection(struct rfcomm_session *s)
1887{ 1873{
1888 struct socket *sock = s->sock, *nsock; 1874 struct socket *sock = s->sock, *nsock;
1889 int err; 1875 int err;
@@ -1917,7 +1903,7 @@ static inline void rfcomm_accept_connection(struct rfcomm_session *s)
1917 sock_release(nsock); 1903 sock_release(nsock);
1918} 1904}
1919 1905
1920static inline void rfcomm_check_connection(struct rfcomm_session *s) 1906static void rfcomm_check_connection(struct rfcomm_session *s)
1921{ 1907{
1922 struct sock *sk = s->sock->sk; 1908 struct sock *sk = s->sock->sk;
1923 1909
@@ -1941,7 +1927,7 @@ static inline void rfcomm_check_connection(struct rfcomm_session *s)
1941 } 1927 }
1942} 1928}
1943 1929
1944static inline void rfcomm_process_sessions(void) 1930static void rfcomm_process_sessions(void)
1945{ 1931{
1946 struct list_head *p, *n; 1932 struct list_head *p, *n;
1947 1933
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index e8707debb86..7e1e59645c0 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -25,27 +25,8 @@
25 * RFCOMM sockets. 25 * RFCOMM sockets.
26 */ 26 */
27 27
28#include <linux/module.h> 28#include <linux/export.h>
29
30#include <linux/types.h>
31#include <linux/errno.h>
32#include <linux/kernel.h>
33#include <linux/sched.h>
34#include <linux/slab.h>
35#include <linux/poll.h>
36#include <linux/fcntl.h>
37#include <linux/init.h>
38#include <linux/interrupt.h>
39#include <linux/socket.h>
40#include <linux/skbuff.h>
41#include <linux/list.h>
42#include <linux/device.h>
43#include <linux/debugfs.h> 29#include <linux/debugfs.h>
44#include <linux/seq_file.h>
45#include <linux/security.h>
46#include <net/sock.h>
47
48#include <linux/uaccess.h>
49 30
50#include <net/bluetooth/bluetooth.h> 31#include <net/bluetooth/bluetooth.h>
51#include <net/bluetooth/hci_core.h> 32#include <net/bluetooth/hci_core.h>
diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
index d1820ff14ae..cb960773c00 100644
--- a/net/bluetooth/rfcomm/tty.c
+++ b/net/bluetooth/rfcomm/tty.c
@@ -31,11 +31,6 @@
31#include <linux/tty_driver.h> 31#include <linux/tty_driver.h>
32#include <linux/tty_flip.h> 32#include <linux/tty_flip.h>
33 33
34#include <linux/capability.h>
35#include <linux/slab.h>
36#include <linux/skbuff.h>
37#include <linux/workqueue.h>
38
39#include <net/bluetooth/bluetooth.h> 34#include <net/bluetooth/bluetooth.h>
40#include <net/bluetooth/hci_core.h> 35#include <net/bluetooth/hci_core.h>
41#include <net/bluetooth/rfcomm.h> 36#include <net/bluetooth/rfcomm.h>
@@ -132,7 +127,7 @@ static struct rfcomm_dev *__rfcomm_dev_get(int id)
132 return NULL; 127 return NULL;
133} 128}
134 129
135static inline struct rfcomm_dev *rfcomm_dev_get(int id) 130static struct rfcomm_dev *rfcomm_dev_get(int id)
136{ 131{
137 struct rfcomm_dev *dev; 132 struct rfcomm_dev *dev;
138 133
@@ -345,7 +340,7 @@ static void rfcomm_wfree(struct sk_buff *skb)
345 tty_port_put(&dev->port); 340 tty_port_put(&dev->port);
346} 341}
347 342
348static inline void rfcomm_set_owner_w(struct sk_buff *skb, struct rfcomm_dev *dev) 343static void rfcomm_set_owner_w(struct sk_buff *skb, struct rfcomm_dev *dev)
349{ 344{
350 tty_port_get(&dev->port); 345 tty_port_get(&dev->port);
351 atomic_add(skb->truesize, &dev->wmem_alloc); 346 atomic_add(skb->truesize, &dev->wmem_alloc);
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index cbdd313659a..40bbe25dcff 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -25,26 +25,8 @@
25/* Bluetooth SCO sockets. */ 25/* Bluetooth SCO sockets. */
26 26
27#include <linux/module.h> 27#include <linux/module.h>
28
29#include <linux/types.h>
30#include <linux/errno.h>
31#include <linux/kernel.h>
32#include <linux/sched.h>
33#include <linux/slab.h>
34#include <linux/poll.h>
35#include <linux/fcntl.h>
36#include <linux/init.h>
37#include <linux/interrupt.h>
38#include <linux/socket.h>
39#include <linux/skbuff.h>
40#include <linux/device.h>
41#include <linux/debugfs.h> 28#include <linux/debugfs.h>
42#include <linux/seq_file.h> 29#include <linux/seq_file.h>
43#include <linux/list.h>
44#include <linux/security.h>
45#include <net/sock.h>
46
47#include <linux/uaccess.h>
48 30
49#include <net/bluetooth/bluetooth.h> 31#include <net/bluetooth/bluetooth.h>
50#include <net/bluetooth/hci_core.h> 32#include <net/bluetooth/hci_core.h>
@@ -123,7 +105,7 @@ static struct sco_conn *sco_conn_add(struct hci_conn *hcon)
123 return conn; 105 return conn;
124} 106}
125 107
126static inline struct sock *sco_chan_get(struct sco_conn *conn) 108static struct sock *sco_chan_get(struct sco_conn *conn)
127{ 109{
128 struct sock *sk = NULL; 110 struct sock *sk = NULL;
129 sco_conn_lock(conn); 111 sco_conn_lock(conn);
@@ -157,7 +139,8 @@ static int sco_conn_del(struct hci_conn *hcon, int err)
157 return 0; 139 return 0;
158} 140}
159 141
160static inline int sco_chan_add(struct sco_conn *conn, struct sock *sk, struct sock *parent) 142static int sco_chan_add(struct sco_conn *conn, struct sock *sk,
143 struct sock *parent)
161{ 144{
162 int err = 0; 145 int err = 0;
163 146
@@ -228,7 +211,7 @@ done:
228 return err; 211 return err;
229} 212}
230 213
231static inline int sco_send_frame(struct sock *sk, struct msghdr *msg, int len) 214static int sco_send_frame(struct sock *sk, struct msghdr *msg, int len)
232{ 215{
233 struct sco_conn *conn = sco_pi(sk)->conn; 216 struct sco_conn *conn = sco_pi(sk)->conn;
234 struct sk_buff *skb; 217 struct sk_buff *skb;
@@ -254,7 +237,7 @@ static inline int sco_send_frame(struct sock *sk, struct msghdr *msg, int len)
254 return len; 237 return len;
255} 238}
256 239
257static inline void sco_recv_frame(struct sco_conn *conn, struct sk_buff *skb) 240static void sco_recv_frame(struct sco_conn *conn, struct sk_buff *skb)
258{ 241{
259 struct sock *sk = sco_chan_get(conn); 242 struct sock *sk = sco_chan_get(conn);
260 243
@@ -523,7 +506,7 @@ static int sco_sock_connect(struct socket *sock, struct sockaddr *addr, int alen
523 goto done; 506 goto done;
524 507
525 err = bt_sock_wait_state(sk, BT_CONNECTED, 508 err = bt_sock_wait_state(sk, BT_CONNECTED,
526 sock_sndtimeo(sk, flags & O_NONBLOCK)); 509 sock_sndtimeo(sk, flags & O_NONBLOCK));
527 510
528done: 511done:
529 release_sock(sk); 512 release_sock(sk);
@@ -788,7 +771,7 @@ static int sco_sock_shutdown(struct socket *sock, int how)
788 771
789 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime) 772 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
790 err = bt_sock_wait_state(sk, BT_CLOSED, 773 err = bt_sock_wait_state(sk, BT_CLOSED,
791 sk->sk_lingertime); 774 sk->sk_lingertime);
792 } 775 }
793 release_sock(sk); 776 release_sock(sk);
794 return err; 777 return err;
@@ -878,7 +861,7 @@ static void sco_conn_ready(struct sco_conn *conn)
878 bh_lock_sock(parent); 861 bh_lock_sock(parent);
879 862
880 sk = sco_sock_alloc(sock_net(parent), NULL, 863 sk = sco_sock_alloc(sock_net(parent), NULL,
881 BTPROTO_SCO, GFP_ATOMIC); 864 BTPROTO_SCO, GFP_ATOMIC);
882 if (!sk) { 865 if (!sk) {
883 bh_unlock_sock(parent); 866 bh_unlock_sock(parent);
884 goto done; 867 goto done;
@@ -907,7 +890,7 @@ done:
907/* ----- SCO interface with lower layer (HCI) ----- */ 890/* ----- SCO interface with lower layer (HCI) ----- */
908int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr) 891int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
909{ 892{
910 register struct sock *sk; 893 struct sock *sk;
911 struct hlist_node *node; 894 struct hlist_node *node;
912 int lm = 0; 895 int lm = 0;
913 896
@@ -920,7 +903,7 @@ int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
920 continue; 903 continue;
921 904
922 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr) || 905 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr) ||
923 !bacmp(&bt_sk(sk)->src, BDADDR_ANY)) { 906 !bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
924 lm |= HCI_LM_ACCEPT; 907 lm |= HCI_LM_ACCEPT;
925 break; 908 break;
926 } 909 }
@@ -981,7 +964,7 @@ static int sco_debugfs_show(struct seq_file *f, void *p)
981 964
982 sk_for_each(sk, node, &sco_sk_list.head) { 965 sk_for_each(sk, node, &sco_sk_list.head) {
983 seq_printf(f, "%s %s %d\n", batostr(&bt_sk(sk)->src), 966 seq_printf(f, "%s %s %d\n", batostr(&bt_sk(sk)->src),
984 batostr(&bt_sk(sk)->dst), sk->sk_state); 967 batostr(&bt_sk(sk)->dst), sk->sk_state);
985 } 968 }
986 969
987 read_unlock(&sco_sk_list.lock); 970 read_unlock(&sco_sk_list.lock);
@@ -1044,8 +1027,8 @@ int __init sco_init(void)
1044 } 1027 }
1045 1028
1046 if (bt_debugfs) { 1029 if (bt_debugfs) {
1047 sco_debugfs = debugfs_create_file("sco", 0444, 1030 sco_debugfs = debugfs_create_file("sco", 0444, bt_debugfs,
1048 bt_debugfs, NULL, &sco_debugfs_fops); 1031 NULL, &sco_debugfs_fops);
1049 if (!sco_debugfs) 1032 if (!sco_debugfs)
1050 BT_ERR("Failed to create SCO debug file"); 1033 BT_ERR("Failed to create SCO debug file");
1051 } 1034 }
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index 37df4e9b389..16ef0dc85a0 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -20,14 +20,15 @@
20 SOFTWARE IS DISCLAIMED. 20 SOFTWARE IS DISCLAIMED.
21*/ 21*/
22 22
23#include <linux/crypto.h>
24#include <linux/scatterlist.h>
25#include <crypto/b128ops.h>
26
23#include <net/bluetooth/bluetooth.h> 27#include <net/bluetooth/bluetooth.h>
24#include <net/bluetooth/hci_core.h> 28#include <net/bluetooth/hci_core.h>
25#include <net/bluetooth/l2cap.h> 29#include <net/bluetooth/l2cap.h>
26#include <net/bluetooth/mgmt.h> 30#include <net/bluetooth/mgmt.h>
27#include <net/bluetooth/smp.h> 31#include <net/bluetooth/smp.h>
28#include <linux/crypto.h>
29#include <linux/scatterlist.h>
30#include <crypto/b128ops.h>
31 32
32#define SMP_TIMEOUT msecs_to_jiffies(30000) 33#define SMP_TIMEOUT msecs_to_jiffies(30000)
33 34
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index e41456bd3cc..b98d3d78ca7 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -120,7 +120,9 @@ static u32 *fake_cow_metrics(struct dst_entry *dst, unsigned long old)
120 return NULL; 120 return NULL;
121} 121}
122 122
123static struct neighbour *fake_neigh_lookup(const struct dst_entry *dst, const void *daddr) 123static struct neighbour *fake_neigh_lookup(const struct dst_entry *dst,
124 struct sk_buff *skb,
125 const void *daddr)
124{ 126{
125 return NULL; 127 return NULL;
126} 128}
@@ -373,19 +375,29 @@ static int br_nf_pre_routing_finish_bridge(struct sk_buff *skb)
373 if (!skb->dev) 375 if (!skb->dev)
374 goto free_skb; 376 goto free_skb;
375 dst = skb_dst(skb); 377 dst = skb_dst(skb);
376 neigh = dst_get_neighbour_noref(dst); 378 neigh = dst_neigh_lookup_skb(dst, skb);
377 if (neigh->hh.hh_len) { 379 if (neigh) {
378 neigh_hh_bridge(&neigh->hh, skb); 380 int ret;
379 skb->dev = nf_bridge->physindev; 381
380 return br_handle_frame_finish(skb); 382 if (neigh->hh.hh_len) {
381 } else { 383 neigh_hh_bridge(&neigh->hh, skb);
382 /* the neighbour function below overwrites the complete 384 skb->dev = nf_bridge->physindev;
383 * MAC header, so we save the Ethernet source address and 385 ret = br_handle_frame_finish(skb);
384 * protocol number. */ 386 } else {
385 skb_copy_from_linear_data_offset(skb, -(ETH_HLEN-ETH_ALEN), skb->nf_bridge->data, ETH_HLEN-ETH_ALEN); 387 /* the neighbour function below overwrites the complete
386 /* tell br_dev_xmit to continue with forwarding */ 388 * MAC header, so we save the Ethernet source address and
387 nf_bridge->mask |= BRNF_BRIDGED_DNAT; 389 * protocol number.
388 return neigh->output(neigh, skb); 390 */
391 skb_copy_from_linear_data_offset(skb,
392 -(ETH_HLEN-ETH_ALEN),
393 skb->nf_bridge->data,
394 ETH_HLEN-ETH_ALEN);
395 /* tell br_dev_xmit to continue with forwarding */
396 nf_bridge->mask |= BRNF_BRIDGED_DNAT;
397 ret = neigh->output(neigh, skb);
398 }
399 neigh_release(neigh);
400 return ret;
389 } 401 }
390free_skb: 402free_skb:
391 kfree_skb(skb); 403 kfree_skb(skb);
@@ -764,9 +776,9 @@ static unsigned int br_nf_forward_ip(unsigned int hook, struct sk_buff *skb,
764 return NF_DROP; 776 return NF_DROP;
765 777
766 if (IS_IP(skb) || IS_VLAN_IP(skb) || IS_PPPOE_IP(skb)) 778 if (IS_IP(skb) || IS_VLAN_IP(skb) || IS_PPPOE_IP(skb))
767 pf = PF_INET; 779 pf = NFPROTO_IPV4;
768 else if (IS_IPV6(skb) || IS_VLAN_IPV6(skb) || IS_PPPOE_IPV6(skb)) 780 else if (IS_IPV6(skb) || IS_VLAN_IPV6(skb) || IS_PPPOE_IPV6(skb))
769 pf = PF_INET6; 781 pf = NFPROTO_IPV6;
770 else 782 else
771 return NF_ACCEPT; 783 return NF_ACCEPT;
772 784
@@ -778,13 +790,13 @@ static unsigned int br_nf_forward_ip(unsigned int hook, struct sk_buff *skb,
778 nf_bridge->mask |= BRNF_PKT_TYPE; 790 nf_bridge->mask |= BRNF_PKT_TYPE;
779 } 791 }
780 792
781 if (pf == PF_INET && br_parse_ip_options(skb)) 793 if (pf == NFPROTO_IPV4 && br_parse_ip_options(skb))
782 return NF_DROP; 794 return NF_DROP;
783 795
784 /* The physdev module checks on this */ 796 /* The physdev module checks on this */
785 nf_bridge->mask |= BRNF_BRIDGED; 797 nf_bridge->mask |= BRNF_BRIDGED;
786 nf_bridge->physoutdev = skb->dev; 798 nf_bridge->physoutdev = skb->dev;
787 if (pf == PF_INET) 799 if (pf == NFPROTO_IPV4)
788 skb->protocol = htons(ETH_P_IP); 800 skb->protocol = htons(ETH_P_IP);
789 else 801 else
790 skb->protocol = htons(ETH_P_IPV6); 802 skb->protocol = htons(ETH_P_IPV6);
@@ -871,9 +883,9 @@ static unsigned int br_nf_post_routing(unsigned int hook, struct sk_buff *skb,
871 return NF_DROP; 883 return NF_DROP;
872 884
873 if (IS_IP(skb) || IS_VLAN_IP(skb) || IS_PPPOE_IP(skb)) 885 if (IS_IP(skb) || IS_VLAN_IP(skb) || IS_PPPOE_IP(skb))
874 pf = PF_INET; 886 pf = NFPROTO_IPV4;
875 else if (IS_IPV6(skb) || IS_VLAN_IPV6(skb) || IS_PPPOE_IPV6(skb)) 887 else if (IS_IPV6(skb) || IS_VLAN_IPV6(skb) || IS_PPPOE_IPV6(skb))
876 pf = PF_INET6; 888 pf = NFPROTO_IPV6;
877 else 889 else
878 return NF_ACCEPT; 890 return NF_ACCEPT;
879 891
@@ -886,7 +898,7 @@ static unsigned int br_nf_post_routing(unsigned int hook, struct sk_buff *skb,
886 898
887 nf_bridge_pull_encap_header(skb); 899 nf_bridge_pull_encap_header(skb);
888 nf_bridge_save_header(skb); 900 nf_bridge_save_header(skb);
889 if (pf == PF_INET) 901 if (pf == NFPROTO_IPV4)
890 skb->protocol = htons(ETH_P_IP); 902 skb->protocol = htons(ETH_P_IP);
891 else 903 else
892 skb->protocol = htons(ETH_P_IPV6); 904 skb->protocol = htons(ETH_P_IPV6);
@@ -919,49 +931,49 @@ static struct nf_hook_ops br_nf_ops[] __read_mostly = {
919 { 931 {
920 .hook = br_nf_pre_routing, 932 .hook = br_nf_pre_routing,
921 .owner = THIS_MODULE, 933 .owner = THIS_MODULE,
922 .pf = PF_BRIDGE, 934 .pf = NFPROTO_BRIDGE,
923 .hooknum = NF_BR_PRE_ROUTING, 935 .hooknum = NF_BR_PRE_ROUTING,
924 .priority = NF_BR_PRI_BRNF, 936 .priority = NF_BR_PRI_BRNF,
925 }, 937 },
926 { 938 {
927 .hook = br_nf_local_in, 939 .hook = br_nf_local_in,
928 .owner = THIS_MODULE, 940 .owner = THIS_MODULE,
929 .pf = PF_BRIDGE, 941 .pf = NFPROTO_BRIDGE,
930 .hooknum = NF_BR_LOCAL_IN, 942 .hooknum = NF_BR_LOCAL_IN,
931 .priority = NF_BR_PRI_BRNF, 943 .priority = NF_BR_PRI_BRNF,
932 }, 944 },
933 { 945 {
934 .hook = br_nf_forward_ip, 946 .hook = br_nf_forward_ip,
935 .owner = THIS_MODULE, 947 .owner = THIS_MODULE,
936 .pf = PF_BRIDGE, 948 .pf = NFPROTO_BRIDGE,
937 .hooknum = NF_BR_FORWARD, 949 .hooknum = NF_BR_FORWARD,
938 .priority = NF_BR_PRI_BRNF - 1, 950 .priority = NF_BR_PRI_BRNF - 1,
939 }, 951 },
940 { 952 {
941 .hook = br_nf_forward_arp, 953 .hook = br_nf_forward_arp,
942 .owner = THIS_MODULE, 954 .owner = THIS_MODULE,
943 .pf = PF_BRIDGE, 955 .pf = NFPROTO_BRIDGE,
944 .hooknum = NF_BR_FORWARD, 956 .hooknum = NF_BR_FORWARD,
945 .priority = NF_BR_PRI_BRNF, 957 .priority = NF_BR_PRI_BRNF,
946 }, 958 },
947 { 959 {
948 .hook = br_nf_post_routing, 960 .hook = br_nf_post_routing,
949 .owner = THIS_MODULE, 961 .owner = THIS_MODULE,
950 .pf = PF_BRIDGE, 962 .pf = NFPROTO_BRIDGE,
951 .hooknum = NF_BR_POST_ROUTING, 963 .hooknum = NF_BR_POST_ROUTING,
952 .priority = NF_BR_PRI_LAST, 964 .priority = NF_BR_PRI_LAST,
953 }, 965 },
954 { 966 {
955 .hook = ip_sabotage_in, 967 .hook = ip_sabotage_in,
956 .owner = THIS_MODULE, 968 .owner = THIS_MODULE,
957 .pf = PF_INET, 969 .pf = NFPROTO_IPV4,
958 .hooknum = NF_INET_PRE_ROUTING, 970 .hooknum = NF_INET_PRE_ROUTING,
959 .priority = NF_IP_PRI_FIRST, 971 .priority = NF_IP_PRI_FIRST,
960 }, 972 },
961 { 973 {
962 .hook = ip_sabotage_in, 974 .hook = ip_sabotage_in,
963 .owner = THIS_MODULE, 975 .owner = THIS_MODULE,
964 .pf = PF_INET6, 976 .pf = NFPROTO_IPV6,
965 .hooknum = NF_INET_PRE_ROUTING, 977 .hooknum = NF_INET_PRE_ROUTING,
966 .priority = NF_IP6_PRI_FIRST, 978 .priority = NF_IP6_PRI_FIRST,
967 }, 979 },
diff --git a/net/bridge/netfilter/ebt_ulog.c b/net/bridge/netfilter/ebt_ulog.c
index 5449294bdd5..19063473c71 100644
--- a/net/bridge/netfilter/ebt_ulog.c
+++ b/net/bridge/netfilter/ebt_ulog.c
@@ -145,19 +145,24 @@ static void ebt_ulog_packet(unsigned int hooknr, const struct sk_buff *skb,
145 145
146 if (!ub->skb) { 146 if (!ub->skb) {
147 if (!(ub->skb = ulog_alloc_skb(size))) 147 if (!(ub->skb = ulog_alloc_skb(size)))
148 goto alloc_failure; 148 goto unlock;
149 } else if (size > skb_tailroom(ub->skb)) { 149 } else if (size > skb_tailroom(ub->skb)) {
150 ulog_send(group); 150 ulog_send(group);
151 151
152 if (!(ub->skb = ulog_alloc_skb(size))) 152 if (!(ub->skb = ulog_alloc_skb(size)))
153 goto alloc_failure; 153 goto unlock;
154 } 154 }
155 155
156 nlh = NLMSG_PUT(ub->skb, 0, ub->qlen, 0, 156 nlh = nlmsg_put(ub->skb, 0, ub->qlen, 0,
157 size - NLMSG_ALIGN(sizeof(*nlh))); 157 size - NLMSG_ALIGN(sizeof(*nlh)), 0);
158 if (!nlh) {
159 kfree_skb(ub->skb);
160 ub->skb = NULL;
161 goto unlock;
162 }
158 ub->qlen++; 163 ub->qlen++;
159 164
160 pm = NLMSG_DATA(nlh); 165 pm = nlmsg_data(nlh);
161 166
162 /* Fill in the ulog data */ 167 /* Fill in the ulog data */
163 pm->version = EBT_ULOG_VERSION; 168 pm->version = EBT_ULOG_VERSION;
@@ -209,14 +214,6 @@ static void ebt_ulog_packet(unsigned int hooknr, const struct sk_buff *skb,
209 214
210unlock: 215unlock:
211 spin_unlock_bh(lock); 216 spin_unlock_bh(lock);
212
213 return;
214
215nlmsg_failure:
216 pr_debug("error during NLMSG_PUT. This should "
217 "not happen, please report to author.\n");
218alloc_failure:
219 goto unlock;
220} 217}
221 218
222/* this function is registered with the netfilter core */ 219/* this function is registered with the netfilter core */
@@ -285,6 +282,9 @@ static int __init ebt_ulog_init(void)
285{ 282{
286 int ret; 283 int ret;
287 int i; 284 int i;
285 struct netlink_kernel_cfg cfg = {
286 .groups = EBT_ULOG_MAXNLGROUPS,
287 };
288 288
289 if (nlbufsiz >= 128*1024) { 289 if (nlbufsiz >= 128*1024) {
290 pr_warning("Netlink buffer has to be <= 128kB," 290 pr_warning("Netlink buffer has to be <= 128kB,"
@@ -299,8 +299,7 @@ static int __init ebt_ulog_init(void)
299 } 299 }
300 300
301 ebtulognl = netlink_kernel_create(&init_net, NETLINK_NFLOG, 301 ebtulognl = netlink_kernel_create(&init_net, NETLINK_NFLOG,
302 EBT_ULOG_MAXNLGROUPS, NULL, NULL, 302 THIS_MODULE, &cfg);
303 THIS_MODULE);
304 if (!ebtulognl) 303 if (!ebtulognl)
305 ret = -ENOMEM; 304 ret = -ENOMEM;
306 else if ((ret = xt_register_target(&ebt_ulog_tg_reg)) != 0) 305 else if ((ret = xt_register_target(&ebt_ulog_tg_reg)) != 0)
diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c
index 554b3128960..551d2fd6a80 100644
--- a/net/caif/caif_dev.c
+++ b/net/caif/caif_dev.c
@@ -90,11 +90,8 @@ static int caifd_refcnt_read(struct caif_device_entry *e)
90/* Allocate new CAIF device. */ 90/* Allocate new CAIF device. */
91static struct caif_device_entry *caif_device_alloc(struct net_device *dev) 91static struct caif_device_entry *caif_device_alloc(struct net_device *dev)
92{ 92{
93 struct caif_device_entry_list *caifdevs;
94 struct caif_device_entry *caifd; 93 struct caif_device_entry *caifd;
95 94
96 caifdevs = caif_device_list(dev_net(dev));
97
98 caifd = kzalloc(sizeof(*caifd), GFP_KERNEL); 95 caifd = kzalloc(sizeof(*caifd), GFP_KERNEL);
99 if (!caifd) 96 if (!caifd)
100 return NULL; 97 return NULL;
@@ -131,6 +128,11 @@ void caif_flow_cb(struct sk_buff *skb)
131 128
132 rcu_read_lock(); 129 rcu_read_lock();
133 caifd = caif_get(skb->dev); 130 caifd = caif_get(skb->dev);
131
132 WARN_ON(caifd == NULL);
133 if (caifd == NULL)
134 return;
135
134 caifd_hold(caifd); 136 caifd_hold(caifd);
135 rcu_read_unlock(); 137 rcu_read_unlock();
136 138
diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
index 047cd0eec02..44f270fc2d0 100644
--- a/net/caif/cfctrl.c
+++ b/net/caif/cfctrl.c
@@ -175,15 +175,17 @@ static void init_info(struct caif_payload_info *info, struct cfctrl *cfctrl)
175 175
176void cfctrl_enum_req(struct cflayer *layer, u8 physlinkid) 176void cfctrl_enum_req(struct cflayer *layer, u8 physlinkid)
177{ 177{
178 struct cfpkt *pkt;
178 struct cfctrl *cfctrl = container_obj(layer); 179 struct cfctrl *cfctrl = container_obj(layer);
179 struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);
180 struct cflayer *dn = cfctrl->serv.layer.dn; 180 struct cflayer *dn = cfctrl->serv.layer.dn;
181 if (!pkt) 181
182 return;
183 if (!dn) { 182 if (!dn) {
184 pr_debug("not able to send enum request\n"); 183 pr_debug("not able to send enum request\n");
185 return; 184 return;
186 } 185 }
186 pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);
187 if (!pkt)
188 return;
187 caif_assert(offsetof(struct cfctrl, serv.layer) == 0); 189 caif_assert(offsetof(struct cfctrl, serv.layer) == 0);
188 init_info(cfpkt_info(pkt), cfctrl); 190 init_info(cfpkt_info(pkt), cfctrl);
189 cfpkt_info(pkt)->dev_info->id = physlinkid; 191 cfpkt_info(pkt)->dev_info->id = physlinkid;
@@ -302,18 +304,17 @@ int cfctrl_linkdown_req(struct cflayer *layer, u8 channelid,
302 struct cflayer *client) 304 struct cflayer *client)
303{ 305{
304 int ret; 306 int ret;
307 struct cfpkt *pkt;
305 struct cfctrl *cfctrl = container_obj(layer); 308 struct cfctrl *cfctrl = container_obj(layer);
306 struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);
307 struct cflayer *dn = cfctrl->serv.layer.dn; 309 struct cflayer *dn = cfctrl->serv.layer.dn;
308 310
309 if (!pkt)
310 return -ENOMEM;
311
312 if (!dn) { 311 if (!dn) {
313 pr_debug("not able to send link-down request\n"); 312 pr_debug("not able to send link-down request\n");
314 return -ENODEV; 313 return -ENODEV;
315 } 314 }
316 315 pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);
316 if (!pkt)
317 return -ENOMEM;
317 cfpkt_addbdy(pkt, CFCTRL_CMD_LINK_DESTROY); 318 cfpkt_addbdy(pkt, CFCTRL_CMD_LINK_DESTROY);
318 cfpkt_addbdy(pkt, channelid); 319 cfpkt_addbdy(pkt, channelid);
319 init_info(cfpkt_info(pkt), cfctrl); 320 init_info(cfpkt_info(pkt), cfctrl);
diff --git a/net/can/af_can.c b/net/can/af_can.c
index 0ce2ad0696d..821022a7214 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -41,6 +41,7 @@
41 */ 41 */
42 42
43#include <linux/module.h> 43#include <linux/module.h>
44#include <linux/stddef.h>
44#include <linux/init.h> 45#include <linux/init.h>
45#include <linux/kmod.h> 46#include <linux/kmod.h>
46#include <linux/slab.h> 47#include <linux/slab.h>
@@ -220,30 +221,46 @@ static int can_create(struct net *net, struct socket *sock, int protocol,
220 * -ENOBUFS on full driver queue (see net_xmit_errno()) 221 * -ENOBUFS on full driver queue (see net_xmit_errno())
221 * -ENOMEM when local loopback failed at calling skb_clone() 222 * -ENOMEM when local loopback failed at calling skb_clone()
222 * -EPERM when trying to send on a non-CAN interface 223 * -EPERM when trying to send on a non-CAN interface
224 * -EMSGSIZE CAN frame size is bigger than CAN interface MTU
223 * -EINVAL when the skb->data does not contain a valid CAN frame 225 * -EINVAL when the skb->data does not contain a valid CAN frame
224 */ 226 */
225int can_send(struct sk_buff *skb, int loop) 227int can_send(struct sk_buff *skb, int loop)
226{ 228{
227 struct sk_buff *newskb = NULL; 229 struct sk_buff *newskb = NULL;
228 struct can_frame *cf = (struct can_frame *)skb->data; 230 struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
229 int err; 231 int err = -EINVAL;
232
233 if (skb->len == CAN_MTU) {
234 skb->protocol = htons(ETH_P_CAN);
235 if (unlikely(cfd->len > CAN_MAX_DLEN))
236 goto inval_skb;
237 } else if (skb->len == CANFD_MTU) {
238 skb->protocol = htons(ETH_P_CANFD);
239 if (unlikely(cfd->len > CANFD_MAX_DLEN))
240 goto inval_skb;
241 } else
242 goto inval_skb;
230 243
231 if (skb->len != sizeof(struct can_frame) || cf->can_dlc > 8) { 244 /*
232 kfree_skb(skb); 245 * Make sure the CAN frame can pass the selected CAN netdevice.
233 return -EINVAL; 246 * As structs can_frame and canfd_frame are similar, we can provide
247 * CAN FD frames to legacy CAN drivers as long as the length is <= 8
248 */
249 if (unlikely(skb->len > skb->dev->mtu && cfd->len > CAN_MAX_DLEN)) {
250 err = -EMSGSIZE;
251 goto inval_skb;
234 } 252 }
235 253
236 if (skb->dev->type != ARPHRD_CAN) { 254 if (unlikely(skb->dev->type != ARPHRD_CAN)) {
237 kfree_skb(skb); 255 err = -EPERM;
238 return -EPERM; 256 goto inval_skb;
239 } 257 }
240 258
241 if (!(skb->dev->flags & IFF_UP)) { 259 if (unlikely(!(skb->dev->flags & IFF_UP))) {
242 kfree_skb(skb); 260 err = -ENETDOWN;
243 return -ENETDOWN; 261 goto inval_skb;
244 } 262 }
245 263
246 skb->protocol = htons(ETH_P_CAN);
247 skb_reset_network_header(skb); 264 skb_reset_network_header(skb);
248 skb_reset_transport_header(skb); 265 skb_reset_transport_header(skb);
249 266
@@ -300,6 +317,10 @@ int can_send(struct sk_buff *skb, int loop)
300 can_stats.tx_frames_delta++; 317 can_stats.tx_frames_delta++;
301 318
302 return 0; 319 return 0;
320
321inval_skb:
322 kfree_skb(skb);
323 return err;
303} 324}
304EXPORT_SYMBOL(can_send); 325EXPORT_SYMBOL(can_send);
305 326
@@ -334,8 +355,8 @@ static struct dev_rcv_lists *find_dev_rcv_lists(struct net_device *dev)
334 * relevant bits for the filter. 355 * relevant bits for the filter.
335 * 356 *
336 * The filter can be inverted (CAN_INV_FILTER bit set in can_id) or it can 357 * The filter can be inverted (CAN_INV_FILTER bit set in can_id) or it can
337 * filter for error frames (CAN_ERR_FLAG bit set in mask). For error frames 358 * filter for error messages (CAN_ERR_FLAG bit set in mask). For error msg
338 * there is a special filterlist and a special rx path filter handling. 359 * frames there is a special filterlist and a special rx path filter handling.
339 * 360 *
340 * Return: 361 * Return:
341 * Pointer to optimal filterlist for the given can_id/mask pair. 362 * Pointer to optimal filterlist for the given can_id/mask pair.
@@ -347,7 +368,7 @@ static struct hlist_head *find_rcv_list(canid_t *can_id, canid_t *mask,
347{ 368{
348 canid_t inv = *can_id & CAN_INV_FILTER; /* save flag before masking */ 369 canid_t inv = *can_id & CAN_INV_FILTER; /* save flag before masking */
349 370
350 /* filter for error frames in extra filterlist */ 371 /* filter for error message frames in extra filterlist */
351 if (*mask & CAN_ERR_FLAG) { 372 if (*mask & CAN_ERR_FLAG) {
352 /* clear CAN_ERR_FLAG in filter entry */ 373 /* clear CAN_ERR_FLAG in filter entry */
353 *mask &= CAN_ERR_MASK; 374 *mask &= CAN_ERR_MASK;
@@ -408,7 +429,7 @@ static struct hlist_head *find_rcv_list(canid_t *can_id, canid_t *mask,
408 * <received_can_id> & mask == can_id & mask 429 * <received_can_id> & mask == can_id & mask
409 * 430 *
410 * The filter can be inverted (CAN_INV_FILTER bit set in can_id) or it can 431 * The filter can be inverted (CAN_INV_FILTER bit set in can_id) or it can
411 * filter for error frames (CAN_ERR_FLAG bit set in mask). 432 * filter for error message frames (CAN_ERR_FLAG bit set in mask).
412 * 433 *
413 * The provided pointer to the sk_buff is guaranteed to be valid as long as 434 * The provided pointer to the sk_buff is guaranteed to be valid as long as
414 * the callback function is running. The callback function must *not* free 435 * the callback function is running. The callback function must *not* free
@@ -578,7 +599,7 @@ static int can_rcv_filter(struct dev_rcv_lists *d, struct sk_buff *skb)
578 return 0; 599 return 0;
579 600
580 if (can_id & CAN_ERR_FLAG) { 601 if (can_id & CAN_ERR_FLAG) {
581 /* check for error frame entries only */ 602 /* check for error message frame entries only */
582 hlist_for_each_entry_rcu(r, n, &d->rx[RX_ERR], list) { 603 hlist_for_each_entry_rcu(r, n, &d->rx[RX_ERR], list) {
583 if (can_id & r->mask) { 604 if (can_id & r->mask) {
584 deliver(skb, r); 605 deliver(skb, r);
@@ -632,24 +653,11 @@ static int can_rcv_filter(struct dev_rcv_lists *d, struct sk_buff *skb)
632 return matches; 653 return matches;
633} 654}
634 655
635static int can_rcv(struct sk_buff *skb, struct net_device *dev, 656static void can_receive(struct sk_buff *skb, struct net_device *dev)
636 struct packet_type *pt, struct net_device *orig_dev)
637{ 657{
638 struct dev_rcv_lists *d; 658 struct dev_rcv_lists *d;
639 struct can_frame *cf = (struct can_frame *)skb->data;
640 int matches; 659 int matches;
641 660
642 if (!net_eq(dev_net(dev), &init_net))
643 goto drop;
644
645 if (WARN_ONCE(dev->type != ARPHRD_CAN ||
646 skb->len != sizeof(struct can_frame) ||
647 cf->can_dlc > 8,
648 "PF_CAN: dropped non conform skbuf: "
649 "dev type %d, len %d, can_dlc %d\n",
650 dev->type, skb->len, cf->can_dlc))
651 goto drop;
652
653 /* update statistics */ 661 /* update statistics */
654 can_stats.rx_frames++; 662 can_stats.rx_frames++;
655 can_stats.rx_frames_delta++; 663 can_stats.rx_frames_delta++;
@@ -673,7 +681,49 @@ static int can_rcv(struct sk_buff *skb, struct net_device *dev,
673 can_stats.matches++; 681 can_stats.matches++;
674 can_stats.matches_delta++; 682 can_stats.matches_delta++;
675 } 683 }
684}
676 685
686static int can_rcv(struct sk_buff *skb, struct net_device *dev,
687 struct packet_type *pt, struct net_device *orig_dev)
688{
689 struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
690
691 if (unlikely(!net_eq(dev_net(dev), &init_net)))
692 goto drop;
693
694 if (WARN_ONCE(dev->type != ARPHRD_CAN ||
695 skb->len != CAN_MTU ||
696 cfd->len > CAN_MAX_DLEN,
697 "PF_CAN: dropped non conform CAN skbuf: "
698 "dev type %d, len %d, datalen %d\n",
699 dev->type, skb->len, cfd->len))
700 goto drop;
701
702 can_receive(skb, dev);
703 return NET_RX_SUCCESS;
704
705drop:
706 kfree_skb(skb);
707 return NET_RX_DROP;
708}
709
710static int canfd_rcv(struct sk_buff *skb, struct net_device *dev,
711 struct packet_type *pt, struct net_device *orig_dev)
712{
713 struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
714
715 if (unlikely(!net_eq(dev_net(dev), &init_net)))
716 goto drop;
717
718 if (WARN_ONCE(dev->type != ARPHRD_CAN ||
719 skb->len != CANFD_MTU ||
720 cfd->len > CANFD_MAX_DLEN,
721 "PF_CAN: dropped non conform CAN FD skbuf: "
722 "dev type %d, len %d, datalen %d\n",
723 dev->type, skb->len, cfd->len))
724 goto drop;
725
726 can_receive(skb, dev);
677 return NET_RX_SUCCESS; 727 return NET_RX_SUCCESS;
678 728
679drop: 729drop:
@@ -807,10 +857,14 @@ static int can_notifier(struct notifier_block *nb, unsigned long msg,
807 857
808static struct packet_type can_packet __read_mostly = { 858static struct packet_type can_packet __read_mostly = {
809 .type = cpu_to_be16(ETH_P_CAN), 859 .type = cpu_to_be16(ETH_P_CAN),
810 .dev = NULL,
811 .func = can_rcv, 860 .func = can_rcv,
812}; 861};
813 862
863static struct packet_type canfd_packet __read_mostly = {
864 .type = cpu_to_be16(ETH_P_CANFD),
865 .func = canfd_rcv,
866};
867
814static const struct net_proto_family can_family_ops = { 868static const struct net_proto_family can_family_ops = {
815 .family = PF_CAN, 869 .family = PF_CAN,
816 .create = can_create, 870 .create = can_create,
@@ -824,6 +878,12 @@ static struct notifier_block can_netdev_notifier __read_mostly = {
824 878
825static __init int can_init(void) 879static __init int can_init(void)
826{ 880{
881 /* check for correct padding to be able to use the structs similarly */
882 BUILD_BUG_ON(offsetof(struct can_frame, can_dlc) !=
883 offsetof(struct canfd_frame, len) ||
884 offsetof(struct can_frame, data) !=
885 offsetof(struct canfd_frame, data));
886
827 printk(banner); 887 printk(banner);
828 888
829 memset(&can_rx_alldev_list, 0, sizeof(can_rx_alldev_list)); 889 memset(&can_rx_alldev_list, 0, sizeof(can_rx_alldev_list));
@@ -846,6 +906,7 @@ static __init int can_init(void)
846 sock_register(&can_family_ops); 906 sock_register(&can_family_ops);
847 register_netdevice_notifier(&can_netdev_notifier); 907 register_netdevice_notifier(&can_netdev_notifier);
848 dev_add_pack(&can_packet); 908 dev_add_pack(&can_packet);
909 dev_add_pack(&canfd_packet);
849 910
850 return 0; 911 return 0;
851} 912}
@@ -860,6 +921,7 @@ static __exit void can_exit(void)
860 can_remove_proc(); 921 can_remove_proc();
861 922
862 /* protocol unregister */ 923 /* protocol unregister */
924 dev_remove_pack(&canfd_packet);
863 dev_remove_pack(&can_packet); 925 dev_remove_pack(&can_packet);
864 unregister_netdevice_notifier(&can_netdev_notifier); 926 unregister_netdevice_notifier(&can_netdev_notifier);
865 sock_unregister(PF_CAN); 927 sock_unregister(PF_CAN);
diff --git a/net/can/af_can.h b/net/can/af_can.h
index fd882dbadad..1dccb4c3389 100644
--- a/net/can/af_can.h
+++ b/net/can/af_can.h
@@ -104,6 +104,9 @@ struct s_pstats {
104 unsigned long rcv_entries_max; 104 unsigned long rcv_entries_max;
105}; 105};
106 106
107/* receive filters subscribed for 'all' CAN devices */
108extern struct dev_rcv_lists can_rx_alldev_list;
109
107/* function prototypes for the CAN networklayer procfs (proc.c) */ 110/* function prototypes for the CAN networklayer procfs (proc.c) */
108extern void can_init_proc(void); 111extern void can_init_proc(void);
109extern void can_remove_proc(void); 112extern void can_remove_proc(void);
diff --git a/net/can/proc.c b/net/can/proc.c
index ba873c36d2f..3b6dd318049 100644
--- a/net/can/proc.c
+++ b/net/can/proc.c
@@ -83,9 +83,6 @@ static const char rx_list_name[][8] = {
83 [RX_EFF] = "rx_eff", 83 [RX_EFF] = "rx_eff",
84}; 84};
85 85
86/* receive filters subscribed for 'all' CAN devices */
87extern struct dev_rcv_lists can_rx_alldev_list;
88
89/* 86/*
90 * af_can statistics stuff 87 * af_can statistics stuff
91 */ 88 */
diff --git a/net/can/raw.c b/net/can/raw.c
index 46cca3a91d1..3e9c89356a9 100644
--- a/net/can/raw.c
+++ b/net/can/raw.c
@@ -82,6 +82,7 @@ struct raw_sock {
82 struct notifier_block notifier; 82 struct notifier_block notifier;
83 int loopback; 83 int loopback;
84 int recv_own_msgs; 84 int recv_own_msgs;
85 int fd_frames;
85 int count; /* number of active filters */ 86 int count; /* number of active filters */
86 struct can_filter dfilter; /* default/single filter */ 87 struct can_filter dfilter; /* default/single filter */
87 struct can_filter *filter; /* pointer to filter(s) */ 88 struct can_filter *filter; /* pointer to filter(s) */
@@ -119,6 +120,14 @@ static void raw_rcv(struct sk_buff *oskb, void *data)
119 if (!ro->recv_own_msgs && oskb->sk == sk) 120 if (!ro->recv_own_msgs && oskb->sk == sk)
120 return; 121 return;
121 122
123 /* do not pass frames with DLC > 8 to a legacy socket */
124 if (!ro->fd_frames) {
125 struct canfd_frame *cfd = (struct canfd_frame *)oskb->data;
126
127 if (unlikely(cfd->len > CAN_MAX_DLEN))
128 return;
129 }
130
122 /* clone the given skb to be able to enqueue it into the rcv queue */ 131 /* clone the given skb to be able to enqueue it into the rcv queue */
123 skb = skb_clone(oskb, GFP_ATOMIC); 132 skb = skb_clone(oskb, GFP_ATOMIC);
124 if (!skb) 133 if (!skb)
@@ -291,6 +300,7 @@ static int raw_init(struct sock *sk)
291 /* set default loopback behaviour */ 300 /* set default loopback behaviour */
292 ro->loopback = 1; 301 ro->loopback = 1;
293 ro->recv_own_msgs = 0; 302 ro->recv_own_msgs = 0;
303 ro->fd_frames = 0;
294 304
295 /* set notifier */ 305 /* set notifier */
296 ro->notifier.notifier_call = raw_notifier; 306 ro->notifier.notifier_call = raw_notifier;
@@ -569,6 +579,15 @@ static int raw_setsockopt(struct socket *sock, int level, int optname,
569 579
570 break; 580 break;
571 581
582 case CAN_RAW_FD_FRAMES:
583 if (optlen != sizeof(ro->fd_frames))
584 return -EINVAL;
585
586 if (copy_from_user(&ro->fd_frames, optval, optlen))
587 return -EFAULT;
588
589 break;
590
572 default: 591 default:
573 return -ENOPROTOOPT; 592 return -ENOPROTOOPT;
574 } 593 }
@@ -627,6 +646,12 @@ static int raw_getsockopt(struct socket *sock, int level, int optname,
627 val = &ro->recv_own_msgs; 646 val = &ro->recv_own_msgs;
628 break; 647 break;
629 648
649 case CAN_RAW_FD_FRAMES:
650 if (len > sizeof(int))
651 len = sizeof(int);
652 val = &ro->fd_frames;
653 break;
654
630 default: 655 default:
631 return -ENOPROTOOPT; 656 return -ENOPROTOOPT;
632 } 657 }
@@ -662,8 +687,13 @@ static int raw_sendmsg(struct kiocb *iocb, struct socket *sock,
662 } else 687 } else
663 ifindex = ro->ifindex; 688 ifindex = ro->ifindex;
664 689
665 if (size != sizeof(struct can_frame)) 690 if (ro->fd_frames) {
666 return -EINVAL; 691 if (unlikely(size != CANFD_MTU && size != CAN_MTU))
692 return -EINVAL;
693 } else {
694 if (unlikely(size != CAN_MTU))
695 return -EINVAL;
696 }
667 697
668 dev = dev_get_by_index(&init_net, ifindex); 698 dev = dev_get_by_index(&init_net, ifindex);
669 if (!dev) 699 if (!dev)
@@ -705,7 +735,9 @@ static int raw_recvmsg(struct kiocb *iocb, struct socket *sock,
705 struct msghdr *msg, size_t size, int flags) 735 struct msghdr *msg, size_t size, int flags)
706{ 736{
707 struct sock *sk = sock->sk; 737 struct sock *sk = sock->sk;
738 struct raw_sock *ro = raw_sk(sk);
708 struct sk_buff *skb; 739 struct sk_buff *skb;
740 int rxmtu;
709 int err = 0; 741 int err = 0;
710 int noblock; 742 int noblock;
711 743
@@ -716,10 +748,20 @@ static int raw_recvmsg(struct kiocb *iocb, struct socket *sock,
716 if (!skb) 748 if (!skb)
717 return err; 749 return err;
718 750
719 if (size < skb->len) 751 /*
752 * when serving a legacy socket the DLC <= 8 is already checked inside
753 * raw_rcv(). Now check if we need to pass a canfd_frame to a legacy
754 * socket and cut the possible CANFD_MTU/CAN_MTU length to CAN_MTU
755 */
756 if (!ro->fd_frames)
757 rxmtu = CAN_MTU;
758 else
759 rxmtu = skb->len;
760
761 if (size < rxmtu)
720 msg->msg_flags |= MSG_TRUNC; 762 msg->msg_flags |= MSG_TRUNC;
721 else 763 else
722 size = skb->len; 764 size = rxmtu;
723 765
724 err = memcpy_toiovec(msg->msg_iov, skb->data, size); 766 err = memcpy_toiovec(msg->msg_iov, skb->data, size);
725 if (err < 0) { 767 if (err < 0) {
diff --git a/net/ceph/pagelist.c b/net/ceph/pagelist.c
index 13cb409a7bb..665cd23020f 100644
--- a/net/ceph/pagelist.c
+++ b/net/ceph/pagelist.c
@@ -72,8 +72,7 @@ int ceph_pagelist_append(struct ceph_pagelist *pl, const void *buf, size_t len)
72} 72}
73EXPORT_SYMBOL(ceph_pagelist_append); 73EXPORT_SYMBOL(ceph_pagelist_append);
74 74
75/** 75/* Allocate enough pages for a pagelist to append the given amount
76 * Allocate enough pages for a pagelist to append the given amount
77 * of data without without allocating. 76 * of data without without allocating.
78 * Returns: 0 on success, -ENOMEM on error. 77 * Returns: 0 on success, -ENOMEM on error.
79 */ 78 */
@@ -95,9 +94,7 @@ int ceph_pagelist_reserve(struct ceph_pagelist *pl, size_t space)
95} 94}
96EXPORT_SYMBOL(ceph_pagelist_reserve); 95EXPORT_SYMBOL(ceph_pagelist_reserve);
97 96
98/** 97/* Free any pages that have been preallocated. */
99 * Free any pages that have been preallocated.
100 */
101int ceph_pagelist_free_reserve(struct ceph_pagelist *pl) 98int ceph_pagelist_free_reserve(struct ceph_pagelist *pl)
102{ 99{
103 while (!list_empty(&pl->free_list)) { 100 while (!list_empty(&pl->free_list)) {
@@ -112,9 +109,7 @@ int ceph_pagelist_free_reserve(struct ceph_pagelist *pl)
112} 109}
113EXPORT_SYMBOL(ceph_pagelist_free_reserve); 110EXPORT_SYMBOL(ceph_pagelist_free_reserve);
114 111
115/** 112/* Create a truncation point. */
116 * Create a truncation point.
117 */
118void ceph_pagelist_set_cursor(struct ceph_pagelist *pl, 113void ceph_pagelist_set_cursor(struct ceph_pagelist *pl,
119 struct ceph_pagelist_cursor *c) 114 struct ceph_pagelist_cursor *c)
120{ 115{
@@ -124,8 +119,7 @@ void ceph_pagelist_set_cursor(struct ceph_pagelist *pl,
124} 119}
125EXPORT_SYMBOL(ceph_pagelist_set_cursor); 120EXPORT_SYMBOL(ceph_pagelist_set_cursor);
126 121
127/** 122/* Truncate a pagelist to the given point. Move extra pages to reserve.
128 * Truncate a pagelist to the given point. Move extra pages to reserve.
129 * This won't sleep. 123 * This won't sleep.
130 * Returns: 0 on success, 124 * Returns: 0 on success,
131 * -EINVAL if the pagelist doesn't match the trunc point pagelist 125 * -EINVAL if the pagelist doesn't match the trunc point pagelist
diff --git a/net/core/datagram.c b/net/core/datagram.c
index ae6acf6a3de..0337e2b7686 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -248,7 +248,6 @@ void skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb)
248 unlock_sock_fast(sk, slow); 248 unlock_sock_fast(sk, slow);
249 249
250 /* skb is now orphaned, can be freed outside of locked section */ 250 /* skb is now orphaned, can be freed outside of locked section */
251 trace_kfree_skb(skb, skb_free_datagram_locked);
252 __kfree_skb(skb); 251 __kfree_skb(skb);
253} 252}
254EXPORT_SYMBOL(skb_free_datagram_locked); 253EXPORT_SYMBOL(skb_free_datagram_locked);
diff --git a/net/core/dev.c b/net/core/dev.c
index 0f28a9e0b8a..73e87c7b437 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1691,7 +1691,8 @@ static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
1691 rcu_read_unlock(); 1691 rcu_read_unlock();
1692} 1692}
1693 1693
1694/* netif_setup_tc - Handle tc mappings on real_num_tx_queues change 1694/**
1695 * netif_setup_tc - Handle tc mappings on real_num_tx_queues change
1695 * @dev: Network device 1696 * @dev: Network device
1696 * @txq: number of queues available 1697 * @txq: number of queues available
1697 * 1698 *
@@ -1793,6 +1794,18 @@ int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
1793EXPORT_SYMBOL(netif_set_real_num_rx_queues); 1794EXPORT_SYMBOL(netif_set_real_num_rx_queues);
1794#endif 1795#endif
1795 1796
1797/**
1798 * netif_get_num_default_rss_queues - default number of RSS queues
1799 *
1800 * This routine should set an upper limit on the number of RSS queues
1801 * used by default by multiqueue devices.
1802 */
1803int netif_get_num_default_rss_queues(void)
1804{
1805 return min_t(int, DEFAULT_MAX_NUM_RSS_QUEUES, num_online_cpus());
1806}
1807EXPORT_SYMBOL(netif_get_num_default_rss_queues);
1808
1796static inline void __netif_reschedule(struct Qdisc *q) 1809static inline void __netif_reschedule(struct Qdisc *q)
1797{ 1810{
1798 struct softnet_data *sd; 1811 struct softnet_data *sd;
@@ -2459,6 +2472,23 @@ static DEFINE_PER_CPU(int, xmit_recursion);
2459#define RECURSION_LIMIT 10 2472#define RECURSION_LIMIT 10
2460 2473
2461/** 2474/**
2475 * dev_loopback_xmit - loop back @skb
2476 * @skb: buffer to transmit
2477 */
2478int dev_loopback_xmit(struct sk_buff *skb)
2479{
2480 skb_reset_mac_header(skb);
2481 __skb_pull(skb, skb_network_offset(skb));
2482 skb->pkt_type = PACKET_LOOPBACK;
2483 skb->ip_summed = CHECKSUM_UNNECESSARY;
2484 WARN_ON(!skb_dst(skb));
2485 skb_dst_force(skb);
2486 netif_rx_ni(skb);
2487 return 0;
2488}
2489EXPORT_SYMBOL(dev_loopback_xmit);
2490
2491/**
2462 * dev_queue_xmit - transmit a buffer 2492 * dev_queue_xmit - transmit a buffer
2463 * @skb: buffer to transmit 2493 * @skb: buffer to transmit
2464 * 2494 *
@@ -5646,7 +5676,7 @@ int netdev_refcnt_read(const struct net_device *dev)
5646} 5676}
5647EXPORT_SYMBOL(netdev_refcnt_read); 5677EXPORT_SYMBOL(netdev_refcnt_read);
5648 5678
5649/* 5679/**
5650 * netdev_wait_allrefs - wait until all references are gone. 5680 * netdev_wait_allrefs - wait until all references are gone.
5651 * 5681 *
5652 * This is called when unregistering network devices. 5682 * This is called when unregistering network devices.
diff --git a/net/core/dst.c b/net/core/dst.c
index 43d94cedbf7..07bacff84aa 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -152,7 +152,7 @@ EXPORT_SYMBOL(dst_discard);
152const u32 dst_default_metrics[RTAX_MAX]; 152const u32 dst_default_metrics[RTAX_MAX];
153 153
154void *dst_alloc(struct dst_ops *ops, struct net_device *dev, 154void *dst_alloc(struct dst_ops *ops, struct net_device *dev,
155 int initial_ref, int initial_obsolete, int flags) 155 int initial_ref, int initial_obsolete, unsigned short flags)
156{ 156{
157 struct dst_entry *dst; 157 struct dst_entry *dst;
158 158
@@ -171,7 +171,6 @@ void *dst_alloc(struct dst_ops *ops, struct net_device *dev,
171 dst_init_metrics(dst, dst_default_metrics, true); 171 dst_init_metrics(dst, dst_default_metrics, true);
172 dst->expires = 0UL; 172 dst->expires = 0UL;
173 dst->path = dst; 173 dst->path = dst;
174 RCU_INIT_POINTER(dst->_neighbour, NULL);
175#ifdef CONFIG_XFRM 174#ifdef CONFIG_XFRM
176 dst->xfrm = NULL; 175 dst->xfrm = NULL;
177#endif 176#endif
@@ -188,6 +187,7 @@ void *dst_alloc(struct dst_ops *ops, struct net_device *dev,
188 dst->__use = 0; 187 dst->__use = 0;
189 dst->lastuse = jiffies; 188 dst->lastuse = jiffies;
190 dst->flags = flags; 189 dst->flags = flags;
190 dst->pending_confirm = 0;
191 dst->next = NULL; 191 dst->next = NULL;
192 if (!(flags & DST_NOCOUNT)) 192 if (!(flags & DST_NOCOUNT))
193 dst_entries_add(ops, 1); 193 dst_entries_add(ops, 1);
@@ -224,19 +224,12 @@ EXPORT_SYMBOL(__dst_free);
224struct dst_entry *dst_destroy(struct dst_entry * dst) 224struct dst_entry *dst_destroy(struct dst_entry * dst)
225{ 225{
226 struct dst_entry *child; 226 struct dst_entry *child;
227 struct neighbour *neigh;
228 227
229 smp_rmb(); 228 smp_rmb();
230 229
231again: 230again:
232 neigh = rcu_dereference_protected(dst->_neighbour, 1);
233 child = dst->child; 231 child = dst->child;
234 232
235 if (neigh) {
236 RCU_INIT_POINTER(dst->_neighbour, NULL);
237 neigh_release(neigh);
238 }
239
240 if (!(dst->flags & DST_NOCOUNT)) 233 if (!(dst->flags & DST_NOCOUNT))
241 dst_entries_add(dst->ops, -1); 234 dst_entries_add(dst->ops, -1);
242 235
@@ -360,19 +353,9 @@ static void dst_ifdown(struct dst_entry *dst, struct net_device *dev,
360 if (!unregister) { 353 if (!unregister) {
361 dst->input = dst->output = dst_discard; 354 dst->input = dst->output = dst_discard;
362 } else { 355 } else {
363 struct neighbour *neigh;
364
365 dst->dev = dev_net(dst->dev)->loopback_dev; 356 dst->dev = dev_net(dst->dev)->loopback_dev;
366 dev_hold(dst->dev); 357 dev_hold(dst->dev);
367 dev_put(dev); 358 dev_put(dev);
368 rcu_read_lock();
369 neigh = dst_get_neighbour_noref(dst);
370 if (neigh && neigh->dev == dev) {
371 neigh->dev = dst->dev;
372 dev_hold(dst->dev);
373 dev_put(dev);
374 }
375 rcu_read_unlock();
376 } 359 }
377} 360}
378 361
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 9c2afb48027..cbf033dcaf1 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -729,6 +729,40 @@ static int ethtool_set_wol(struct net_device *dev, char __user *useraddr)
729 return dev->ethtool_ops->set_wol(dev, &wol); 729 return dev->ethtool_ops->set_wol(dev, &wol);
730} 730}
731 731
732static int ethtool_get_eee(struct net_device *dev, char __user *useraddr)
733{
734 struct ethtool_eee edata;
735 int rc;
736
737 if (!dev->ethtool_ops->get_eee)
738 return -EOPNOTSUPP;
739
740 memset(&edata, 0, sizeof(struct ethtool_eee));
741 edata.cmd = ETHTOOL_GEEE;
742 rc = dev->ethtool_ops->get_eee(dev, &edata);
743
744 if (rc)
745 return rc;
746
747 if (copy_to_user(useraddr, &edata, sizeof(edata)))
748 return -EFAULT;
749
750 return 0;
751}
752
753static int ethtool_set_eee(struct net_device *dev, char __user *useraddr)
754{
755 struct ethtool_eee edata;
756
757 if (!dev->ethtool_ops->set_eee)
758 return -EOPNOTSUPP;
759
760 if (copy_from_user(&edata, useraddr, sizeof(edata)))
761 return -EFAULT;
762
763 return dev->ethtool_ops->set_eee(dev, &edata);
764}
765
732static int ethtool_nway_reset(struct net_device *dev) 766static int ethtool_nway_reset(struct net_device *dev)
733{ 767{
734 if (!dev->ethtool_ops->nway_reset) 768 if (!dev->ethtool_ops->nway_reset)
@@ -1409,6 +1443,7 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
1409 case ETHTOOL_GSET: 1443 case ETHTOOL_GSET:
1410 case ETHTOOL_GDRVINFO: 1444 case ETHTOOL_GDRVINFO:
1411 case ETHTOOL_GMSGLVL: 1445 case ETHTOOL_GMSGLVL:
1446 case ETHTOOL_GLINK:
1412 case ETHTOOL_GCOALESCE: 1447 case ETHTOOL_GCOALESCE:
1413 case ETHTOOL_GRINGPARAM: 1448 case ETHTOOL_GRINGPARAM:
1414 case ETHTOOL_GPAUSEPARAM: 1449 case ETHTOOL_GPAUSEPARAM:
@@ -1417,6 +1452,7 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
1417 case ETHTOOL_GSG: 1452 case ETHTOOL_GSG:
1418 case ETHTOOL_GSSET_INFO: 1453 case ETHTOOL_GSSET_INFO:
1419 case ETHTOOL_GSTRINGS: 1454 case ETHTOOL_GSTRINGS:
1455 case ETHTOOL_GSTATS:
1420 case ETHTOOL_GTSO: 1456 case ETHTOOL_GTSO:
1421 case ETHTOOL_GPERMADDR: 1457 case ETHTOOL_GPERMADDR:
1422 case ETHTOOL_GUFO: 1458 case ETHTOOL_GUFO:
@@ -1429,8 +1465,11 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
1429 case ETHTOOL_GRXCLSRLCNT: 1465 case ETHTOOL_GRXCLSRLCNT:
1430 case ETHTOOL_GRXCLSRULE: 1466 case ETHTOOL_GRXCLSRULE:
1431 case ETHTOOL_GRXCLSRLALL: 1467 case ETHTOOL_GRXCLSRLALL:
1468 case ETHTOOL_GRXFHINDIR:
1432 case ETHTOOL_GFEATURES: 1469 case ETHTOOL_GFEATURES:
1470 case ETHTOOL_GCHANNELS:
1433 case ETHTOOL_GET_TS_INFO: 1471 case ETHTOOL_GET_TS_INFO:
1472 case ETHTOOL_GEEE:
1434 break; 1473 break;
1435 default: 1474 default:
1436 if (!capable(CAP_NET_ADMIN)) 1475 if (!capable(CAP_NET_ADMIN))
@@ -1471,6 +1510,12 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
1471 rc = ethtool_set_value_void(dev, useraddr, 1510 rc = ethtool_set_value_void(dev, useraddr,
1472 dev->ethtool_ops->set_msglevel); 1511 dev->ethtool_ops->set_msglevel);
1473 break; 1512 break;
1513 case ETHTOOL_GEEE:
1514 rc = ethtool_get_eee(dev, useraddr);
1515 break;
1516 case ETHTOOL_SEEE:
1517 rc = ethtool_set_eee(dev, useraddr);
1518 break;
1474 case ETHTOOL_NWAY_RST: 1519 case ETHTOOL_NWAY_RST:
1475 rc = ethtool_nway_reset(dev); 1520 rc = ethtool_nway_reset(dev);
1476 break; 1521 break;
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index 72cceb79d0d..ab7db83236c 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -151,6 +151,8 @@ static void fib_rules_cleanup_ops(struct fib_rules_ops *ops)
151 151
152 list_for_each_entry_safe(rule, tmp, &ops->rules_list, list) { 152 list_for_each_entry_safe(rule, tmp, &ops->rules_list, list) {
153 list_del_rcu(&rule->list); 153 list_del_rcu(&rule->list);
154 if (ops->delete)
155 ops->delete(rule);
154 fib_rule_put(rule); 156 fib_rule_put(rule);
155 } 157 }
156} 158}
@@ -499,6 +501,8 @@ static int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
499 501
500 notify_rule_change(RTM_DELRULE, rule, ops, nlh, 502 notify_rule_change(RTM_DELRULE, rule, ops, nlh,
501 NETLINK_CB(skb).pid); 503 NETLINK_CB(skb).pid);
504 if (ops->delete)
505 ops->delete(rule);
502 fib_rule_put(rule); 506 fib_rule_put(rule);
503 flush_route_cache(ops); 507 flush_route_cache(ops);
504 rules_ops_put(ops); 508 rules_ops_put(ops);
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index d81d026138f..117afaf5126 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -474,8 +474,8 @@ struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net,
474} 474}
475EXPORT_SYMBOL(neigh_lookup_nodev); 475EXPORT_SYMBOL(neigh_lookup_nodev);
476 476
477struct neighbour *neigh_create(struct neigh_table *tbl, const void *pkey, 477struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey,
478 struct net_device *dev) 478 struct net_device *dev, bool want_ref)
479{ 479{
480 u32 hash_val; 480 u32 hash_val;
481 int key_len = tbl->key_len; 481 int key_len = tbl->key_len;
@@ -535,14 +535,16 @@ struct neighbour *neigh_create(struct neigh_table *tbl, const void *pkey,
535 n1 = rcu_dereference_protected(n1->next, 535 n1 = rcu_dereference_protected(n1->next,
536 lockdep_is_held(&tbl->lock))) { 536 lockdep_is_held(&tbl->lock))) {
537 if (dev == n1->dev && !memcmp(n1->primary_key, pkey, key_len)) { 537 if (dev == n1->dev && !memcmp(n1->primary_key, pkey, key_len)) {
538 neigh_hold(n1); 538 if (want_ref)
539 neigh_hold(n1);
539 rc = n1; 540 rc = n1;
540 goto out_tbl_unlock; 541 goto out_tbl_unlock;
541 } 542 }
542 } 543 }
543 544
544 n->dead = 0; 545 n->dead = 0;
545 neigh_hold(n); 546 if (want_ref)
547 neigh_hold(n);
546 rcu_assign_pointer(n->next, 548 rcu_assign_pointer(n->next,
547 rcu_dereference_protected(nht->hash_buckets[hash_val], 549 rcu_dereference_protected(nht->hash_buckets[hash_val],
548 lockdep_is_held(&tbl->lock))); 550 lockdep_is_held(&tbl->lock)));
@@ -558,7 +560,7 @@ out_neigh_release:
558 neigh_release(n); 560 neigh_release(n);
559 goto out; 561 goto out;
560} 562}
561EXPORT_SYMBOL(neigh_create); 563EXPORT_SYMBOL(__neigh_create);
562 564
563static u32 pneigh_hash(const void *pkey, int key_len) 565static u32 pneigh_hash(const void *pkey, int key_len)
564{ 566{
@@ -1199,10 +1201,23 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
1199 write_unlock_bh(&neigh->lock); 1201 write_unlock_bh(&neigh->lock);
1200 1202
1201 rcu_read_lock(); 1203 rcu_read_lock();
1202 /* On shaper/eql skb->dst->neighbour != neigh :( */ 1204
1203 if (dst && (n2 = dst_get_neighbour_noref(dst)) != NULL) 1205 /* Why not just use 'neigh' as-is? The problem is that
1204 n1 = n2; 1206 * things such as shaper, eql, and sch_teql can end up
1207 * using alternative, different, neigh objects to output
1208 * the packet in the output path. So what we need to do
1209 * here is re-lookup the top-level neigh in the path so
1210 * we can reinject the packet there.
1211 */
1212 n2 = NULL;
1213 if (dst) {
1214 n2 = dst_neigh_lookup_skb(dst, skb);
1215 if (n2)
1216 n1 = n2;
1217 }
1205 n1->output(n1, skb); 1218 n1->output(n1, skb);
1219 if (n2)
1220 neigh_release(n2);
1206 rcu_read_unlock(); 1221 rcu_read_unlock();
1207 1222
1208 write_lock_bh(&neigh->lock); 1223 write_lock_bh(&neigh->lock);
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index fdf9e61d065..72607174ea5 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -417,72 +417,6 @@ static struct attribute_group netstat_group = {
417 .name = "statistics", 417 .name = "statistics",
418 .attrs = netstat_attrs, 418 .attrs = netstat_attrs,
419}; 419};
420
421#ifdef CONFIG_WIRELESS_EXT_SYSFS
422/* helper function that does all the locking etc for wireless stats */
423static ssize_t wireless_show(struct device *d, char *buf,
424 ssize_t (*format)(const struct iw_statistics *,
425 char *))
426{
427 struct net_device *dev = to_net_dev(d);
428 const struct iw_statistics *iw;
429 ssize_t ret = -EINVAL;
430
431 if (!rtnl_trylock())
432 return restart_syscall();
433 if (dev_isalive(dev)) {
434 iw = get_wireless_stats(dev);
435 if (iw)
436 ret = (*format)(iw, buf);
437 }
438 rtnl_unlock();
439
440 return ret;
441}
442
443/* show function template for wireless fields */
444#define WIRELESS_SHOW(name, field, format_string) \
445static ssize_t format_iw_##name(const struct iw_statistics *iw, char *buf) \
446{ \
447 return sprintf(buf, format_string, iw->field); \
448} \
449static ssize_t show_iw_##name(struct device *d, \
450 struct device_attribute *attr, char *buf) \
451{ \
452 return wireless_show(d, buf, format_iw_##name); \
453} \
454static DEVICE_ATTR(name, S_IRUGO, show_iw_##name, NULL)
455
456WIRELESS_SHOW(status, status, fmt_hex);
457WIRELESS_SHOW(link, qual.qual, fmt_dec);
458WIRELESS_SHOW(level, qual.level, fmt_dec);
459WIRELESS_SHOW(noise, qual.noise, fmt_dec);
460WIRELESS_SHOW(nwid, discard.nwid, fmt_dec);
461WIRELESS_SHOW(crypt, discard.code, fmt_dec);
462WIRELESS_SHOW(fragment, discard.fragment, fmt_dec);
463WIRELESS_SHOW(misc, discard.misc, fmt_dec);
464WIRELESS_SHOW(retries, discard.retries, fmt_dec);
465WIRELESS_SHOW(beacon, miss.beacon, fmt_dec);
466
467static struct attribute *wireless_attrs[] = {
468 &dev_attr_status.attr,
469 &dev_attr_link.attr,
470 &dev_attr_level.attr,
471 &dev_attr_noise.attr,
472 &dev_attr_nwid.attr,
473 &dev_attr_crypt.attr,
474 &dev_attr_fragment.attr,
475 &dev_attr_retries.attr,
476 &dev_attr_misc.attr,
477 &dev_attr_beacon.attr,
478 NULL
479};
480
481static struct attribute_group wireless_group = {
482 .name = "wireless",
483 .attrs = wireless_attrs,
484};
485#endif
486#endif /* CONFIG_SYSFS */ 420#endif /* CONFIG_SYSFS */
487 421
488#ifdef CONFIG_RPS 422#ifdef CONFIG_RPS
@@ -1463,14 +1397,6 @@ int netdev_register_kobject(struct net_device *net)
1463 groups++; 1397 groups++;
1464 1398
1465 *groups++ = &netstat_group; 1399 *groups++ = &netstat_group;
1466#ifdef CONFIG_WIRELESS_EXT_SYSFS
1467 if (net->ieee80211_ptr)
1468 *groups++ = &wireless_group;
1469#ifdef CONFIG_WIRELESS_EXT
1470 else if (net->wireless_handlers)
1471 *groups++ = &wireless_group;
1472#endif
1473#endif
1474#endif /* CONFIG_SYSFS */ 1400#endif /* CONFIG_SYSFS */
1475 1401
1476 error = device_add(dev); 1402 error = device_add(dev);
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 21318d15bbc..045db8ad87c 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -541,19 +541,6 @@ static const int rta_max[RTM_NR_FAMILIES] =
541 [RTM_FAM(RTM_NEWACTION)] = TCAA_MAX, 541 [RTM_FAM(RTM_NEWACTION)] = TCAA_MAX,
542}; 542};
543 543
544void __rta_fill(struct sk_buff *skb, int attrtype, int attrlen, const void *data)
545{
546 struct rtattr *rta;
547 int size = RTA_LENGTH(attrlen);
548
549 rta = (struct rtattr *)skb_put(skb, RTA_ALIGN(size));
550 rta->rta_type = attrtype;
551 rta->rta_len = size;
552 memcpy(RTA_DATA(rta), data, attrlen);
553 memset(RTA_DATA(rta) + attrlen, 0, RTA_ALIGN(size) - size);
554}
555EXPORT_SYMBOL(__rta_fill);
556
557int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, unsigned int group, int echo) 544int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, unsigned int group, int echo)
558{ 545{
559 struct sock *rtnl = net->rtnl; 546 struct sock *rtnl = net->rtnl;
@@ -628,7 +615,7 @@ nla_put_failure:
628EXPORT_SYMBOL(rtnetlink_put_metrics); 615EXPORT_SYMBOL(rtnetlink_put_metrics);
629 616
630int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst, u32 id, 617int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst, u32 id,
631 u32 ts, u32 tsage, long expires, u32 error) 618 long expires, u32 error)
632{ 619{
633 struct rta_cacheinfo ci = { 620 struct rta_cacheinfo ci = {
634 .rta_lastuse = jiffies_to_clock_t(jiffies - dst->lastuse), 621 .rta_lastuse = jiffies_to_clock_t(jiffies - dst->lastuse),
@@ -636,8 +623,6 @@ int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst, u32 id,
636 .rta_clntref = atomic_read(&(dst->__refcnt)), 623 .rta_clntref = atomic_read(&(dst->__refcnt)),
637 .rta_error = error, 624 .rta_error = error,
638 .rta_id = id, 625 .rta_id = id,
639 .rta_ts = ts,
640 .rta_tsage = tsage,
641 }; 626 };
642 627
643 if (expires) 628 if (expires)
@@ -2189,7 +2174,7 @@ skip:
2189} 2174}
2190 2175
2191/** 2176/**
2192 * ndo_dflt_fdb_dump: default netdevice operation to dump an FDB table. 2177 * ndo_dflt_fdb_dump - default netdevice operation to dump an FDB table.
2193 * @nlh: netlink message header 2178 * @nlh: netlink message header
2194 * @dev: netdevice 2179 * @dev: netdevice
2195 * 2180 *
@@ -2366,8 +2351,13 @@ static struct notifier_block rtnetlink_dev_notifier = {
2366static int __net_init rtnetlink_net_init(struct net *net) 2351static int __net_init rtnetlink_net_init(struct net *net)
2367{ 2352{
2368 struct sock *sk; 2353 struct sock *sk;
2369 sk = netlink_kernel_create(net, NETLINK_ROUTE, RTNLGRP_MAX, 2354 struct netlink_kernel_cfg cfg = {
2370 rtnetlink_rcv, &rtnl_mutex, THIS_MODULE); 2355 .groups = RTNLGRP_MAX,
2356 .input = rtnetlink_rcv,
2357 .cb_mutex = &rtnl_mutex,
2358 };
2359
2360 sk = netlink_kernel_create(net, NETLINK_ROUTE, THIS_MODULE, &cfg);
2371 if (!sk) 2361 if (!sk)
2372 return -ENOMEM; 2362 return -ENOMEM;
2373 net->rtnl = sk; 2363 net->rtnl = sk;
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 46a3d23d259..506f678e9d9 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -160,8 +160,8 @@ static void skb_under_panic(struct sk_buff *skb, int sz, void *here)
160 * @node: numa node to allocate memory on 160 * @node: numa node to allocate memory on
161 * 161 *
162 * Allocate a new &sk_buff. The returned buffer has no headroom and a 162 * Allocate a new &sk_buff. The returned buffer has no headroom and a
163 * tail room of size bytes. The object has a reference count of one. 163 * tail room of at least size bytes. The object has a reference count
164 * The return is the buffer. On a failure the return is %NULL. 164 * of one. The return is the buffer. On a failure the return is %NULL.
165 * 165 *
166 * Buffers may only be allocated from interrupts using a @gfp_mask of 166 * Buffers may only be allocated from interrupts using a @gfp_mask of
167 * %GFP_ATOMIC. 167 * %GFP_ATOMIC.
@@ -713,7 +713,8 @@ struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src)
713} 713}
714EXPORT_SYMBOL_GPL(skb_morph); 714EXPORT_SYMBOL_GPL(skb_morph);
715 715
716/* skb_copy_ubufs - copy userspace skb frags buffers to kernel 716/**
717 * skb_copy_ubufs - copy userspace skb frags buffers to kernel
717 * @skb: the skb to modify 718 * @skb: the skb to modify
718 * @gfp_mask: allocation priority 719 * @gfp_mask: allocation priority
719 * 720 *
@@ -2614,7 +2615,7 @@ unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
2614EXPORT_SYMBOL(skb_find_text); 2615EXPORT_SYMBOL(skb_find_text);
2615 2616
2616/** 2617/**
2617 * skb_append_datato_frags: - append the user data to a skb 2618 * skb_append_datato_frags - append the user data to a skb
2618 * @sk: sock structure 2619 * @sk: sock structure
2619 * @skb: skb structure to be appened with user data. 2620 * @skb: skb structure to be appened with user data.
2620 * @getfrag: call back function to be used for getting the user data 2621 * @getfrag: call back function to be used for getting the user data
diff --git a/net/core/sock.c b/net/core/sock.c
index 9e5b71fda6e..929bdcc2383 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1465,6 +1465,11 @@ void sock_rfree(struct sk_buff *skb)
1465} 1465}
1466EXPORT_SYMBOL(sock_rfree); 1466EXPORT_SYMBOL(sock_rfree);
1467 1467
1468void sock_edemux(struct sk_buff *skb)
1469{
1470 sock_put(skb->sk);
1471}
1472EXPORT_SYMBOL(sock_edemux);
1468 1473
1469int sock_i_uid(struct sock *sk) 1474int sock_i_uid(struct sock *sk)
1470{ 1475{
diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
index 5fd146720f3..07a29eb34a4 100644
--- a/net/core/sock_diag.c
+++ b/net/core/sock_diag.c
@@ -4,7 +4,6 @@
4#include <net/netlink.h> 4#include <net/netlink.h>
5#include <net/net_namespace.h> 5#include <net/net_namespace.h>
6#include <linux/module.h> 6#include <linux/module.h>
7#include <linux/rtnetlink.h>
8#include <net/sock.h> 7#include <net/sock.h>
9 8
10#include <linux/inet_diag.h> 9#include <linux/inet_diag.h>
@@ -35,9 +34,7 @@ EXPORT_SYMBOL_GPL(sock_diag_save_cookie);
35 34
36int sock_diag_put_meminfo(struct sock *sk, struct sk_buff *skb, int attrtype) 35int sock_diag_put_meminfo(struct sock *sk, struct sk_buff *skb, int attrtype)
37{ 36{
38 __u32 *mem; 37 u32 mem[SK_MEMINFO_VARS];
39
40 mem = RTA_DATA(__RTA_PUT(skb, attrtype, SK_MEMINFO_VARS * sizeof(__u32)));
41 38
42 mem[SK_MEMINFO_RMEM_ALLOC] = sk_rmem_alloc_get(sk); 39 mem[SK_MEMINFO_RMEM_ALLOC] = sk_rmem_alloc_get(sk);
43 mem[SK_MEMINFO_RCVBUF] = sk->sk_rcvbuf; 40 mem[SK_MEMINFO_RCVBUF] = sk->sk_rcvbuf;
@@ -46,11 +43,9 @@ int sock_diag_put_meminfo(struct sock *sk, struct sk_buff *skb, int attrtype)
46 mem[SK_MEMINFO_FWD_ALLOC] = sk->sk_forward_alloc; 43 mem[SK_MEMINFO_FWD_ALLOC] = sk->sk_forward_alloc;
47 mem[SK_MEMINFO_WMEM_QUEUED] = sk->sk_wmem_queued; 44 mem[SK_MEMINFO_WMEM_QUEUED] = sk->sk_wmem_queued;
48 mem[SK_MEMINFO_OPTMEM] = atomic_read(&sk->sk_omem_alloc); 45 mem[SK_MEMINFO_OPTMEM] = atomic_read(&sk->sk_omem_alloc);
46 mem[SK_MEMINFO_BACKLOG] = sk->sk_backlog.len;
49 47
50 return 0; 48 return nla_put(skb, attrtype, sizeof(mem), &mem);
51
52rtattr_failure:
53 return -EMSGSIZE;
54} 49}
55EXPORT_SYMBOL_GPL(sock_diag_put_meminfo); 50EXPORT_SYMBOL_GPL(sock_diag_put_meminfo);
56 51
@@ -120,7 +115,7 @@ static inline void sock_diag_unlock_handler(const struct sock_diag_handler *h)
120static int __sock_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) 115static int __sock_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
121{ 116{
122 int err; 117 int err;
123 struct sock_diag_req *req = NLMSG_DATA(nlh); 118 struct sock_diag_req *req = nlmsg_data(nlh);
124 const struct sock_diag_handler *hndl; 119 const struct sock_diag_handler *hndl;
125 120
126 if (nlmsg_len(nlh) < sizeof(*req)) 121 if (nlmsg_len(nlh) < sizeof(*req))
@@ -176,8 +171,12 @@ EXPORT_SYMBOL_GPL(sock_diag_nlsk);
176 171
177static int __init sock_diag_init(void) 172static int __init sock_diag_init(void)
178{ 173{
179 sock_diag_nlsk = netlink_kernel_create(&init_net, NETLINK_SOCK_DIAG, 0, 174 struct netlink_kernel_cfg cfg = {
180 sock_diag_rcv, NULL, THIS_MODULE); 175 .input = sock_diag_rcv,
176 };
177
178 sock_diag_nlsk = netlink_kernel_create(&init_net, NETLINK_SOCK_DIAG,
179 THIS_MODULE, &cfg);
181 return sock_diag_nlsk == NULL ? -ENOMEM : 0; 180 return sock_diag_nlsk == NULL ? -ENOMEM : 0;
182} 181}
183 182
diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c
index 656c7c75b19..81f2bb62dea 100644
--- a/net/dcb/dcbnl.c
+++ b/net/dcb/dcbnl.c
@@ -28,8 +28,7 @@
28#include <linux/module.h> 28#include <linux/module.h>
29#include <net/sock.h> 29#include <net/sock.h>
30 30
31/** 31/* Data Center Bridging (DCB) is a collection of Ethernet enhancements
32 * Data Center Bridging (DCB) is a collection of Ethernet enhancements
33 * intended to allow network traffic with differing requirements 32 * intended to allow network traffic with differing requirements
34 * (highly reliable, no drops vs. best effort vs. low latency) to operate 33 * (highly reliable, no drops vs. best effort vs. low latency) to operate
35 * and co-exist on Ethernet. Current DCB features are: 34 * and co-exist on Ethernet. Current DCB features are:
@@ -196,92 +195,66 @@ static const struct nla_policy dcbnl_featcfg_nest[DCB_FEATCFG_ATTR_MAX + 1] = {
196static LIST_HEAD(dcb_app_list); 195static LIST_HEAD(dcb_app_list);
197static DEFINE_SPINLOCK(dcb_lock); 196static DEFINE_SPINLOCK(dcb_lock);
198 197
199/* standard netlink reply call */ 198static struct sk_buff *dcbnl_newmsg(int type, u8 cmd, u32 port, u32 seq,
200static int dcbnl_reply(u8 value, u8 event, u8 cmd, u8 attr, u32 pid, 199 u32 flags, struct nlmsghdr **nlhp)
201 u32 seq, u16 flags)
202{ 200{
203 struct sk_buff *dcbnl_skb; 201 struct sk_buff *skb;
204 struct dcbmsg *dcb; 202 struct dcbmsg *dcb;
205 struct nlmsghdr *nlh; 203 struct nlmsghdr *nlh;
206 int ret = -EINVAL;
207 204
208 dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); 205 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
209 if (!dcbnl_skb) 206 if (!skb)
210 return ret; 207 return NULL;
211 208
212 nlh = NLMSG_NEW(dcbnl_skb, pid, seq, event, sizeof(*dcb), flags); 209 nlh = nlmsg_put(skb, port, seq, type, sizeof(*dcb), flags);
210 BUG_ON(!nlh);
213 211
214 dcb = NLMSG_DATA(nlh); 212 dcb = nlmsg_data(nlh);
215 dcb->dcb_family = AF_UNSPEC; 213 dcb->dcb_family = AF_UNSPEC;
216 dcb->cmd = cmd; 214 dcb->cmd = cmd;
217 dcb->dcb_pad = 0; 215 dcb->dcb_pad = 0;
218 216
219 ret = nla_put_u8(dcbnl_skb, attr, value); 217 if (nlhp)
220 if (ret) 218 *nlhp = nlh;
221 goto err;
222 219
223 /* end the message, assign the nlmsg_len. */ 220 return skb;
224 nlmsg_end(dcbnl_skb, nlh);
225 ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
226 if (ret)
227 return -EINVAL;
228
229 return 0;
230nlmsg_failure:
231err:
232 kfree_skb(dcbnl_skb);
233 return ret;
234} 221}
235 222
236static int dcbnl_getstate(struct net_device *netdev, struct nlattr **tb, 223static int dcbnl_getstate(struct net_device *netdev, struct nlmsghdr *nlh,
237 u32 pid, u32 seq, u16 flags) 224 u32 seq, struct nlattr **tb, struct sk_buff *skb)
238{ 225{
239 int ret = -EINVAL;
240
241 /* if (!tb[DCB_ATTR_STATE] || !netdev->dcbnl_ops->getstate) */ 226 /* if (!tb[DCB_ATTR_STATE] || !netdev->dcbnl_ops->getstate) */
242 if (!netdev->dcbnl_ops->getstate) 227 if (!netdev->dcbnl_ops->getstate)
243 return ret; 228 return -EOPNOTSUPP;
244
245 ret = dcbnl_reply(netdev->dcbnl_ops->getstate(netdev), RTM_GETDCB,
246 DCB_CMD_GSTATE, DCB_ATTR_STATE, pid, seq, flags);
247 229
248 return ret; 230 return nla_put_u8(skb, DCB_ATTR_STATE,
231 netdev->dcbnl_ops->getstate(netdev));
249} 232}
250 233
251static int dcbnl_getpfccfg(struct net_device *netdev, struct nlattr **tb, 234static int dcbnl_getpfccfg(struct net_device *netdev, struct nlmsghdr *nlh,
252 u32 pid, u32 seq, u16 flags) 235 u32 seq, struct nlattr **tb, struct sk_buff *skb)
253{ 236{
254 struct sk_buff *dcbnl_skb;
255 struct nlmsghdr *nlh;
256 struct dcbmsg *dcb;
257 struct nlattr *data[DCB_PFC_UP_ATTR_MAX + 1], *nest; 237 struct nlattr *data[DCB_PFC_UP_ATTR_MAX + 1], *nest;
258 u8 value; 238 u8 value;
259 int ret = -EINVAL; 239 int ret;
260 int i; 240 int i;
261 int getall = 0; 241 int getall = 0;
262 242
263 if (!tb[DCB_ATTR_PFC_CFG] || !netdev->dcbnl_ops->getpfccfg) 243 if (!tb[DCB_ATTR_PFC_CFG])
264 return ret; 244 return -EINVAL;
245
246 if (!netdev->dcbnl_ops->getpfccfg)
247 return -EOPNOTSUPP;
265 248
266 ret = nla_parse_nested(data, DCB_PFC_UP_ATTR_MAX, 249 ret = nla_parse_nested(data, DCB_PFC_UP_ATTR_MAX,
267 tb[DCB_ATTR_PFC_CFG], 250 tb[DCB_ATTR_PFC_CFG],
268 dcbnl_pfc_up_nest); 251 dcbnl_pfc_up_nest);
269 if (ret) 252 if (ret)
270 goto err_out; 253 return ret;
271
272 dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
273 if (!dcbnl_skb)
274 goto err_out;
275
276 nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
277
278 dcb = NLMSG_DATA(nlh);
279 dcb->dcb_family = AF_UNSPEC;
280 dcb->cmd = DCB_CMD_PFC_GCFG;
281 254
282 nest = nla_nest_start(dcbnl_skb, DCB_ATTR_PFC_CFG); 255 nest = nla_nest_start(skb, DCB_ATTR_PFC_CFG);
283 if (!nest) 256 if (!nest)
284 goto err; 257 return -EMSGSIZE;
285 258
286 if (data[DCB_PFC_UP_ATTR_ALL]) 259 if (data[DCB_PFC_UP_ATTR_ALL])
287 getall = 1; 260 getall = 1;
@@ -292,103 +265,53 @@ static int dcbnl_getpfccfg(struct net_device *netdev, struct nlattr **tb,
292 265
293 netdev->dcbnl_ops->getpfccfg(netdev, i - DCB_PFC_UP_ATTR_0, 266 netdev->dcbnl_ops->getpfccfg(netdev, i - DCB_PFC_UP_ATTR_0,
294 &value); 267 &value);
295 ret = nla_put_u8(dcbnl_skb, i, value); 268 ret = nla_put_u8(skb, i, value);
296
297 if (ret) { 269 if (ret) {
298 nla_nest_cancel(dcbnl_skb, nest); 270 nla_nest_cancel(skb, nest);
299 goto err; 271 return ret;
300 } 272 }
301 } 273 }
302 nla_nest_end(dcbnl_skb, nest); 274 nla_nest_end(skb, nest);
303
304 nlmsg_end(dcbnl_skb, nlh);
305
306 ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
307 if (ret)
308 goto err_out;
309 275
310 return 0; 276 return 0;
311nlmsg_failure:
312err:
313 kfree_skb(dcbnl_skb);
314err_out:
315 return -EINVAL;
316} 277}
317 278
318static int dcbnl_getperm_hwaddr(struct net_device *netdev, struct nlattr **tb, 279static int dcbnl_getperm_hwaddr(struct net_device *netdev, struct nlmsghdr *nlh,
319 u32 pid, u32 seq, u16 flags) 280 u32 seq, struct nlattr **tb, struct sk_buff *skb)
320{ 281{
321 struct sk_buff *dcbnl_skb;
322 struct nlmsghdr *nlh;
323 struct dcbmsg *dcb;
324 u8 perm_addr[MAX_ADDR_LEN]; 282 u8 perm_addr[MAX_ADDR_LEN];
325 int ret = -EINVAL;
326 283
327 if (!netdev->dcbnl_ops->getpermhwaddr) 284 if (!netdev->dcbnl_ops->getpermhwaddr)
328 return ret; 285 return -EOPNOTSUPP;
329
330 dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
331 if (!dcbnl_skb)
332 goto err_out;
333
334 nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
335
336 dcb = NLMSG_DATA(nlh);
337 dcb->dcb_family = AF_UNSPEC;
338 dcb->cmd = DCB_CMD_GPERM_HWADDR;
339 286
340 netdev->dcbnl_ops->getpermhwaddr(netdev, perm_addr); 287 netdev->dcbnl_ops->getpermhwaddr(netdev, perm_addr);
341 288
342 ret = nla_put(dcbnl_skb, DCB_ATTR_PERM_HWADDR, sizeof(perm_addr), 289 return nla_put(skb, DCB_ATTR_PERM_HWADDR, sizeof(perm_addr), perm_addr);
343 perm_addr);
344
345 nlmsg_end(dcbnl_skb, nlh);
346
347 ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
348 if (ret)
349 goto err_out;
350
351 return 0;
352
353nlmsg_failure:
354 kfree_skb(dcbnl_skb);
355err_out:
356 return -EINVAL;
357} 290}
358 291
359static int dcbnl_getcap(struct net_device *netdev, struct nlattr **tb, 292static int dcbnl_getcap(struct net_device *netdev, struct nlmsghdr *nlh,
360 u32 pid, u32 seq, u16 flags) 293 u32 seq, struct nlattr **tb, struct sk_buff *skb)
361{ 294{
362 struct sk_buff *dcbnl_skb;
363 struct nlmsghdr *nlh;
364 struct dcbmsg *dcb;
365 struct nlattr *data[DCB_CAP_ATTR_MAX + 1], *nest; 295 struct nlattr *data[DCB_CAP_ATTR_MAX + 1], *nest;
366 u8 value; 296 u8 value;
367 int ret = -EINVAL; 297 int ret;
368 int i; 298 int i;
369 int getall = 0; 299 int getall = 0;
370 300
371 if (!tb[DCB_ATTR_CAP] || !netdev->dcbnl_ops->getcap) 301 if (!tb[DCB_ATTR_CAP])
372 return ret; 302 return -EINVAL;
303
304 if (!netdev->dcbnl_ops->getcap)
305 return -EOPNOTSUPP;
373 306
374 ret = nla_parse_nested(data, DCB_CAP_ATTR_MAX, tb[DCB_ATTR_CAP], 307 ret = nla_parse_nested(data, DCB_CAP_ATTR_MAX, tb[DCB_ATTR_CAP],
375 dcbnl_cap_nest); 308 dcbnl_cap_nest);
376 if (ret) 309 if (ret)
377 goto err_out; 310 return ret;
378
379 dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
380 if (!dcbnl_skb)
381 goto err_out;
382
383 nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
384
385 dcb = NLMSG_DATA(nlh);
386 dcb->dcb_family = AF_UNSPEC;
387 dcb->cmd = DCB_CMD_GCAP;
388 311
389 nest = nla_nest_start(dcbnl_skb, DCB_ATTR_CAP); 312 nest = nla_nest_start(skb, DCB_ATTR_CAP);
390 if (!nest) 313 if (!nest)
391 goto err; 314 return -EMSGSIZE;
392 315
393 if (data[DCB_CAP_ATTR_ALL]) 316 if (data[DCB_CAP_ATTR_ALL])
394 getall = 1; 317 getall = 1;
@@ -398,69 +321,41 @@ static int dcbnl_getcap(struct net_device *netdev, struct nlattr **tb,
398 continue; 321 continue;
399 322
400 if (!netdev->dcbnl_ops->getcap(netdev, i, &value)) { 323 if (!netdev->dcbnl_ops->getcap(netdev, i, &value)) {
401 ret = nla_put_u8(dcbnl_skb, i, value); 324 ret = nla_put_u8(skb, i, value);
402
403 if (ret) { 325 if (ret) {
404 nla_nest_cancel(dcbnl_skb, nest); 326 nla_nest_cancel(skb, nest);
405 goto err; 327 return ret;
406 } 328 }
407 } 329 }
408 } 330 }
409 nla_nest_end(dcbnl_skb, nest); 331 nla_nest_end(skb, nest);
410
411 nlmsg_end(dcbnl_skb, nlh);
412
413 ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
414 if (ret)
415 goto err_out;
416 332
417 return 0; 333 return 0;
418nlmsg_failure:
419err:
420 kfree_skb(dcbnl_skb);
421err_out:
422 return -EINVAL;
423} 334}
424 335
425static int dcbnl_getnumtcs(struct net_device *netdev, struct nlattr **tb, 336static int dcbnl_getnumtcs(struct net_device *netdev, struct nlmsghdr *nlh,
426 u32 pid, u32 seq, u16 flags) 337 u32 seq, struct nlattr **tb, struct sk_buff *skb)
427{ 338{
428 struct sk_buff *dcbnl_skb;
429 struct nlmsghdr *nlh;
430 struct dcbmsg *dcb;
431 struct nlattr *data[DCB_NUMTCS_ATTR_MAX + 1], *nest; 339 struct nlattr *data[DCB_NUMTCS_ATTR_MAX + 1], *nest;
432 u8 value; 340 u8 value;
433 int ret = -EINVAL; 341 int ret;
434 int i; 342 int i;
435 int getall = 0; 343 int getall = 0;
436 344
437 if (!tb[DCB_ATTR_NUMTCS] || !netdev->dcbnl_ops->getnumtcs) 345 if (!tb[DCB_ATTR_NUMTCS])
438 return ret; 346 return -EINVAL;
347
348 if (!netdev->dcbnl_ops->getnumtcs)
349 return -EOPNOTSUPP;
439 350
440 ret = nla_parse_nested(data, DCB_NUMTCS_ATTR_MAX, tb[DCB_ATTR_NUMTCS], 351 ret = nla_parse_nested(data, DCB_NUMTCS_ATTR_MAX, tb[DCB_ATTR_NUMTCS],
441 dcbnl_numtcs_nest); 352 dcbnl_numtcs_nest);
442 if (ret) { 353 if (ret)
443 ret = -EINVAL; 354 return ret;
444 goto err_out;
445 }
446
447 dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
448 if (!dcbnl_skb) {
449 ret = -EINVAL;
450 goto err_out;
451 }
452
453 nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
454
455 dcb = NLMSG_DATA(nlh);
456 dcb->dcb_family = AF_UNSPEC;
457 dcb->cmd = DCB_CMD_GNUMTCS;
458 355
459 nest = nla_nest_start(dcbnl_skb, DCB_ATTR_NUMTCS); 356 nest = nla_nest_start(skb, DCB_ATTR_NUMTCS);
460 if (!nest) { 357 if (!nest)
461 ret = -EINVAL; 358 return -EMSGSIZE;
462 goto err;
463 }
464 359
465 if (data[DCB_NUMTCS_ATTR_ALL]) 360 if (data[DCB_NUMTCS_ATTR_ALL])
466 getall = 1; 361 getall = 1;
@@ -471,53 +366,37 @@ static int dcbnl_getnumtcs(struct net_device *netdev, struct nlattr **tb,
471 366
472 ret = netdev->dcbnl_ops->getnumtcs(netdev, i, &value); 367 ret = netdev->dcbnl_ops->getnumtcs(netdev, i, &value);
473 if (!ret) { 368 if (!ret) {
474 ret = nla_put_u8(dcbnl_skb, i, value); 369 ret = nla_put_u8(skb, i, value);
475
476 if (ret) { 370 if (ret) {
477 nla_nest_cancel(dcbnl_skb, nest); 371 nla_nest_cancel(skb, nest);
478 ret = -EINVAL; 372 return ret;
479 goto err;
480 } 373 }
481 } else { 374 } else
482 goto err; 375 return -EINVAL;
483 }
484 }
485 nla_nest_end(dcbnl_skb, nest);
486
487 nlmsg_end(dcbnl_skb, nlh);
488
489 ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
490 if (ret) {
491 ret = -EINVAL;
492 goto err_out;
493 } 376 }
377 nla_nest_end(skb, nest);
494 378
495 return 0; 379 return 0;
496nlmsg_failure:
497err:
498 kfree_skb(dcbnl_skb);
499err_out:
500 return ret;
501} 380}
502 381
503static int dcbnl_setnumtcs(struct net_device *netdev, struct nlattr **tb, 382static int dcbnl_setnumtcs(struct net_device *netdev, struct nlmsghdr *nlh,
504 u32 pid, u32 seq, u16 flags) 383 u32 seq, struct nlattr **tb, struct sk_buff *skb)
505{ 384{
506 struct nlattr *data[DCB_NUMTCS_ATTR_MAX + 1]; 385 struct nlattr *data[DCB_NUMTCS_ATTR_MAX + 1];
507 int ret = -EINVAL; 386 int ret;
508 u8 value; 387 u8 value;
509 int i; 388 int i;
510 389
511 if (!tb[DCB_ATTR_NUMTCS] || !netdev->dcbnl_ops->setnumtcs) 390 if (!tb[DCB_ATTR_NUMTCS])
512 return ret; 391 return -EINVAL;
392
393 if (!netdev->dcbnl_ops->setnumtcs)
394 return -EOPNOTSUPP;
513 395
514 ret = nla_parse_nested(data, DCB_NUMTCS_ATTR_MAX, tb[DCB_ATTR_NUMTCS], 396 ret = nla_parse_nested(data, DCB_NUMTCS_ATTR_MAX, tb[DCB_ATTR_NUMTCS],
515 dcbnl_numtcs_nest); 397 dcbnl_numtcs_nest);
516 398 if (ret)
517 if (ret) { 399 return ret;
518 ret = -EINVAL;
519 goto err;
520 }
521 400
522 for (i = DCB_NUMTCS_ATTR_ALL+1; i <= DCB_NUMTCS_ATTR_MAX; i++) { 401 for (i = DCB_NUMTCS_ATTR_ALL+1; i <= DCB_NUMTCS_ATTR_MAX; i++) {
523 if (data[i] == NULL) 402 if (data[i] == NULL)
@@ -526,84 +405,68 @@ static int dcbnl_setnumtcs(struct net_device *netdev, struct nlattr **tb,
526 value = nla_get_u8(data[i]); 405 value = nla_get_u8(data[i]);
527 406
528 ret = netdev->dcbnl_ops->setnumtcs(netdev, i, value); 407 ret = netdev->dcbnl_ops->setnumtcs(netdev, i, value);
529
530 if (ret) 408 if (ret)
531 goto operr; 409 break;
532 } 410 }
533 411
534operr: 412 return nla_put_u8(skb, DCB_ATTR_NUMTCS, !!ret);
535 ret = dcbnl_reply(!!ret, RTM_SETDCB, DCB_CMD_SNUMTCS,
536 DCB_ATTR_NUMTCS, pid, seq, flags);
537
538err:
539 return ret;
540} 413}
541 414
542static int dcbnl_getpfcstate(struct net_device *netdev, struct nlattr **tb, 415static int dcbnl_getpfcstate(struct net_device *netdev, struct nlmsghdr *nlh,
543 u32 pid, u32 seq, u16 flags) 416 u32 seq, struct nlattr **tb, struct sk_buff *skb)
544{ 417{
545 int ret = -EINVAL;
546
547 if (!netdev->dcbnl_ops->getpfcstate) 418 if (!netdev->dcbnl_ops->getpfcstate)
548 return ret; 419 return -EOPNOTSUPP;
549
550 ret = dcbnl_reply(netdev->dcbnl_ops->getpfcstate(netdev), RTM_GETDCB,
551 DCB_CMD_PFC_GSTATE, DCB_ATTR_PFC_STATE,
552 pid, seq, flags);
553 420
554 return ret; 421 return nla_put_u8(skb, DCB_ATTR_PFC_STATE,
422 netdev->dcbnl_ops->getpfcstate(netdev));
555} 423}
556 424
557static int dcbnl_setpfcstate(struct net_device *netdev, struct nlattr **tb, 425static int dcbnl_setpfcstate(struct net_device *netdev, struct nlmsghdr *nlh,
558 u32 pid, u32 seq, u16 flags) 426 u32 seq, struct nlattr **tb, struct sk_buff *skb)
559{ 427{
560 int ret = -EINVAL;
561 u8 value; 428 u8 value;
562 429
563 if (!tb[DCB_ATTR_PFC_STATE] || !netdev->dcbnl_ops->setpfcstate) 430 if (!tb[DCB_ATTR_PFC_STATE])
564 return ret; 431 return -EINVAL;
432
433 if (!netdev->dcbnl_ops->setpfcstate)
434 return -EOPNOTSUPP;
565 435
566 value = nla_get_u8(tb[DCB_ATTR_PFC_STATE]); 436 value = nla_get_u8(tb[DCB_ATTR_PFC_STATE]);
567 437
568 netdev->dcbnl_ops->setpfcstate(netdev, value); 438 netdev->dcbnl_ops->setpfcstate(netdev, value);
569 439
570 ret = dcbnl_reply(0, RTM_SETDCB, DCB_CMD_PFC_SSTATE, DCB_ATTR_PFC_STATE, 440 return nla_put_u8(skb, DCB_ATTR_PFC_STATE, 0);
571 pid, seq, flags);
572
573 return ret;
574} 441}
575 442
576static int dcbnl_getapp(struct net_device *netdev, struct nlattr **tb, 443static int dcbnl_getapp(struct net_device *netdev, struct nlmsghdr *nlh,
577 u32 pid, u32 seq, u16 flags) 444 u32 seq, struct nlattr **tb, struct sk_buff *skb)
578{ 445{
579 struct sk_buff *dcbnl_skb;
580 struct nlmsghdr *nlh;
581 struct dcbmsg *dcb;
582 struct nlattr *app_nest; 446 struct nlattr *app_nest;
583 struct nlattr *app_tb[DCB_APP_ATTR_MAX + 1]; 447 struct nlattr *app_tb[DCB_APP_ATTR_MAX + 1];
584 u16 id; 448 u16 id;
585 u8 up, idtype; 449 u8 up, idtype;
586 int ret = -EINVAL; 450 int ret;
587 451
588 if (!tb[DCB_ATTR_APP]) 452 if (!tb[DCB_ATTR_APP])
589 goto out; 453 return -EINVAL;
590 454
591 ret = nla_parse_nested(app_tb, DCB_APP_ATTR_MAX, tb[DCB_ATTR_APP], 455 ret = nla_parse_nested(app_tb, DCB_APP_ATTR_MAX, tb[DCB_ATTR_APP],
592 dcbnl_app_nest); 456 dcbnl_app_nest);
593 if (ret) 457 if (ret)
594 goto out; 458 return ret;
595 459
596 ret = -EINVAL;
597 /* all must be non-null */ 460 /* all must be non-null */
598 if ((!app_tb[DCB_APP_ATTR_IDTYPE]) || 461 if ((!app_tb[DCB_APP_ATTR_IDTYPE]) ||
599 (!app_tb[DCB_APP_ATTR_ID])) 462 (!app_tb[DCB_APP_ATTR_ID]))
600 goto out; 463 return -EINVAL;
601 464
602 /* either by eth type or by socket number */ 465 /* either by eth type or by socket number */
603 idtype = nla_get_u8(app_tb[DCB_APP_ATTR_IDTYPE]); 466 idtype = nla_get_u8(app_tb[DCB_APP_ATTR_IDTYPE]);
604 if ((idtype != DCB_APP_IDTYPE_ETHTYPE) && 467 if ((idtype != DCB_APP_IDTYPE_ETHTYPE) &&
605 (idtype != DCB_APP_IDTYPE_PORTNUM)) 468 (idtype != DCB_APP_IDTYPE_PORTNUM))
606 goto out; 469 return -EINVAL;
607 470
608 id = nla_get_u16(app_tb[DCB_APP_ATTR_ID]); 471 id = nla_get_u16(app_tb[DCB_APP_ATTR_ID]);
609 472
@@ -617,138 +480,106 @@ static int dcbnl_getapp(struct net_device *netdev, struct nlattr **tb,
617 up = dcb_getapp(netdev, &app); 480 up = dcb_getapp(netdev, &app);
618 } 481 }
619 482
620 /* send this back */ 483 app_nest = nla_nest_start(skb, DCB_ATTR_APP);
621 dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
622 if (!dcbnl_skb)
623 goto out;
624
625 nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
626 dcb = NLMSG_DATA(nlh);
627 dcb->dcb_family = AF_UNSPEC;
628 dcb->cmd = DCB_CMD_GAPP;
629
630 app_nest = nla_nest_start(dcbnl_skb, DCB_ATTR_APP);
631 if (!app_nest) 484 if (!app_nest)
632 goto out_cancel; 485 return -EMSGSIZE;
633 486
634 ret = nla_put_u8(dcbnl_skb, DCB_APP_ATTR_IDTYPE, idtype); 487 ret = nla_put_u8(skb, DCB_APP_ATTR_IDTYPE, idtype);
635 if (ret) 488 if (ret)
636 goto out_cancel; 489 goto out_cancel;
637 490
638 ret = nla_put_u16(dcbnl_skb, DCB_APP_ATTR_ID, id); 491 ret = nla_put_u16(skb, DCB_APP_ATTR_ID, id);
639 if (ret) 492 if (ret)
640 goto out_cancel; 493 goto out_cancel;
641 494
642 ret = nla_put_u8(dcbnl_skb, DCB_APP_ATTR_PRIORITY, up); 495 ret = nla_put_u8(skb, DCB_APP_ATTR_PRIORITY, up);
643 if (ret) 496 if (ret)
644 goto out_cancel; 497 goto out_cancel;
645 498
646 nla_nest_end(dcbnl_skb, app_nest); 499 nla_nest_end(skb, app_nest);
647 nlmsg_end(dcbnl_skb, nlh);
648
649 ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
650 if (ret)
651 goto nlmsg_failure;
652 500
653 goto out; 501 return 0;
654 502
655out_cancel: 503out_cancel:
656 nla_nest_cancel(dcbnl_skb, app_nest); 504 nla_nest_cancel(skb, app_nest);
657nlmsg_failure:
658 kfree_skb(dcbnl_skb);
659out:
660 return ret; 505 return ret;
661} 506}
662 507
663static int dcbnl_setapp(struct net_device *netdev, struct nlattr **tb, 508static int dcbnl_setapp(struct net_device *netdev, struct nlmsghdr *nlh,
664 u32 pid, u32 seq, u16 flags) 509 u32 seq, struct nlattr **tb, struct sk_buff *skb)
665{ 510{
666 int err, ret = -EINVAL; 511 int ret;
667 u16 id; 512 u16 id;
668 u8 up, idtype; 513 u8 up, idtype;
669 struct nlattr *app_tb[DCB_APP_ATTR_MAX + 1]; 514 struct nlattr *app_tb[DCB_APP_ATTR_MAX + 1];
670 515
671 if (!tb[DCB_ATTR_APP]) 516 if (!tb[DCB_ATTR_APP])
672 goto out; 517 return -EINVAL;
673 518
674 ret = nla_parse_nested(app_tb, DCB_APP_ATTR_MAX, tb[DCB_ATTR_APP], 519 ret = nla_parse_nested(app_tb, DCB_APP_ATTR_MAX, tb[DCB_ATTR_APP],
675 dcbnl_app_nest); 520 dcbnl_app_nest);
676 if (ret) 521 if (ret)
677 goto out; 522 return ret;
678 523
679 ret = -EINVAL;
680 /* all must be non-null */ 524 /* all must be non-null */
681 if ((!app_tb[DCB_APP_ATTR_IDTYPE]) || 525 if ((!app_tb[DCB_APP_ATTR_IDTYPE]) ||
682 (!app_tb[DCB_APP_ATTR_ID]) || 526 (!app_tb[DCB_APP_ATTR_ID]) ||
683 (!app_tb[DCB_APP_ATTR_PRIORITY])) 527 (!app_tb[DCB_APP_ATTR_PRIORITY]))
684 goto out; 528 return -EINVAL;
685 529
686 /* either by eth type or by socket number */ 530 /* either by eth type or by socket number */
687 idtype = nla_get_u8(app_tb[DCB_APP_ATTR_IDTYPE]); 531 idtype = nla_get_u8(app_tb[DCB_APP_ATTR_IDTYPE]);
688 if ((idtype != DCB_APP_IDTYPE_ETHTYPE) && 532 if ((idtype != DCB_APP_IDTYPE_ETHTYPE) &&
689 (idtype != DCB_APP_IDTYPE_PORTNUM)) 533 (idtype != DCB_APP_IDTYPE_PORTNUM))
690 goto out; 534 return -EINVAL;
691 535
692 id = nla_get_u16(app_tb[DCB_APP_ATTR_ID]); 536 id = nla_get_u16(app_tb[DCB_APP_ATTR_ID]);
693 up = nla_get_u8(app_tb[DCB_APP_ATTR_PRIORITY]); 537 up = nla_get_u8(app_tb[DCB_APP_ATTR_PRIORITY]);
694 538
695 if (netdev->dcbnl_ops->setapp) { 539 if (netdev->dcbnl_ops->setapp) {
696 err = netdev->dcbnl_ops->setapp(netdev, idtype, id, up); 540 ret = netdev->dcbnl_ops->setapp(netdev, idtype, id, up);
697 } else { 541 } else {
698 struct dcb_app app; 542 struct dcb_app app;
699 app.selector = idtype; 543 app.selector = idtype;
700 app.protocol = id; 544 app.protocol = id;
701 app.priority = up; 545 app.priority = up;
702 err = dcb_setapp(netdev, &app); 546 ret = dcb_setapp(netdev, &app);
703 } 547 }
704 548
705 ret = dcbnl_reply(err, RTM_SETDCB, DCB_CMD_SAPP, DCB_ATTR_APP, 549 ret = nla_put_u8(skb, DCB_ATTR_APP, ret);
706 pid, seq, flags);
707 dcbnl_cee_notify(netdev, RTM_SETDCB, DCB_CMD_SAPP, seq, 0); 550 dcbnl_cee_notify(netdev, RTM_SETDCB, DCB_CMD_SAPP, seq, 0);
708out: 551
709 return ret; 552 return ret;
710} 553}
711 554
712static int __dcbnl_pg_getcfg(struct net_device *netdev, struct nlattr **tb, 555static int __dcbnl_pg_getcfg(struct net_device *netdev, struct nlmsghdr *nlh,
713 u32 pid, u32 seq, u16 flags, int dir) 556 struct nlattr **tb, struct sk_buff *skb, int dir)
714{ 557{
715 struct sk_buff *dcbnl_skb;
716 struct nlmsghdr *nlh;
717 struct dcbmsg *dcb;
718 struct nlattr *pg_nest, *param_nest, *data; 558 struct nlattr *pg_nest, *param_nest, *data;
719 struct nlattr *pg_tb[DCB_PG_ATTR_MAX + 1]; 559 struct nlattr *pg_tb[DCB_PG_ATTR_MAX + 1];
720 struct nlattr *param_tb[DCB_TC_ATTR_PARAM_MAX + 1]; 560 struct nlattr *param_tb[DCB_TC_ATTR_PARAM_MAX + 1];
721 u8 prio, pgid, tc_pct, up_map; 561 u8 prio, pgid, tc_pct, up_map;
722 int ret = -EINVAL; 562 int ret;
723 int getall = 0; 563 int getall = 0;
724 int i; 564 int i;
725 565
726 if (!tb[DCB_ATTR_PG_CFG] || 566 if (!tb[DCB_ATTR_PG_CFG])
727 !netdev->dcbnl_ops->getpgtccfgtx || 567 return -EINVAL;
568
569 if (!netdev->dcbnl_ops->getpgtccfgtx ||
728 !netdev->dcbnl_ops->getpgtccfgrx || 570 !netdev->dcbnl_ops->getpgtccfgrx ||
729 !netdev->dcbnl_ops->getpgbwgcfgtx || 571 !netdev->dcbnl_ops->getpgbwgcfgtx ||
730 !netdev->dcbnl_ops->getpgbwgcfgrx) 572 !netdev->dcbnl_ops->getpgbwgcfgrx)
731 return ret; 573 return -EOPNOTSUPP;
732 574
733 ret = nla_parse_nested(pg_tb, DCB_PG_ATTR_MAX, 575 ret = nla_parse_nested(pg_tb, DCB_PG_ATTR_MAX,
734 tb[DCB_ATTR_PG_CFG], dcbnl_pg_nest); 576 tb[DCB_ATTR_PG_CFG], dcbnl_pg_nest);
735
736 if (ret) 577 if (ret)
737 goto err_out; 578 return ret;
738
739 dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
740 if (!dcbnl_skb)
741 goto err_out;
742
743 nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
744
745 dcb = NLMSG_DATA(nlh);
746 dcb->dcb_family = AF_UNSPEC;
747 dcb->cmd = (dir) ? DCB_CMD_PGRX_GCFG : DCB_CMD_PGTX_GCFG;
748 579
749 pg_nest = nla_nest_start(dcbnl_skb, DCB_ATTR_PG_CFG); 580 pg_nest = nla_nest_start(skb, DCB_ATTR_PG_CFG);
750 if (!pg_nest) 581 if (!pg_nest)
751 goto err; 582 return -EMSGSIZE;
752 583
753 if (pg_tb[DCB_PG_ATTR_TC_ALL]) 584 if (pg_tb[DCB_PG_ATTR_TC_ALL])
754 getall = 1; 585 getall = 1;
@@ -766,7 +597,7 @@ static int __dcbnl_pg_getcfg(struct net_device *netdev, struct nlattr **tb,
766 if (ret) 597 if (ret)
767 goto err_pg; 598 goto err_pg;
768 599
769 param_nest = nla_nest_start(dcbnl_skb, i); 600 param_nest = nla_nest_start(skb, i);
770 if (!param_nest) 601 if (!param_nest)
771 goto err_pg; 602 goto err_pg;
772 603
@@ -789,33 +620,33 @@ static int __dcbnl_pg_getcfg(struct net_device *netdev, struct nlattr **tb,
789 620
790 if (param_tb[DCB_TC_ATTR_PARAM_PGID] || 621 if (param_tb[DCB_TC_ATTR_PARAM_PGID] ||
791 param_tb[DCB_TC_ATTR_PARAM_ALL]) { 622 param_tb[DCB_TC_ATTR_PARAM_ALL]) {
792 ret = nla_put_u8(dcbnl_skb, 623 ret = nla_put_u8(skb,
793 DCB_TC_ATTR_PARAM_PGID, pgid); 624 DCB_TC_ATTR_PARAM_PGID, pgid);
794 if (ret) 625 if (ret)
795 goto err_param; 626 goto err_param;
796 } 627 }
797 if (param_tb[DCB_TC_ATTR_PARAM_UP_MAPPING] || 628 if (param_tb[DCB_TC_ATTR_PARAM_UP_MAPPING] ||
798 param_tb[DCB_TC_ATTR_PARAM_ALL]) { 629 param_tb[DCB_TC_ATTR_PARAM_ALL]) {
799 ret = nla_put_u8(dcbnl_skb, 630 ret = nla_put_u8(skb,
800 DCB_TC_ATTR_PARAM_UP_MAPPING, up_map); 631 DCB_TC_ATTR_PARAM_UP_MAPPING, up_map);
801 if (ret) 632 if (ret)
802 goto err_param; 633 goto err_param;
803 } 634 }
804 if (param_tb[DCB_TC_ATTR_PARAM_STRICT_PRIO] || 635 if (param_tb[DCB_TC_ATTR_PARAM_STRICT_PRIO] ||
805 param_tb[DCB_TC_ATTR_PARAM_ALL]) { 636 param_tb[DCB_TC_ATTR_PARAM_ALL]) {
806 ret = nla_put_u8(dcbnl_skb, 637 ret = nla_put_u8(skb,
807 DCB_TC_ATTR_PARAM_STRICT_PRIO, prio); 638 DCB_TC_ATTR_PARAM_STRICT_PRIO, prio);
808 if (ret) 639 if (ret)
809 goto err_param; 640 goto err_param;
810 } 641 }
811 if (param_tb[DCB_TC_ATTR_PARAM_BW_PCT] || 642 if (param_tb[DCB_TC_ATTR_PARAM_BW_PCT] ||
812 param_tb[DCB_TC_ATTR_PARAM_ALL]) { 643 param_tb[DCB_TC_ATTR_PARAM_ALL]) {
813 ret = nla_put_u8(dcbnl_skb, DCB_TC_ATTR_PARAM_BW_PCT, 644 ret = nla_put_u8(skb, DCB_TC_ATTR_PARAM_BW_PCT,
814 tc_pct); 645 tc_pct);
815 if (ret) 646 if (ret)
816 goto err_param; 647 goto err_param;
817 } 648 }
818 nla_nest_end(dcbnl_skb, param_nest); 649 nla_nest_end(skb, param_nest);
819 } 650 }
820 651
821 if (pg_tb[DCB_PG_ATTR_BW_ID_ALL]) 652 if (pg_tb[DCB_PG_ATTR_BW_ID_ALL])
@@ -838,80 +669,71 @@ static int __dcbnl_pg_getcfg(struct net_device *netdev, struct nlattr **tb,
838 netdev->dcbnl_ops->getpgbwgcfgtx(netdev, 669 netdev->dcbnl_ops->getpgbwgcfgtx(netdev,
839 i - DCB_PG_ATTR_BW_ID_0, &tc_pct); 670 i - DCB_PG_ATTR_BW_ID_0, &tc_pct);
840 } 671 }
841 ret = nla_put_u8(dcbnl_skb, i, tc_pct); 672 ret = nla_put_u8(skb, i, tc_pct);
842
843 if (ret) 673 if (ret)
844 goto err_pg; 674 goto err_pg;
845 } 675 }
846 676
847 nla_nest_end(dcbnl_skb, pg_nest); 677 nla_nest_end(skb, pg_nest);
848
849 nlmsg_end(dcbnl_skb, nlh);
850
851 ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
852 if (ret)
853 goto err_out;
854 678
855 return 0; 679 return 0;
856 680
857err_param: 681err_param:
858 nla_nest_cancel(dcbnl_skb, param_nest); 682 nla_nest_cancel(skb, param_nest);
859err_pg: 683err_pg:
860 nla_nest_cancel(dcbnl_skb, pg_nest); 684 nla_nest_cancel(skb, pg_nest);
861nlmsg_failure: 685
862err: 686 return -EMSGSIZE;
863 kfree_skb(dcbnl_skb);
864err_out:
865 ret = -EINVAL;
866 return ret;
867} 687}
868 688
869static int dcbnl_pgtx_getcfg(struct net_device *netdev, struct nlattr **tb, 689static int dcbnl_pgtx_getcfg(struct net_device *netdev, struct nlmsghdr *nlh,
870 u32 pid, u32 seq, u16 flags) 690 u32 seq, struct nlattr **tb, struct sk_buff *skb)
871{ 691{
872 return __dcbnl_pg_getcfg(netdev, tb, pid, seq, flags, 0); 692 return __dcbnl_pg_getcfg(netdev, nlh, tb, skb, 0);
873} 693}
874 694
875static int dcbnl_pgrx_getcfg(struct net_device *netdev, struct nlattr **tb, 695static int dcbnl_pgrx_getcfg(struct net_device *netdev, struct nlmsghdr *nlh,
876 u32 pid, u32 seq, u16 flags) 696 u32 seq, struct nlattr **tb, struct sk_buff *skb)
877{ 697{
878 return __dcbnl_pg_getcfg(netdev, tb, pid, seq, flags, 1); 698 return __dcbnl_pg_getcfg(netdev, nlh, tb, skb, 1);
879} 699}
880 700
881static int dcbnl_setstate(struct net_device *netdev, struct nlattr **tb, 701static int dcbnl_setstate(struct net_device *netdev, struct nlmsghdr *nlh,
882 u32 pid, u32 seq, u16 flags) 702 u32 seq, struct nlattr **tb, struct sk_buff *skb)
883{ 703{
884 int ret = -EINVAL;
885 u8 value; 704 u8 value;
886 705
887 if (!tb[DCB_ATTR_STATE] || !netdev->dcbnl_ops->setstate) 706 if (!tb[DCB_ATTR_STATE])
888 return ret; 707 return -EINVAL;
889 708
890 value = nla_get_u8(tb[DCB_ATTR_STATE]); 709 if (!netdev->dcbnl_ops->setstate)
710 return -EOPNOTSUPP;
891 711
892 ret = dcbnl_reply(netdev->dcbnl_ops->setstate(netdev, value), 712 value = nla_get_u8(tb[DCB_ATTR_STATE]);
893 RTM_SETDCB, DCB_CMD_SSTATE, DCB_ATTR_STATE,
894 pid, seq, flags);
895 713
896 return ret; 714 return nla_put_u8(skb, DCB_ATTR_STATE,
715 netdev->dcbnl_ops->setstate(netdev, value));
897} 716}
898 717
899static int dcbnl_setpfccfg(struct net_device *netdev, struct nlattr **tb, 718static int dcbnl_setpfccfg(struct net_device *netdev, struct nlmsghdr *nlh,
900 u32 pid, u32 seq, u16 flags) 719 u32 seq, struct nlattr **tb, struct sk_buff *skb)
901{ 720{
902 struct nlattr *data[DCB_PFC_UP_ATTR_MAX + 1]; 721 struct nlattr *data[DCB_PFC_UP_ATTR_MAX + 1];
903 int i; 722 int i;
904 int ret = -EINVAL; 723 int ret;
905 u8 value; 724 u8 value;
906 725
907 if (!tb[DCB_ATTR_PFC_CFG] || !netdev->dcbnl_ops->setpfccfg) 726 if (!tb[DCB_ATTR_PFC_CFG])
908 return ret; 727 return -EINVAL;
728
729 if (!netdev->dcbnl_ops->setpfccfg)
730 return -EOPNOTSUPP;
909 731
910 ret = nla_parse_nested(data, DCB_PFC_UP_ATTR_MAX, 732 ret = nla_parse_nested(data, DCB_PFC_UP_ATTR_MAX,
911 tb[DCB_ATTR_PFC_CFG], 733 tb[DCB_ATTR_PFC_CFG],
912 dcbnl_pfc_up_nest); 734 dcbnl_pfc_up_nest);
913 if (ret) 735 if (ret)
914 goto err; 736 return ret;
915 737
916 for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) { 738 for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) {
917 if (data[i] == NULL) 739 if (data[i] == NULL)
@@ -921,50 +743,53 @@ static int dcbnl_setpfccfg(struct net_device *netdev, struct nlattr **tb,
921 data[i]->nla_type - DCB_PFC_UP_ATTR_0, value); 743 data[i]->nla_type - DCB_PFC_UP_ATTR_0, value);
922 } 744 }
923 745
924 ret = dcbnl_reply(0, RTM_SETDCB, DCB_CMD_PFC_SCFG, DCB_ATTR_PFC_CFG, 746 return nla_put_u8(skb, DCB_ATTR_PFC_CFG, 0);
925 pid, seq, flags);
926err:
927 return ret;
928} 747}
929 748
930static int dcbnl_setall(struct net_device *netdev, struct nlattr **tb, 749static int dcbnl_setall(struct net_device *netdev, struct nlmsghdr *nlh,
931 u32 pid, u32 seq, u16 flags) 750 u32 seq, struct nlattr **tb, struct sk_buff *skb)
932{ 751{
933 int ret = -EINVAL; 752 int ret;
934 753
935 if (!tb[DCB_ATTR_SET_ALL] || !netdev->dcbnl_ops->setall) 754 if (!tb[DCB_ATTR_SET_ALL])
936 return ret; 755 return -EINVAL;
756
757 if (!netdev->dcbnl_ops->setall)
758 return -EOPNOTSUPP;
937 759
938 ret = dcbnl_reply(netdev->dcbnl_ops->setall(netdev), RTM_SETDCB, 760 ret = nla_put_u8(skb, DCB_ATTR_SET_ALL,
939 DCB_CMD_SET_ALL, DCB_ATTR_SET_ALL, pid, seq, flags); 761 netdev->dcbnl_ops->setall(netdev));
940 dcbnl_cee_notify(netdev, RTM_SETDCB, DCB_CMD_SET_ALL, seq, 0); 762 dcbnl_cee_notify(netdev, RTM_SETDCB, DCB_CMD_SET_ALL, seq, 0);
941 763
942 return ret; 764 return ret;
943} 765}
944 766
945static int __dcbnl_pg_setcfg(struct net_device *netdev, struct nlattr **tb, 767static int __dcbnl_pg_setcfg(struct net_device *netdev, struct nlmsghdr *nlh,
946 u32 pid, u32 seq, u16 flags, int dir) 768 u32 seq, struct nlattr **tb, struct sk_buff *skb,
769 int dir)
947{ 770{
948 struct nlattr *pg_tb[DCB_PG_ATTR_MAX + 1]; 771 struct nlattr *pg_tb[DCB_PG_ATTR_MAX + 1];
949 struct nlattr *param_tb[DCB_TC_ATTR_PARAM_MAX + 1]; 772 struct nlattr *param_tb[DCB_TC_ATTR_PARAM_MAX + 1];
950 int ret = -EINVAL; 773 int ret;
951 int i; 774 int i;
952 u8 pgid; 775 u8 pgid;
953 u8 up_map; 776 u8 up_map;
954 u8 prio; 777 u8 prio;
955 u8 tc_pct; 778 u8 tc_pct;
956 779
957 if (!tb[DCB_ATTR_PG_CFG] || 780 if (!tb[DCB_ATTR_PG_CFG])
958 !netdev->dcbnl_ops->setpgtccfgtx || 781 return -EINVAL;
782
783 if (!netdev->dcbnl_ops->setpgtccfgtx ||
959 !netdev->dcbnl_ops->setpgtccfgrx || 784 !netdev->dcbnl_ops->setpgtccfgrx ||
960 !netdev->dcbnl_ops->setpgbwgcfgtx || 785 !netdev->dcbnl_ops->setpgbwgcfgtx ||
961 !netdev->dcbnl_ops->setpgbwgcfgrx) 786 !netdev->dcbnl_ops->setpgbwgcfgrx)
962 return ret; 787 return -EOPNOTSUPP;
963 788
964 ret = nla_parse_nested(pg_tb, DCB_PG_ATTR_MAX, 789 ret = nla_parse_nested(pg_tb, DCB_PG_ATTR_MAX,
965 tb[DCB_ATTR_PG_CFG], dcbnl_pg_nest); 790 tb[DCB_ATTR_PG_CFG], dcbnl_pg_nest);
966 if (ret) 791 if (ret)
967 goto err; 792 return ret;
968 793
969 for (i = DCB_PG_ATTR_TC_0; i <= DCB_PG_ATTR_TC_7; i++) { 794 for (i = DCB_PG_ATTR_TC_0; i <= DCB_PG_ATTR_TC_7; i++) {
970 if (!pg_tb[i]) 795 if (!pg_tb[i])
@@ -973,7 +798,7 @@ static int __dcbnl_pg_setcfg(struct net_device *netdev, struct nlattr **tb,
973 ret = nla_parse_nested(param_tb, DCB_TC_ATTR_PARAM_MAX, 798 ret = nla_parse_nested(param_tb, DCB_TC_ATTR_PARAM_MAX,
974 pg_tb[i], dcbnl_tc_param_nest); 799 pg_tb[i], dcbnl_tc_param_nest);
975 if (ret) 800 if (ret)
976 goto err; 801 return ret;
977 802
978 pgid = DCB_ATTR_VALUE_UNDEFINED; 803 pgid = DCB_ATTR_VALUE_UNDEFINED;
979 prio = DCB_ATTR_VALUE_UNDEFINED; 804 prio = DCB_ATTR_VALUE_UNDEFINED;
@@ -1026,63 +851,47 @@ static int __dcbnl_pg_setcfg(struct net_device *netdev, struct nlattr **tb,
1026 } 851 }
1027 } 852 }
1028 853
1029 ret = dcbnl_reply(0, RTM_SETDCB, 854 return nla_put_u8(skb, DCB_ATTR_PG_CFG, 0);
1030 (dir ? DCB_CMD_PGRX_SCFG : DCB_CMD_PGTX_SCFG),
1031 DCB_ATTR_PG_CFG, pid, seq, flags);
1032
1033err:
1034 return ret;
1035} 855}
1036 856
1037static int dcbnl_pgtx_setcfg(struct net_device *netdev, struct nlattr **tb, 857static int dcbnl_pgtx_setcfg(struct net_device *netdev, struct nlmsghdr *nlh,
1038 u32 pid, u32 seq, u16 flags) 858 u32 seq, struct nlattr **tb, struct sk_buff *skb)
1039{ 859{
1040 return __dcbnl_pg_setcfg(netdev, tb, pid, seq, flags, 0); 860 return __dcbnl_pg_setcfg(netdev, nlh, seq, tb, skb, 0);
1041} 861}
1042 862
1043static int dcbnl_pgrx_setcfg(struct net_device *netdev, struct nlattr **tb, 863static int dcbnl_pgrx_setcfg(struct net_device *netdev, struct nlmsghdr *nlh,
1044 u32 pid, u32 seq, u16 flags) 864 u32 seq, struct nlattr **tb, struct sk_buff *skb)
1045{ 865{
1046 return __dcbnl_pg_setcfg(netdev, tb, pid, seq, flags, 1); 866 return __dcbnl_pg_setcfg(netdev, nlh, seq, tb, skb, 1);
1047} 867}
1048 868
1049static int dcbnl_bcn_getcfg(struct net_device *netdev, struct nlattr **tb, 869static int dcbnl_bcn_getcfg(struct net_device *netdev, struct nlmsghdr *nlh,
1050 u32 pid, u32 seq, u16 flags) 870 u32 seq, struct nlattr **tb, struct sk_buff *skb)
1051{ 871{
1052 struct sk_buff *dcbnl_skb;
1053 struct nlmsghdr *nlh;
1054 struct dcbmsg *dcb;
1055 struct nlattr *bcn_nest; 872 struct nlattr *bcn_nest;
1056 struct nlattr *bcn_tb[DCB_BCN_ATTR_MAX + 1]; 873 struct nlattr *bcn_tb[DCB_BCN_ATTR_MAX + 1];
1057 u8 value_byte; 874 u8 value_byte;
1058 u32 value_integer; 875 u32 value_integer;
1059 int ret = -EINVAL; 876 int ret;
1060 bool getall = false; 877 bool getall = false;
1061 int i; 878 int i;
1062 879
1063 if (!tb[DCB_ATTR_BCN] || !netdev->dcbnl_ops->getbcnrp || 880 if (!tb[DCB_ATTR_BCN])
881 return -EINVAL;
882
883 if (!netdev->dcbnl_ops->getbcnrp ||
1064 !netdev->dcbnl_ops->getbcncfg) 884 !netdev->dcbnl_ops->getbcncfg)
1065 return ret; 885 return -EOPNOTSUPP;
1066 886
1067 ret = nla_parse_nested(bcn_tb, DCB_BCN_ATTR_MAX, 887 ret = nla_parse_nested(bcn_tb, DCB_BCN_ATTR_MAX,
1068 tb[DCB_ATTR_BCN], dcbnl_bcn_nest); 888 tb[DCB_ATTR_BCN], dcbnl_bcn_nest);
1069
1070 if (ret) 889 if (ret)
1071 goto err_out; 890 return ret;
1072
1073 dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1074 if (!dcbnl_skb)
1075 goto err_out;
1076
1077 nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
1078
1079 dcb = NLMSG_DATA(nlh);
1080 dcb->dcb_family = AF_UNSPEC;
1081 dcb->cmd = DCB_CMD_BCN_GCFG;
1082 891
1083 bcn_nest = nla_nest_start(dcbnl_skb, DCB_ATTR_BCN); 892 bcn_nest = nla_nest_start(skb, DCB_ATTR_BCN);
1084 if (!bcn_nest) 893 if (!bcn_nest)
1085 goto err; 894 return -EMSGSIZE;
1086 895
1087 if (bcn_tb[DCB_BCN_ATTR_ALL]) 896 if (bcn_tb[DCB_BCN_ATTR_ALL])
1088 getall = true; 897 getall = true;
@@ -1093,7 +902,7 @@ static int dcbnl_bcn_getcfg(struct net_device *netdev, struct nlattr **tb,
1093 902
1094 netdev->dcbnl_ops->getbcnrp(netdev, i - DCB_BCN_ATTR_RP_0, 903 netdev->dcbnl_ops->getbcnrp(netdev, i - DCB_BCN_ATTR_RP_0,
1095 &value_byte); 904 &value_byte);
1096 ret = nla_put_u8(dcbnl_skb, i, value_byte); 905 ret = nla_put_u8(skb, i, value_byte);
1097 if (ret) 906 if (ret)
1098 goto err_bcn; 907 goto err_bcn;
1099 } 908 }
@@ -1104,49 +913,41 @@ static int dcbnl_bcn_getcfg(struct net_device *netdev, struct nlattr **tb,
1104 913
1105 netdev->dcbnl_ops->getbcncfg(netdev, i, 914 netdev->dcbnl_ops->getbcncfg(netdev, i,
1106 &value_integer); 915 &value_integer);
1107 ret = nla_put_u32(dcbnl_skb, i, value_integer); 916 ret = nla_put_u32(skb, i, value_integer);
1108 if (ret) 917 if (ret)
1109 goto err_bcn; 918 goto err_bcn;
1110 } 919 }
1111 920
1112 nla_nest_end(dcbnl_skb, bcn_nest); 921 nla_nest_end(skb, bcn_nest);
1113
1114 nlmsg_end(dcbnl_skb, nlh);
1115
1116 ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
1117 if (ret)
1118 goto err_out;
1119 922
1120 return 0; 923 return 0;
1121 924
1122err_bcn: 925err_bcn:
1123 nla_nest_cancel(dcbnl_skb, bcn_nest); 926 nla_nest_cancel(skb, bcn_nest);
1124nlmsg_failure:
1125err:
1126 kfree_skb(dcbnl_skb);
1127err_out:
1128 ret = -EINVAL;
1129 return ret; 927 return ret;
1130} 928}
1131 929
1132static int dcbnl_bcn_setcfg(struct net_device *netdev, struct nlattr **tb, 930static int dcbnl_bcn_setcfg(struct net_device *netdev, struct nlmsghdr *nlh,
1133 u32 pid, u32 seq, u16 flags) 931 u32 seq, struct nlattr **tb, struct sk_buff *skb)
1134{ 932{
1135 struct nlattr *data[DCB_BCN_ATTR_MAX + 1]; 933 struct nlattr *data[DCB_BCN_ATTR_MAX + 1];
1136 int i; 934 int i;
1137 int ret = -EINVAL; 935 int ret;
1138 u8 value_byte; 936 u8 value_byte;
1139 u32 value_int; 937 u32 value_int;
1140 938
1141 if (!tb[DCB_ATTR_BCN] || !netdev->dcbnl_ops->setbcncfg || 939 if (!tb[DCB_ATTR_BCN])
940 return -EINVAL;
941
942 if (!netdev->dcbnl_ops->setbcncfg ||
1142 !netdev->dcbnl_ops->setbcnrp) 943 !netdev->dcbnl_ops->setbcnrp)
1143 return ret; 944 return -EOPNOTSUPP;
1144 945
1145 ret = nla_parse_nested(data, DCB_BCN_ATTR_MAX, 946 ret = nla_parse_nested(data, DCB_BCN_ATTR_MAX,
1146 tb[DCB_ATTR_BCN], 947 tb[DCB_ATTR_BCN],
1147 dcbnl_pfc_up_nest); 948 dcbnl_pfc_up_nest);
1148 if (ret) 949 if (ret)
1149 goto err; 950 return ret;
1150 951
1151 for (i = DCB_BCN_ATTR_RP_0; i <= DCB_BCN_ATTR_RP_7; i++) { 952 for (i = DCB_BCN_ATTR_RP_0; i <= DCB_BCN_ATTR_RP_7; i++) {
1152 if (data[i] == NULL) 953 if (data[i] == NULL)
@@ -1164,10 +965,7 @@ static int dcbnl_bcn_setcfg(struct net_device *netdev, struct nlattr **tb,
1164 i, value_int); 965 i, value_int);
1165 } 966 }
1166 967
1167 ret = dcbnl_reply(0, RTM_SETDCB, DCB_CMD_BCN_SCFG, DCB_ATTR_BCN, 968 return nla_put_u8(skb, DCB_ATTR_BCN, 0);
1168 pid, seq, flags);
1169err:
1170 return ret;
1171} 969}
1172 970
1173static int dcbnl_build_peer_app(struct net_device *netdev, struct sk_buff* skb, 971static int dcbnl_build_peer_app(struct net_device *netdev, struct sk_buff* skb,
@@ -1233,20 +1031,21 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev)
1233 struct dcb_app_type *itr; 1031 struct dcb_app_type *itr;
1234 const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops; 1032 const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
1235 int dcbx; 1033 int dcbx;
1236 int err = -EMSGSIZE; 1034 int err;
1237 1035
1238 if (nla_put_string(skb, DCB_ATTR_IFNAME, netdev->name)) 1036 if (nla_put_string(skb, DCB_ATTR_IFNAME, netdev->name))
1239 goto nla_put_failure; 1037 return -EMSGSIZE;
1038
1240 ieee = nla_nest_start(skb, DCB_ATTR_IEEE); 1039 ieee = nla_nest_start(skb, DCB_ATTR_IEEE);
1241 if (!ieee) 1040 if (!ieee)
1242 goto nla_put_failure; 1041 return -EMSGSIZE;
1243 1042
1244 if (ops->ieee_getets) { 1043 if (ops->ieee_getets) {
1245 struct ieee_ets ets; 1044 struct ieee_ets ets;
1246 err = ops->ieee_getets(netdev, &ets); 1045 err = ops->ieee_getets(netdev, &ets);
1247 if (!err && 1046 if (!err &&
1248 nla_put(skb, DCB_ATTR_IEEE_ETS, sizeof(ets), &ets)) 1047 nla_put(skb, DCB_ATTR_IEEE_ETS, sizeof(ets), &ets))
1249 goto nla_put_failure; 1048 return -EMSGSIZE;
1250 } 1049 }
1251 1050
1252 if (ops->ieee_getmaxrate) { 1051 if (ops->ieee_getmaxrate) {
@@ -1256,7 +1055,7 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev)
1256 err = nla_put(skb, DCB_ATTR_IEEE_MAXRATE, 1055 err = nla_put(skb, DCB_ATTR_IEEE_MAXRATE,
1257 sizeof(maxrate), &maxrate); 1056 sizeof(maxrate), &maxrate);
1258 if (err) 1057 if (err)
1259 goto nla_put_failure; 1058 return -EMSGSIZE;
1260 } 1059 }
1261 } 1060 }
1262 1061
@@ -1265,12 +1064,12 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev)
1265 err = ops->ieee_getpfc(netdev, &pfc); 1064 err = ops->ieee_getpfc(netdev, &pfc);
1266 if (!err && 1065 if (!err &&
1267 nla_put(skb, DCB_ATTR_IEEE_PFC, sizeof(pfc), &pfc)) 1066 nla_put(skb, DCB_ATTR_IEEE_PFC, sizeof(pfc), &pfc))
1268 goto nla_put_failure; 1067 return -EMSGSIZE;
1269 } 1068 }
1270 1069
1271 app = nla_nest_start(skb, DCB_ATTR_IEEE_APP_TABLE); 1070 app = nla_nest_start(skb, DCB_ATTR_IEEE_APP_TABLE);
1272 if (!app) 1071 if (!app)
1273 goto nla_put_failure; 1072 return -EMSGSIZE;
1274 1073
1275 spin_lock(&dcb_lock); 1074 spin_lock(&dcb_lock);
1276 list_for_each_entry(itr, &dcb_app_list, list) { 1075 list_for_each_entry(itr, &dcb_app_list, list) {
@@ -1279,7 +1078,7 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev)
1279 &itr->app); 1078 &itr->app);
1280 if (err) { 1079 if (err) {
1281 spin_unlock(&dcb_lock); 1080 spin_unlock(&dcb_lock);
1282 goto nla_put_failure; 1081 return -EMSGSIZE;
1283 } 1082 }
1284 } 1083 }
1285 } 1084 }
@@ -1298,7 +1097,7 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev)
1298 err = ops->ieee_peer_getets(netdev, &ets); 1097 err = ops->ieee_peer_getets(netdev, &ets);
1299 if (!err && 1098 if (!err &&
1300 nla_put(skb, DCB_ATTR_IEEE_PEER_ETS, sizeof(ets), &ets)) 1099 nla_put(skb, DCB_ATTR_IEEE_PEER_ETS, sizeof(ets), &ets))
1301 goto nla_put_failure; 1100 return -EMSGSIZE;
1302 } 1101 }
1303 1102
1304 if (ops->ieee_peer_getpfc) { 1103 if (ops->ieee_peer_getpfc) {
@@ -1306,7 +1105,7 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev)
1306 err = ops->ieee_peer_getpfc(netdev, &pfc); 1105 err = ops->ieee_peer_getpfc(netdev, &pfc);
1307 if (!err && 1106 if (!err &&
1308 nla_put(skb, DCB_ATTR_IEEE_PEER_PFC, sizeof(pfc), &pfc)) 1107 nla_put(skb, DCB_ATTR_IEEE_PEER_PFC, sizeof(pfc), &pfc))
1309 goto nla_put_failure; 1108 return -EMSGSIZE;
1310 } 1109 }
1311 1110
1312 if (ops->peer_getappinfo && ops->peer_getapptable) { 1111 if (ops->peer_getappinfo && ops->peer_getapptable) {
@@ -1315,20 +1114,17 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev)
1315 DCB_ATTR_IEEE_APP_UNSPEC, 1114 DCB_ATTR_IEEE_APP_UNSPEC,
1316 DCB_ATTR_IEEE_APP); 1115 DCB_ATTR_IEEE_APP);
1317 if (err) 1116 if (err)
1318 goto nla_put_failure; 1117 return -EMSGSIZE;
1319 } 1118 }
1320 1119
1321 nla_nest_end(skb, ieee); 1120 nla_nest_end(skb, ieee);
1322 if (dcbx >= 0) { 1121 if (dcbx >= 0) {
1323 err = nla_put_u8(skb, DCB_ATTR_DCBX, dcbx); 1122 err = nla_put_u8(skb, DCB_ATTR_DCBX, dcbx);
1324 if (err) 1123 if (err)
1325 goto nla_put_failure; 1124 return -EMSGSIZE;
1326 } 1125 }
1327 1126
1328 return 0; 1127 return 0;
1329
1330nla_put_failure:
1331 return err;
1332} 1128}
1333 1129
1334static int dcbnl_cee_pg_fill(struct sk_buff *skb, struct net_device *dev, 1130static int dcbnl_cee_pg_fill(struct sk_buff *skb, struct net_device *dev,
@@ -1340,13 +1136,13 @@ static int dcbnl_cee_pg_fill(struct sk_buff *skb, struct net_device *dev,
1340 struct nlattr *pg = nla_nest_start(skb, i); 1136 struct nlattr *pg = nla_nest_start(skb, i);
1341 1137
1342 if (!pg) 1138 if (!pg)
1343 goto nla_put_failure; 1139 return -EMSGSIZE;
1344 1140
1345 for (i = DCB_PG_ATTR_TC_0; i <= DCB_PG_ATTR_TC_7; i++) { 1141 for (i = DCB_PG_ATTR_TC_0; i <= DCB_PG_ATTR_TC_7; i++) {
1346 struct nlattr *tc_nest = nla_nest_start(skb, i); 1142 struct nlattr *tc_nest = nla_nest_start(skb, i);
1347 1143
1348 if (!tc_nest) 1144 if (!tc_nest)
1349 goto nla_put_failure; 1145 return -EMSGSIZE;
1350 1146
1351 pgid = DCB_ATTR_VALUE_UNDEFINED; 1147 pgid = DCB_ATTR_VALUE_UNDEFINED;
1352 prio = DCB_ATTR_VALUE_UNDEFINED; 1148 prio = DCB_ATTR_VALUE_UNDEFINED;
@@ -1364,7 +1160,7 @@ static int dcbnl_cee_pg_fill(struct sk_buff *skb, struct net_device *dev,
1364 nla_put_u8(skb, DCB_TC_ATTR_PARAM_UP_MAPPING, up_map) || 1160 nla_put_u8(skb, DCB_TC_ATTR_PARAM_UP_MAPPING, up_map) ||
1365 nla_put_u8(skb, DCB_TC_ATTR_PARAM_STRICT_PRIO, prio) || 1161 nla_put_u8(skb, DCB_TC_ATTR_PARAM_STRICT_PRIO, prio) ||
1366 nla_put_u8(skb, DCB_TC_ATTR_PARAM_BW_PCT, tc_pct)) 1162 nla_put_u8(skb, DCB_TC_ATTR_PARAM_BW_PCT, tc_pct))
1367 goto nla_put_failure; 1163 return -EMSGSIZE;
1368 nla_nest_end(skb, tc_nest); 1164 nla_nest_end(skb, tc_nest);
1369 } 1165 }
1370 1166
@@ -1378,13 +1174,10 @@ static int dcbnl_cee_pg_fill(struct sk_buff *skb, struct net_device *dev,
1378 ops->getpgbwgcfgtx(dev, i - DCB_PG_ATTR_BW_ID_0, 1174 ops->getpgbwgcfgtx(dev, i - DCB_PG_ATTR_BW_ID_0,
1379 &tc_pct); 1175 &tc_pct);
1380 if (nla_put_u8(skb, i, tc_pct)) 1176 if (nla_put_u8(skb, i, tc_pct))
1381 goto nla_put_failure; 1177 return -EMSGSIZE;
1382 } 1178 }
1383 nla_nest_end(skb, pg); 1179 nla_nest_end(skb, pg);
1384 return 0; 1180 return 0;
1385
1386nla_put_failure:
1387 return -EMSGSIZE;
1388} 1181}
1389 1182
1390static int dcbnl_cee_fill(struct sk_buff *skb, struct net_device *netdev) 1183static int dcbnl_cee_fill(struct sk_buff *skb, struct net_device *netdev)
@@ -1531,27 +1324,16 @@ static int dcbnl_notify(struct net_device *dev, int event, int cmd,
1531 struct net *net = dev_net(dev); 1324 struct net *net = dev_net(dev);
1532 struct sk_buff *skb; 1325 struct sk_buff *skb;
1533 struct nlmsghdr *nlh; 1326 struct nlmsghdr *nlh;
1534 struct dcbmsg *dcb;
1535 const struct dcbnl_rtnl_ops *ops = dev->dcbnl_ops; 1327 const struct dcbnl_rtnl_ops *ops = dev->dcbnl_ops;
1536 int err; 1328 int err;
1537 1329
1538 if (!ops) 1330 if (!ops)
1539 return -EOPNOTSUPP; 1331 return -EOPNOTSUPP;
1540 1332
1541 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); 1333 skb = dcbnl_newmsg(event, cmd, pid, seq, 0, &nlh);
1542 if (!skb) 1334 if (!skb)
1543 return -ENOBUFS; 1335 return -ENOBUFS;
1544 1336
1545 nlh = nlmsg_put(skb, pid, 0, event, sizeof(*dcb), 0);
1546 if (nlh == NULL) {
1547 nlmsg_free(skb);
1548 return -EMSGSIZE;
1549 }
1550
1551 dcb = NLMSG_DATA(nlh);
1552 dcb->dcb_family = AF_UNSPEC;
1553 dcb->cmd = cmd;
1554
1555 if (dcbx_ver == DCB_CAP_DCBX_VER_IEEE) 1337 if (dcbx_ver == DCB_CAP_DCBX_VER_IEEE)
1556 err = dcbnl_ieee_fill(skb, dev); 1338 err = dcbnl_ieee_fill(skb, dev);
1557 else 1339 else
@@ -1559,8 +1341,7 @@ static int dcbnl_notify(struct net_device *dev, int event, int cmd,
1559 1341
1560 if (err < 0) { 1342 if (err < 0) {
1561 /* Report error to broadcast listeners */ 1343 /* Report error to broadcast listeners */
1562 nlmsg_cancel(skb, nlh); 1344 nlmsg_free(skb);
1563 kfree_skb(skb);
1564 rtnl_set_sk_err(net, RTNLGRP_DCB, err); 1345 rtnl_set_sk_err(net, RTNLGRP_DCB, err);
1565 } else { 1346 } else {
1566 /* End nlmsg and notify broadcast listeners */ 1347 /* End nlmsg and notify broadcast listeners */
@@ -1590,15 +1371,15 @@ EXPORT_SYMBOL(dcbnl_cee_notify);
1590 * No attempt is made to reconcile the case where only part of the 1371 * No attempt is made to reconcile the case where only part of the
1591 * cmd can be completed. 1372 * cmd can be completed.
1592 */ 1373 */
1593static int dcbnl_ieee_set(struct net_device *netdev, struct nlattr **tb, 1374static int dcbnl_ieee_set(struct net_device *netdev, struct nlmsghdr *nlh,
1594 u32 pid, u32 seq, u16 flags) 1375 u32 seq, struct nlattr **tb, struct sk_buff *skb)
1595{ 1376{
1596 const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops; 1377 const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
1597 struct nlattr *ieee[DCB_ATTR_IEEE_MAX + 1]; 1378 struct nlattr *ieee[DCB_ATTR_IEEE_MAX + 1];
1598 int err = -EOPNOTSUPP; 1379 int err;
1599 1380
1600 if (!ops) 1381 if (!ops)
1601 return err; 1382 return -EOPNOTSUPP;
1602 1383
1603 if (!tb[DCB_ATTR_IEEE]) 1384 if (!tb[DCB_ATTR_IEEE])
1604 return -EINVAL; 1385 return -EINVAL;
@@ -1649,58 +1430,28 @@ static int dcbnl_ieee_set(struct net_device *netdev, struct nlattr **tb,
1649 } 1430 }
1650 1431
1651err: 1432err:
1652 dcbnl_reply(err, RTM_SETDCB, DCB_CMD_IEEE_SET, DCB_ATTR_IEEE, 1433 err = nla_put_u8(skb, DCB_ATTR_IEEE, err);
1653 pid, seq, flags);
1654 dcbnl_ieee_notify(netdev, RTM_SETDCB, DCB_CMD_IEEE_SET, seq, 0); 1434 dcbnl_ieee_notify(netdev, RTM_SETDCB, DCB_CMD_IEEE_SET, seq, 0);
1655 return err; 1435 return err;
1656} 1436}
1657 1437
1658static int dcbnl_ieee_get(struct net_device *netdev, struct nlattr **tb, 1438static int dcbnl_ieee_get(struct net_device *netdev, struct nlmsghdr *nlh,
1659 u32 pid, u32 seq, u16 flags) 1439 u32 seq, struct nlattr **tb, struct sk_buff *skb)
1660{ 1440{
1661 struct net *net = dev_net(netdev);
1662 struct sk_buff *skb;
1663 struct nlmsghdr *nlh;
1664 struct dcbmsg *dcb;
1665 const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops; 1441 const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
1666 int err;
1667 1442
1668 if (!ops) 1443 if (!ops)
1669 return -EOPNOTSUPP; 1444 return -EOPNOTSUPP;
1670 1445
1671 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); 1446 return dcbnl_ieee_fill(skb, netdev);
1672 if (!skb)
1673 return -ENOBUFS;
1674
1675 nlh = nlmsg_put(skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
1676 if (nlh == NULL) {
1677 nlmsg_free(skb);
1678 return -EMSGSIZE;
1679 }
1680
1681 dcb = NLMSG_DATA(nlh);
1682 dcb->dcb_family = AF_UNSPEC;
1683 dcb->cmd = DCB_CMD_IEEE_GET;
1684
1685 err = dcbnl_ieee_fill(skb, netdev);
1686
1687 if (err < 0) {
1688 nlmsg_cancel(skb, nlh);
1689 kfree_skb(skb);
1690 } else {
1691 nlmsg_end(skb, nlh);
1692 err = rtnl_unicast(skb, net, pid);
1693 }
1694
1695 return err;
1696} 1447}
1697 1448
1698static int dcbnl_ieee_del(struct net_device *netdev, struct nlattr **tb, 1449static int dcbnl_ieee_del(struct net_device *netdev, struct nlmsghdr *nlh,
1699 u32 pid, u32 seq, u16 flags) 1450 u32 seq, struct nlattr **tb, struct sk_buff *skb)
1700{ 1451{
1701 const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops; 1452 const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
1702 struct nlattr *ieee[DCB_ATTR_IEEE_MAX + 1]; 1453 struct nlattr *ieee[DCB_ATTR_IEEE_MAX + 1];
1703 int err = -EOPNOTSUPP; 1454 int err;
1704 1455
1705 if (!ops) 1456 if (!ops)
1706 return -EOPNOTSUPP; 1457 return -EOPNOTSUPP;
@@ -1733,32 +1484,26 @@ static int dcbnl_ieee_del(struct net_device *netdev, struct nlattr **tb,
1733 } 1484 }
1734 1485
1735err: 1486err:
1736 dcbnl_reply(err, RTM_SETDCB, DCB_CMD_IEEE_DEL, DCB_ATTR_IEEE, 1487 err = nla_put_u8(skb, DCB_ATTR_IEEE, err);
1737 pid, seq, flags);
1738 dcbnl_ieee_notify(netdev, RTM_SETDCB, DCB_CMD_IEEE_DEL, seq, 0); 1488 dcbnl_ieee_notify(netdev, RTM_SETDCB, DCB_CMD_IEEE_DEL, seq, 0);
1739 return err; 1489 return err;
1740} 1490}
1741 1491
1742 1492
1743/* DCBX configuration */ 1493/* DCBX configuration */
1744static int dcbnl_getdcbx(struct net_device *netdev, struct nlattr **tb, 1494static int dcbnl_getdcbx(struct net_device *netdev, struct nlmsghdr *nlh,
1745 u32 pid, u32 seq, u16 flags) 1495 u32 seq, struct nlattr **tb, struct sk_buff *skb)
1746{ 1496{
1747 int ret;
1748
1749 if (!netdev->dcbnl_ops->getdcbx) 1497 if (!netdev->dcbnl_ops->getdcbx)
1750 return -EOPNOTSUPP; 1498 return -EOPNOTSUPP;
1751 1499
1752 ret = dcbnl_reply(netdev->dcbnl_ops->getdcbx(netdev), RTM_GETDCB, 1500 return nla_put_u8(skb, DCB_ATTR_DCBX,
1753 DCB_CMD_GDCBX, DCB_ATTR_DCBX, pid, seq, flags); 1501 netdev->dcbnl_ops->getdcbx(netdev));
1754
1755 return ret;
1756} 1502}
1757 1503
1758static int dcbnl_setdcbx(struct net_device *netdev, struct nlattr **tb, 1504static int dcbnl_setdcbx(struct net_device *netdev, struct nlmsghdr *nlh,
1759 u32 pid, u32 seq, u16 flags) 1505 u32 seq, struct nlattr **tb, struct sk_buff *skb)
1760{ 1506{
1761 int ret;
1762 u8 value; 1507 u8 value;
1763 1508
1764 if (!netdev->dcbnl_ops->setdcbx) 1509 if (!netdev->dcbnl_ops->setdcbx)
@@ -1769,19 +1514,13 @@ static int dcbnl_setdcbx(struct net_device *netdev, struct nlattr **tb,
1769 1514
1770 value = nla_get_u8(tb[DCB_ATTR_DCBX]); 1515 value = nla_get_u8(tb[DCB_ATTR_DCBX]);
1771 1516
1772 ret = dcbnl_reply(netdev->dcbnl_ops->setdcbx(netdev, value), 1517 return nla_put_u8(skb, DCB_ATTR_DCBX,
1773 RTM_SETDCB, DCB_CMD_SDCBX, DCB_ATTR_DCBX, 1518 netdev->dcbnl_ops->setdcbx(netdev, value));
1774 pid, seq, flags);
1775
1776 return ret;
1777} 1519}
1778 1520
1779static int dcbnl_getfeatcfg(struct net_device *netdev, struct nlattr **tb, 1521static int dcbnl_getfeatcfg(struct net_device *netdev, struct nlmsghdr *nlh,
1780 u32 pid, u32 seq, u16 flags) 1522 u32 seq, struct nlattr **tb, struct sk_buff *skb)
1781{ 1523{
1782 struct sk_buff *dcbnl_skb;
1783 struct nlmsghdr *nlh;
1784 struct dcbmsg *dcb;
1785 struct nlattr *data[DCB_FEATCFG_ATTR_MAX + 1], *nest; 1524 struct nlattr *data[DCB_FEATCFG_ATTR_MAX + 1], *nest;
1786 u8 value; 1525 u8 value;
1787 int ret, i; 1526 int ret, i;
@@ -1796,25 +1535,11 @@ static int dcbnl_getfeatcfg(struct net_device *netdev, struct nlattr **tb,
1796 ret = nla_parse_nested(data, DCB_FEATCFG_ATTR_MAX, tb[DCB_ATTR_FEATCFG], 1535 ret = nla_parse_nested(data, DCB_FEATCFG_ATTR_MAX, tb[DCB_ATTR_FEATCFG],
1797 dcbnl_featcfg_nest); 1536 dcbnl_featcfg_nest);
1798 if (ret) 1537 if (ret)
1799 goto err_out; 1538 return ret;
1800
1801 dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1802 if (!dcbnl_skb) {
1803 ret = -ENOBUFS;
1804 goto err_out;
1805 }
1806
1807 nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
1808
1809 dcb = NLMSG_DATA(nlh);
1810 dcb->dcb_family = AF_UNSPEC;
1811 dcb->cmd = DCB_CMD_GFEATCFG;
1812 1539
1813 nest = nla_nest_start(dcbnl_skb, DCB_ATTR_FEATCFG); 1540 nest = nla_nest_start(skb, DCB_ATTR_FEATCFG);
1814 if (!nest) { 1541 if (!nest)
1815 ret = -EMSGSIZE; 1542 return -EMSGSIZE;
1816 goto nla_put_failure;
1817 }
1818 1543
1819 if (data[DCB_FEATCFG_ATTR_ALL]) 1544 if (data[DCB_FEATCFG_ATTR_ALL])
1820 getall = 1; 1545 getall = 1;
@@ -1825,28 +1550,21 @@ static int dcbnl_getfeatcfg(struct net_device *netdev, struct nlattr **tb,
1825 1550
1826 ret = netdev->dcbnl_ops->getfeatcfg(netdev, i, &value); 1551 ret = netdev->dcbnl_ops->getfeatcfg(netdev, i, &value);
1827 if (!ret) 1552 if (!ret)
1828 ret = nla_put_u8(dcbnl_skb, i, value); 1553 ret = nla_put_u8(skb, i, value);
1829 1554
1830 if (ret) { 1555 if (ret) {
1831 nla_nest_cancel(dcbnl_skb, nest); 1556 nla_nest_cancel(skb, nest);
1832 goto nla_put_failure; 1557 goto nla_put_failure;
1833 } 1558 }
1834 } 1559 }
1835 nla_nest_end(dcbnl_skb, nest); 1560 nla_nest_end(skb, nest);
1836 1561
1837 nlmsg_end(dcbnl_skb, nlh);
1838
1839 return rtnl_unicast(dcbnl_skb, &init_net, pid);
1840nla_put_failure: 1562nla_put_failure:
1841 nlmsg_cancel(dcbnl_skb, nlh);
1842nlmsg_failure:
1843 kfree_skb(dcbnl_skb);
1844err_out:
1845 return ret; 1563 return ret;
1846} 1564}
1847 1565
1848static int dcbnl_setfeatcfg(struct net_device *netdev, struct nlattr **tb, 1566static int dcbnl_setfeatcfg(struct net_device *netdev, struct nlmsghdr *nlh,
1849 u32 pid, u32 seq, u16 flags) 1567 u32 seq, struct nlattr **tb, struct sk_buff *skb)
1850{ 1568{
1851 struct nlattr *data[DCB_FEATCFG_ATTR_MAX + 1]; 1569 struct nlattr *data[DCB_FEATCFG_ATTR_MAX + 1];
1852 int ret, i; 1570 int ret, i;
@@ -1876,60 +1594,73 @@ static int dcbnl_setfeatcfg(struct net_device *netdev, struct nlattr **tb,
1876 goto err; 1594 goto err;
1877 } 1595 }
1878err: 1596err:
1879 dcbnl_reply(ret, RTM_SETDCB, DCB_CMD_SFEATCFG, DCB_ATTR_FEATCFG, 1597 ret = nla_put_u8(skb, DCB_ATTR_FEATCFG, ret);
1880 pid, seq, flags);
1881 1598
1882 return ret; 1599 return ret;
1883} 1600}
1884 1601
1885/* Handle CEE DCBX GET commands. */ 1602/* Handle CEE DCBX GET commands. */
1886static int dcbnl_cee_get(struct net_device *netdev, struct nlattr **tb, 1603static int dcbnl_cee_get(struct net_device *netdev, struct nlmsghdr *nlh,
1887 u32 pid, u32 seq, u16 flags) 1604 u32 seq, struct nlattr **tb, struct sk_buff *skb)
1888{ 1605{
1889 struct net *net = dev_net(netdev);
1890 struct sk_buff *skb;
1891 struct nlmsghdr *nlh;
1892 struct dcbmsg *dcb;
1893 const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops; 1606 const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
1894 int err;
1895 1607
1896 if (!ops) 1608 if (!ops)
1897 return -EOPNOTSUPP; 1609 return -EOPNOTSUPP;
1898 1610
1899 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); 1611 return dcbnl_cee_fill(skb, netdev);
1900 if (!skb) 1612}
1901 return -ENOBUFS;
1902
1903 nlh = nlmsg_put(skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
1904 if (nlh == NULL) {
1905 nlmsg_free(skb);
1906 return -EMSGSIZE;
1907 }
1908 1613
1909 dcb = NLMSG_DATA(nlh); 1614struct reply_func {
1910 dcb->dcb_family = AF_UNSPEC; 1615 /* reply netlink message type */
1911 dcb->cmd = DCB_CMD_CEE_GET; 1616 int type;
1912 1617
1913 err = dcbnl_cee_fill(skb, netdev); 1618 /* function to fill message contents */
1619 int (*cb)(struct net_device *, struct nlmsghdr *, u32,
1620 struct nlattr **, struct sk_buff *);
1621};
1914 1622
1915 if (err < 0) { 1623static const struct reply_func reply_funcs[DCB_CMD_MAX+1] = {
1916 nlmsg_cancel(skb, nlh); 1624 [DCB_CMD_GSTATE] = { RTM_GETDCB, dcbnl_getstate },
1917 nlmsg_free(skb); 1625 [DCB_CMD_SSTATE] = { RTM_SETDCB, dcbnl_setstate },
1918 } else { 1626 [DCB_CMD_PFC_GCFG] = { RTM_GETDCB, dcbnl_getpfccfg },
1919 nlmsg_end(skb, nlh); 1627 [DCB_CMD_PFC_SCFG] = { RTM_SETDCB, dcbnl_setpfccfg },
1920 err = rtnl_unicast(skb, net, pid); 1628 [DCB_CMD_GPERM_HWADDR] = { RTM_GETDCB, dcbnl_getperm_hwaddr },
1921 } 1629 [DCB_CMD_GCAP] = { RTM_GETDCB, dcbnl_getcap },
1922 return err; 1630 [DCB_CMD_GNUMTCS] = { RTM_GETDCB, dcbnl_getnumtcs },
1923} 1631 [DCB_CMD_SNUMTCS] = { RTM_SETDCB, dcbnl_setnumtcs },
1632 [DCB_CMD_PFC_GSTATE] = { RTM_GETDCB, dcbnl_getpfcstate },
1633 [DCB_CMD_PFC_SSTATE] = { RTM_SETDCB, dcbnl_setpfcstate },
1634 [DCB_CMD_GAPP] = { RTM_GETDCB, dcbnl_getapp },
1635 [DCB_CMD_SAPP] = { RTM_SETDCB, dcbnl_setapp },
1636 [DCB_CMD_PGTX_GCFG] = { RTM_GETDCB, dcbnl_pgtx_getcfg },
1637 [DCB_CMD_PGTX_SCFG] = { RTM_SETDCB, dcbnl_pgtx_setcfg },
1638 [DCB_CMD_PGRX_GCFG] = { RTM_GETDCB, dcbnl_pgrx_getcfg },
1639 [DCB_CMD_PGRX_SCFG] = { RTM_SETDCB, dcbnl_pgrx_setcfg },
1640 [DCB_CMD_SET_ALL] = { RTM_SETDCB, dcbnl_setall },
1641 [DCB_CMD_BCN_GCFG] = { RTM_GETDCB, dcbnl_bcn_getcfg },
1642 [DCB_CMD_BCN_SCFG] = { RTM_SETDCB, dcbnl_bcn_setcfg },
1643 [DCB_CMD_IEEE_GET] = { RTM_GETDCB, dcbnl_ieee_get },
1644 [DCB_CMD_IEEE_SET] = { RTM_SETDCB, dcbnl_ieee_set },
1645 [DCB_CMD_IEEE_DEL] = { RTM_SETDCB, dcbnl_ieee_del },
1646 [DCB_CMD_GDCBX] = { RTM_GETDCB, dcbnl_getdcbx },
1647 [DCB_CMD_SDCBX] = { RTM_SETDCB, dcbnl_setdcbx },
1648 [DCB_CMD_GFEATCFG] = { RTM_GETDCB, dcbnl_getfeatcfg },
1649 [DCB_CMD_SFEATCFG] = { RTM_SETDCB, dcbnl_setfeatcfg },
1650 [DCB_CMD_CEE_GET] = { RTM_GETDCB, dcbnl_cee_get },
1651};
1924 1652
1925static int dcb_doit(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) 1653static int dcb_doit(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
1926{ 1654{
1927 struct net *net = sock_net(skb->sk); 1655 struct net *net = sock_net(skb->sk);
1928 struct net_device *netdev; 1656 struct net_device *netdev;
1929 struct dcbmsg *dcb = (struct dcbmsg *)NLMSG_DATA(nlh); 1657 struct dcbmsg *dcb = nlmsg_data(nlh);
1930 struct nlattr *tb[DCB_ATTR_MAX + 1]; 1658 struct nlattr *tb[DCB_ATTR_MAX + 1];
1931 u32 pid = skb ? NETLINK_CB(skb).pid : 0; 1659 u32 pid = skb ? NETLINK_CB(skb).pid : 0;
1932 int ret = -EINVAL; 1660 int ret = -EINVAL;
1661 struct sk_buff *reply_skb;
1662 struct nlmsghdr *reply_nlh = NULL;
1663 const struct reply_func *fn;
1933 1664
1934 if (!net_eq(net, &init_net)) 1665 if (!net_eq(net, &init_net))
1935 return -EINVAL; 1666 return -EINVAL;
@@ -1939,136 +1670,78 @@ static int dcb_doit(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
1939 if (ret < 0) 1670 if (ret < 0)
1940 return ret; 1671 return ret;
1941 1672
1673 if (dcb->cmd > DCB_CMD_MAX)
1674 return -EINVAL;
1675
1676 /* check if a reply function has been defined for the command */
1677 fn = &reply_funcs[dcb->cmd];
1678 if (!fn->cb)
1679 return -EOPNOTSUPP;
1680
1942 if (!tb[DCB_ATTR_IFNAME]) 1681 if (!tb[DCB_ATTR_IFNAME])
1943 return -EINVAL; 1682 return -EINVAL;
1944 1683
1945 netdev = dev_get_by_name(&init_net, nla_data(tb[DCB_ATTR_IFNAME])); 1684 netdev = dev_get_by_name(&init_net, nla_data(tb[DCB_ATTR_IFNAME]));
1946 if (!netdev) 1685 if (!netdev)
1947 return -EINVAL; 1686 return -ENODEV;
1948 1687
1949 if (!netdev->dcbnl_ops) 1688 if (!netdev->dcbnl_ops) {
1950 goto errout; 1689 ret = -EOPNOTSUPP;
1951
1952 switch (dcb->cmd) {
1953 case DCB_CMD_GSTATE:
1954 ret = dcbnl_getstate(netdev, tb, pid, nlh->nlmsg_seq,
1955 nlh->nlmsg_flags);
1956 goto out;
1957 case DCB_CMD_PFC_GCFG:
1958 ret = dcbnl_getpfccfg(netdev, tb, pid, nlh->nlmsg_seq,
1959 nlh->nlmsg_flags);
1960 goto out;
1961 case DCB_CMD_GPERM_HWADDR:
1962 ret = dcbnl_getperm_hwaddr(netdev, tb, pid, nlh->nlmsg_seq,
1963 nlh->nlmsg_flags);
1964 goto out;
1965 case DCB_CMD_PGTX_GCFG:
1966 ret = dcbnl_pgtx_getcfg(netdev, tb, pid, nlh->nlmsg_seq,
1967 nlh->nlmsg_flags);
1968 goto out;
1969 case DCB_CMD_PGRX_GCFG:
1970 ret = dcbnl_pgrx_getcfg(netdev, tb, pid, nlh->nlmsg_seq,
1971 nlh->nlmsg_flags);
1972 goto out;
1973 case DCB_CMD_BCN_GCFG:
1974 ret = dcbnl_bcn_getcfg(netdev, tb, pid, nlh->nlmsg_seq,
1975 nlh->nlmsg_flags);
1976 goto out;
1977 case DCB_CMD_SSTATE:
1978 ret = dcbnl_setstate(netdev, tb, pid, nlh->nlmsg_seq,
1979 nlh->nlmsg_flags);
1980 goto out;
1981 case DCB_CMD_PFC_SCFG:
1982 ret = dcbnl_setpfccfg(netdev, tb, pid, nlh->nlmsg_seq,
1983 nlh->nlmsg_flags);
1984 goto out; 1690 goto out;
1691 }
1985 1692
1986 case DCB_CMD_SET_ALL: 1693 reply_skb = dcbnl_newmsg(fn->type, dcb->cmd, pid, nlh->nlmsg_seq,
1987 ret = dcbnl_setall(netdev, tb, pid, nlh->nlmsg_seq, 1694 nlh->nlmsg_flags, &reply_nlh);
1988 nlh->nlmsg_flags); 1695 if (!reply_skb) {
1989 goto out; 1696 ret = -ENOBUFS;
1990 case DCB_CMD_PGTX_SCFG:
1991 ret = dcbnl_pgtx_setcfg(netdev, tb, pid, nlh->nlmsg_seq,
1992 nlh->nlmsg_flags);
1993 goto out;
1994 case DCB_CMD_PGRX_SCFG:
1995 ret = dcbnl_pgrx_setcfg(netdev, tb, pid, nlh->nlmsg_seq,
1996 nlh->nlmsg_flags);
1997 goto out;
1998 case DCB_CMD_GCAP:
1999 ret = dcbnl_getcap(netdev, tb, pid, nlh->nlmsg_seq,
2000 nlh->nlmsg_flags);
2001 goto out;
2002 case DCB_CMD_GNUMTCS:
2003 ret = dcbnl_getnumtcs(netdev, tb, pid, nlh->nlmsg_seq,
2004 nlh->nlmsg_flags);
2005 goto out;
2006 case DCB_CMD_SNUMTCS:
2007 ret = dcbnl_setnumtcs(netdev, tb, pid, nlh->nlmsg_seq,
2008 nlh->nlmsg_flags);
2009 goto out;
2010 case DCB_CMD_PFC_GSTATE:
2011 ret = dcbnl_getpfcstate(netdev, tb, pid, nlh->nlmsg_seq,
2012 nlh->nlmsg_flags);
2013 goto out;
2014 case DCB_CMD_PFC_SSTATE:
2015 ret = dcbnl_setpfcstate(netdev, tb, pid, nlh->nlmsg_seq,
2016 nlh->nlmsg_flags);
2017 goto out;
2018 case DCB_CMD_BCN_SCFG:
2019 ret = dcbnl_bcn_setcfg(netdev, tb, pid, nlh->nlmsg_seq,
2020 nlh->nlmsg_flags);
2021 goto out;
2022 case DCB_CMD_GAPP:
2023 ret = dcbnl_getapp(netdev, tb, pid, nlh->nlmsg_seq,
2024 nlh->nlmsg_flags);
2025 goto out;
2026 case DCB_CMD_SAPP:
2027 ret = dcbnl_setapp(netdev, tb, pid, nlh->nlmsg_seq,
2028 nlh->nlmsg_flags);
2029 goto out;
2030 case DCB_CMD_IEEE_SET:
2031 ret = dcbnl_ieee_set(netdev, tb, pid, nlh->nlmsg_seq,
2032 nlh->nlmsg_flags);
2033 goto out;
2034 case DCB_CMD_IEEE_GET:
2035 ret = dcbnl_ieee_get(netdev, tb, pid, nlh->nlmsg_seq,
2036 nlh->nlmsg_flags);
2037 goto out;
2038 case DCB_CMD_IEEE_DEL:
2039 ret = dcbnl_ieee_del(netdev, tb, pid, nlh->nlmsg_seq,
2040 nlh->nlmsg_flags);
2041 goto out;
2042 case DCB_CMD_GDCBX:
2043 ret = dcbnl_getdcbx(netdev, tb, pid, nlh->nlmsg_seq,
2044 nlh->nlmsg_flags);
2045 goto out;
2046 case DCB_CMD_SDCBX:
2047 ret = dcbnl_setdcbx(netdev, tb, pid, nlh->nlmsg_seq,
2048 nlh->nlmsg_flags);
2049 goto out;
2050 case DCB_CMD_GFEATCFG:
2051 ret = dcbnl_getfeatcfg(netdev, tb, pid, nlh->nlmsg_seq,
2052 nlh->nlmsg_flags);
2053 goto out;
2054 case DCB_CMD_SFEATCFG:
2055 ret = dcbnl_setfeatcfg(netdev, tb, pid, nlh->nlmsg_seq,
2056 nlh->nlmsg_flags);
2057 goto out; 1697 goto out;
2058 case DCB_CMD_CEE_GET: 1698 }
2059 ret = dcbnl_cee_get(netdev, tb, pid, nlh->nlmsg_seq, 1699
2060 nlh->nlmsg_flags); 1700 ret = fn->cb(netdev, nlh, nlh->nlmsg_seq, tb, reply_skb);
1701 if (ret < 0) {
1702 nlmsg_free(reply_skb);
2061 goto out; 1703 goto out;
2062 default:
2063 goto errout;
2064 } 1704 }
2065errout: 1705
2066 ret = -EINVAL; 1706 nlmsg_end(reply_skb, reply_nlh);
1707
1708 ret = rtnl_unicast(reply_skb, &init_net, pid);
2067out: 1709out:
2068 dev_put(netdev); 1710 dev_put(netdev);
2069 return ret; 1711 return ret;
2070} 1712}
2071 1713
1714static struct dcb_app_type *dcb_app_lookup(const struct dcb_app *app,
1715 int ifindex, int prio)
1716{
1717 struct dcb_app_type *itr;
1718
1719 list_for_each_entry(itr, &dcb_app_list, list) {
1720 if (itr->app.selector == app->selector &&
1721 itr->app.protocol == app->protocol &&
1722 itr->ifindex == ifindex &&
1723 (!prio || itr->app.priority == prio))
1724 return itr;
1725 }
1726
1727 return NULL;
1728}
1729
1730static int dcb_app_add(const struct dcb_app *app, int ifindex)
1731{
1732 struct dcb_app_type *entry;
1733
1734 entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
1735 if (!entry)
1736 return -ENOMEM;
1737
1738 memcpy(&entry->app, app, sizeof(*app));
1739 entry->ifindex = ifindex;
1740 list_add(&entry->list, &dcb_app_list);
1741
1742 return 0;
1743}
1744
2072/** 1745/**
2073 * dcb_getapp - retrieve the DCBX application user priority 1746 * dcb_getapp - retrieve the DCBX application user priority
2074 * 1747 *
@@ -2082,14 +1755,8 @@ u8 dcb_getapp(struct net_device *dev, struct dcb_app *app)
2082 u8 prio = 0; 1755 u8 prio = 0;
2083 1756
2084 spin_lock(&dcb_lock); 1757 spin_lock(&dcb_lock);
2085 list_for_each_entry(itr, &dcb_app_list, list) { 1758 if ((itr = dcb_app_lookup(app, dev->ifindex, 0)))
2086 if (itr->app.selector == app->selector && 1759 prio = itr->app.priority;
2087 itr->app.protocol == app->protocol &&
2088 itr->ifindex == dev->ifindex) {
2089 prio = itr->app.priority;
2090 break;
2091 }
2092 }
2093 spin_unlock(&dcb_lock); 1760 spin_unlock(&dcb_lock);
2094 1761
2095 return prio; 1762 return prio;
@@ -2107,6 +1774,7 @@ int dcb_setapp(struct net_device *dev, struct dcb_app *new)
2107{ 1774{
2108 struct dcb_app_type *itr; 1775 struct dcb_app_type *itr;
2109 struct dcb_app_type event; 1776 struct dcb_app_type event;
1777 int err = 0;
2110 1778
2111 event.ifindex = dev->ifindex; 1779 event.ifindex = dev->ifindex;
2112 memcpy(&event.app, new, sizeof(event.app)); 1780 memcpy(&event.app, new, sizeof(event.app));
@@ -2115,36 +1783,23 @@ int dcb_setapp(struct net_device *dev, struct dcb_app *new)
2115 1783
2116 spin_lock(&dcb_lock); 1784 spin_lock(&dcb_lock);
2117 /* Search for existing match and replace */ 1785 /* Search for existing match and replace */
2118 list_for_each_entry(itr, &dcb_app_list, list) { 1786 if ((itr = dcb_app_lookup(new, dev->ifindex, 0))) {
2119 if (itr->app.selector == new->selector && 1787 if (new->priority)
2120 itr->app.protocol == new->protocol && 1788 itr->app.priority = new->priority;
2121 itr->ifindex == dev->ifindex) { 1789 else {
2122 if (new->priority) 1790 list_del(&itr->list);
2123 itr->app.priority = new->priority; 1791 kfree(itr);
2124 else {
2125 list_del(&itr->list);
2126 kfree(itr);
2127 }
2128 goto out;
2129 } 1792 }
1793 goto out;
2130 } 1794 }
2131 /* App type does not exist add new application type */ 1795 /* App type does not exist add new application type */
2132 if (new->priority) { 1796 if (new->priority)
2133 struct dcb_app_type *entry; 1797 err = dcb_app_add(new, dev->ifindex);
2134 entry = kmalloc(sizeof(struct dcb_app_type), GFP_ATOMIC);
2135 if (!entry) {
2136 spin_unlock(&dcb_lock);
2137 return -ENOMEM;
2138 }
2139
2140 memcpy(&entry->app, new, sizeof(*new));
2141 entry->ifindex = dev->ifindex;
2142 list_add(&entry->list, &dcb_app_list);
2143 }
2144out: 1798out:
2145 spin_unlock(&dcb_lock); 1799 spin_unlock(&dcb_lock);
2146 call_dcbevent_notifiers(DCB_APP_EVENT, &event); 1800 if (!err)
2147 return 0; 1801 call_dcbevent_notifiers(DCB_APP_EVENT, &event);
1802 return err;
2148} 1803}
2149EXPORT_SYMBOL(dcb_setapp); 1804EXPORT_SYMBOL(dcb_setapp);
2150 1805
@@ -2161,13 +1816,8 @@ u8 dcb_ieee_getapp_mask(struct net_device *dev, struct dcb_app *app)
2161 u8 prio = 0; 1816 u8 prio = 0;
2162 1817
2163 spin_lock(&dcb_lock); 1818 spin_lock(&dcb_lock);
2164 list_for_each_entry(itr, &dcb_app_list, list) { 1819 if ((itr = dcb_app_lookup(app, dev->ifindex, 0)))
2165 if (itr->app.selector == app->selector && 1820 prio |= 1 << itr->app.priority;
2166 itr->app.protocol == app->protocol &&
2167 itr->ifindex == dev->ifindex) {
2168 prio |= 1 << itr->app.priority;
2169 }
2170 }
2171 spin_unlock(&dcb_lock); 1821 spin_unlock(&dcb_lock);
2172 1822
2173 return prio; 1823 return prio;
@@ -2183,7 +1833,6 @@ EXPORT_SYMBOL(dcb_ieee_getapp_mask);
2183 */ 1833 */
2184int dcb_ieee_setapp(struct net_device *dev, struct dcb_app *new) 1834int dcb_ieee_setapp(struct net_device *dev, struct dcb_app *new)
2185{ 1835{
2186 struct dcb_app_type *itr, *entry;
2187 struct dcb_app_type event; 1836 struct dcb_app_type event;
2188 int err = 0; 1837 int err = 0;
2189 1838
@@ -2194,26 +1843,12 @@ int dcb_ieee_setapp(struct net_device *dev, struct dcb_app *new)
2194 1843
2195 spin_lock(&dcb_lock); 1844 spin_lock(&dcb_lock);
2196 /* Search for existing match and abort if found */ 1845 /* Search for existing match and abort if found */
2197 list_for_each_entry(itr, &dcb_app_list, list) { 1846 if (dcb_app_lookup(new, dev->ifindex, new->priority)) {
2198 if (itr->app.selector == new->selector && 1847 err = -EEXIST;
2199 itr->app.protocol == new->protocol &&
2200 itr->app.priority == new->priority &&
2201 itr->ifindex == dev->ifindex) {
2202 err = -EEXIST;
2203 goto out;
2204 }
2205 }
2206
2207 /* App entry does not exist add new entry */
2208 entry = kmalloc(sizeof(struct dcb_app_type), GFP_ATOMIC);
2209 if (!entry) {
2210 err = -ENOMEM;
2211 goto out; 1848 goto out;
2212 } 1849 }
2213 1850
2214 memcpy(&entry->app, new, sizeof(*new)); 1851 err = dcb_app_add(new, dev->ifindex);
2215 entry->ifindex = dev->ifindex;
2216 list_add(&entry->list, &dcb_app_list);
2217out: 1852out:
2218 spin_unlock(&dcb_lock); 1853 spin_unlock(&dcb_lock);
2219 if (!err) 1854 if (!err)
@@ -2240,19 +1875,12 @@ int dcb_ieee_delapp(struct net_device *dev, struct dcb_app *del)
2240 1875
2241 spin_lock(&dcb_lock); 1876 spin_lock(&dcb_lock);
2242 /* Search for existing match and remove it. */ 1877 /* Search for existing match and remove it. */
2243 list_for_each_entry(itr, &dcb_app_list, list) { 1878 if ((itr = dcb_app_lookup(del, dev->ifindex, del->priority))) {
2244 if (itr->app.selector == del->selector && 1879 list_del(&itr->list);
2245 itr->app.protocol == del->protocol && 1880 kfree(itr);
2246 itr->app.priority == del->priority && 1881 err = 0;
2247 itr->ifindex == dev->ifindex) {
2248 list_del(&itr->list);
2249 kfree(itr);
2250 err = 0;
2251 goto out;
2252 }
2253 } 1882 }
2254 1883
2255out:
2256 spin_unlock(&dcb_lock); 1884 spin_unlock(&dcb_lock);
2257 if (!err) 1885 if (!err)
2258 call_dcbevent_notifiers(DCB_APP_EVENT, &event); 1886 call_dcbevent_notifiers(DCB_APP_EVENT, &event);
diff --git a/net/dccp/ackvec.h b/net/dccp/ackvec.h
index e2ab0627a5f..a269aa7f792 100644
--- a/net/dccp/ackvec.h
+++ b/net/dccp/ackvec.h
@@ -50,7 +50,8 @@ static inline u8 dccp_ackvec_state(const u8 *cell)
50 return *cell & ~DCCPAV_MAX_RUNLEN; 50 return *cell & ~DCCPAV_MAX_RUNLEN;
51} 51}
52 52
53/** struct dccp_ackvec - Ack Vector main data structure 53/**
54 * struct dccp_ackvec - Ack Vector main data structure
54 * 55 *
55 * This implements a fixed-size circular buffer within an array and is largely 56 * This implements a fixed-size circular buffer within an array and is largely
56 * based on Appendix A of RFC 4340. 57 * based on Appendix A of RFC 4340.
@@ -76,7 +77,8 @@ struct dccp_ackvec {
76 struct list_head av_records; 77 struct list_head av_records;
77}; 78};
78 79
79/** struct dccp_ackvec_record - Records information about sent Ack Vectors 80/**
81 * struct dccp_ackvec_record - Records information about sent Ack Vectors
80 * 82 *
81 * These list entries define the additional information which the HC-Receiver 83 * These list entries define the additional information which the HC-Receiver
82 * keeps about recently-sent Ack Vectors; again refer to RFC 4340, Appendix A. 84 * keeps about recently-sent Ack Vectors; again refer to RFC 4340, Appendix A.
@@ -121,6 +123,7 @@ static inline bool dccp_ackvec_is_empty(const struct dccp_ackvec *av)
121 * @len: length of @vec 123 * @len: length of @vec
122 * @nonce: whether @vec had an ECN nonce of 0 or 1 124 * @nonce: whether @vec had an ECN nonce of 0 or 1
123 * @node: FIFO - arranged in descending order of ack_ackno 125 * @node: FIFO - arranged in descending order of ack_ackno
126 *
124 * This structure is used by CCIDs to access Ack Vectors in a received skb. 127 * This structure is used by CCIDs to access Ack Vectors in a received skb.
125 */ 128 */
126struct dccp_ackvec_parsed { 129struct dccp_ackvec_parsed {
diff --git a/net/dccp/ccid.c b/net/dccp/ccid.c
index 48b585a5cba..597557254dd 100644
--- a/net/dccp/ccid.c
+++ b/net/dccp/ccid.c
@@ -46,6 +46,7 @@ bool ccid_support_check(u8 const *ccid_array, u8 array_len)
46 * ccid_get_builtin_ccids - Populate a list of built-in CCIDs 46 * ccid_get_builtin_ccids - Populate a list of built-in CCIDs
47 * @ccid_array: pointer to copy into 47 * @ccid_array: pointer to copy into
48 * @array_len: value to return length into 48 * @array_len: value to return length into
49 *
49 * This function allocates memory - caller must see that it is freed after use. 50 * This function allocates memory - caller must see that it is freed after use.
50 */ 51 */
51int ccid_get_builtin_ccids(u8 **ccid_array, u8 *array_len) 52int ccid_get_builtin_ccids(u8 **ccid_array, u8 *array_len)
diff --git a/net/dccp/ccids/ccid3.c b/net/dccp/ccids/ccid3.c
index 8c67bedf85b..d65e98798ec 100644
--- a/net/dccp/ccids/ccid3.c
+++ b/net/dccp/ccids/ccid3.c
@@ -113,6 +113,7 @@ static u32 ccid3_hc_tx_idle_rtt(struct ccid3_hc_tx_sock *hc, ktime_t now)
113/** 113/**
114 * ccid3_hc_tx_update_x - Update allowed sending rate X 114 * ccid3_hc_tx_update_x - Update allowed sending rate X
115 * @stamp: most recent time if available - can be left NULL. 115 * @stamp: most recent time if available - can be left NULL.
116 *
116 * This function tracks draft rfc3448bis, check there for latest details. 117 * This function tracks draft rfc3448bis, check there for latest details.
117 * 118 *
118 * Note: X and X_recv are both stored in units of 64 * bytes/second, to support 119 * Note: X and X_recv are both stored in units of 64 * bytes/second, to support
@@ -161,9 +162,11 @@ static void ccid3_hc_tx_update_x(struct sock *sk, ktime_t *stamp)
161 } 162 }
162} 163}
163 164
164/* 165/**
165 * Track the mean packet size `s' (cf. RFC 4342, 5.3 and RFC 3448, 4.1) 166 * ccid3_hc_tx_update_s - Track the mean packet size `s'
166 * @len: DCCP packet payload size in bytes 167 * @len: DCCP packet payload size in bytes
168 *
169 * cf. RFC 4342, 5.3 and RFC 3448, 4.1
167 */ 170 */
168static inline void ccid3_hc_tx_update_s(struct ccid3_hc_tx_sock *hc, int len) 171static inline void ccid3_hc_tx_update_s(struct ccid3_hc_tx_sock *hc, int len)
169{ 172{
@@ -270,6 +273,7 @@ out:
270/** 273/**
271 * ccid3_hc_tx_send_packet - Delay-based dequeueing of TX packets 274 * ccid3_hc_tx_send_packet - Delay-based dequeueing of TX packets
272 * @skb: next packet candidate to send on @sk 275 * @skb: next packet candidate to send on @sk
276 *
273 * This function uses the convention of ccid_packet_dequeue_eval() and 277 * This function uses the convention of ccid_packet_dequeue_eval() and
274 * returns a millisecond-delay value between 0 and t_mbi = 64000 msec. 278 * returns a millisecond-delay value between 0 and t_mbi = 64000 msec.
275 */ 279 */
diff --git a/net/dccp/ccids/lib/loss_interval.c b/net/dccp/ccids/lib/loss_interval.c
index 497723c4d4b..57f9fd78c4d 100644
--- a/net/dccp/ccids/lib/loss_interval.c
+++ b/net/dccp/ccids/lib/loss_interval.c
@@ -133,6 +133,7 @@ static inline u8 tfrc_lh_is_new_loss(struct tfrc_loss_interval *cur,
133 * @rh: Receive history containing a fresh loss event 133 * @rh: Receive history containing a fresh loss event
134 * @calc_first_li: Caller-dependent routine to compute length of first interval 134 * @calc_first_li: Caller-dependent routine to compute length of first interval
135 * @sk: Used by @calc_first_li in caller-specific way (subtyping) 135 * @sk: Used by @calc_first_li in caller-specific way (subtyping)
136 *
136 * Updates I_mean and returns 1 if a new interval has in fact been added to @lh. 137 * Updates I_mean and returns 1 if a new interval has in fact been added to @lh.
137 */ 138 */
138int tfrc_lh_interval_add(struct tfrc_loss_hist *lh, struct tfrc_rx_hist *rh, 139int tfrc_lh_interval_add(struct tfrc_loss_hist *lh, struct tfrc_rx_hist *rh,
diff --git a/net/dccp/ccids/lib/packet_history.c b/net/dccp/ccids/lib/packet_history.c
index de8fe294bf0..08df7a3acb3 100644
--- a/net/dccp/ccids/lib/packet_history.c
+++ b/net/dccp/ccids/lib/packet_history.c
@@ -315,6 +315,7 @@ static void __three_after_loss(struct tfrc_rx_hist *h)
315 * @ndp: The NDP count belonging to @skb 315 * @ndp: The NDP count belonging to @skb
316 * @calc_first_li: Caller-dependent computation of first loss interval in @lh 316 * @calc_first_li: Caller-dependent computation of first loss interval in @lh
317 * @sk: Used by @calc_first_li (see tfrc_lh_interval_add) 317 * @sk: Used by @calc_first_li (see tfrc_lh_interval_add)
318 *
318 * Chooses action according to pending loss, updates LI database when a new 319 * Chooses action according to pending loss, updates LI database when a new
319 * loss was detected, and does required post-processing. Returns 1 when caller 320 * loss was detected, and does required post-processing. Returns 1 when caller
320 * should send feedback, 0 otherwise. 321 * should send feedback, 0 otherwise.
@@ -387,7 +388,7 @@ static inline struct tfrc_rx_hist_entry *
387} 388}
388 389
389/** 390/**
390 * tfrc_rx_hist_rtt_prev_s: previously suitable (wrt rtt_last_s) RTT-sampling entry 391 * tfrc_rx_hist_rtt_prev_s - previously suitable (wrt rtt_last_s) RTT-sampling entry
391 */ 392 */
392static inline struct tfrc_rx_hist_entry * 393static inline struct tfrc_rx_hist_entry *
393 tfrc_rx_hist_rtt_prev_s(const struct tfrc_rx_hist *h) 394 tfrc_rx_hist_rtt_prev_s(const struct tfrc_rx_hist *h)
diff --git a/net/dccp/ccids/lib/tfrc_equation.c b/net/dccp/ccids/lib/tfrc_equation.c
index a052a4377e2..88ef98285be 100644
--- a/net/dccp/ccids/lib/tfrc_equation.c
+++ b/net/dccp/ccids/lib/tfrc_equation.c
@@ -611,6 +611,7 @@ static inline u32 tfrc_binsearch(u32 fval, u8 small)
611 * @s: packet size in bytes 611 * @s: packet size in bytes
612 * @R: RTT scaled by 1000000 (i.e., microseconds) 612 * @R: RTT scaled by 1000000 (i.e., microseconds)
613 * @p: loss ratio estimate scaled by 1000000 613 * @p: loss ratio estimate scaled by 1000000
614 *
614 * Returns X_calc in bytes per second (not scaled). 615 * Returns X_calc in bytes per second (not scaled).
615 */ 616 */
616u32 tfrc_calc_x(u16 s, u32 R, u32 p) 617u32 tfrc_calc_x(u16 s, u32 R, u32 p)
@@ -659,6 +660,7 @@ u32 tfrc_calc_x(u16 s, u32 R, u32 p)
659/** 660/**
660 * tfrc_calc_x_reverse_lookup - try to find p given f(p) 661 * tfrc_calc_x_reverse_lookup - try to find p given f(p)
661 * @fvalue: function value to match, scaled by 1000000 662 * @fvalue: function value to match, scaled by 1000000
663 *
662 * Returns closest match for p, also scaled by 1000000 664 * Returns closest match for p, also scaled by 1000000
663 */ 665 */
664u32 tfrc_calc_x_reverse_lookup(u32 fvalue) 666u32 tfrc_calc_x_reverse_lookup(u32 fvalue)
diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h
index 9040be049d8..708e75bf623 100644
--- a/net/dccp/dccp.h
+++ b/net/dccp/dccp.h
@@ -352,6 +352,7 @@ static inline int dccp_bad_service_code(const struct sock *sk,
352 * @dccpd_opt_len: total length of all options (5.8) in the packet 352 * @dccpd_opt_len: total length of all options (5.8) in the packet
353 * @dccpd_seq: sequence number 353 * @dccpd_seq: sequence number
354 * @dccpd_ack_seq: acknowledgment number subheader field value 354 * @dccpd_ack_seq: acknowledgment number subheader field value
355 *
355 * This is used for transmission as well as for reception. 356 * This is used for transmission as well as for reception.
356 */ 357 */
357struct dccp_skb_cb { 358struct dccp_skb_cb {
diff --git a/net/dccp/feat.c b/net/dccp/feat.c
index 78a2ad70e1b..9733ddbc96c 100644
--- a/net/dccp/feat.c
+++ b/net/dccp/feat.c
@@ -350,6 +350,7 @@ static int __dccp_feat_activate(struct sock *sk, const int idx,
350 * @feat_num: feature to activate, one of %dccp_feature_numbers 350 * @feat_num: feature to activate, one of %dccp_feature_numbers
351 * @local: whether local (1) or remote (0) @feat_num is meant 351 * @local: whether local (1) or remote (0) @feat_num is meant
352 * @fval: the value (SP or NN) to activate, or NULL to use the default value 352 * @fval: the value (SP or NN) to activate, or NULL to use the default value
353 *
353 * For general use this function is preferable over __dccp_feat_activate(). 354 * For general use this function is preferable over __dccp_feat_activate().
354 */ 355 */
355static int dccp_feat_activate(struct sock *sk, u8 feat_num, bool local, 356static int dccp_feat_activate(struct sock *sk, u8 feat_num, bool local,
@@ -446,6 +447,7 @@ static struct dccp_feat_entry *dccp_feat_list_lookup(struct list_head *fn_list,
446 * @head: list to add to 447 * @head: list to add to
447 * @feat: feature number 448 * @feat: feature number
448 * @local: whether the local (1) or remote feature with number @feat is meant 449 * @local: whether the local (1) or remote feature with number @feat is meant
450 *
449 * This is the only constructor and serves to ensure the above invariants. 451 * This is the only constructor and serves to ensure the above invariants.
450 */ 452 */
451static struct dccp_feat_entry * 453static struct dccp_feat_entry *
@@ -504,6 +506,7 @@ static int dccp_feat_push_change(struct list_head *fn_list, u8 feat, u8 local,
504 * @feat: one of %dccp_feature_numbers 506 * @feat: one of %dccp_feature_numbers
505 * @local: whether local (1) or remote (0) @feat_num is being confirmed 507 * @local: whether local (1) or remote (0) @feat_num is being confirmed
506 * @fval: pointer to NN/SP value to be inserted or NULL 508 * @fval: pointer to NN/SP value to be inserted or NULL
509 *
507 * Returns 0 on success, a Reset code for further processing otherwise. 510 * Returns 0 on success, a Reset code for further processing otherwise.
508 */ 511 */
509static int dccp_feat_push_confirm(struct list_head *fn_list, u8 feat, u8 local, 512static int dccp_feat_push_confirm(struct list_head *fn_list, u8 feat, u8 local,
@@ -691,6 +694,7 @@ int dccp_feat_insert_opts(struct dccp_sock *dp, struct dccp_request_sock *dreq,
691 * @feat: an NN feature from %dccp_feature_numbers 694 * @feat: an NN feature from %dccp_feature_numbers
692 * @mandatory: use Mandatory option if 1 695 * @mandatory: use Mandatory option if 1
693 * @nn_val: value to register (restricted to 4 bytes) 696 * @nn_val: value to register (restricted to 4 bytes)
697 *
694 * Note that NN features are local by definition (RFC 4340, 6.3.2). 698 * Note that NN features are local by definition (RFC 4340, 6.3.2).
695 */ 699 */
696static int __feat_register_nn(struct list_head *fn, u8 feat, 700static int __feat_register_nn(struct list_head *fn, u8 feat,
@@ -760,6 +764,7 @@ int dccp_feat_register_sp(struct sock *sk, u8 feat, u8 is_local,
760 * dccp_feat_nn_get - Query current/pending value of NN feature 764 * dccp_feat_nn_get - Query current/pending value of NN feature
761 * @sk: DCCP socket of an established connection 765 * @sk: DCCP socket of an established connection
762 * @feat: NN feature number from %dccp_feature_numbers 766 * @feat: NN feature number from %dccp_feature_numbers
767 *
763 * For a known NN feature, returns value currently being negotiated, or 768 * For a known NN feature, returns value currently being negotiated, or
764 * current (confirmed) value if no negotiation is going on. 769 * current (confirmed) value if no negotiation is going on.
765 */ 770 */
@@ -790,6 +795,7 @@ EXPORT_SYMBOL_GPL(dccp_feat_nn_get);
790 * @sk: DCCP socket of an established connection 795 * @sk: DCCP socket of an established connection
791 * @feat: NN feature number from %dccp_feature_numbers 796 * @feat: NN feature number from %dccp_feature_numbers
792 * @nn_val: the new value to use 797 * @nn_val: the new value to use
798 *
793 * This function is used to communicate NN updates out-of-band. 799 * This function is used to communicate NN updates out-of-band.
794 */ 800 */
795int dccp_feat_signal_nn_change(struct sock *sk, u8 feat, u64 nn_val) 801int dccp_feat_signal_nn_change(struct sock *sk, u8 feat, u64 nn_val)
@@ -930,6 +936,7 @@ static const struct ccid_dependency *dccp_feat_ccid_deps(u8 ccid, bool is_local)
930 * @fn: feature-negotiation list to update 936 * @fn: feature-negotiation list to update
931 * @id: CCID number to track 937 * @id: CCID number to track
932 * @is_local: whether TX CCID (1) or RX CCID (0) is meant 938 * @is_local: whether TX CCID (1) or RX CCID (0) is meant
939 *
933 * This function needs to be called after registering all other features. 940 * This function needs to be called after registering all other features.
934 */ 941 */
935static int dccp_feat_propagate_ccid(struct list_head *fn, u8 id, bool is_local) 942static int dccp_feat_propagate_ccid(struct list_head *fn, u8 id, bool is_local)
@@ -953,6 +960,7 @@ static int dccp_feat_propagate_ccid(struct list_head *fn, u8 id, bool is_local)
953/** 960/**
954 * dccp_feat_finalise_settings - Finalise settings before starting negotiation 961 * dccp_feat_finalise_settings - Finalise settings before starting negotiation
955 * @dp: client or listening socket (settings will be inherited) 962 * @dp: client or listening socket (settings will be inherited)
963 *
956 * This is called after all registrations (socket initialisation, sysctls, and 964 * This is called after all registrations (socket initialisation, sysctls, and
957 * sockopt calls), and before sending the first packet containing Change options 965 * sockopt calls), and before sending the first packet containing Change options
958 * (ie. client-Request or server-Response), to ensure internal consistency. 966 * (ie. client-Request or server-Response), to ensure internal consistency.
@@ -1284,6 +1292,7 @@ confirmation_failed:
1284 * @feat: NN number, one of %dccp_feature_numbers 1292 * @feat: NN number, one of %dccp_feature_numbers
1285 * @val: NN value 1293 * @val: NN value
1286 * @len: length of @val in bytes 1294 * @len: length of @val in bytes
1295 *
1287 * This function combines the functionality of change_recv/confirm_recv, with 1296 * This function combines the functionality of change_recv/confirm_recv, with
1288 * the following differences (reset codes are the same): 1297 * the following differences (reset codes are the same):
1289 * - cleanup after receiving the Confirm; 1298 * - cleanup after receiving the Confirm;
@@ -1379,6 +1388,7 @@ fast_path_failed:
1379 * @feat: one of %dccp_feature_numbers 1388 * @feat: one of %dccp_feature_numbers
1380 * @val: value contents of @opt 1389 * @val: value contents of @opt
1381 * @len: length of @val in bytes 1390 * @len: length of @val in bytes
1391 *
1382 * Returns 0 on success, a Reset code for ending the connection otherwise. 1392 * Returns 0 on success, a Reset code for ending the connection otherwise.
1383 */ 1393 */
1384int dccp_feat_parse_options(struct sock *sk, struct dccp_request_sock *dreq, 1394int dccp_feat_parse_options(struct sock *sk, struct dccp_request_sock *dreq,
diff --git a/net/dccp/input.c b/net/dccp/input.c
index bc93a333931..14cdafad7a9 100644
--- a/net/dccp/input.c
+++ b/net/dccp/input.c
@@ -710,6 +710,7 @@ EXPORT_SYMBOL_GPL(dccp_rcv_state_process);
710/** 710/**
711 * dccp_sample_rtt - Validate and finalise computation of RTT sample 711 * dccp_sample_rtt - Validate and finalise computation of RTT sample
712 * @delta: number of microseconds between packet and acknowledgment 712 * @delta: number of microseconds between packet and acknowledgment
713 *
713 * The routine is kept generic to work in different contexts. It should be 714 * The routine is kept generic to work in different contexts. It should be
714 * called immediately when the ACK used for the RTT sample arrives. 715 * called immediately when the ACK used for the RTT sample arrives.
715 */ 716 */
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 07f5579ca75..3eb76b5f221 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -504,7 +504,7 @@ static int dccp_v4_send_response(struct sock *sk, struct request_sock *req,
504 struct dst_entry *dst; 504 struct dst_entry *dst;
505 struct flowi4 fl4; 505 struct flowi4 fl4;
506 506
507 dst = inet_csk_route_req(sk, &fl4, req); 507 dst = inet_csk_route_req(sk, &fl4, req, false);
508 if (dst == NULL) 508 if (dst == NULL)
509 goto out; 509 goto out;
510 510
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index fa9512d86f3..02162cfa504 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -165,6 +165,8 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
165 } else 165 } else
166 dst_hold(dst); 166 dst_hold(dst);
167 167
168 dst->ops->update_pmtu(dst, ntohl(info));
169
168 if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) { 170 if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
169 dccp_sync_mss(sk, dst_mtu(dst)); 171 dccp_sync_mss(sk, dst_mtu(dst));
170 } /* else let the usual retransmit timer handle it */ 172 } /* else let the usual retransmit timer handle it */
@@ -237,7 +239,6 @@ static int dccp_v6_send_response(struct sock *sk, struct request_sock *req,
237 struct inet6_request_sock *ireq6 = inet6_rsk(req); 239 struct inet6_request_sock *ireq6 = inet6_rsk(req);
238 struct ipv6_pinfo *np = inet6_sk(sk); 240 struct ipv6_pinfo *np = inet6_sk(sk);
239 struct sk_buff *skb; 241 struct sk_buff *skb;
240 struct ipv6_txoptions *opt = NULL;
241 struct in6_addr *final_p, final; 242 struct in6_addr *final_p, final;
242 struct flowi6 fl6; 243 struct flowi6 fl6;
243 int err = -1; 244 int err = -1;
@@ -253,9 +254,8 @@ static int dccp_v6_send_response(struct sock *sk, struct request_sock *req,
253 fl6.fl6_sport = inet_rsk(req)->loc_port; 254 fl6.fl6_sport = inet_rsk(req)->loc_port;
254 security_req_classify_flow(req, flowi6_to_flowi(&fl6)); 255 security_req_classify_flow(req, flowi6_to_flowi(&fl6));
255 256
256 opt = np->opt;
257 257
258 final_p = fl6_update_dst(&fl6, opt, &final); 258 final_p = fl6_update_dst(&fl6, np->opt, &final);
259 259
260 dst = ip6_dst_lookup_flow(sk, &fl6, final_p, false); 260 dst = ip6_dst_lookup_flow(sk, &fl6, final_p, false);
261 if (IS_ERR(dst)) { 261 if (IS_ERR(dst)) {
@@ -272,13 +272,11 @@ static int dccp_v6_send_response(struct sock *sk, struct request_sock *req,
272 &ireq6->loc_addr, 272 &ireq6->loc_addr,
273 &ireq6->rmt_addr); 273 &ireq6->rmt_addr);
274 fl6.daddr = ireq6->rmt_addr; 274 fl6.daddr = ireq6->rmt_addr;
275 err = ip6_xmit(sk, skb, &fl6, opt, np->tclass); 275 err = ip6_xmit(sk, skb, &fl6, np->opt, np->tclass);
276 err = net_xmit_eval(err); 276 err = net_xmit_eval(err);
277 } 277 }
278 278
279done: 279done:
280 if (opt != NULL && opt != np->opt)
281 sock_kfree_s(sk, opt, opt->tot_len);
282 dst_release(dst); 280 dst_release(dst);
283 return err; 281 return err;
284} 282}
@@ -473,7 +471,6 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
473 struct inet_sock *newinet; 471 struct inet_sock *newinet;
474 struct dccp6_sock *newdp6; 472 struct dccp6_sock *newdp6;
475 struct sock *newsk; 473 struct sock *newsk;
476 struct ipv6_txoptions *opt;
477 474
478 if (skb->protocol == htons(ETH_P_IP)) { 475 if (skb->protocol == htons(ETH_P_IP)) {
479 /* 476 /*
@@ -518,7 +515,6 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
518 return newsk; 515 return newsk;
519 } 516 }
520 517
521 opt = np->opt;
522 518
523 if (sk_acceptq_is_full(sk)) 519 if (sk_acceptq_is_full(sk))
524 goto out_overflow; 520 goto out_overflow;
@@ -530,7 +526,7 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
530 memset(&fl6, 0, sizeof(fl6)); 526 memset(&fl6, 0, sizeof(fl6));
531 fl6.flowi6_proto = IPPROTO_DCCP; 527 fl6.flowi6_proto = IPPROTO_DCCP;
532 fl6.daddr = ireq6->rmt_addr; 528 fl6.daddr = ireq6->rmt_addr;
533 final_p = fl6_update_dst(&fl6, opt, &final); 529 final_p = fl6_update_dst(&fl6, np->opt, &final);
534 fl6.saddr = ireq6->loc_addr; 530 fl6.saddr = ireq6->loc_addr;
535 fl6.flowi6_oif = sk->sk_bound_dev_if; 531 fl6.flowi6_oif = sk->sk_bound_dev_if;
536 fl6.fl6_dport = inet_rsk(req)->rmt_port; 532 fl6.fl6_dport = inet_rsk(req)->rmt_port;
@@ -595,11 +591,8 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
595 * Yes, keeping reference count would be much more clever, but we make 591 * Yes, keeping reference count would be much more clever, but we make
596 * one more one thing there: reattach optmem to newsk. 592 * one more one thing there: reattach optmem to newsk.
597 */ 593 */
598 if (opt != NULL) { 594 if (np->opt != NULL)
599 newnp->opt = ipv6_dup_options(newsk, opt); 595 newnp->opt = ipv6_dup_options(newsk, np->opt);
600 if (opt != np->opt)
601 sock_kfree_s(sk, opt, opt->tot_len);
602 }
603 596
604 inet_csk(newsk)->icsk_ext_hdr_len = 0; 597 inet_csk(newsk)->icsk_ext_hdr_len = 0;
605 if (newnp->opt != NULL) 598 if (newnp->opt != NULL)
@@ -625,8 +618,6 @@ out_nonewsk:
625 dst_release(dst); 618 dst_release(dst);
626out: 619out:
627 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS); 620 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
628 if (opt != NULL && opt != np->opt)
629 sock_kfree_s(sk, opt, opt->tot_len);
630 return NULL; 621 return NULL;
631} 622}
632 623
diff --git a/net/dccp/options.c b/net/dccp/options.c
index 68fa6b7a3e0..a58e0b63405 100644
--- a/net/dccp/options.c
+++ b/net/dccp/options.c
@@ -527,6 +527,7 @@ int dccp_insert_option_mandatory(struct sk_buff *skb)
527 * @val: NN value or SP array (preferred element first) to copy 527 * @val: NN value or SP array (preferred element first) to copy
528 * @len: true length of @val in bytes (excluding first element repetition) 528 * @len: true length of @val in bytes (excluding first element repetition)
529 * @repeat_first: whether to copy the first element of @val twice 529 * @repeat_first: whether to copy the first element of @val twice
530 *
530 * The last argument is used to construct Confirm options, where the preferred 531 * The last argument is used to construct Confirm options, where the preferred
531 * value and the preference list appear separately (RFC 4340, 6.3.1). Preference 532 * value and the preference list appear separately (RFC 4340, 6.3.1). Preference
532 * lists are kept such that the preferred entry is always first, so we only need 533 * lists are kept such that the preferred entry is always first, so we only need
diff --git a/net/dccp/output.c b/net/dccp/output.c
index 78736730879..d17fc90a74b 100644
--- a/net/dccp/output.c
+++ b/net/dccp/output.c
@@ -214,6 +214,7 @@ void dccp_write_space(struct sock *sk)
214 * dccp_wait_for_ccid - Await CCID send permission 214 * dccp_wait_for_ccid - Await CCID send permission
215 * @sk: socket to wait for 215 * @sk: socket to wait for
216 * @delay: timeout in jiffies 216 * @delay: timeout in jiffies
217 *
217 * This is used by CCIDs which need to delay the send time in process context. 218 * This is used by CCIDs which need to delay the send time in process context.
218 */ 219 */
219static int dccp_wait_for_ccid(struct sock *sk, unsigned long delay) 220static int dccp_wait_for_ccid(struct sock *sk, unsigned long delay)
diff --git a/net/decnet/dn_fib.c b/net/decnet/dn_fib.c
index 7eaf9879972..102d6106a94 100644
--- a/net/decnet/dn_fib.c
+++ b/net/decnet/dn_fib.c
@@ -505,6 +505,14 @@ static int dn_fib_check_attr(struct rtmsg *r, struct rtattr **rta)
505 return 0; 505 return 0;
506} 506}
507 507
508static inline u32 rtm_get_table(struct rtattr **rta, u8 table)
509{
510 if (rta[RTA_TABLE - 1])
511 table = nla_get_u32((struct nlattr *) rta[RTA_TABLE - 1]);
512
513 return table;
514}
515
508static int dn_fib_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) 516static int dn_fib_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
509{ 517{
510 struct net *net = sock_net(skb->sk); 518 struct net *net = sock_net(skb->sk);
diff --git a/net/decnet/dn_neigh.c b/net/decnet/dn_neigh.c
index ac90f658586..3aede1b459f 100644
--- a/net/decnet/dn_neigh.c
+++ b/net/decnet/dn_neigh.c
@@ -202,7 +202,7 @@ static int dn_neigh_output_packet(struct sk_buff *skb)
202{ 202{
203 struct dst_entry *dst = skb_dst(skb); 203 struct dst_entry *dst = skb_dst(skb);
204 struct dn_route *rt = (struct dn_route *)dst; 204 struct dn_route *rt = (struct dn_route *)dst;
205 struct neighbour *neigh = dst_get_neighbour_noref(dst); 205 struct neighbour *neigh = rt->n;
206 struct net_device *dev = neigh->dev; 206 struct net_device *dev = neigh->dev;
207 char mac_addr[ETH_ALEN]; 207 char mac_addr[ETH_ALEN];
208 unsigned int seq; 208 unsigned int seq;
@@ -240,7 +240,7 @@ static int dn_long_output(struct neighbour *neigh, struct sk_buff *skb)
240 kfree_skb(skb); 240 kfree_skb(skb);
241 return -ENOBUFS; 241 return -ENOBUFS;
242 } 242 }
243 kfree_skb(skb); 243 consume_skb(skb);
244 skb = skb2; 244 skb = skb2;
245 net_info_ratelimited("dn_long_output: Increasing headroom\n"); 245 net_info_ratelimited("dn_long_output: Increasing headroom\n");
246 } 246 }
@@ -283,7 +283,7 @@ static int dn_short_output(struct neighbour *neigh, struct sk_buff *skb)
283 kfree_skb(skb); 283 kfree_skb(skb);
284 return -ENOBUFS; 284 return -ENOBUFS;
285 } 285 }
286 kfree_skb(skb); 286 consume_skb(skb);
287 skb = skb2; 287 skb = skb2;
288 net_info_ratelimited("dn_short_output: Increasing headroom\n"); 288 net_info_ratelimited("dn_short_output: Increasing headroom\n");
289 } 289 }
@@ -322,7 +322,7 @@ static int dn_phase3_output(struct neighbour *neigh, struct sk_buff *skb)
322 kfree_skb(skb); 322 kfree_skb(skb);
323 return -ENOBUFS; 323 return -ENOBUFS;
324 } 324 }
325 kfree_skb(skb); 325 consume_skb(skb);
326 skb = skb2; 326 skb = skb2;
327 net_info_ratelimited("dn_phase3_output: Increasing headroom\n"); 327 net_info_ratelimited("dn_phase3_output: Increasing headroom\n");
328 } 328 }
diff --git a/net/decnet/dn_nsp_out.c b/net/decnet/dn_nsp_out.c
index 564a6ad13ce..8a96047c7c9 100644
--- a/net/decnet/dn_nsp_out.c
+++ b/net/decnet/dn_nsp_out.c
@@ -322,7 +322,7 @@ static __le16 *dn_mk_ack_header(struct sock *sk, struct sk_buff *skb, unsigned c
322 /* Set "cross subchannel" bit in ackcrs */ 322 /* Set "cross subchannel" bit in ackcrs */
323 ackcrs |= 0x2000; 323 ackcrs |= 0x2000;
324 324
325 ptr = (__le16 *)dn_mk_common_header(scp, skb, msgflag, hlen); 325 ptr = dn_mk_common_header(scp, skb, msgflag, hlen);
326 326
327 *ptr++ = cpu_to_le16(acknum); 327 *ptr++ = cpu_to_le16(acknum);
328 *ptr++ = cpu_to_le16(ackcrs); 328 *ptr++ = cpu_to_le16(ackcrs);
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
index 586302e557a..b5594cc73ee 100644
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -114,10 +114,13 @@ static struct dst_entry *dn_dst_check(struct dst_entry *, __u32);
114static unsigned int dn_dst_default_advmss(const struct dst_entry *dst); 114static unsigned int dn_dst_default_advmss(const struct dst_entry *dst);
115static unsigned int dn_dst_mtu(const struct dst_entry *dst); 115static unsigned int dn_dst_mtu(const struct dst_entry *dst);
116static void dn_dst_destroy(struct dst_entry *); 116static void dn_dst_destroy(struct dst_entry *);
117static void dn_dst_ifdown(struct dst_entry *, struct net_device *dev, int how);
117static struct dst_entry *dn_dst_negative_advice(struct dst_entry *); 118static struct dst_entry *dn_dst_negative_advice(struct dst_entry *);
118static void dn_dst_link_failure(struct sk_buff *); 119static void dn_dst_link_failure(struct sk_buff *);
119static void dn_dst_update_pmtu(struct dst_entry *dst, u32 mtu); 120static void dn_dst_update_pmtu(struct dst_entry *dst, u32 mtu);
120static struct neighbour *dn_dst_neigh_lookup(const struct dst_entry *dst, const void *daddr); 121static struct neighbour *dn_dst_neigh_lookup(const struct dst_entry *dst,
122 struct sk_buff *skb,
123 const void *daddr);
121static int dn_route_input(struct sk_buff *); 124static int dn_route_input(struct sk_buff *);
122static void dn_run_flush(unsigned long dummy); 125static void dn_run_flush(unsigned long dummy);
123 126
@@ -138,6 +141,7 @@ static struct dst_ops dn_dst_ops = {
138 .mtu = dn_dst_mtu, 141 .mtu = dn_dst_mtu,
139 .cow_metrics = dst_cow_metrics_generic, 142 .cow_metrics = dst_cow_metrics_generic,
140 .destroy = dn_dst_destroy, 143 .destroy = dn_dst_destroy,
144 .ifdown = dn_dst_ifdown,
141 .negative_advice = dn_dst_negative_advice, 145 .negative_advice = dn_dst_negative_advice,
142 .link_failure = dn_dst_link_failure, 146 .link_failure = dn_dst_link_failure,
143 .update_pmtu = dn_dst_update_pmtu, 147 .update_pmtu = dn_dst_update_pmtu,
@@ -146,9 +150,27 @@ static struct dst_ops dn_dst_ops = {
146 150
147static void dn_dst_destroy(struct dst_entry *dst) 151static void dn_dst_destroy(struct dst_entry *dst)
148{ 152{
153 struct dn_route *rt = (struct dn_route *) dst;
154
155 if (rt->n)
156 neigh_release(rt->n);
149 dst_destroy_metrics_generic(dst); 157 dst_destroy_metrics_generic(dst);
150} 158}
151 159
160static void dn_dst_ifdown(struct dst_entry *dst, struct net_device *dev, int how)
161{
162 if (how) {
163 struct dn_route *rt = (struct dn_route *) dst;
164 struct neighbour *n = rt->n;
165
166 if (n && n->dev == dev) {
167 n->dev = dev_net(dev)->loopback_dev;
168 dev_hold(n->dev);
169 dev_put(dev);
170 }
171 }
172}
173
152static __inline__ unsigned int dn_hash(__le16 src, __le16 dst) 174static __inline__ unsigned int dn_hash(__le16 src, __le16 dst)
153{ 175{
154 __u16 tmp = (__u16 __force)(src ^ dst); 176 __u16 tmp = (__u16 __force)(src ^ dst);
@@ -244,7 +266,8 @@ static int dn_dst_gc(struct dst_ops *ops)
244 */ 266 */
245static void dn_dst_update_pmtu(struct dst_entry *dst, u32 mtu) 267static void dn_dst_update_pmtu(struct dst_entry *dst, u32 mtu)
246{ 268{
247 struct neighbour *n = dst_get_neighbour_noref(dst); 269 struct dn_route *rt = (struct dn_route *) dst;
270 struct neighbour *n = rt->n;
248 u32 min_mtu = 230; 271 u32 min_mtu = 230;
249 struct dn_dev *dn; 272 struct dn_dev *dn;
250 273
@@ -713,7 +736,8 @@ out:
713static int dn_to_neigh_output(struct sk_buff *skb) 736static int dn_to_neigh_output(struct sk_buff *skb)
714{ 737{
715 struct dst_entry *dst = skb_dst(skb); 738 struct dst_entry *dst = skb_dst(skb);
716 struct neighbour *n = dst_get_neighbour_noref(dst); 739 struct dn_route *rt = (struct dn_route *) dst;
740 struct neighbour *n = rt->n;
717 741
718 return n->output(n, skb); 742 return n->output(n, skb);
719} 743}
@@ -727,7 +751,7 @@ static int dn_output(struct sk_buff *skb)
727 751
728 int err = -EINVAL; 752 int err = -EINVAL;
729 753
730 if (dst_get_neighbour_noref(dst) == NULL) 754 if (rt->n == NULL)
731 goto error; 755 goto error;
732 756
733 skb->dev = dev; 757 skb->dev = dev;
@@ -828,7 +852,9 @@ static unsigned int dn_dst_mtu(const struct dst_entry *dst)
828 return mtu ? : dst->dev->mtu; 852 return mtu ? : dst->dev->mtu;
829} 853}
830 854
831static struct neighbour *dn_dst_neigh_lookup(const struct dst_entry *dst, const void *daddr) 855static struct neighbour *dn_dst_neigh_lookup(const struct dst_entry *dst,
856 struct sk_buff *skb,
857 const void *daddr)
832{ 858{
833 return __neigh_lookup_errno(&dn_neigh_table, daddr, dst->dev); 859 return __neigh_lookup_errno(&dn_neigh_table, daddr, dst->dev);
834} 860}
@@ -848,11 +874,11 @@ static int dn_rt_set_next_hop(struct dn_route *rt, struct dn_fib_res *res)
848 } 874 }
849 rt->rt_type = res->type; 875 rt->rt_type = res->type;
850 876
851 if (dev != NULL && dst_get_neighbour_noref(&rt->dst) == NULL) { 877 if (dev != NULL && rt->n == NULL) {
852 n = __neigh_lookup_errno(&dn_neigh_table, &rt->rt_gateway, dev); 878 n = __neigh_lookup_errno(&dn_neigh_table, &rt->rt_gateway, dev);
853 if (IS_ERR(n)) 879 if (IS_ERR(n))
854 return PTR_ERR(n); 880 return PTR_ERR(n);
855 dst_set_neighbour(&rt->dst, n); 881 rt->n = n;
856 } 882 }
857 883
858 if (dst_metric(&rt->dst, RTAX_MTU) > rt->dst.dev->mtu) 884 if (dst_metric(&rt->dst, RTAX_MTU) > rt->dst.dev->mtu)
@@ -1159,7 +1185,7 @@ make_route:
1159 rt->rt_dst_map = fld.daddr; 1185 rt->rt_dst_map = fld.daddr;
1160 rt->rt_src_map = fld.saddr; 1186 rt->rt_src_map = fld.saddr;
1161 1187
1162 dst_set_neighbour(&rt->dst, neigh); 1188 rt->n = neigh;
1163 neigh = NULL; 1189 neigh = NULL;
1164 1190
1165 rt->dst.lastuse = jiffies; 1191 rt->dst.lastuse = jiffies;
@@ -1429,7 +1455,7 @@ make_route:
1429 rt->fld.flowidn_iif = in_dev->ifindex; 1455 rt->fld.flowidn_iif = in_dev->ifindex;
1430 rt->fld.flowidn_mark = fld.flowidn_mark; 1456 rt->fld.flowidn_mark = fld.flowidn_mark;
1431 1457
1432 dst_set_neighbour(&rt->dst, neigh); 1458 rt->n = neigh;
1433 rt->dst.lastuse = jiffies; 1459 rt->dst.lastuse = jiffies;
1434 rt->dst.output = dn_rt_bug; 1460 rt->dst.output = dn_rt_bug;
1435 switch (res.type) { 1461 switch (res.type) {
@@ -1515,54 +1541,68 @@ static int dn_rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq,
1515 struct dn_route *rt = (struct dn_route *)skb_dst(skb); 1541 struct dn_route *rt = (struct dn_route *)skb_dst(skb);
1516 struct rtmsg *r; 1542 struct rtmsg *r;
1517 struct nlmsghdr *nlh; 1543 struct nlmsghdr *nlh;
1518 unsigned char *b = skb_tail_pointer(skb);
1519 long expires; 1544 long expires;
1520 1545
1521 nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*r), flags); 1546 nlh = nlmsg_put(skb, pid, seq, event, sizeof(*r), flags);
1522 r = NLMSG_DATA(nlh); 1547 if (!nlh)
1548 return -EMSGSIZE;
1549
1550 r = nlmsg_data(nlh);
1523 r->rtm_family = AF_DECnet; 1551 r->rtm_family = AF_DECnet;
1524 r->rtm_dst_len = 16; 1552 r->rtm_dst_len = 16;
1525 r->rtm_src_len = 0; 1553 r->rtm_src_len = 0;
1526 r->rtm_tos = 0; 1554 r->rtm_tos = 0;
1527 r->rtm_table = RT_TABLE_MAIN; 1555 r->rtm_table = RT_TABLE_MAIN;
1528 RTA_PUT_U32(skb, RTA_TABLE, RT_TABLE_MAIN);
1529 r->rtm_type = rt->rt_type; 1556 r->rtm_type = rt->rt_type;
1530 r->rtm_flags = (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED; 1557 r->rtm_flags = (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED;
1531 r->rtm_scope = RT_SCOPE_UNIVERSE; 1558 r->rtm_scope = RT_SCOPE_UNIVERSE;
1532 r->rtm_protocol = RTPROT_UNSPEC; 1559 r->rtm_protocol = RTPROT_UNSPEC;
1560
1533 if (rt->rt_flags & RTCF_NOTIFY) 1561 if (rt->rt_flags & RTCF_NOTIFY)
1534 r->rtm_flags |= RTM_F_NOTIFY; 1562 r->rtm_flags |= RTM_F_NOTIFY;
1535 RTA_PUT(skb, RTA_DST, 2, &rt->rt_daddr); 1563
1564 if (nla_put_u32(skb, RTA_TABLE, RT_TABLE_MAIN) < 0 ||
1565 nla_put_le16(skb, RTA_DST, rt->rt_daddr) < 0)
1566 goto errout;
1567
1536 if (rt->fld.saddr) { 1568 if (rt->fld.saddr) {
1537 r->rtm_src_len = 16; 1569 r->rtm_src_len = 16;
1538 RTA_PUT(skb, RTA_SRC, 2, &rt->fld.saddr); 1570 if (nla_put_le16(skb, RTA_SRC, rt->fld.saddr) < 0)
1571 goto errout;
1539 } 1572 }
1540 if (rt->dst.dev) 1573 if (rt->dst.dev &&
1541 RTA_PUT(skb, RTA_OIF, sizeof(int), &rt->dst.dev->ifindex); 1574 nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex) < 0)
1575 goto errout;
1576
1542 /* 1577 /*
1543 * Note to self - change this if input routes reverse direction when 1578 * Note to self - change this if input routes reverse direction when
1544 * they deal only with inputs and not with replies like they do 1579 * they deal only with inputs and not with replies like they do
1545 * currently. 1580 * currently.
1546 */ 1581 */
1547 RTA_PUT(skb, RTA_PREFSRC, 2, &rt->rt_local_src); 1582 if (nla_put_le16(skb, RTA_PREFSRC, rt->rt_local_src) < 0)
1548 if (rt->rt_daddr != rt->rt_gateway) 1583 goto errout;
1549 RTA_PUT(skb, RTA_GATEWAY, 2, &rt->rt_gateway); 1584
1585 if (rt->rt_daddr != rt->rt_gateway &&
1586 nla_put_le16(skb, RTA_GATEWAY, rt->rt_gateway) < 0)
1587 goto errout;
1588
1550 if (rtnetlink_put_metrics(skb, dst_metrics_ptr(&rt->dst)) < 0) 1589 if (rtnetlink_put_metrics(skb, dst_metrics_ptr(&rt->dst)) < 0)
1551 goto rtattr_failure; 1590 goto errout;
1591
1552 expires = rt->dst.expires ? rt->dst.expires - jiffies : 0; 1592 expires = rt->dst.expires ? rt->dst.expires - jiffies : 0;
1553 if (rtnl_put_cacheinfo(skb, &rt->dst, 0, 0, 0, expires, 1593 if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires,
1554 rt->dst.error) < 0) 1594 rt->dst.error) < 0)
1555 goto rtattr_failure; 1595 goto errout;
1556 if (dn_is_input_route(rt))
1557 RTA_PUT(skb, RTA_IIF, sizeof(int), &rt->fld.flowidn_iif);
1558 1596
1559 nlh->nlmsg_len = skb_tail_pointer(skb) - b; 1597 if (dn_is_input_route(rt) &&
1560 return skb->len; 1598 nla_put_u32(skb, RTA_IIF, rt->fld.flowidn_iif) < 0)
1599 goto errout;
1561 1600
1562nlmsg_failure: 1601 return nlmsg_end(skb, nlh);
1563rtattr_failure: 1602
1564 nlmsg_trim(skb, b); 1603errout:
1565 return -1; 1604 nlmsg_cancel(skb, nlh);
1605 return -EMSGSIZE;
1566} 1606}
1567 1607
1568/* 1608/*
@@ -1572,7 +1612,7 @@ static int dn_cache_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, void
1572{ 1612{
1573 struct net *net = sock_net(in_skb->sk); 1613 struct net *net = sock_net(in_skb->sk);
1574 struct rtattr **rta = arg; 1614 struct rtattr **rta = arg;
1575 struct rtmsg *rtm = NLMSG_DATA(nlh); 1615 struct rtmsg *rtm = nlmsg_data(nlh);
1576 struct dn_route *rt = NULL; 1616 struct dn_route *rt = NULL;
1577 struct dn_skb_cb *cb; 1617 struct dn_skb_cb *cb;
1578 int err; 1618 int err;
@@ -1585,7 +1625,7 @@ static int dn_cache_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, void
1585 memset(&fld, 0, sizeof(fld)); 1625 memset(&fld, 0, sizeof(fld));
1586 fld.flowidn_proto = DNPROTO_NSP; 1626 fld.flowidn_proto = DNPROTO_NSP;
1587 1627
1588 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); 1628 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1589 if (skb == NULL) 1629 if (skb == NULL)
1590 return -ENOBUFS; 1630 return -ENOBUFS;
1591 skb_reset_mac_header(skb); 1631 skb_reset_mac_header(skb);
@@ -1663,13 +1703,16 @@ int dn_cache_dump(struct sk_buff *skb, struct netlink_callback *cb)
1663 struct dn_route *rt; 1703 struct dn_route *rt;
1664 int h, s_h; 1704 int h, s_h;
1665 int idx, s_idx; 1705 int idx, s_idx;
1706 struct rtmsg *rtm;
1666 1707
1667 if (!net_eq(net, &init_net)) 1708 if (!net_eq(net, &init_net))
1668 return 0; 1709 return 0;
1669 1710
1670 if (NLMSG_PAYLOAD(cb->nlh, 0) < sizeof(struct rtmsg)) 1711 if (nlmsg_len(cb->nlh) < sizeof(struct rtmsg))
1671 return -EINVAL; 1712 return -EINVAL;
1672 if (!(((struct rtmsg *)NLMSG_DATA(cb->nlh))->rtm_flags&RTM_F_CLONED)) 1713
1714 rtm = nlmsg_data(cb->nlh);
1715 if (!(rtm->rtm_flags & RTM_F_CLONED))
1673 return 0; 1716 return 0;
1674 1717
1675 s_h = cb->args[0]; 1718 s_h = cb->args[0];
@@ -1769,12 +1812,11 @@ static int dn_rt_cache_seq_show(struct seq_file *seq, void *v)
1769 char buf1[DN_ASCBUF_LEN], buf2[DN_ASCBUF_LEN]; 1812 char buf1[DN_ASCBUF_LEN], buf2[DN_ASCBUF_LEN];
1770 1813
1771 seq_printf(seq, "%-8s %-7s %-7s %04d %04d %04d\n", 1814 seq_printf(seq, "%-8s %-7s %-7s %04d %04d %04d\n",
1772 rt->dst.dev ? rt->dst.dev->name : "*", 1815 rt->dst.dev ? rt->dst.dev->name : "*",
1773 dn_addr2asc(le16_to_cpu(rt->rt_daddr), buf1), 1816 dn_addr2asc(le16_to_cpu(rt->rt_daddr), buf1),
1774 dn_addr2asc(le16_to_cpu(rt->rt_saddr), buf2), 1817 dn_addr2asc(le16_to_cpu(rt->rt_saddr), buf2),
1775 atomic_read(&rt->dst.__refcnt), 1818 atomic_read(&rt->dst.__refcnt),
1776 rt->dst.__use, 1819 rt->dst.__use, 0);
1777 (int) dst_metric(&rt->dst, RTAX_RTT));
1778 return 0; 1820 return 0;
1779} 1821}
1780 1822
diff --git a/net/decnet/dn_table.c b/net/decnet/dn_table.c
index 650f3380c98..16c986ab122 100644
--- a/net/decnet/dn_table.c
+++ b/net/decnet/dn_table.c
@@ -297,61 +297,75 @@ static int dn_fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
297{ 297{
298 struct rtmsg *rtm; 298 struct rtmsg *rtm;
299 struct nlmsghdr *nlh; 299 struct nlmsghdr *nlh;
300 unsigned char *b = skb_tail_pointer(skb);
301 300
302 nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*rtm), flags); 301 nlh = nlmsg_put(skb, pid, seq, event, sizeof(*rtm), flags);
303 rtm = NLMSG_DATA(nlh); 302 if (!nlh)
303 return -EMSGSIZE;
304
305 rtm = nlmsg_data(nlh);
304 rtm->rtm_family = AF_DECnet; 306 rtm->rtm_family = AF_DECnet;
305 rtm->rtm_dst_len = dst_len; 307 rtm->rtm_dst_len = dst_len;
306 rtm->rtm_src_len = 0; 308 rtm->rtm_src_len = 0;
307 rtm->rtm_tos = 0; 309 rtm->rtm_tos = 0;
308 rtm->rtm_table = tb_id; 310 rtm->rtm_table = tb_id;
309 RTA_PUT_U32(skb, RTA_TABLE, tb_id);
310 rtm->rtm_flags = fi->fib_flags; 311 rtm->rtm_flags = fi->fib_flags;
311 rtm->rtm_scope = scope; 312 rtm->rtm_scope = scope;
312 rtm->rtm_type = type; 313 rtm->rtm_type = type;
313 if (rtm->rtm_dst_len)
314 RTA_PUT(skb, RTA_DST, 2, dst);
315 rtm->rtm_protocol = fi->fib_protocol; 314 rtm->rtm_protocol = fi->fib_protocol;
316 if (fi->fib_priority) 315
317 RTA_PUT(skb, RTA_PRIORITY, 4, &fi->fib_priority); 316 if (nla_put_u32(skb, RTA_TABLE, tb_id) < 0)
317 goto errout;
318
319 if (rtm->rtm_dst_len &&
320 nla_put(skb, RTA_DST, 2, dst) < 0)
321 goto errout;
322
323 if (fi->fib_priority &&
324 nla_put_u32(skb, RTA_PRIORITY, fi->fib_priority) < 0)
325 goto errout;
326
318 if (rtnetlink_put_metrics(skb, fi->fib_metrics) < 0) 327 if (rtnetlink_put_metrics(skb, fi->fib_metrics) < 0)
319 goto rtattr_failure; 328 goto errout;
329
320 if (fi->fib_nhs == 1) { 330 if (fi->fib_nhs == 1) {
321 if (fi->fib_nh->nh_gw) 331 if (fi->fib_nh->nh_gw &&
322 RTA_PUT(skb, RTA_GATEWAY, 2, &fi->fib_nh->nh_gw); 332 nla_put_le16(skb, RTA_GATEWAY, fi->fib_nh->nh_gw) < 0)
323 if (fi->fib_nh->nh_oif) 333 goto errout;
324 RTA_PUT(skb, RTA_OIF, sizeof(int), &fi->fib_nh->nh_oif); 334
335 if (fi->fib_nh->nh_oif &&
336 nla_put_u32(skb, RTA_OIF, fi->fib_nh->nh_oif) < 0)
337 goto errout;
325 } 338 }
339
326 if (fi->fib_nhs > 1) { 340 if (fi->fib_nhs > 1) {
327 struct rtnexthop *nhp; 341 struct rtnexthop *nhp;
328 struct rtattr *mp_head; 342 struct nlattr *mp_head;
329 if (skb_tailroom(skb) <= RTA_SPACE(0)) 343
330 goto rtattr_failure; 344 if (!(mp_head = nla_nest_start(skb, RTA_MULTIPATH)))
331 mp_head = (struct rtattr *)skb_put(skb, RTA_SPACE(0)); 345 goto errout;
332 346
333 for_nexthops(fi) { 347 for_nexthops(fi) {
334 if (skb_tailroom(skb) < RTA_ALIGN(RTA_ALIGN(sizeof(*nhp)) + 4)) 348 if (!(nhp = nla_reserve_nohdr(skb, sizeof(*nhp))))
335 goto rtattr_failure; 349 goto errout;
336 nhp = (struct rtnexthop *)skb_put(skb, RTA_ALIGN(sizeof(*nhp))); 350
337 nhp->rtnh_flags = nh->nh_flags & 0xFF; 351 nhp->rtnh_flags = nh->nh_flags & 0xFF;
338 nhp->rtnh_hops = nh->nh_weight - 1; 352 nhp->rtnh_hops = nh->nh_weight - 1;
339 nhp->rtnh_ifindex = nh->nh_oif; 353 nhp->rtnh_ifindex = nh->nh_oif;
340 if (nh->nh_gw) 354
341 RTA_PUT(skb, RTA_GATEWAY, 2, &nh->nh_gw); 355 if (nh->nh_gw &&
356 nla_put_le16(skb, RTA_GATEWAY, nh->nh_gw) < 0)
357 goto errout;
358
342 nhp->rtnh_len = skb_tail_pointer(skb) - (unsigned char *)nhp; 359 nhp->rtnh_len = skb_tail_pointer(skb) - (unsigned char *)nhp;
343 } endfor_nexthops(fi); 360 } endfor_nexthops(fi);
344 mp_head->rta_type = RTA_MULTIPATH;
345 mp_head->rta_len = skb_tail_pointer(skb) - (u8 *)mp_head;
346 }
347 361
348 nlh->nlmsg_len = skb_tail_pointer(skb) - b; 362 nla_nest_end(skb, mp_head);
349 return skb->len; 363 }
350 364
365 return nlmsg_end(skb, nlh);
351 366
352nlmsg_failure: 367errout:
353rtattr_failure: 368 nlmsg_cancel(skb, nlh);
354 nlmsg_trim(skb, b);
355 return -EMSGSIZE; 369 return -EMSGSIZE;
356} 370}
357 371
@@ -476,7 +490,7 @@ int dn_fib_dump(struct sk_buff *skb, struct netlink_callback *cb)
476 return 0; 490 return 0;
477 491
478 if (NLMSG_PAYLOAD(cb->nlh, 0) >= sizeof(struct rtmsg) && 492 if (NLMSG_PAYLOAD(cb->nlh, 0) >= sizeof(struct rtmsg) &&
479 ((struct rtmsg *)NLMSG_DATA(cb->nlh))->rtm_flags&RTM_F_CLONED) 493 ((struct rtmsg *)nlmsg_data(cb->nlh))->rtm_flags&RTM_F_CLONED)
480 return dn_cache_dump(skb, cb); 494 return dn_cache_dump(skb, cb);
481 495
482 s_h = cb->args[0]; 496 s_h = cb->args[0];
diff --git a/net/decnet/netfilter/dn_rtmsg.c b/net/decnet/netfilter/dn_rtmsg.c
index 44b890936fc..11db0ecf342 100644
--- a/net/decnet/netfilter/dn_rtmsg.c
+++ b/net/decnet/netfilter/dn_rtmsg.c
@@ -42,23 +42,23 @@ static struct sk_buff *dnrmg_build_message(struct sk_buff *rt_skb, int *errp)
42 size = NLMSG_SPACE(rt_skb->len); 42 size = NLMSG_SPACE(rt_skb->len);
43 size += NLMSG_ALIGN(sizeof(struct nf_dn_rtmsg)); 43 size += NLMSG_ALIGN(sizeof(struct nf_dn_rtmsg));
44 skb = alloc_skb(size, GFP_ATOMIC); 44 skb = alloc_skb(size, GFP_ATOMIC);
45 if (!skb) 45 if (!skb) {
46 goto nlmsg_failure; 46 *errp = -ENOMEM;
47 return NULL;
48 }
47 old_tail = skb->tail; 49 old_tail = skb->tail;
48 nlh = NLMSG_PUT(skb, 0, 0, 0, size - sizeof(*nlh)); 50 nlh = nlmsg_put(skb, 0, 0, 0, size - sizeof(*nlh), 0);
51 if (!nlh) {
52 kfree_skb(skb);
53 *errp = -ENOMEM;
54 return NULL;
55 }
49 rtm = (struct nf_dn_rtmsg *)NLMSG_DATA(nlh); 56 rtm = (struct nf_dn_rtmsg *)NLMSG_DATA(nlh);
50 rtm->nfdn_ifindex = rt_skb->dev->ifindex; 57 rtm->nfdn_ifindex = rt_skb->dev->ifindex;
51 ptr = NFDN_RTMSG(rtm); 58 ptr = NFDN_RTMSG(rtm);
52 skb_copy_from_linear_data(rt_skb, ptr, rt_skb->len); 59 skb_copy_from_linear_data(rt_skb, ptr, rt_skb->len);
53 nlh->nlmsg_len = skb->tail - old_tail; 60 nlh->nlmsg_len = skb->tail - old_tail;
54 return skb; 61 return skb;
55
56nlmsg_failure:
57 if (skb)
58 kfree_skb(skb);
59 *errp = -ENOMEM;
60 net_err_ratelimited("dn_rtmsg: error creating netlink message\n");
61 return NULL;
62} 62}
63 63
64static void dnrmg_send_peer(struct sk_buff *skb) 64static void dnrmg_send_peer(struct sk_buff *skb)
@@ -117,7 +117,7 @@ static inline void dnrmg_receive_user_skb(struct sk_buff *skb)
117 117
118static struct nf_hook_ops dnrmg_ops __read_mostly = { 118static struct nf_hook_ops dnrmg_ops __read_mostly = {
119 .hook = dnrmg_hook, 119 .hook = dnrmg_hook,
120 .pf = PF_DECnet, 120 .pf = NFPROTO_DECNET,
121 .hooknum = NF_DN_ROUTE, 121 .hooknum = NF_DN_ROUTE,
122 .priority = NF_DN_PRI_DNRTMSG, 122 .priority = NF_DN_PRI_DNRTMSG,
123}; 123};
@@ -125,11 +125,13 @@ static struct nf_hook_ops dnrmg_ops __read_mostly = {
125static int __init dn_rtmsg_init(void) 125static int __init dn_rtmsg_init(void)
126{ 126{
127 int rv = 0; 127 int rv = 0;
128 struct netlink_kernel_cfg cfg = {
129 .groups = DNRNG_NLGRP_MAX,
130 .input = dnrmg_receive_user_skb,
131 };
128 132
129 dnrmg = netlink_kernel_create(&init_net, 133 dnrmg = netlink_kernel_create(&init_net,
130 NETLINK_DNRTMSG, DNRNG_NLGRP_MAX, 134 NETLINK_DNRTMSG, THIS_MODULE, &cfg);
131 dnrmg_receive_user_skb,
132 NULL, THIS_MODULE);
133 if (dnrmg == NULL) { 135 if (dnrmg == NULL) {
134 printk(KERN_ERR "dn_rtmsg: Cannot create netlink socket"); 136 printk(KERN_ERR "dn_rtmsg: Cannot create netlink socket");
135 return -ENOMEM; 137 return -ENOMEM;
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index 36e58800a9e..4efad533e5f 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -232,6 +232,7 @@ EXPORT_SYMBOL(eth_header_parse);
232 * @neigh: source neighbour 232 * @neigh: source neighbour
233 * @hh: destination cache entry 233 * @hh: destination cache entry
234 * @type: Ethernet type field 234 * @type: Ethernet type field
235 *
235 * Create an Ethernet header template from the neighbour. 236 * Create an Ethernet header template from the neighbour.
236 */ 237 */
237int eth_header_cache(const struct neighbour *neigh, struct hh_cache *hh, __be16 type) 238int eth_header_cache(const struct neighbour *neigh, struct hh_cache *hh, __be16 type)
@@ -274,6 +275,7 @@ EXPORT_SYMBOL(eth_header_cache_update);
274 * eth_mac_addr - set new Ethernet hardware address 275 * eth_mac_addr - set new Ethernet hardware address
275 * @dev: network device 276 * @dev: network device
276 * @p: socket address 277 * @p: socket address
278 *
277 * Change hardware address of device. 279 * Change hardware address of device.
278 * 280 *
279 * This doesn't change hardware matching, so needs to be overridden 281 * This doesn't change hardware matching, so needs to be overridden
@@ -283,7 +285,7 @@ int eth_mac_addr(struct net_device *dev, void *p)
283{ 285{
284 struct sockaddr *addr = p; 286 struct sockaddr *addr = p;
285 287
286 if (netif_running(dev)) 288 if (!(dev->priv_flags & IFF_LIVE_ADDR_CHANGE) && netif_running(dev))
287 return -EBUSY; 289 return -EBUSY;
288 if (!is_valid_ether_addr(addr->sa_data)) 290 if (!is_valid_ether_addr(addr->sa_data))
289 return -EADDRNOTAVAIL; 291 return -EADDRNOTAVAIL;
@@ -331,6 +333,7 @@ const struct header_ops eth_header_ops ____cacheline_aligned = {
331/** 333/**
332 * ether_setup - setup Ethernet network device 334 * ether_setup - setup Ethernet network device
333 * @dev: network device 335 * @dev: network device
336 *
334 * Fill in the fields of the device structure with Ethernet-generic values. 337 * Fill in the fields of the device structure with Ethernet-generic values.
335 */ 338 */
336void ether_setup(struct net_device *dev) 339void ether_setup(struct net_device *dev)
diff --git a/net/ieee802154/6lowpan.c b/net/ieee802154/6lowpan.c
index 32eb4179e8f..f4070e54d1a 100644
--- a/net/ieee802154/6lowpan.c
+++ b/net/ieee802154/6lowpan.c
@@ -55,7 +55,6 @@
55#include <linux/module.h> 55#include <linux/module.h>
56#include <linux/moduleparam.h> 56#include <linux/moduleparam.h>
57#include <linux/netdevice.h> 57#include <linux/netdevice.h>
58#include <linux/etherdevice.h>
59#include <net/af_ieee802154.h> 58#include <net/af_ieee802154.h>
60#include <net/ieee802154.h> 59#include <net/ieee802154.h>
61#include <net/ieee802154_netdev.h> 60#include <net/ieee802154_netdev.h>
@@ -240,8 +239,7 @@ lowpan_uncompress_addr(struct sk_buff *skb, struct in6_addr *ipaddr,
240 lowpan_uip_ds6_set_addr_iid(ipaddr, lladdr); 239 lowpan_uip_ds6_set_addr_iid(ipaddr, lladdr);
241 } 240 }
242 241
243 pr_debug("(%s): uncompressing %d + %d => ", __func__, prefcount, 242 pr_debug("uncompressing %d + %d => ", prefcount, postcount);
244 postcount);
245 lowpan_raw_dump_inline(NULL, NULL, ipaddr->s6_addr, 16); 243 lowpan_raw_dump_inline(NULL, NULL, ipaddr->s6_addr, 16);
246 244
247 return 0; 245 return 0;
@@ -252,13 +250,11 @@ lowpan_compress_udp_header(u8 **hc06_ptr, struct sk_buff *skb)
252{ 250{
253 struct udphdr *uh = udp_hdr(skb); 251 struct udphdr *uh = udp_hdr(skb);
254 252
255 pr_debug("(%s): UDP header compression\n", __func__);
256
257 if (((uh->source & LOWPAN_NHC_UDP_4BIT_MASK) == 253 if (((uh->source & LOWPAN_NHC_UDP_4BIT_MASK) ==
258 LOWPAN_NHC_UDP_4BIT_PORT) && 254 LOWPAN_NHC_UDP_4BIT_PORT) &&
259 ((uh->dest & LOWPAN_NHC_UDP_4BIT_MASK) == 255 ((uh->dest & LOWPAN_NHC_UDP_4BIT_MASK) ==
260 LOWPAN_NHC_UDP_4BIT_PORT)) { 256 LOWPAN_NHC_UDP_4BIT_PORT)) {
261 pr_debug("(%s): both ports compression to 4 bits\n", __func__); 257 pr_debug("UDP header: both ports compression to 4 bits\n");
262 **hc06_ptr = LOWPAN_NHC_UDP_CS_P_11; 258 **hc06_ptr = LOWPAN_NHC_UDP_CS_P_11;
263 **(hc06_ptr + 1) = /* subtraction is faster */ 259 **(hc06_ptr + 1) = /* subtraction is faster */
264 (u8)((uh->dest - LOWPAN_NHC_UDP_4BIT_PORT) + 260 (u8)((uh->dest - LOWPAN_NHC_UDP_4BIT_PORT) +
@@ -266,20 +262,20 @@ lowpan_compress_udp_header(u8 **hc06_ptr, struct sk_buff *skb)
266 *hc06_ptr += 2; 262 *hc06_ptr += 2;
267 } else if ((uh->dest & LOWPAN_NHC_UDP_8BIT_MASK) == 263 } else if ((uh->dest & LOWPAN_NHC_UDP_8BIT_MASK) ==
268 LOWPAN_NHC_UDP_8BIT_PORT) { 264 LOWPAN_NHC_UDP_8BIT_PORT) {
269 pr_debug("(%s): remove 8 bits of dest\n", __func__); 265 pr_debug("UDP header: remove 8 bits of dest\n");
270 **hc06_ptr = LOWPAN_NHC_UDP_CS_P_01; 266 **hc06_ptr = LOWPAN_NHC_UDP_CS_P_01;
271 memcpy(*hc06_ptr + 1, &uh->source, 2); 267 memcpy(*hc06_ptr + 1, &uh->source, 2);
272 **(hc06_ptr + 3) = (u8)(uh->dest - LOWPAN_NHC_UDP_8BIT_PORT); 268 **(hc06_ptr + 3) = (u8)(uh->dest - LOWPAN_NHC_UDP_8BIT_PORT);
273 *hc06_ptr += 4; 269 *hc06_ptr += 4;
274 } else if ((uh->source & LOWPAN_NHC_UDP_8BIT_MASK) == 270 } else if ((uh->source & LOWPAN_NHC_UDP_8BIT_MASK) ==
275 LOWPAN_NHC_UDP_8BIT_PORT) { 271 LOWPAN_NHC_UDP_8BIT_PORT) {
276 pr_debug("(%s): remove 8 bits of source\n", __func__); 272 pr_debug("UDP header: remove 8 bits of source\n");
277 **hc06_ptr = LOWPAN_NHC_UDP_CS_P_10; 273 **hc06_ptr = LOWPAN_NHC_UDP_CS_P_10;
278 memcpy(*hc06_ptr + 1, &uh->dest, 2); 274 memcpy(*hc06_ptr + 1, &uh->dest, 2);
279 **(hc06_ptr + 3) = (u8)(uh->source - LOWPAN_NHC_UDP_8BIT_PORT); 275 **(hc06_ptr + 3) = (u8)(uh->source - LOWPAN_NHC_UDP_8BIT_PORT);
280 *hc06_ptr += 4; 276 *hc06_ptr += 4;
281 } else { 277 } else {
282 pr_debug("(%s): can't compress header\n", __func__); 278 pr_debug("UDP header: can't compress\n");
283 **hc06_ptr = LOWPAN_NHC_UDP_CS_P_00; 279 **hc06_ptr = LOWPAN_NHC_UDP_CS_P_00;
284 memcpy(*hc06_ptr + 1, &uh->source, 2); 280 memcpy(*hc06_ptr + 1, &uh->source, 2);
285 memcpy(*hc06_ptr + 3, &uh->dest, 2); 281 memcpy(*hc06_ptr + 3, &uh->dest, 2);
@@ -291,25 +287,26 @@ lowpan_compress_udp_header(u8 **hc06_ptr, struct sk_buff *skb)
291 *hc06_ptr += 2; 287 *hc06_ptr += 2;
292} 288}
293 289
294static u8 lowpan_fetch_skb_u8(struct sk_buff *skb) 290static inline int lowpan_fetch_skb_u8(struct sk_buff *skb, u8 *val)
295{ 291{
296 u8 ret; 292 if (unlikely(!pskb_may_pull(skb, 1)))
293 return -EINVAL;
297 294
298 ret = skb->data[0]; 295 *val = skb->data[0];
299 skb_pull(skb, 1); 296 skb_pull(skb, 1);
300 297
301 return ret; 298 return 0;
302} 299}
303 300
304static u16 lowpan_fetch_skb_u16(struct sk_buff *skb) 301static inline int lowpan_fetch_skb_u16(struct sk_buff *skb, u16 *val)
305{ 302{
306 u16 ret; 303 if (unlikely(!pskb_may_pull(skb, 2)))
307 304 return -EINVAL;
308 BUG_ON(!pskb_may_pull(skb, 2));
309 305
310 ret = skb->data[0] | (skb->data[1] << 8); 306 *val = skb->data[0] | (skb->data[1] << 8);
311 skb_pull(skb, 2); 307 skb_pull(skb, 2);
312 return ret; 308
309 return 0;
313} 310}
314 311
315static int 312static int
@@ -318,10 +315,11 @@ lowpan_uncompress_udp_header(struct sk_buff *skb)
318 struct udphdr *uh = udp_hdr(skb); 315 struct udphdr *uh = udp_hdr(skb);
319 u8 tmp; 316 u8 tmp;
320 317
321 tmp = lowpan_fetch_skb_u8(skb); 318 if (lowpan_fetch_skb_u8(skb, &tmp))
319 goto err;
322 320
323 if ((tmp & LOWPAN_NHC_UDP_MASK) == LOWPAN_NHC_UDP_ID) { 321 if ((tmp & LOWPAN_NHC_UDP_MASK) == LOWPAN_NHC_UDP_ID) {
324 pr_debug("(%s): UDP header uncompression\n", __func__); 322 pr_debug("UDP header uncompression\n");
325 switch (tmp & LOWPAN_NHC_UDP_CS_P_11) { 323 switch (tmp & LOWPAN_NHC_UDP_CS_P_11) {
326 case LOWPAN_NHC_UDP_CS_P_00: 324 case LOWPAN_NHC_UDP_CS_P_00:
327 memcpy(&uh->source, &skb->data[0], 2); 325 memcpy(&uh->source, &skb->data[0], 2);
@@ -347,19 +345,19 @@ lowpan_uncompress_udp_header(struct sk_buff *skb)
347 skb_pull(skb, 1); 345 skb_pull(skb, 1);
348 break; 346 break;
349 default: 347 default:
350 pr_debug("(%s) ERROR: unknown UDP format\n", __func__); 348 pr_debug("ERROR: unknown UDP format\n");
351 goto err; 349 goto err;
352 break; 350 break;
353 } 351 }
354 352
355 pr_debug("(%s): uncompressed UDP ports: src = %d, dst = %d\n", 353 pr_debug("uncompressed UDP ports: src = %d, dst = %d\n",
356 __func__, uh->source, uh->dest); 354 uh->source, uh->dest);
357 355
358 /* copy checksum */ 356 /* copy checksum */
359 memcpy(&uh->check, &skb->data[0], 2); 357 memcpy(&uh->check, &skb->data[0], 2);
360 skb_pull(skb, 2); 358 skb_pull(skb, 2);
361 } else { 359 } else {
362 pr_debug("(%s): ERROR: unsupported NH format\n", __func__); 360 pr_debug("ERROR: unsupported NH format\n");
363 goto err; 361 goto err;
364 } 362 }
365 363
@@ -392,10 +390,9 @@ static int lowpan_header_create(struct sk_buff *skb,
392 hdr = ipv6_hdr(skb); 390 hdr = ipv6_hdr(skb);
393 hc06_ptr = head + 2; 391 hc06_ptr = head + 2;
394 392
395 pr_debug("(%s): IPv6 header dump:\n\tversion = %d\n\tlength = %d\n" 393 pr_debug("IPv6 header dump:\n\tversion = %d\n\tlength = %d\n"
396 "\tnexthdr = 0x%02x\n\thop_lim = %d\n", __func__, 394 "\tnexthdr = 0x%02x\n\thop_lim = %d\n", hdr->version,
397 hdr->version, ntohs(hdr->payload_len), hdr->nexthdr, 395 ntohs(hdr->payload_len), hdr->nexthdr, hdr->hop_limit);
398 hdr->hop_limit);
399 396
400 lowpan_raw_dump_table(__func__, "raw skb network header dump", 397 lowpan_raw_dump_table(__func__, "raw skb network header dump",
401 skb_network_header(skb), sizeof(struct ipv6hdr)); 398 skb_network_header(skb), sizeof(struct ipv6hdr));
@@ -490,28 +487,28 @@ static int lowpan_header_create(struct sk_buff *skb,
490 break; 487 break;
491 default: 488 default:
492 *hc06_ptr = hdr->hop_limit; 489 *hc06_ptr = hdr->hop_limit;
490 hc06_ptr += 1;
493 break; 491 break;
494 } 492 }
495 493
496 /* source address compression */ 494 /* source address compression */
497 if (is_addr_unspecified(&hdr->saddr)) { 495 if (is_addr_unspecified(&hdr->saddr)) {
498 pr_debug("(%s): source address is unspecified, setting SAC\n", 496 pr_debug("source address is unspecified, setting SAC\n");
499 __func__);
500 iphc1 |= LOWPAN_IPHC_SAC; 497 iphc1 |= LOWPAN_IPHC_SAC;
501 /* TODO: context lookup */ 498 /* TODO: context lookup */
502 } else if (is_addr_link_local(&hdr->saddr)) { 499 } else if (is_addr_link_local(&hdr->saddr)) {
503 pr_debug("(%s): source address is link-local\n", __func__); 500 pr_debug("source address is link-local\n");
504 iphc1 |= lowpan_compress_addr_64(&hc06_ptr, 501 iphc1 |= lowpan_compress_addr_64(&hc06_ptr,
505 LOWPAN_IPHC_SAM_BIT, &hdr->saddr, saddr); 502 LOWPAN_IPHC_SAM_BIT, &hdr->saddr, saddr);
506 } else { 503 } else {
507 pr_debug("(%s): send the full source address\n", __func__); 504 pr_debug("send the full source address\n");
508 memcpy(hc06_ptr, &hdr->saddr.s6_addr16[0], 16); 505 memcpy(hc06_ptr, &hdr->saddr.s6_addr16[0], 16);
509 hc06_ptr += 16; 506 hc06_ptr += 16;
510 } 507 }
511 508
512 /* destination address compression */ 509 /* destination address compression */
513 if (is_addr_mcast(&hdr->daddr)) { 510 if (is_addr_mcast(&hdr->daddr)) {
514 pr_debug("(%s): destination address is multicast", __func__); 511 pr_debug("destination address is multicast: ");
515 iphc1 |= LOWPAN_IPHC_M; 512 iphc1 |= LOWPAN_IPHC_M;
516 if (lowpan_is_mcast_addr_compressable8(&hdr->daddr)) { 513 if (lowpan_is_mcast_addr_compressable8(&hdr->daddr)) {
517 pr_debug("compressed to 1 octet\n"); 514 pr_debug("compressed to 1 octet\n");
@@ -540,14 +537,13 @@ static int lowpan_header_create(struct sk_buff *skb,
540 hc06_ptr += 16; 537 hc06_ptr += 16;
541 } 538 }
542 } else { 539 } else {
543 pr_debug("(%s): destination address is unicast: ", __func__);
544 /* TODO: context lookup */ 540 /* TODO: context lookup */
545 if (is_addr_link_local(&hdr->daddr)) { 541 if (is_addr_link_local(&hdr->daddr)) {
546 pr_debug("destination address is link-local\n"); 542 pr_debug("dest address is unicast and link-local\n");
547 iphc1 |= lowpan_compress_addr_64(&hc06_ptr, 543 iphc1 |= lowpan_compress_addr_64(&hc06_ptr,
548 LOWPAN_IPHC_DAM_BIT, &hdr->daddr, daddr); 544 LOWPAN_IPHC_DAM_BIT, &hdr->daddr, daddr);
549 } else { 545 } else {
550 pr_debug("using full address\n"); 546 pr_debug("dest address is unicast: using full one\n");
551 memcpy(hc06_ptr, &hdr->daddr.s6_addr16[0], 16); 547 memcpy(hc06_ptr, &hdr->daddr.s6_addr16[0], 16);
552 hc06_ptr += 16; 548 hc06_ptr += 16;
553 } 549 }
@@ -639,8 +635,7 @@ static void lowpan_fragment_timer_expired(unsigned long entry_addr)
639{ 635{
640 struct lowpan_fragment *entry = (struct lowpan_fragment *)entry_addr; 636 struct lowpan_fragment *entry = (struct lowpan_fragment *)entry_addr;
641 637
642 pr_debug("%s: timer expired for frame with tag %d\n", __func__, 638 pr_debug("timer expired for frame with tag %d\n", entry->tag);
643 entry->tag);
644 639
645 spin_lock(&flist_lock); 640 spin_lock(&flist_lock);
646 list_del(&entry->list); 641 list_del(&entry->list);
@@ -710,7 +705,9 @@ lowpan_process_data(struct sk_buff *skb)
710 /* at least two bytes will be used for the encoding */ 705 /* at least two bytes will be used for the encoding */
711 if (skb->len < 2) 706 if (skb->len < 2)
712 goto drop; 707 goto drop;
713 iphc0 = lowpan_fetch_skb_u8(skb); 708
709 if (lowpan_fetch_skb_u8(skb, &iphc0))
710 goto drop;
714 711
715 /* fragments assembling */ 712 /* fragments assembling */
716 switch (iphc0 & LOWPAN_DISPATCH_MASK) { 713 switch (iphc0 & LOWPAN_DISPATCH_MASK) {
@@ -722,8 +719,9 @@ lowpan_process_data(struct sk_buff *skb)
722 u16 tag; 719 u16 tag;
723 bool found = false; 720 bool found = false;
724 721
725 len = lowpan_fetch_skb_u8(skb); /* frame length */ 722 if (lowpan_fetch_skb_u8(skb, &len) || /* frame length */
726 tag = lowpan_fetch_skb_u16(skb); 723 lowpan_fetch_skb_u16(skb, &tag)) /* fragment tag */
724 goto drop;
727 725
728 /* 726 /*
729 * check if frame assembling with the same tag is 727 * check if frame assembling with the same tag is
@@ -747,7 +745,8 @@ lowpan_process_data(struct sk_buff *skb)
747 if ((iphc0 & LOWPAN_DISPATCH_MASK) == LOWPAN_DISPATCH_FRAG1) 745 if ((iphc0 & LOWPAN_DISPATCH_MASK) == LOWPAN_DISPATCH_FRAG1)
748 goto unlock_and_drop; 746 goto unlock_and_drop;
749 747
750 offset = lowpan_fetch_skb_u8(skb); /* fetch offset */ 748 if (lowpan_fetch_skb_u8(skb, &offset)) /* fetch offset */
749 goto unlock_and_drop;
751 750
752 /* if payload fits buffer, copy it */ 751 /* if payload fits buffer, copy it */
753 if (likely((offset * 8 + skb->len) <= frame->length)) 752 if (likely((offset * 8 + skb->len) <= frame->length))
@@ -769,7 +768,10 @@ lowpan_process_data(struct sk_buff *skb)
769 dev_kfree_skb(skb); 768 dev_kfree_skb(skb);
770 skb = frame->skb; 769 skb = frame->skb;
771 kfree(frame); 770 kfree(frame);
772 iphc0 = lowpan_fetch_skb_u8(skb); 771
772 if (lowpan_fetch_skb_u8(skb, &iphc0))
773 goto drop;
774
773 break; 775 break;
774 } 776 }
775 spin_unlock(&flist_lock); 777 spin_unlock(&flist_lock);
@@ -780,20 +782,19 @@ lowpan_process_data(struct sk_buff *skb)
780 break; 782 break;
781 } 783 }
782 784
783 iphc1 = lowpan_fetch_skb_u8(skb); 785 if (lowpan_fetch_skb_u8(skb, &iphc1))
786 goto drop;
784 787
785 _saddr = mac_cb(skb)->sa.hwaddr; 788 _saddr = mac_cb(skb)->sa.hwaddr;
786 _daddr = mac_cb(skb)->da.hwaddr; 789 _daddr = mac_cb(skb)->da.hwaddr;
787 790
788 pr_debug("(%s): iphc0 = %02x, iphc1 = %02x\n", __func__, iphc0, iphc1); 791 pr_debug("iphc0 = %02x, iphc1 = %02x\n", iphc0, iphc1);
789 792
790 /* another if the CID flag is set */ 793 /* another if the CID flag is set */
791 if (iphc1 & LOWPAN_IPHC_CID) { 794 if (iphc1 & LOWPAN_IPHC_CID) {
792 pr_debug("(%s): CID flag is set, increase header with one\n", 795 pr_debug("CID flag is set, increase header with one\n");
793 __func__); 796 if (lowpan_fetch_skb_u8(skb, &num_context))
794 if (!skb->len)
795 goto drop; 797 goto drop;
796 num_context = lowpan_fetch_skb_u8(skb);
797 } 798 }
798 799
799 hdr.version = 6; 800 hdr.version = 6;
@@ -805,9 +806,9 @@ lowpan_process_data(struct sk_buff *skb)
805 * ECN + DSCP + 4-bit Pad + Flow Label (4 bytes) 806 * ECN + DSCP + 4-bit Pad + Flow Label (4 bytes)
806 */ 807 */
807 case 0: /* 00b */ 808 case 0: /* 00b */
808 if (!skb->len) 809 if (lowpan_fetch_skb_u8(skb, &tmp))
809 goto drop; 810 goto drop;
810 tmp = lowpan_fetch_skb_u8(skb); 811
811 memcpy(&hdr.flow_lbl, &skb->data[0], 3); 812 memcpy(&hdr.flow_lbl, &skb->data[0], 3);
812 skb_pull(skb, 3); 813 skb_pull(skb, 3);
813 hdr.priority = ((tmp >> 2) & 0x0f); 814 hdr.priority = ((tmp >> 2) & 0x0f);
@@ -819,9 +820,9 @@ lowpan_process_data(struct sk_buff *skb)
819 * ECN + DSCP (1 byte), Flow Label is elided 820 * ECN + DSCP (1 byte), Flow Label is elided
820 */ 821 */
821 case 1: /* 10b */ 822 case 1: /* 10b */
822 if (!skb->len) 823 if (lowpan_fetch_skb_u8(skb, &tmp))
823 goto drop; 824 goto drop;
824 tmp = lowpan_fetch_skb_u8(skb); 825
825 hdr.priority = ((tmp >> 2) & 0x0f); 826 hdr.priority = ((tmp >> 2) & 0x0f);
826 hdr.flow_lbl[0] = ((tmp << 6) & 0xC0) | ((tmp >> 2) & 0x30); 827 hdr.flow_lbl[0] = ((tmp << 6) & 0xC0) | ((tmp >> 2) & 0x30);
827 hdr.flow_lbl[1] = 0; 828 hdr.flow_lbl[1] = 0;
@@ -832,9 +833,9 @@ lowpan_process_data(struct sk_buff *skb)
832 * ECN + 2-bit Pad + Flow Label (3 bytes), DSCP is elided 833 * ECN + 2-bit Pad + Flow Label (3 bytes), DSCP is elided
833 */ 834 */
834 case 2: /* 01b */ 835 case 2: /* 01b */
835 if (!skb->len) 836 if (lowpan_fetch_skb_u8(skb, &tmp))
836 goto drop; 837 goto drop;
837 tmp = lowpan_fetch_skb_u8(skb); 838
838 hdr.flow_lbl[0] = (skb->data[0] & 0x0F) | ((tmp >> 2) & 0x30); 839 hdr.flow_lbl[0] = (skb->data[0] & 0x0F) | ((tmp >> 2) & 0x30);
839 memcpy(&hdr.flow_lbl[1], &skb->data[0], 2); 840 memcpy(&hdr.flow_lbl[1], &skb->data[0], 2);
840 skb_pull(skb, 2); 841 skb_pull(skb, 2);
@@ -853,27 +854,26 @@ lowpan_process_data(struct sk_buff *skb)
853 /* Next Header */ 854 /* Next Header */
854 if ((iphc0 & LOWPAN_IPHC_NH_C) == 0) { 855 if ((iphc0 & LOWPAN_IPHC_NH_C) == 0) {
855 /* Next header is carried inline */ 856 /* Next header is carried inline */
856 if (!skb->len) 857 if (lowpan_fetch_skb_u8(skb, &(hdr.nexthdr)))
857 goto drop; 858 goto drop;
858 hdr.nexthdr = lowpan_fetch_skb_u8(skb); 859
859 pr_debug("(%s): NH flag is set, next header is carried " 860 pr_debug("NH flag is set, next header carried inline: %02x\n",
860 "inline: %02x\n", __func__, hdr.nexthdr); 861 hdr.nexthdr);
861 } 862 }
862 863
863 /* Hop Limit */ 864 /* Hop Limit */
864 if ((iphc0 & 0x03) != LOWPAN_IPHC_TTL_I) 865 if ((iphc0 & 0x03) != LOWPAN_IPHC_TTL_I)
865 hdr.hop_limit = lowpan_ttl_values[iphc0 & 0x03]; 866 hdr.hop_limit = lowpan_ttl_values[iphc0 & 0x03];
866 else { 867 else {
867 if (!skb->len) 868 if (lowpan_fetch_skb_u8(skb, &(hdr.hop_limit)))
868 goto drop; 869 goto drop;
869 hdr.hop_limit = lowpan_fetch_skb_u8(skb);
870 } 870 }
871 871
872 /* Extract SAM to the tmp variable */ 872 /* Extract SAM to the tmp variable */
873 tmp = ((iphc1 & LOWPAN_IPHC_SAM) >> LOWPAN_IPHC_SAM_BIT) & 0x03; 873 tmp = ((iphc1 & LOWPAN_IPHC_SAM) >> LOWPAN_IPHC_SAM_BIT) & 0x03;
874 874
875 /* Source address uncompression */ 875 /* Source address uncompression */
876 pr_debug("(%s): source address stateless compression\n", __func__); 876 pr_debug("source address stateless compression\n");
877 err = lowpan_uncompress_addr(skb, &hdr.saddr, lowpan_llprefix, 877 err = lowpan_uncompress_addr(skb, &hdr.saddr, lowpan_llprefix,
878 lowpan_unc_llconf[tmp], skb->data); 878 lowpan_unc_llconf[tmp], skb->data);
879 if (err) 879 if (err)
@@ -885,19 +885,15 @@ lowpan_process_data(struct sk_buff *skb)
885 /* check for Multicast Compression */ 885 /* check for Multicast Compression */
886 if (iphc1 & LOWPAN_IPHC_M) { 886 if (iphc1 & LOWPAN_IPHC_M) {
887 if (iphc1 & LOWPAN_IPHC_DAC) { 887 if (iphc1 & LOWPAN_IPHC_DAC) {
888 pr_debug("(%s): destination address context-based " 888 pr_debug("dest: context-based mcast compression\n");
889 "multicast compression\n", __func__);
890 /* TODO: implement this */ 889 /* TODO: implement this */
891 } else { 890 } else {
892 u8 prefix[] = {0xff, 0x02}; 891 u8 prefix[] = {0xff, 0x02};
893 892
894 pr_debug("(%s): destination address non-context-based" 893 pr_debug("dest: non context-based mcast compression\n");
895 " multicast compression\n", __func__);
896 if (0 < tmp && tmp < 3) { 894 if (0 < tmp && tmp < 3) {
897 if (!skb->len) 895 if (lowpan_fetch_skb_u8(skb, &prefix[1]))
898 goto drop; 896 goto drop;
899 else
900 prefix[1] = lowpan_fetch_skb_u8(skb);
901 } 897 }
902 898
903 err = lowpan_uncompress_addr(skb, &hdr.daddr, prefix, 899 err = lowpan_uncompress_addr(skb, &hdr.daddr, prefix,
@@ -906,8 +902,7 @@ lowpan_process_data(struct sk_buff *skb)
906 goto drop; 902 goto drop;
907 } 903 }
908 } else { 904 } else {
909 pr_debug("(%s): destination address stateless compression\n", 905 pr_debug("dest: stateless compression\n");
910 __func__);
911 err = lowpan_uncompress_addr(skb, &hdr.daddr, lowpan_llprefix, 906 err = lowpan_uncompress_addr(skb, &hdr.daddr, lowpan_llprefix,
912 lowpan_unc_llconf[tmp], skb->data); 907 lowpan_unc_llconf[tmp], skb->data);
913 if (err) 908 if (err)
@@ -922,11 +917,11 @@ lowpan_process_data(struct sk_buff *skb)
922 /* Not fragmented package */ 917 /* Not fragmented package */
923 hdr.payload_len = htons(skb->len); 918 hdr.payload_len = htons(skb->len);
924 919
925 pr_debug("(%s): skb headroom size = %d, data length = %d\n", __func__, 920 pr_debug("skb headroom size = %d, data length = %d\n",
926 skb_headroom(skb), skb->len); 921 skb_headroom(skb), skb->len);
927 922
928 pr_debug("(%s): IPv6 header dump:\n\tversion = %d\n\tlength = %d\n\t" 923 pr_debug("IPv6 header dump:\n\tversion = %d\n\tlength = %d\n\t"
929 "nexthdr = 0x%02x\n\thop_lim = %d\n", __func__, hdr.version, 924 "nexthdr = 0x%02x\n\thop_lim = %d\n", hdr.version,
930 ntohs(hdr.payload_len), hdr.nexthdr, hdr.hop_limit); 925 ntohs(hdr.payload_len), hdr.nexthdr, hdr.hop_limit);
931 926
932 lowpan_raw_dump_table(__func__, "raw header dump", (u8 *)&hdr, 927 lowpan_raw_dump_table(__func__, "raw header dump", (u8 *)&hdr,
@@ -940,6 +935,19 @@ drop:
940 return -EINVAL; 935 return -EINVAL;
941} 936}
942 937
938static int lowpan_set_address(struct net_device *dev, void *p)
939{
940 struct sockaddr *sa = p;
941
942 if (netif_running(dev))
943 return -EBUSY;
944
945 /* TODO: validate addr */
946 memcpy(dev->dev_addr, sa->sa_data, dev->addr_len);
947
948 return 0;
949}
950
943static int lowpan_get_mac_header_length(struct sk_buff *skb) 951static int lowpan_get_mac_header_length(struct sk_buff *skb)
944{ 952{
945 /* 953 /*
@@ -1028,11 +1036,11 @@ static netdev_tx_t lowpan_xmit(struct sk_buff *skb, struct net_device *dev)
1028{ 1036{
1029 int err = -1; 1037 int err = -1;
1030 1038
1031 pr_debug("(%s): package xmit\n", __func__); 1039 pr_debug("package xmit\n");
1032 1040
1033 skb->dev = lowpan_dev_info(dev)->real_dev; 1041 skb->dev = lowpan_dev_info(dev)->real_dev;
1034 if (skb->dev == NULL) { 1042 if (skb->dev == NULL) {
1035 pr_debug("(%s) ERROR: no real wpan device found\n", __func__); 1043 pr_debug("ERROR: no real wpan device found\n");
1036 goto error; 1044 goto error;
1037 } 1045 }
1038 1046
@@ -1041,14 +1049,13 @@ static netdev_tx_t lowpan_xmit(struct sk_buff *skb, struct net_device *dev)
1041 goto out; 1049 goto out;
1042 } 1050 }
1043 1051
1044 pr_debug("(%s): frame is too big, fragmentation is needed\n", 1052 pr_debug("frame is too big, fragmentation is needed\n");
1045 __func__);
1046 err = lowpan_skb_fragmentation(skb); 1053 err = lowpan_skb_fragmentation(skb);
1047error: 1054error:
1048 dev_kfree_skb(skb); 1055 dev_kfree_skb(skb);
1049out: 1056out:
1050 if (err < 0) 1057 if (err < 0)
1051 pr_debug("(%s): ERROR: xmit failed\n", __func__); 1058 pr_debug("ERROR: xmit failed\n");
1052 1059
1053 return (err < 0 ? NETDEV_TX_BUSY : NETDEV_TX_OK); 1060 return (err < 0 ? NETDEV_TX_BUSY : NETDEV_TX_OK);
1054} 1061}
@@ -1083,7 +1090,7 @@ static struct header_ops lowpan_header_ops = {
1083 1090
1084static const struct net_device_ops lowpan_netdev_ops = { 1091static const struct net_device_ops lowpan_netdev_ops = {
1085 .ndo_start_xmit = lowpan_xmit, 1092 .ndo_start_xmit = lowpan_xmit,
1086 .ndo_set_mac_address = eth_mac_addr, 1093 .ndo_set_mac_address = lowpan_set_address,
1087}; 1094};
1088 1095
1089static struct ieee802154_mlme_ops lowpan_mlme = { 1096static struct ieee802154_mlme_ops lowpan_mlme = {
@@ -1094,8 +1101,6 @@ static struct ieee802154_mlme_ops lowpan_mlme = {
1094 1101
1095static void lowpan_setup(struct net_device *dev) 1102static void lowpan_setup(struct net_device *dev)
1096{ 1103{
1097 pr_debug("(%s)\n", __func__);
1098
1099 dev->addr_len = IEEE802154_ADDR_LEN; 1104 dev->addr_len = IEEE802154_ADDR_LEN;
1100 memset(dev->broadcast, 0xff, IEEE802154_ADDR_LEN); 1105 memset(dev->broadcast, 0xff, IEEE802154_ADDR_LEN);
1101 dev->type = ARPHRD_IEEE802154; 1106 dev->type = ARPHRD_IEEE802154;
@@ -1115,8 +1120,6 @@ static void lowpan_setup(struct net_device *dev)
1115 1120
1116static int lowpan_validate(struct nlattr *tb[], struct nlattr *data[]) 1121static int lowpan_validate(struct nlattr *tb[], struct nlattr *data[])
1117{ 1122{
1118 pr_debug("(%s)\n", __func__);
1119
1120 if (tb[IFLA_ADDRESS]) { 1123 if (tb[IFLA_ADDRESS]) {
1121 if (nla_len(tb[IFLA_ADDRESS]) != IEEE802154_ADDR_LEN) 1124 if (nla_len(tb[IFLA_ADDRESS]) != IEEE802154_ADDR_LEN)
1122 return -EINVAL; 1125 return -EINVAL;
@@ -1157,7 +1160,7 @@ static int lowpan_newlink(struct net *src_net, struct net_device *dev,
1157 struct net_device *real_dev; 1160 struct net_device *real_dev;
1158 struct lowpan_dev_record *entry; 1161 struct lowpan_dev_record *entry;
1159 1162
1160 pr_debug("(%s)\n", __func__); 1163 pr_debug("adding new link\n");
1161 1164
1162 if (!tb[IFLA_LINK]) 1165 if (!tb[IFLA_LINK])
1163 return -EINVAL; 1166 return -EINVAL;
@@ -1252,8 +1255,6 @@ static int __init lowpan_init_module(void)
1252{ 1255{
1253 int err = 0; 1256 int err = 0;
1254 1257
1255 pr_debug("(%s)\n", __func__);
1256
1257 err = lowpan_netlink_init(); 1258 err = lowpan_netlink_init();
1258 if (err < 0) 1259 if (err < 0)
1259 goto out; 1260 goto out;
@@ -1265,8 +1266,6 @@ out:
1265 1266
1266static void __exit lowpan_cleanup_module(void) 1267static void __exit lowpan_cleanup_module(void)
1267{ 1268{
1268 pr_debug("(%s)\n", __func__);
1269
1270 lowpan_netlink_fini(); 1269 lowpan_netlink_fini();
1271 1270
1272 dev_remove_pack(&lowpan_packet_type); 1271 dev_remove_pack(&lowpan_packet_type);
diff --git a/net/ieee802154/netlink.c b/net/ieee802154/netlink.c
index c8097ae2482..97351e1d07a 100644
--- a/net/ieee802154/netlink.c
+++ b/net/ieee802154/netlink.c
@@ -44,7 +44,7 @@ struct genl_family nl802154_family = {
44struct sk_buff *ieee802154_nl_create(int flags, u8 req) 44struct sk_buff *ieee802154_nl_create(int flags, u8 req)
45{ 45{
46 void *hdr; 46 void *hdr;
47 struct sk_buff *msg = nlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC); 47 struct sk_buff *msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
48 unsigned long f; 48 unsigned long f;
49 49
50 if (!msg) 50 if (!msg)
@@ -80,7 +80,7 @@ struct sk_buff *ieee802154_nl_new_reply(struct genl_info *info,
80 int flags, u8 req) 80 int flags, u8 req)
81{ 81{
82 void *hdr; 82 void *hdr;
83 struct sk_buff *msg = nlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC); 83 struct sk_buff *msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
84 84
85 if (!msg) 85 if (!msg)
86 return NULL; 86 return NULL;
diff --git a/net/ieee802154/nl-mac.c b/net/ieee802154/nl-mac.c
index ca92587720f..1e9917124e7 100644
--- a/net/ieee802154/nl-mac.c
+++ b/net/ieee802154/nl-mac.c
@@ -530,7 +530,7 @@ static int ieee802154_list_iface(struct sk_buff *skb,
530 if (!dev) 530 if (!dev)
531 return -ENODEV; 531 return -ENODEV;
532 532
533 msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); 533 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
534 if (!msg) 534 if (!msg)
535 goto out_dev; 535 goto out_dev;
536 536
diff --git a/net/ieee802154/nl-phy.c b/net/ieee802154/nl-phy.c
index eed291626da..d54be34cca9 100644
--- a/net/ieee802154/nl-phy.c
+++ b/net/ieee802154/nl-phy.c
@@ -101,7 +101,7 @@ static int ieee802154_list_phy(struct sk_buff *skb,
101 if (!phy) 101 if (!phy)
102 return -ENODEV; 102 return -ENODEV;
103 103
104 msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); 104 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
105 if (!msg) 105 if (!msg)
106 goto out_dev; 106 goto out_dev;
107 107
diff --git a/net/ipv4/Makefile b/net/ipv4/Makefile
index ff75d3bbcd6..5a23e8b3710 100644
--- a/net/ipv4/Makefile
+++ b/net/ipv4/Makefile
@@ -7,7 +7,7 @@ obj-y := route.o inetpeer.o protocol.o \
7 ip_output.o ip_sockglue.o inet_hashtables.o \ 7 ip_output.o ip_sockglue.o inet_hashtables.o \
8 inet_timewait_sock.o inet_connection_sock.o \ 8 inet_timewait_sock.o inet_connection_sock.o \
9 tcp.o tcp_input.o tcp_output.o tcp_timer.o tcp_ipv4.o \ 9 tcp.o tcp_input.o tcp_output.o tcp_timer.o tcp_ipv4.o \
10 tcp_minisocks.o tcp_cong.o \ 10 tcp_minisocks.o tcp_cong.o tcp_metrics.o \
11 datagram.o raw.o udp.o udplite.o \ 11 datagram.o raw.o udp.o udplite.o \
12 arp.o icmp.o devinet.o af_inet.o igmp.o \ 12 arp.o icmp.o devinet.o af_inet.o igmp.o \
13 fib_frontend.o fib_semantics.o fib_trie.o \ 13 fib_frontend.o fib_semantics.o fib_trie.o \
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index c8f7aee587d..07a02f6e969 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -157,6 +157,7 @@ void inet_sock_destruct(struct sock *sk)
157 157
158 kfree(rcu_dereference_protected(inet->inet_opt, 1)); 158 kfree(rcu_dereference_protected(inet->inet_opt, 1));
159 dst_release(rcu_dereference_check(sk->sk_dst_cache, 1)); 159 dst_release(rcu_dereference_check(sk->sk_dst_cache, 1));
160 dst_release(sk->sk_rx_dst);
160 sk_refcnt_debug_dec(sk); 161 sk_refcnt_debug_dec(sk);
161} 162}
162EXPORT_SYMBOL(inet_sock_destruct); 163EXPORT_SYMBOL(inet_sock_destruct);
@@ -242,20 +243,18 @@ void build_ehash_secret(void)
242} 243}
243EXPORT_SYMBOL(build_ehash_secret); 244EXPORT_SYMBOL(build_ehash_secret);
244 245
245static inline int inet_netns_ok(struct net *net, int protocol) 246static inline int inet_netns_ok(struct net *net, __u8 protocol)
246{ 247{
247 int hash;
248 const struct net_protocol *ipprot; 248 const struct net_protocol *ipprot;
249 249
250 if (net_eq(net, &init_net)) 250 if (net_eq(net, &init_net))
251 return 1; 251 return 1;
252 252
253 hash = protocol & (MAX_INET_PROTOS - 1); 253 ipprot = rcu_dereference(inet_protos[protocol]);
254 ipprot = rcu_dereference(inet_protos[hash]); 254 if (ipprot == NULL) {
255
256 if (ipprot == NULL)
257 /* raw IP is OK */ 255 /* raw IP is OK */
258 return 1; 256 return 1;
257 }
259 return ipprot->netns_ok; 258 return ipprot->netns_ok;
260} 259}
261 260
@@ -553,7 +552,7 @@ int inet_dgram_connect(struct socket *sock, struct sockaddr *uaddr,
553 552
554 if (!inet_sk(sk)->inet_num && inet_autobind(sk)) 553 if (!inet_sk(sk)->inet_num && inet_autobind(sk))
555 return -EAGAIN; 554 return -EAGAIN;
556 return sk->sk_prot->connect(sk, (struct sockaddr *)uaddr, addr_len); 555 return sk->sk_prot->connect(sk, uaddr, addr_len);
557} 556}
558EXPORT_SYMBOL(inet_dgram_connect); 557EXPORT_SYMBOL(inet_dgram_connect);
559 558
@@ -1216,8 +1215,8 @@ EXPORT_SYMBOL(inet_sk_rebuild_header);
1216 1215
1217static int inet_gso_send_check(struct sk_buff *skb) 1216static int inet_gso_send_check(struct sk_buff *skb)
1218{ 1217{
1219 const struct iphdr *iph;
1220 const struct net_protocol *ops; 1218 const struct net_protocol *ops;
1219 const struct iphdr *iph;
1221 int proto; 1220 int proto;
1222 int ihl; 1221 int ihl;
1223 int err = -EINVAL; 1222 int err = -EINVAL;
@@ -1236,7 +1235,7 @@ static int inet_gso_send_check(struct sk_buff *skb)
1236 __skb_pull(skb, ihl); 1235 __skb_pull(skb, ihl);
1237 skb_reset_transport_header(skb); 1236 skb_reset_transport_header(skb);
1238 iph = ip_hdr(skb); 1237 iph = ip_hdr(skb);
1239 proto = iph->protocol & (MAX_INET_PROTOS - 1); 1238 proto = iph->protocol;
1240 err = -EPROTONOSUPPORT; 1239 err = -EPROTONOSUPPORT;
1241 1240
1242 rcu_read_lock(); 1241 rcu_read_lock();
@@ -1253,8 +1252,8 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb,
1253 netdev_features_t features) 1252 netdev_features_t features)
1254{ 1253{
1255 struct sk_buff *segs = ERR_PTR(-EINVAL); 1254 struct sk_buff *segs = ERR_PTR(-EINVAL);
1256 struct iphdr *iph;
1257 const struct net_protocol *ops; 1255 const struct net_protocol *ops;
1256 struct iphdr *iph;
1258 int proto; 1257 int proto;
1259 int ihl; 1258 int ihl;
1260 int id; 1259 int id;
@@ -1286,7 +1285,7 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb,
1286 skb_reset_transport_header(skb); 1285 skb_reset_transport_header(skb);
1287 iph = ip_hdr(skb); 1286 iph = ip_hdr(skb);
1288 id = ntohs(iph->id); 1287 id = ntohs(iph->id);
1289 proto = iph->protocol & (MAX_INET_PROTOS - 1); 1288 proto = iph->protocol;
1290 segs = ERR_PTR(-EPROTONOSUPPORT); 1289 segs = ERR_PTR(-EPROTONOSUPPORT);
1291 1290
1292 rcu_read_lock(); 1291 rcu_read_lock();
@@ -1340,7 +1339,7 @@ static struct sk_buff **inet_gro_receive(struct sk_buff **head,
1340 goto out; 1339 goto out;
1341 } 1340 }
1342 1341
1343 proto = iph->protocol & (MAX_INET_PROTOS - 1); 1342 proto = iph->protocol;
1344 1343
1345 rcu_read_lock(); 1344 rcu_read_lock();
1346 ops = rcu_dereference(inet_protos[proto]); 1345 ops = rcu_dereference(inet_protos[proto]);
@@ -1398,11 +1397,11 @@ out:
1398 1397
1399static int inet_gro_complete(struct sk_buff *skb) 1398static int inet_gro_complete(struct sk_buff *skb)
1400{ 1399{
1401 const struct net_protocol *ops; 1400 __be16 newlen = htons(skb->len - skb_network_offset(skb));
1402 struct iphdr *iph = ip_hdr(skb); 1401 struct iphdr *iph = ip_hdr(skb);
1403 int proto = iph->protocol & (MAX_INET_PROTOS - 1); 1402 const struct net_protocol *ops;
1403 int proto = iph->protocol;
1404 int err = -ENOSYS; 1404 int err = -ENOSYS;
1405 __be16 newlen = htons(skb->len - skb_network_offset(skb));
1406 1405
1407 csum_replace2(&iph->check, iph->tot_len, newlen); 1406 csum_replace2(&iph->check, iph->tot_len, newlen);
1408 iph->tot_len = newlen; 1407 iph->tot_len = newlen;
@@ -1520,14 +1519,15 @@ static const struct net_protocol igmp_protocol = {
1520#endif 1519#endif
1521 1520
1522static const struct net_protocol tcp_protocol = { 1521static const struct net_protocol tcp_protocol = {
1523 .handler = tcp_v4_rcv, 1522 .early_demux = tcp_v4_early_demux,
1524 .err_handler = tcp_v4_err, 1523 .handler = tcp_v4_rcv,
1525 .gso_send_check = tcp_v4_gso_send_check, 1524 .err_handler = tcp_v4_err,
1526 .gso_segment = tcp_tso_segment, 1525 .gso_send_check = tcp_v4_gso_send_check,
1527 .gro_receive = tcp4_gro_receive, 1526 .gso_segment = tcp_tso_segment,
1528 .gro_complete = tcp4_gro_complete, 1527 .gro_receive = tcp4_gro_receive,
1529 .no_policy = 1, 1528 .gro_complete = tcp4_gro_complete,
1530 .netns_ok = 1, 1529 .no_policy = 1,
1530 .netns_ok = 1,
1531}; 1531};
1532 1532
1533static const struct net_protocol udp_protocol = { 1533static const struct net_protocol udp_protocol = {
diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
index e8f2617ecd4..916d5ecaf6c 100644
--- a/net/ipv4/ah4.c
+++ b/net/ipv4/ah4.c
@@ -408,6 +408,7 @@ static void ah4_err(struct sk_buff *skb, u32 info)
408 return; 408 return;
409 pr_debug("pmtu discovery on SA AH/%08x/%08x\n", 409 pr_debug("pmtu discovery on SA AH/%08x/%08x\n",
410 ntohl(ah->spi), ntohl(iph->daddr)); 410 ntohl(ah->spi), ntohl(iph->daddr));
411 ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_AH, 0);
411 xfrm_state_put(x); 412 xfrm_state_put(x);
412} 413}
413 414
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index cda37be02f8..2e560f0c757 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -790,7 +790,8 @@ static int arp_process(struct sk_buff *skb)
790 * Check for bad requests for 127.x.x.x and requests for multicast 790 * Check for bad requests for 127.x.x.x and requests for multicast
791 * addresses. If this is one such, delete it. 791 * addresses. If this is one such, delete it.
792 */ 792 */
793 if (ipv4_is_loopback(tip) || ipv4_is_multicast(tip)) 793 if (ipv4_is_multicast(tip) ||
794 (!IN_DEV_ROUTE_LOCALNET(in_dev) && ipv4_is_loopback(tip)))
794 goto out; 795 goto out;
795 796
796/* 797/*
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 10e15a144e9..44bf82e3aef 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -1500,7 +1500,8 @@ static int devinet_conf_proc(ctl_table *ctl, int write,
1500 1500
1501 if (cnf == net->ipv4.devconf_dflt) 1501 if (cnf == net->ipv4.devconf_dflt)
1502 devinet_copy_dflt_conf(net, i); 1502 devinet_copy_dflt_conf(net, i);
1503 if (i == IPV4_DEVCONF_ACCEPT_LOCAL - 1) 1503 if (i == IPV4_DEVCONF_ACCEPT_LOCAL - 1 ||
1504 i == IPV4_DEVCONF_ROUTE_LOCALNET - 1)
1504 if ((new_value == 0) && (old_value != 0)) 1505 if ((new_value == 0) && (old_value != 0))
1505 rt_cache_flush(net, 0); 1506 rt_cache_flush(net, 0);
1506 } 1507 }
@@ -1617,6 +1618,8 @@ static struct devinet_sysctl_table {
1617 "force_igmp_version"), 1618 "force_igmp_version"),
1618 DEVINET_SYSCTL_FLUSHING_ENTRY(PROMOTE_SECONDARIES, 1619 DEVINET_SYSCTL_FLUSHING_ENTRY(PROMOTE_SECONDARIES,
1619 "promote_secondaries"), 1620 "promote_secondaries"),
1621 DEVINET_SYSCTL_FLUSHING_ENTRY(ROUTE_LOCALNET,
1622 "route_localnet"),
1620 }, 1623 },
1621}; 1624};
1622 1625
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index cb982a61536..7b95b49a36c 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -494,6 +494,7 @@ static void esp4_err(struct sk_buff *skb, u32 info)
494 return; 494 return;
495 NETDEBUG(KERN_DEBUG "pmtu discovery on SA ESP/%08x/%08x\n", 495 NETDEBUG(KERN_DEBUG "pmtu discovery on SA ESP/%08x/%08x\n",
496 ntohl(esph->spi), ntohl(iph->daddr)); 496 ntohl(esph->spi), ntohl(iph->daddr));
497 ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_ESP, 0);
497 xfrm_state_put(x); 498 xfrm_state_put(x);
498} 499}
499 500
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 3854411fa37..81f85716a89 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -31,6 +31,7 @@
31#include <linux/if_addr.h> 31#include <linux/if_addr.h>
32#include <linux/if_arp.h> 32#include <linux/if_arp.h>
33#include <linux/skbuff.h> 33#include <linux/skbuff.h>
34#include <linux/cache.h>
34#include <linux/init.h> 35#include <linux/init.h>
35#include <linux/list.h> 36#include <linux/list.h>
36#include <linux/slab.h> 37#include <linux/slab.h>
@@ -85,6 +86,24 @@ struct fib_table *fib_new_table(struct net *net, u32 id)
85 tb = fib_trie_table(id); 86 tb = fib_trie_table(id);
86 if (!tb) 87 if (!tb)
87 return NULL; 88 return NULL;
89
90 switch (id) {
91 case RT_TABLE_LOCAL:
92 net->ipv4.fib_local = tb;
93 break;
94
95 case RT_TABLE_MAIN:
96 net->ipv4.fib_main = tb;
97 break;
98
99 case RT_TABLE_DEFAULT:
100 net->ipv4.fib_default = tb;
101 break;
102
103 default:
104 break;
105 }
106
88 h = id & (FIB_TABLE_HASHSZ - 1); 107 h = id & (FIB_TABLE_HASHSZ - 1);
89 hlist_add_head_rcu(&tb->tb_hlist, &net->ipv4.fib_table_hash[h]); 108 hlist_add_head_rcu(&tb->tb_hlist, &net->ipv4.fib_table_hash[h]);
90 return tb; 109 return tb;
@@ -180,6 +199,43 @@ unsigned int inet_dev_addr_type(struct net *net, const struct net_device *dev,
180} 199}
181EXPORT_SYMBOL(inet_dev_addr_type); 200EXPORT_SYMBOL(inet_dev_addr_type);
182 201
202__be32 fib_compute_spec_dst(struct sk_buff *skb)
203{
204 struct net_device *dev = skb->dev;
205 struct in_device *in_dev;
206 struct fib_result res;
207 struct rtable *rt;
208 struct flowi4 fl4;
209 struct net *net;
210 int scope;
211
212 rt = skb_rtable(skb);
213 if (!(rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)))
214 return ip_hdr(skb)->daddr;
215
216 in_dev = __in_dev_get_rcu(dev);
217 BUG_ON(!in_dev);
218
219 net = dev_net(dev);
220
221 scope = RT_SCOPE_UNIVERSE;
222 if (!ipv4_is_zeronet(ip_hdr(skb)->saddr)) {
223 fl4.flowi4_oif = 0;
224 fl4.flowi4_iif = net->loopback_dev->ifindex;
225 fl4.daddr = ip_hdr(skb)->saddr;
226 fl4.saddr = 0;
227 fl4.flowi4_tos = RT_TOS(ip_hdr(skb)->tos);
228 fl4.flowi4_scope = scope;
229 fl4.flowi4_mark = IN_DEV_SRC_VMARK(in_dev) ? skb->mark : 0;
230 if (!fib_lookup(net, &fl4, &res))
231 return FIB_RES_PREFSRC(net, res);
232 } else {
233 scope = RT_SCOPE_LINK;
234 }
235
236 return inet_select_addr(dev, ip_hdr(skb)->saddr, scope);
237}
238
183/* Given (packet source, input interface) and optional (dst, oif, tos): 239/* Given (packet source, input interface) and optional (dst, oif, tos):
184 * - (main) check, that source is valid i.e. not broadcast or our local 240 * - (main) check, that source is valid i.e. not broadcast or our local
185 * address. 241 * address.
@@ -188,17 +244,15 @@ EXPORT_SYMBOL(inet_dev_addr_type);
188 * - check, that packet arrived from expected physical interface. 244 * - check, that packet arrived from expected physical interface.
189 * called with rcu_read_lock() 245 * called with rcu_read_lock()
190 */ 246 */
191int fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst, u8 tos, 247static int __fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
192 int oif, struct net_device *dev, __be32 *spec_dst, 248 u8 tos, int oif, struct net_device *dev,
193 u32 *itag) 249 int rpf, struct in_device *idev, u32 *itag)
194{ 250{
195 struct in_device *in_dev; 251 int ret, no_addr, accept_local;
196 struct flowi4 fl4;
197 struct fib_result res; 252 struct fib_result res;
198 int no_addr, rpf, accept_local; 253 struct flowi4 fl4;
199 bool dev_match;
200 int ret;
201 struct net *net; 254 struct net *net;
255 bool dev_match;
202 256
203 fl4.flowi4_oif = 0; 257 fl4.flowi4_oif = 0;
204 fl4.flowi4_iif = oif; 258 fl4.flowi4_iif = oif;
@@ -207,20 +261,11 @@ int fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst, u8 tos,
207 fl4.flowi4_tos = tos; 261 fl4.flowi4_tos = tos;
208 fl4.flowi4_scope = RT_SCOPE_UNIVERSE; 262 fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
209 263
210 no_addr = rpf = accept_local = 0; 264 no_addr = accept_local = 0;
211 in_dev = __in_dev_get_rcu(dev); 265 no_addr = idev->ifa_list == NULL;
212 if (in_dev) {
213 no_addr = in_dev->ifa_list == NULL;
214
215 /* Ignore rp_filter for packets protected by IPsec. */
216 rpf = secpath_exists(skb) ? 0 : IN_DEV_RPFILTER(in_dev);
217 266
218 accept_local = IN_DEV_ACCEPT_LOCAL(in_dev); 267 accept_local = IN_DEV_ACCEPT_LOCAL(idev);
219 fl4.flowi4_mark = IN_DEV_SRC_VMARK(in_dev) ? skb->mark : 0; 268 fl4.flowi4_mark = IN_DEV_SRC_VMARK(idev) ? skb->mark : 0;
220 }
221
222 if (in_dev == NULL)
223 goto e_inval;
224 269
225 net = dev_net(dev); 270 net = dev_net(dev);
226 if (fib_lookup(net, &fl4, &res)) 271 if (fib_lookup(net, &fl4, &res))
@@ -229,7 +274,6 @@ int fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst, u8 tos,
229 if (res.type != RTN_LOCAL || !accept_local) 274 if (res.type != RTN_LOCAL || !accept_local)
230 goto e_inval; 275 goto e_inval;
231 } 276 }
232 *spec_dst = FIB_RES_PREFSRC(net, res);
233 fib_combine_itag(itag, &res); 277 fib_combine_itag(itag, &res);
234 dev_match = false; 278 dev_match = false;
235 279
@@ -258,17 +302,14 @@ int fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst, u8 tos,
258 302
259 ret = 0; 303 ret = 0;
260 if (fib_lookup(net, &fl4, &res) == 0) { 304 if (fib_lookup(net, &fl4, &res) == 0) {
261 if (res.type == RTN_UNICAST) { 305 if (res.type == RTN_UNICAST)
262 *spec_dst = FIB_RES_PREFSRC(net, res);
263 ret = FIB_RES_NH(res).nh_scope >= RT_SCOPE_HOST; 306 ret = FIB_RES_NH(res).nh_scope >= RT_SCOPE_HOST;
264 }
265 } 307 }
266 return ret; 308 return ret;
267 309
268last_resort: 310last_resort:
269 if (rpf) 311 if (rpf)
270 goto e_rpf; 312 goto e_rpf;
271 *spec_dst = inet_select_addr(dev, 0, RT_SCOPE_UNIVERSE);
272 *itag = 0; 313 *itag = 0;
273 return 0; 314 return 0;
274 315
@@ -278,6 +319,20 @@ e_rpf:
278 return -EXDEV; 319 return -EXDEV;
279} 320}
280 321
322/* Ignore rp_filter for packets protected by IPsec. */
323int fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
324 u8 tos, int oif, struct net_device *dev,
325 struct in_device *idev, u32 *itag)
326{
327 int r = secpath_exists(skb) ? 0 : IN_DEV_RPFILTER(idev);
328
329 if (!r && !fib_num_tclassid_users(dev_net(dev))) {
330 *itag = 0;
331 return 0;
332 }
333 return __fib_validate_source(skb, src, dst, tos, oif, dev, r, idev, itag);
334}
335
281static inline __be32 sk_extract_addr(struct sockaddr *addr) 336static inline __be32 sk_extract_addr(struct sockaddr *addr)
282{ 337{
283 return ((struct sockaddr_in *) addr)->sin_addr.s_addr; 338 return ((struct sockaddr_in *) addr)->sin_addr.s_addr;
@@ -935,8 +990,11 @@ static void nl_fib_input(struct sk_buff *skb)
935static int __net_init nl_fib_lookup_init(struct net *net) 990static int __net_init nl_fib_lookup_init(struct net *net)
936{ 991{
937 struct sock *sk; 992 struct sock *sk;
938 sk = netlink_kernel_create(net, NETLINK_FIB_LOOKUP, 0, 993 struct netlink_kernel_cfg cfg = {
939 nl_fib_input, NULL, THIS_MODULE); 994 .input = nl_fib_input,
995 };
996
997 sk = netlink_kernel_create(net, NETLINK_FIB_LOOKUP, THIS_MODULE, &cfg);
940 if (sk == NULL) 998 if (sk == NULL)
941 return -EAFNOSUPPORT; 999 return -EAFNOSUPPORT;
942 net->ipv4.fibnl = sk; 1000 net->ipv4.fibnl = sk;
@@ -1090,6 +1148,9 @@ static int __net_init fib_net_init(struct net *net)
1090{ 1148{
1091 int error; 1149 int error;
1092 1150
1151#ifdef CONFIG_IP_ROUTE_CLASSID
1152 net->ipv4.fib_num_tclassid_users = 0;
1153#endif
1093 error = ip_fib_net_init(net); 1154 error = ip_fib_net_init(net);
1094 if (error < 0) 1155 if (error < 0)
1095 goto out; 1156 goto out;
diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c
index 2d043f71ef7..c06da93b0b7 100644
--- a/net/ipv4/fib_rules.c
+++ b/net/ipv4/fib_rules.c
@@ -54,7 +54,7 @@ u32 fib_rules_tclass(const struct fib_result *res)
54} 54}
55#endif 55#endif
56 56
57int fib_lookup(struct net *net, struct flowi4 *flp, struct fib_result *res) 57int __fib_lookup(struct net *net, struct flowi4 *flp, struct fib_result *res)
58{ 58{
59 struct fib_lookup_arg arg = { 59 struct fib_lookup_arg arg = {
60 .result = res, 60 .result = res,
@@ -67,7 +67,7 @@ int fib_lookup(struct net *net, struct flowi4 *flp, struct fib_result *res)
67 67
68 return err; 68 return err;
69} 69}
70EXPORT_SYMBOL_GPL(fib_lookup); 70EXPORT_SYMBOL_GPL(__fib_lookup);
71 71
72static int fib4_rule_action(struct fib_rule *rule, struct flowi *flp, 72static int fib4_rule_action(struct fib_rule *rule, struct flowi *flp,
73 int flags, struct fib_lookup_arg *arg) 73 int flags, struct fib_lookup_arg *arg)
@@ -169,8 +169,11 @@ static int fib4_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
169 rule4->dst = nla_get_be32(tb[FRA_DST]); 169 rule4->dst = nla_get_be32(tb[FRA_DST]);
170 170
171#ifdef CONFIG_IP_ROUTE_CLASSID 171#ifdef CONFIG_IP_ROUTE_CLASSID
172 if (tb[FRA_FLOW]) 172 if (tb[FRA_FLOW]) {
173 rule4->tclassid = nla_get_u32(tb[FRA_FLOW]); 173 rule4->tclassid = nla_get_u32(tb[FRA_FLOW]);
174 if (rule4->tclassid)
175 net->ipv4.fib_num_tclassid_users++;
176 }
174#endif 177#endif
175 178
176 rule4->src_len = frh->src_len; 179 rule4->src_len = frh->src_len;
@@ -179,11 +182,24 @@ static int fib4_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
179 rule4->dstmask = inet_make_mask(rule4->dst_len); 182 rule4->dstmask = inet_make_mask(rule4->dst_len);
180 rule4->tos = frh->tos; 183 rule4->tos = frh->tos;
181 184
185 net->ipv4.fib_has_custom_rules = true;
182 err = 0; 186 err = 0;
183errout: 187errout:
184 return err; 188 return err;
185} 189}
186 190
191static void fib4_rule_delete(struct fib_rule *rule)
192{
193 struct net *net = rule->fr_net;
194#ifdef CONFIG_IP_ROUTE_CLASSID
195 struct fib4_rule *rule4 = (struct fib4_rule *) rule;
196
197 if (rule4->tclassid)
198 net->ipv4.fib_num_tclassid_users--;
199#endif
200 net->ipv4.fib_has_custom_rules = true;
201}
202
187static int fib4_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh, 203static int fib4_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh,
188 struct nlattr **tb) 204 struct nlattr **tb)
189{ 205{
@@ -256,6 +272,7 @@ static const struct fib_rules_ops __net_initdata fib4_rules_ops_template = {
256 .action = fib4_rule_action, 272 .action = fib4_rule_action,
257 .match = fib4_rule_match, 273 .match = fib4_rule_match,
258 .configure = fib4_rule_configure, 274 .configure = fib4_rule_configure,
275 .delete = fib4_rule_delete,
259 .compare = fib4_rule_compare, 276 .compare = fib4_rule_compare,
260 .fill = fib4_rule_fill, 277 .fill = fib4_rule_fill,
261 .default_pref = fib_default_rule_pref, 278 .default_pref = fib_default_rule_pref,
@@ -295,6 +312,7 @@ int __net_init fib4_rules_init(struct net *net)
295 if (err < 0) 312 if (err < 0)
296 goto fail; 313 goto fail;
297 net->ipv4.rules_ops = ops; 314 net->ipv4.rules_ops = ops;
315 net->ipv4.fib_has_custom_rules = false;
298 return 0; 316 return 0;
299 317
300fail: 318fail:
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index e5b7182fa09..d71bfbdc0bf 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -163,6 +163,12 @@ void free_fib_info(struct fib_info *fi)
163 return; 163 return;
164 } 164 }
165 fib_info_cnt--; 165 fib_info_cnt--;
166#ifdef CONFIG_IP_ROUTE_CLASSID
167 change_nexthops(fi) {
168 if (nexthop_nh->nh_tclassid)
169 fi->fib_net->ipv4.fib_num_tclassid_users--;
170 } endfor_nexthops(fi);
171#endif
166 call_rcu(&fi->rcu, free_fib_info_rcu); 172 call_rcu(&fi->rcu, free_fib_info_rcu);
167} 173}
168 174
@@ -421,6 +427,8 @@ static int fib_get_nhs(struct fib_info *fi, struct rtnexthop *rtnh,
421#ifdef CONFIG_IP_ROUTE_CLASSID 427#ifdef CONFIG_IP_ROUTE_CLASSID
422 nla = nla_find(attrs, attrlen, RTA_FLOW); 428 nla = nla_find(attrs, attrlen, RTA_FLOW);
423 nexthop_nh->nh_tclassid = nla ? nla_get_u32(nla) : 0; 429 nexthop_nh->nh_tclassid = nla ? nla_get_u32(nla) : 0;
430 if (nexthop_nh->nh_tclassid)
431 fi->fib_net->ipv4.fib_num_tclassid_users++;
424#endif 432#endif
425 } 433 }
426 434
@@ -779,9 +787,16 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
779 int type = nla_type(nla); 787 int type = nla_type(nla);
780 788
781 if (type) { 789 if (type) {
790 u32 val;
791
782 if (type > RTAX_MAX) 792 if (type > RTAX_MAX)
783 goto err_inval; 793 goto err_inval;
784 fi->fib_metrics[type - 1] = nla_get_u32(nla); 794 val = nla_get_u32(nla);
795 if (type == RTAX_ADVMSS && val > 65535 - 40)
796 val = 65535 - 40;
797 if (type == RTAX_MTU && val > 65535 - 15)
798 val = 65535 - 15;
799 fi->fib_metrics[type - 1] = val;
785 } 800 }
786 } 801 }
787 } 802 }
@@ -810,6 +825,8 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
810 nh->nh_flags = cfg->fc_flags; 825 nh->nh_flags = cfg->fc_flags;
811#ifdef CONFIG_IP_ROUTE_CLASSID 826#ifdef CONFIG_IP_ROUTE_CLASSID
812 nh->nh_tclassid = cfg->fc_flow; 827 nh->nh_tclassid = cfg->fc_flow;
828 if (nh->nh_tclassid)
829 fi->fib_net->ipv4.fib_num_tclassid_users++;
813#endif 830#endif
814#ifdef CONFIG_IP_ROUTE_MULTIPATH 831#ifdef CONFIG_IP_ROUTE_MULTIPATH
815 nh->nh_weight = 1; 832 nh->nh_weight = 1;
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 30b88d7b4bd..9b0f25930fb 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -1007,9 +1007,9 @@ static void trie_rebalance(struct trie *t, struct tnode *tn)
1007 while (tn != NULL && (tp = node_parent((struct rt_trie_node *)tn)) != NULL) { 1007 while (tn != NULL && (tp = node_parent((struct rt_trie_node *)tn)) != NULL) {
1008 cindex = tkey_extract_bits(key, tp->pos, tp->bits); 1008 cindex = tkey_extract_bits(key, tp->pos, tp->bits);
1009 wasfull = tnode_full(tp, tnode_get_child(tp, cindex)); 1009 wasfull = tnode_full(tp, tnode_get_child(tp, cindex));
1010 tn = (struct tnode *) resize(t, (struct tnode *)tn); 1010 tn = (struct tnode *)resize(t, tn);
1011 1011
1012 tnode_put_child_reorg((struct tnode *)tp, cindex, 1012 tnode_put_child_reorg(tp, cindex,
1013 (struct rt_trie_node *)tn, wasfull); 1013 (struct rt_trie_node *)tn, wasfull);
1014 1014
1015 tp = node_parent((struct rt_trie_node *) tn); 1015 tp = node_parent((struct rt_trie_node *) tn);
@@ -1024,7 +1024,7 @@ static void trie_rebalance(struct trie *t, struct tnode *tn)
1024 1024
1025 /* Handle last (top) tnode */ 1025 /* Handle last (top) tnode */
1026 if (IS_TNODE(tn)) 1026 if (IS_TNODE(tn))
1027 tn = (struct tnode *)resize(t, (struct tnode *)tn); 1027 tn = (struct tnode *)resize(t, tn);
1028 1028
1029 rcu_assign_pointer(t->trie, (struct rt_trie_node *)tn); 1029 rcu_assign_pointer(t->trie, (struct rt_trie_node *)tn);
1030 tnode_free_flush(); 1030 tnode_free_flush();
@@ -1125,7 +1125,7 @@ static struct list_head *fib_insert_node(struct trie *t, u32 key, int plen)
1125 node_set_parent((struct rt_trie_node *)l, tp); 1125 node_set_parent((struct rt_trie_node *)l, tp);
1126 1126
1127 cindex = tkey_extract_bits(key, tp->pos, tp->bits); 1127 cindex = tkey_extract_bits(key, tp->pos, tp->bits);
1128 put_child(t, (struct tnode *)tp, cindex, (struct rt_trie_node *)l); 1128 put_child(t, tp, cindex, (struct rt_trie_node *)l);
1129 } else { 1129 } else {
1130 /* Case 3: n is a LEAF or a TNODE and the key doesn't match. */ 1130 /* Case 3: n is a LEAF or a TNODE and the key doesn't match. */
1131 /* 1131 /*
@@ -1160,8 +1160,7 @@ static struct list_head *fib_insert_node(struct trie *t, u32 key, int plen)
1160 1160
1161 if (tp) { 1161 if (tp) {
1162 cindex = tkey_extract_bits(key, tp->pos, tp->bits); 1162 cindex = tkey_extract_bits(key, tp->pos, tp->bits);
1163 put_child(t, (struct tnode *)tp, cindex, 1163 put_child(t, tp, cindex, (struct rt_trie_node *)tn);
1164 (struct rt_trie_node *)tn);
1165 } else { 1164 } else {
1166 rcu_assign_pointer(t->trie, (struct rt_trie_node *)tn); 1165 rcu_assign_pointer(t->trie, (struct rt_trie_node *)tn);
1167 tp = tn; 1166 tp = tn;
@@ -1620,7 +1619,7 @@ static void trie_leaf_remove(struct trie *t, struct leaf *l)
1620 1619
1621 if (tp) { 1620 if (tp) {
1622 t_key cindex = tkey_extract_bits(l->key, tp->pos, tp->bits); 1621 t_key cindex = tkey_extract_bits(l->key, tp->pos, tp->bits);
1623 put_child(t, (struct tnode *)tp, cindex, NULL); 1622 put_child(t, tp, cindex, NULL);
1624 trie_rebalance(t, tp); 1623 trie_rebalance(t, tp);
1625 } else 1624 } else
1626 RCU_INIT_POINTER(t->trie, NULL); 1625 RCU_INIT_POINTER(t->trie, NULL);
@@ -1844,6 +1843,8 @@ int fib_table_flush(struct fib_table *tb)
1844 if (ll && hlist_empty(&ll->list)) 1843 if (ll && hlist_empty(&ll->list))
1845 trie_leaf_remove(t, ll); 1844 trie_leaf_remove(t, ll);
1846 1845
1846 inetpeer_invalidate_tree(&tb->tb_peers);
1847
1847 pr_debug("trie_flush found=%d\n", found); 1848 pr_debug("trie_flush found=%d\n", found);
1848 return found; 1849 return found;
1849} 1850}
@@ -1992,6 +1993,7 @@ struct fib_table *fib_trie_table(u32 id)
1992 tb->tb_id = id; 1993 tb->tb_id = id;
1993 tb->tb_default = -1; 1994 tb->tb_default = -1;
1994 tb->tb_num_default = 0; 1995 tb->tb_num_default = 0;
1996 inet_peer_base_init(&tb->tb_peers);
1995 1997
1996 t = (struct trie *) tb->tb_data; 1998 t = (struct trie *) tb->tb_data;
1997 memset(t, 0, sizeof(*t)); 1999 memset(t, 0, sizeof(*t));
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index c75efbdc71c..4a049449305 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -95,6 +95,7 @@
95#include <net/checksum.h> 95#include <net/checksum.h>
96#include <net/xfrm.h> 96#include <net/xfrm.h>
97#include <net/inet_common.h> 97#include <net/inet_common.h>
98#include <net/ip_fib.h>
98 99
99/* 100/*
100 * Build xmit assembly blocks 101 * Build xmit assembly blocks
@@ -253,10 +254,10 @@ static inline bool icmpv4_xrlim_allow(struct net *net, struct rtable *rt,
253 254
254 /* Limit if icmp type is enabled in ratemask. */ 255 /* Limit if icmp type is enabled in ratemask. */
255 if ((1 << type) & net->ipv4.sysctl_icmp_ratemask) { 256 if ((1 << type) & net->ipv4.sysctl_icmp_ratemask) {
256 if (!rt->peer) 257 struct inet_peer *peer = inet_getpeer_v4(net->ipv4.peers, fl4->daddr, 1);
257 rt_bind_peer(rt, fl4->daddr, 1); 258 rc = inet_peer_xrlim_allow(peer,
258 rc = inet_peer_xrlim_allow(rt->peer,
259 net->ipv4.sysctl_icmp_ratelimit); 259 net->ipv4.sysctl_icmp_ratelimit);
260 inet_putpeer(peer);
260 } 261 }
261out: 262out:
262 return rc; 263 return rc;
@@ -334,7 +335,7 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
334 struct flowi4 fl4; 335 struct flowi4 fl4;
335 struct sock *sk; 336 struct sock *sk;
336 struct inet_sock *inet; 337 struct inet_sock *inet;
337 __be32 daddr; 338 __be32 daddr, saddr;
338 339
339 if (ip_options_echo(&icmp_param->replyopts.opt.opt, skb)) 340 if (ip_options_echo(&icmp_param->replyopts.opt.opt, skb))
340 return; 341 return;
@@ -348,6 +349,7 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
348 349
349 inet->tos = ip_hdr(skb)->tos; 350 inet->tos = ip_hdr(skb)->tos;
350 daddr = ipc.addr = ip_hdr(skb)->saddr; 351 daddr = ipc.addr = ip_hdr(skb)->saddr;
352 saddr = fib_compute_spec_dst(skb);
351 ipc.opt = NULL; 353 ipc.opt = NULL;
352 ipc.tx_flags = 0; 354 ipc.tx_flags = 0;
353 if (icmp_param->replyopts.opt.opt.optlen) { 355 if (icmp_param->replyopts.opt.opt.optlen) {
@@ -357,7 +359,7 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
357 } 359 }
358 memset(&fl4, 0, sizeof(fl4)); 360 memset(&fl4, 0, sizeof(fl4));
359 fl4.daddr = daddr; 361 fl4.daddr = daddr;
360 fl4.saddr = rt->rt_spec_dst; 362 fl4.saddr = saddr;
361 fl4.flowi4_tos = RT_TOS(ip_hdr(skb)->tos); 363 fl4.flowi4_tos = RT_TOS(ip_hdr(skb)->tos);
362 fl4.flowi4_proto = IPPROTO_ICMP; 364 fl4.flowi4_proto = IPPROTO_ICMP;
363 security_skb_classify_flow(skb, flowi4_to_flowi(&fl4)); 365 security_skb_classify_flow(skb, flowi4_to_flowi(&fl4));
@@ -638,12 +640,12 @@ EXPORT_SYMBOL(icmp_send);
638 640
639static void icmp_unreach(struct sk_buff *skb) 641static void icmp_unreach(struct sk_buff *skb)
640{ 642{
643 const struct net_protocol *ipprot;
641 const struct iphdr *iph; 644 const struct iphdr *iph;
642 struct icmphdr *icmph; 645 struct icmphdr *icmph;
643 int hash, protocol;
644 const struct net_protocol *ipprot;
645 u32 info = 0;
646 struct net *net; 646 struct net *net;
647 u32 info = 0;
648 int protocol;
647 649
648 net = dev_net(skb_dst(skb)->dev); 650 net = dev_net(skb_dst(skb)->dev);
649 651
@@ -674,9 +676,7 @@ static void icmp_unreach(struct sk_buff *skb)
674 LIMIT_NETDEBUG(KERN_INFO pr_fmt("%pI4: fragmentation needed and DF set\n"), 676 LIMIT_NETDEBUG(KERN_INFO pr_fmt("%pI4: fragmentation needed and DF set\n"),
675 &iph->daddr); 677 &iph->daddr);
676 } else { 678 } else {
677 info = ip_rt_frag_needed(net, iph, 679 info = ntohs(icmph->un.frag.mtu);
678 ntohs(icmph->un.frag.mtu),
679 skb->dev);
680 if (!info) 680 if (!info)
681 goto out; 681 goto out;
682 } 682 }
@@ -734,9 +734,8 @@ static void icmp_unreach(struct sk_buff *skb)
734 */ 734 */
735 raw_icmp_error(skb, protocol, info); 735 raw_icmp_error(skb, protocol, info);
736 736
737 hash = protocol & (MAX_INET_PROTOS - 1);
738 rcu_read_lock(); 737 rcu_read_lock();
739 ipprot = rcu_dereference(inet_protos[hash]); 738 ipprot = rcu_dereference(inet_protos[protocol]);
740 if (ipprot && ipprot->err_handler) 739 if (ipprot && ipprot->err_handler)
741 ipprot->err_handler(skb, info); 740 ipprot->err_handler(skb, info);
742 rcu_read_unlock(); 741 rcu_read_unlock();
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index f9ee7417f6a..76825be3b64 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -368,17 +368,21 @@ EXPORT_SYMBOL(inet_csk_reset_keepalive_timer);
368 368
369struct dst_entry *inet_csk_route_req(struct sock *sk, 369struct dst_entry *inet_csk_route_req(struct sock *sk,
370 struct flowi4 *fl4, 370 struct flowi4 *fl4,
371 const struct request_sock *req) 371 const struct request_sock *req,
372 bool nocache)
372{ 373{
373 struct rtable *rt; 374 struct rtable *rt;
374 const struct inet_request_sock *ireq = inet_rsk(req); 375 const struct inet_request_sock *ireq = inet_rsk(req);
375 struct ip_options_rcu *opt = inet_rsk(req)->opt; 376 struct ip_options_rcu *opt = inet_rsk(req)->opt;
376 struct net *net = sock_net(sk); 377 struct net *net = sock_net(sk);
378 int flags = inet_sk_flowi_flags(sk);
377 379
380 if (nocache)
381 flags |= FLOWI_FLAG_RT_NOCACHE;
378 flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark, 382 flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark,
379 RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE, 383 RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
380 sk->sk_protocol, 384 sk->sk_protocol,
381 inet_sk_flowi_flags(sk) & ~FLOWI_FLAG_PRECOW_METRICS, 385 flags,
382 (opt && opt->opt.srr) ? opt->opt.faddr : ireq->rmt_addr, 386 (opt && opt->opt.srr) ? opt->opt.faddr : ireq->rmt_addr,
383 ireq->loc_addr, ireq->rmt_port, inet_sk(sk)->inet_sport); 387 ireq->loc_addr, ireq->rmt_port, inet_sk(sk)->inet_sport);
384 security_req_classify_flow(req, flowi4_to_flowi(fl4)); 388 security_req_classify_flow(req, flowi4_to_flowi(fl4));
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index 46d1e7199a8..38064a285cc 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -46,9 +46,6 @@ struct inet_diag_entry {
46 u16 userlocks; 46 u16 userlocks;
47}; 47};
48 48
49#define INET_DIAG_PUT(skb, attrtype, attrlen) \
50 RTA_DATA(__RTA_PUT(skb, attrtype, attrlen))
51
52static DEFINE_MUTEX(inet_diag_table_mutex); 49static DEFINE_MUTEX(inet_diag_table_mutex);
53 50
54static const struct inet_diag_handler *inet_diag_lock_handler(int proto) 51static const struct inet_diag_handler *inet_diag_lock_handler(int proto)
@@ -78,24 +75,22 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
78 const struct inet_sock *inet = inet_sk(sk); 75 const struct inet_sock *inet = inet_sk(sk);
79 struct inet_diag_msg *r; 76 struct inet_diag_msg *r;
80 struct nlmsghdr *nlh; 77 struct nlmsghdr *nlh;
78 struct nlattr *attr;
81 void *info = NULL; 79 void *info = NULL;
82 struct inet_diag_meminfo *minfo = NULL;
83 unsigned char *b = skb_tail_pointer(skb);
84 const struct inet_diag_handler *handler; 80 const struct inet_diag_handler *handler;
85 int ext = req->idiag_ext; 81 int ext = req->idiag_ext;
86 82
87 handler = inet_diag_table[req->sdiag_protocol]; 83 handler = inet_diag_table[req->sdiag_protocol];
88 BUG_ON(handler == NULL); 84 BUG_ON(handler == NULL);
89 85
90 nlh = NLMSG_PUT(skb, pid, seq, unlh->nlmsg_type, sizeof(*r)); 86 nlh = nlmsg_put(skb, pid, seq, unlh->nlmsg_type, sizeof(*r),
91 nlh->nlmsg_flags = nlmsg_flags; 87 nlmsg_flags);
88 if (!nlh)
89 return -EMSGSIZE;
92 90
93 r = NLMSG_DATA(nlh); 91 r = nlmsg_data(nlh);
94 BUG_ON(sk->sk_state == TCP_TIME_WAIT); 92 BUG_ON(sk->sk_state == TCP_TIME_WAIT);
95 93
96 if (ext & (1 << (INET_DIAG_MEMINFO - 1)))
97 minfo = INET_DIAG_PUT(skb, INET_DIAG_MEMINFO, sizeof(*minfo));
98
99 r->idiag_family = sk->sk_family; 94 r->idiag_family = sk->sk_family;
100 r->idiag_state = sk->sk_state; 95 r->idiag_state = sk->sk_state;
101 r->idiag_timer = 0; 96 r->idiag_timer = 0;
@@ -113,7 +108,8 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
113 * hence this needs to be included regardless of socket family. 108 * hence this needs to be included regardless of socket family.
114 */ 109 */
115 if (ext & (1 << (INET_DIAG_TOS - 1))) 110 if (ext & (1 << (INET_DIAG_TOS - 1)))
116 RTA_PUT_U8(skb, INET_DIAG_TOS, inet->tos); 111 if (nla_put_u8(skb, INET_DIAG_TOS, inet->tos) < 0)
112 goto errout;
117 113
118#if IS_ENABLED(CONFIG_IPV6) 114#if IS_ENABLED(CONFIG_IPV6)
119 if (r->idiag_family == AF_INET6) { 115 if (r->idiag_family == AF_INET6) {
@@ -121,24 +117,31 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
121 117
122 *(struct in6_addr *)r->id.idiag_src = np->rcv_saddr; 118 *(struct in6_addr *)r->id.idiag_src = np->rcv_saddr;
123 *(struct in6_addr *)r->id.idiag_dst = np->daddr; 119 *(struct in6_addr *)r->id.idiag_dst = np->daddr;
120
124 if (ext & (1 << (INET_DIAG_TCLASS - 1))) 121 if (ext & (1 << (INET_DIAG_TCLASS - 1)))
125 RTA_PUT_U8(skb, INET_DIAG_TCLASS, np->tclass); 122 if (nla_put_u8(skb, INET_DIAG_TCLASS, np->tclass) < 0)
123 goto errout;
126 } 124 }
127#endif 125#endif
128 126
129 r->idiag_uid = sock_i_uid(sk); 127 r->idiag_uid = sock_i_uid(sk);
130 r->idiag_inode = sock_i_ino(sk); 128 r->idiag_inode = sock_i_ino(sk);
131 129
132 if (minfo) { 130 if (ext & (1 << (INET_DIAG_MEMINFO - 1))) {
133 minfo->idiag_rmem = sk_rmem_alloc_get(sk); 131 struct inet_diag_meminfo minfo = {
134 minfo->idiag_wmem = sk->sk_wmem_queued; 132 .idiag_rmem = sk_rmem_alloc_get(sk),
135 minfo->idiag_fmem = sk->sk_forward_alloc; 133 .idiag_wmem = sk->sk_wmem_queued,
136 minfo->idiag_tmem = sk_wmem_alloc_get(sk); 134 .idiag_fmem = sk->sk_forward_alloc,
135 .idiag_tmem = sk_wmem_alloc_get(sk),
136 };
137
138 if (nla_put(skb, INET_DIAG_MEMINFO, sizeof(minfo), &minfo) < 0)
139 goto errout;
137 } 140 }
138 141
139 if (ext & (1 << (INET_DIAG_SKMEMINFO - 1))) 142 if (ext & (1 << (INET_DIAG_SKMEMINFO - 1)))
140 if (sock_diag_put_meminfo(sk, skb, INET_DIAG_SKMEMINFO)) 143 if (sock_diag_put_meminfo(sk, skb, INET_DIAG_SKMEMINFO))
141 goto rtattr_failure; 144 goto errout;
142 145
143 if (icsk == NULL) { 146 if (icsk == NULL) {
144 handler->idiag_get_info(sk, r, NULL); 147 handler->idiag_get_info(sk, r, NULL);
@@ -165,16 +168,20 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
165 } 168 }
166#undef EXPIRES_IN_MS 169#undef EXPIRES_IN_MS
167 170
168 if (ext & (1 << (INET_DIAG_INFO - 1))) 171 if (ext & (1 << (INET_DIAG_INFO - 1))) {
169 info = INET_DIAG_PUT(skb, INET_DIAG_INFO, sizeof(struct tcp_info)); 172 attr = nla_reserve(skb, INET_DIAG_INFO,
173 sizeof(struct tcp_info));
174 if (!attr)
175 goto errout;
170 176
171 if ((ext & (1 << (INET_DIAG_CONG - 1))) && icsk->icsk_ca_ops) { 177 info = nla_data(attr);
172 const size_t len = strlen(icsk->icsk_ca_ops->name);
173
174 strcpy(INET_DIAG_PUT(skb, INET_DIAG_CONG, len + 1),
175 icsk->icsk_ca_ops->name);
176 } 178 }
177 179
180 if ((ext & (1 << (INET_DIAG_CONG - 1))) && icsk->icsk_ca_ops)
181 if (nla_put_string(skb, INET_DIAG_CONG,
182 icsk->icsk_ca_ops->name) < 0)
183 goto errout;
184
178 handler->idiag_get_info(sk, r, info); 185 handler->idiag_get_info(sk, r, info);
179 186
180 if (sk->sk_state < TCP_TIME_WAIT && 187 if (sk->sk_state < TCP_TIME_WAIT &&
@@ -182,12 +189,10 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
182 icsk->icsk_ca_ops->get_info(sk, ext, skb); 189 icsk->icsk_ca_ops->get_info(sk, ext, skb);
183 190
184out: 191out:
185 nlh->nlmsg_len = skb_tail_pointer(skb) - b; 192 return nlmsg_end(skb, nlh);
186 return skb->len;
187 193
188rtattr_failure: 194errout:
189nlmsg_failure: 195 nlmsg_cancel(skb, nlh);
190 nlmsg_trim(skb, b);
191 return -EMSGSIZE; 196 return -EMSGSIZE;
192} 197}
193EXPORT_SYMBOL_GPL(inet_sk_diag_fill); 198EXPORT_SYMBOL_GPL(inet_sk_diag_fill);
@@ -208,14 +213,15 @@ static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
208{ 213{
209 long tmo; 214 long tmo;
210 struct inet_diag_msg *r; 215 struct inet_diag_msg *r;
211 const unsigned char *previous_tail = skb_tail_pointer(skb); 216 struct nlmsghdr *nlh;
212 struct nlmsghdr *nlh = NLMSG_PUT(skb, pid, seq,
213 unlh->nlmsg_type, sizeof(*r));
214 217
215 r = NLMSG_DATA(nlh); 218 nlh = nlmsg_put(skb, pid, seq, unlh->nlmsg_type, sizeof(*r),
216 BUG_ON(tw->tw_state != TCP_TIME_WAIT); 219 nlmsg_flags);
220 if (!nlh)
221 return -EMSGSIZE;
217 222
218 nlh->nlmsg_flags = nlmsg_flags; 223 r = nlmsg_data(nlh);
224 BUG_ON(tw->tw_state != TCP_TIME_WAIT);
219 225
220 tmo = tw->tw_ttd - jiffies; 226 tmo = tw->tw_ttd - jiffies;
221 if (tmo < 0) 227 if (tmo < 0)
@@ -245,11 +251,8 @@ static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
245 *(struct in6_addr *)r->id.idiag_dst = tw6->tw_v6_daddr; 251 *(struct in6_addr *)r->id.idiag_dst = tw6->tw_v6_daddr;
246 } 252 }
247#endif 253#endif
248 nlh->nlmsg_len = skb_tail_pointer(skb) - previous_tail; 254
249 return skb->len; 255 return nlmsg_end(skb, nlh);
250nlmsg_failure:
251 nlmsg_trim(skb, previous_tail);
252 return -EMSGSIZE;
253} 256}
254 257
255static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, 258static int sk_diag_fill(struct sock *sk, struct sk_buff *skb,
@@ -298,20 +301,20 @@ int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *in_s
298 if (err) 301 if (err)
299 goto out; 302 goto out;
300 303
301 err = -ENOMEM; 304 rep = nlmsg_new(sizeof(struct inet_diag_msg) +
302 rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) + 305 sizeof(struct inet_diag_meminfo) +
303 sizeof(struct inet_diag_meminfo) + 306 sizeof(struct tcp_info) + 64, GFP_KERNEL);
304 sizeof(struct tcp_info) + 64)), 307 if (!rep) {
305 GFP_KERNEL); 308 err = -ENOMEM;
306 if (!rep)
307 goto out; 309 goto out;
310 }
308 311
309 err = sk_diag_fill(sk, rep, req, 312 err = sk_diag_fill(sk, rep, req,
310 NETLINK_CB(in_skb).pid, 313 NETLINK_CB(in_skb).pid,
311 nlh->nlmsg_seq, 0, nlh); 314 nlh->nlmsg_seq, 0, nlh);
312 if (err < 0) { 315 if (err < 0) {
313 WARN_ON(err == -EMSGSIZE); 316 WARN_ON(err == -EMSGSIZE);
314 kfree_skb(rep); 317 nlmsg_free(rep);
315 goto out; 318 goto out;
316 } 319 }
317 err = netlink_unicast(sock_diag_nlsk, rep, NETLINK_CB(in_skb).pid, 320 err = netlink_unicast(sock_diag_nlsk, rep, NETLINK_CB(in_skb).pid,
@@ -592,15 +595,16 @@ static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
592{ 595{
593 const struct inet_request_sock *ireq = inet_rsk(req); 596 const struct inet_request_sock *ireq = inet_rsk(req);
594 struct inet_sock *inet = inet_sk(sk); 597 struct inet_sock *inet = inet_sk(sk);
595 unsigned char *b = skb_tail_pointer(skb);
596 struct inet_diag_msg *r; 598 struct inet_diag_msg *r;
597 struct nlmsghdr *nlh; 599 struct nlmsghdr *nlh;
598 long tmo; 600 long tmo;
599 601
600 nlh = NLMSG_PUT(skb, pid, seq, unlh->nlmsg_type, sizeof(*r)); 602 nlh = nlmsg_put(skb, pid, seq, unlh->nlmsg_type, sizeof(*r),
601 nlh->nlmsg_flags = NLM_F_MULTI; 603 NLM_F_MULTI);
602 r = NLMSG_DATA(nlh); 604 if (!nlh)
605 return -EMSGSIZE;
603 606
607 r = nlmsg_data(nlh);
604 r->idiag_family = sk->sk_family; 608 r->idiag_family = sk->sk_family;
605 r->idiag_state = TCP_SYN_RECV; 609 r->idiag_state = TCP_SYN_RECV;
606 r->idiag_timer = 1; 610 r->idiag_timer = 1;
@@ -628,13 +632,8 @@ static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
628 *(struct in6_addr *)r->id.idiag_dst = inet6_rsk(req)->rmt_addr; 632 *(struct in6_addr *)r->id.idiag_dst = inet6_rsk(req)->rmt_addr;
629 } 633 }
630#endif 634#endif
631 nlh->nlmsg_len = skb_tail_pointer(skb) - b;
632
633 return skb->len;
634 635
635nlmsg_failure: 636 return nlmsg_end(skb, nlh);
636 nlmsg_trim(skb, b);
637 return -1;
638} 637}
639 638
640static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk, 639static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk,
@@ -892,7 +891,7 @@ static int inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
892 if (nlmsg_attrlen(cb->nlh, hdrlen)) 891 if (nlmsg_attrlen(cb->nlh, hdrlen))
893 bc = nlmsg_find_attr(cb->nlh, hdrlen, INET_DIAG_REQ_BYTECODE); 892 bc = nlmsg_find_attr(cb->nlh, hdrlen, INET_DIAG_REQ_BYTECODE);
894 893
895 return __inet_diag_dump(skb, cb, (struct inet_diag_req_v2 *)NLMSG_DATA(cb->nlh), bc); 894 return __inet_diag_dump(skb, cb, nlmsg_data(cb->nlh), bc);
896} 895}
897 896
898static inline int inet_diag_type2proto(int type) 897static inline int inet_diag_type2proto(int type)
@@ -909,7 +908,7 @@ static inline int inet_diag_type2proto(int type)
909 908
910static int inet_diag_dump_compat(struct sk_buff *skb, struct netlink_callback *cb) 909static int inet_diag_dump_compat(struct sk_buff *skb, struct netlink_callback *cb)
911{ 910{
912 struct inet_diag_req *rc = NLMSG_DATA(cb->nlh); 911 struct inet_diag_req *rc = nlmsg_data(cb->nlh);
913 struct inet_diag_req_v2 req; 912 struct inet_diag_req_v2 req;
914 struct nlattr *bc = NULL; 913 struct nlattr *bc = NULL;
915 int hdrlen = sizeof(struct inet_diag_req); 914 int hdrlen = sizeof(struct inet_diag_req);
@@ -929,7 +928,7 @@ static int inet_diag_dump_compat(struct sk_buff *skb, struct netlink_callback *c
929static int inet_diag_get_exact_compat(struct sk_buff *in_skb, 928static int inet_diag_get_exact_compat(struct sk_buff *in_skb,
930 const struct nlmsghdr *nlh) 929 const struct nlmsghdr *nlh)
931{ 930{
932 struct inet_diag_req *rc = NLMSG_DATA(nlh); 931 struct inet_diag_req *rc = nlmsg_data(nlh);
933 struct inet_diag_req_v2 req; 932 struct inet_diag_req_v2 req;
934 933
935 req.sdiag_family = rc->idiag_family; 934 req.sdiag_family = rc->idiag_family;
@@ -996,7 +995,7 @@ static int inet_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
996 } 995 }
997 } 996 }
998 997
999 return inet_diag_get_exact(skb, h, (struct inet_diag_req_v2 *)NLMSG_DATA(h)); 998 return inet_diag_get_exact(skb, h, nlmsg_data(h));
1000} 999}
1001 1000
1002static const struct sock_diag_handler inet_diag_handler = { 1001static const struct sock_diag_handler inet_diag_handler = {
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
index 5ff2a51b6d0..85190e69297 100644
--- a/net/ipv4/inet_fragment.c
+++ b/net/ipv4/inet_fragment.c
@@ -243,12 +243,12 @@ static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf,
243 if (q == NULL) 243 if (q == NULL)
244 return NULL; 244 return NULL;
245 245
246 q->net = nf;
246 f->constructor(q, arg); 247 f->constructor(q, arg);
247 atomic_add(f->qsize, &nf->mem); 248 atomic_add(f->qsize, &nf->mem);
248 setup_timer(&q->timer, f->frag_expire, (unsigned long)q); 249 setup_timer(&q->timer, f->frag_expire, (unsigned long)q);
249 spin_lock_init(&q->lock); 250 spin_lock_init(&q->lock);
250 atomic_set(&q->refcnt, 1); 251 atomic_set(&q->refcnt, 1);
251 q->net = nf;
252 252
253 return q; 253 return q;
254} 254}
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index dfba343b250..e1e0a4e8fd3 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -82,23 +82,39 @@ static const struct inet_peer peer_fake_node = {
82 .avl_height = 0 82 .avl_height = 0
83}; 83};
84 84
85struct inet_peer_base { 85void inet_peer_base_init(struct inet_peer_base *bp)
86 struct inet_peer __rcu *root; 86{
87 seqlock_t lock; 87 bp->root = peer_avl_empty_rcu;
88 int total; 88 seqlock_init(&bp->lock);
89}; 89 bp->flush_seq = ~0U;
90 bp->total = 0;
91}
92EXPORT_SYMBOL_GPL(inet_peer_base_init);
90 93
91static struct inet_peer_base v4_peers = { 94static atomic_t v4_seq = ATOMIC_INIT(0);
92 .root = peer_avl_empty_rcu, 95static atomic_t v6_seq = ATOMIC_INIT(0);
93 .lock = __SEQLOCK_UNLOCKED(v4_peers.lock),
94 .total = 0,
95};
96 96
97static struct inet_peer_base v6_peers = { 97static atomic_t *inetpeer_seq_ptr(int family)
98 .root = peer_avl_empty_rcu, 98{
99 .lock = __SEQLOCK_UNLOCKED(v6_peers.lock), 99 return (family == AF_INET ? &v4_seq : &v6_seq);
100 .total = 0, 100}
101}; 101
102static inline void flush_check(struct inet_peer_base *base, int family)
103{
104 atomic_t *fp = inetpeer_seq_ptr(family);
105
106 if (unlikely(base->flush_seq != atomic_read(fp))) {
107 inetpeer_invalidate_tree(base);
108 base->flush_seq = atomic_read(fp);
109 }
110}
111
112void inetpeer_invalidate_family(int family)
113{
114 atomic_t *fp = inetpeer_seq_ptr(family);
115
116 atomic_inc(fp);
117}
102 118
103#define PEER_MAXDEPTH 40 /* sufficient for about 2^27 nodes */ 119#define PEER_MAXDEPTH 40 /* sufficient for about 2^27 nodes */
104 120
@@ -110,7 +126,7 @@ int inet_peer_maxttl __read_mostly = 10 * 60 * HZ; /* usual time to live: 10 min
110 126
111static void inetpeer_gc_worker(struct work_struct *work) 127static void inetpeer_gc_worker(struct work_struct *work)
112{ 128{
113 struct inet_peer *p, *n; 129 struct inet_peer *p, *n, *c;
114 LIST_HEAD(list); 130 LIST_HEAD(list);
115 131
116 spin_lock_bh(&gc_lock); 132 spin_lock_bh(&gc_lock);
@@ -122,17 +138,19 @@ static void inetpeer_gc_worker(struct work_struct *work)
122 138
123 list_for_each_entry_safe(p, n, &list, gc_list) { 139 list_for_each_entry_safe(p, n, &list, gc_list) {
124 140
125 if(need_resched()) 141 if (need_resched())
126 cond_resched(); 142 cond_resched();
127 143
128 if (p->avl_left != peer_avl_empty) { 144 c = rcu_dereference_protected(p->avl_left, 1);
129 list_add_tail(&p->avl_left->gc_list, &list); 145 if (c != peer_avl_empty) {
130 p->avl_left = peer_avl_empty; 146 list_add_tail(&c->gc_list, &list);
147 p->avl_left = peer_avl_empty_rcu;
131 } 148 }
132 149
133 if (p->avl_right != peer_avl_empty) { 150 c = rcu_dereference_protected(p->avl_right, 1);
134 list_add_tail(&p->avl_right->gc_list, &list); 151 if (c != peer_avl_empty) {
135 p->avl_right = peer_avl_empty; 152 list_add_tail(&c->gc_list, &list);
153 p->avl_right = peer_avl_empty_rcu;
136 } 154 }
137 155
138 n = list_entry(p->gc_list.next, struct inet_peer, gc_list); 156 n = list_entry(p->gc_list.next, struct inet_peer, gc_list);
@@ -401,11 +419,6 @@ static void unlink_from_pool(struct inet_peer *p, struct inet_peer_base *base,
401 call_rcu(&p->rcu, inetpeer_free_rcu); 419 call_rcu(&p->rcu, inetpeer_free_rcu);
402} 420}
403 421
404static struct inet_peer_base *family_to_base(int family)
405{
406 return family == AF_INET ? &v4_peers : &v6_peers;
407}
408
409/* perform garbage collect on all items stacked during a lookup */ 422/* perform garbage collect on all items stacked during a lookup */
410static int inet_peer_gc(struct inet_peer_base *base, 423static int inet_peer_gc(struct inet_peer_base *base,
411 struct inet_peer __rcu **stack[PEER_MAXDEPTH], 424 struct inet_peer __rcu **stack[PEER_MAXDEPTH],
@@ -443,14 +456,17 @@ static int inet_peer_gc(struct inet_peer_base *base,
443 return cnt; 456 return cnt;
444} 457}
445 458
446struct inet_peer *inet_getpeer(const struct inetpeer_addr *daddr, int create) 459struct inet_peer *inet_getpeer(struct inet_peer_base *base,
460 const struct inetpeer_addr *daddr,
461 int create)
447{ 462{
448 struct inet_peer __rcu **stack[PEER_MAXDEPTH], ***stackptr; 463 struct inet_peer __rcu **stack[PEER_MAXDEPTH], ***stackptr;
449 struct inet_peer_base *base = family_to_base(daddr->family);
450 struct inet_peer *p; 464 struct inet_peer *p;
451 unsigned int sequence; 465 unsigned int sequence;
452 int invalidated, gccnt = 0; 466 int invalidated, gccnt = 0;
453 467
468 flush_check(base, daddr->family);
469
454 /* Attempt a lockless lookup first. 470 /* Attempt a lockless lookup first.
455 * Because of a concurrent writer, we might not find an existing entry. 471 * Because of a concurrent writer, we might not find an existing entry.
456 */ 472 */
@@ -492,13 +508,9 @@ relookup:
492 (daddr->family == AF_INET) ? 508 (daddr->family == AF_INET) ?
493 secure_ip_id(daddr->addr.a4) : 509 secure_ip_id(daddr->addr.a4) :
494 secure_ipv6_id(daddr->addr.a6)); 510 secure_ipv6_id(daddr->addr.a6));
495 p->tcp_ts_stamp = 0;
496 p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW; 511 p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
497 p->rate_tokens = 0; 512 p->rate_tokens = 0;
498 p->rate_last = 0; 513 p->rate_last = 0;
499 p->pmtu_expires = 0;
500 p->pmtu_orig = 0;
501 memset(&p->redirect_learned, 0, sizeof(p->redirect_learned));
502 INIT_LIST_HEAD(&p->gc_list); 514 INIT_LIST_HEAD(&p->gc_list);
503 515
504 /* Link the node. */ 516 /* Link the node. */
@@ -571,26 +583,19 @@ static void inetpeer_inval_rcu(struct rcu_head *head)
571 schedule_delayed_work(&gc_work, gc_delay); 583 schedule_delayed_work(&gc_work, gc_delay);
572} 584}
573 585
574void inetpeer_invalidate_tree(int family) 586void inetpeer_invalidate_tree(struct inet_peer_base *base)
575{ 587{
576 struct inet_peer *old, *new, *prev; 588 struct inet_peer *root;
577 struct inet_peer_base *base = family_to_base(family);
578 589
579 write_seqlock_bh(&base->lock); 590 write_seqlock_bh(&base->lock);
580 591
581 old = base->root; 592 root = rcu_deref_locked(base->root, base);
582 if (old == peer_avl_empty_rcu) 593 if (root != peer_avl_empty) {
583 goto out; 594 base->root = peer_avl_empty_rcu;
584
585 new = peer_avl_empty_rcu;
586
587 prev = cmpxchg(&base->root, old, new);
588 if (prev == old) {
589 base->total = 0; 595 base->total = 0;
590 call_rcu(&prev->gc_rcu, inetpeer_inval_rcu); 596 call_rcu(&root->gc_rcu, inetpeer_inval_rcu);
591 } 597 }
592 598
593out:
594 write_sequnlock_bh(&base->lock); 599 write_sequnlock_bh(&base->lock);
595} 600}
596EXPORT_SYMBOL(inetpeer_invalidate_tree); 601EXPORT_SYMBOL(inetpeer_invalidate_tree);
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index 9dbd3dd6022..8d07c973409 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -171,6 +171,10 @@ static void frag_kfree_skb(struct netns_frags *nf, struct sk_buff *skb)
171static void ip4_frag_init(struct inet_frag_queue *q, void *a) 171static void ip4_frag_init(struct inet_frag_queue *q, void *a)
172{ 172{
173 struct ipq *qp = container_of(q, struct ipq, q); 173 struct ipq *qp = container_of(q, struct ipq, q);
174 struct netns_ipv4 *ipv4 = container_of(q->net, struct netns_ipv4,
175 frags);
176 struct net *net = container_of(ipv4, struct net, ipv4);
177
174 struct ip4_create_arg *arg = a; 178 struct ip4_create_arg *arg = a;
175 179
176 qp->protocol = arg->iph->protocol; 180 qp->protocol = arg->iph->protocol;
@@ -180,7 +184,7 @@ static void ip4_frag_init(struct inet_frag_queue *q, void *a)
180 qp->daddr = arg->iph->daddr; 184 qp->daddr = arg->iph->daddr;
181 qp->user = arg->user; 185 qp->user = arg->user;
182 qp->peer = sysctl_ipfrag_max_dist ? 186 qp->peer = sysctl_ipfrag_max_dist ?
183 inet_getpeer_v4(arg->iph->saddr, 1) : NULL; 187 inet_getpeer_v4(net->ipv4.peers, arg->iph->saddr, 1) : NULL;
184} 188}
185 189
186static __inline__ void ip4_frag_free(struct inet_frag_queue *q) 190static __inline__ void ip4_frag_free(struct inet_frag_queue *q)
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index f49047b7960..594cec35ac4 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -516,9 +516,6 @@ static void ipgre_err(struct sk_buff *skb, u32 info)
516 case ICMP_PORT_UNREACH: 516 case ICMP_PORT_UNREACH:
517 /* Impossible event. */ 517 /* Impossible event. */
518 return; 518 return;
519 case ICMP_FRAG_NEEDED:
520 /* Soft state for pmtu is maintained by IP core. */
521 return;
522 default: 519 default:
523 /* All others are translated to HOST_UNREACH. 520 /* All others are translated to HOST_UNREACH.
524 rfc2003 contains "deep thoughts" about NET_UNREACH, 521 rfc2003 contains "deep thoughts" about NET_UNREACH,
@@ -538,7 +535,16 @@ static void ipgre_err(struct sk_buff *skb, u32 info)
538 flags & GRE_KEY ? 535 flags & GRE_KEY ?
539 *(((__be32 *)p) + (grehlen / 4) - 1) : 0, 536 *(((__be32 *)p) + (grehlen / 4) - 1) : 0,
540 p[1]); 537 p[1]);
541 if (t == NULL || t->parms.iph.daddr == 0 || 538 if (t == NULL)
539 goto out;
540
541 if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
542 ipv4_update_pmtu(skb, dev_net(skb->dev), info,
543 t->parms.link, 0, IPPROTO_GRE, 0);
544 goto out;
545 }
546
547 if (t->parms.iph.daddr == 0 ||
542 ipv4_is_multicast(t->parms.iph.daddr)) 548 ipv4_is_multicast(t->parms.iph.daddr))
543 goto out; 549 goto out;
544 550
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index 8590144ca33..b27d4440f52 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -198,14 +198,13 @@ static int ip_local_deliver_finish(struct sk_buff *skb)
198 rcu_read_lock(); 198 rcu_read_lock();
199 { 199 {
200 int protocol = ip_hdr(skb)->protocol; 200 int protocol = ip_hdr(skb)->protocol;
201 int hash, raw;
202 const struct net_protocol *ipprot; 201 const struct net_protocol *ipprot;
202 int raw;
203 203
204 resubmit: 204 resubmit:
205 raw = raw_local_deliver(skb, protocol); 205 raw = raw_local_deliver(skb, protocol);
206 206
207 hash = protocol & (MAX_INET_PROTOS - 1); 207 ipprot = rcu_dereference(inet_protos[protocol]);
208 ipprot = rcu_dereference(inet_protos[hash]);
209 if (ipprot != NULL) { 208 if (ipprot != NULL) {
210 int ret; 209 int ret;
211 210
@@ -314,26 +313,33 @@ drop:
314 return true; 313 return true;
315} 314}
316 315
316int sysctl_ip_early_demux __read_mostly = 1;
317
317static int ip_rcv_finish(struct sk_buff *skb) 318static int ip_rcv_finish(struct sk_buff *skb)
318{ 319{
319 const struct iphdr *iph = ip_hdr(skb); 320 const struct iphdr *iph = ip_hdr(skb);
320 struct rtable *rt; 321 struct rtable *rt;
321 322
323 if (sysctl_ip_early_demux && !skb_dst(skb)) {
324 const struct net_protocol *ipprot;
325 int protocol = iph->protocol;
326
327 rcu_read_lock();
328 ipprot = rcu_dereference(inet_protos[protocol]);
329 if (ipprot && ipprot->early_demux)
330 ipprot->early_demux(skb);
331 rcu_read_unlock();
332 }
333
322 /* 334 /*
323 * Initialise the virtual path cache for the packet. It describes 335 * Initialise the virtual path cache for the packet. It describes
324 * how the packet travels inside Linux networking. 336 * how the packet travels inside Linux networking.
325 */ 337 */
326 if (skb_dst(skb) == NULL) { 338 if (!skb_dst(skb)) {
327 int err = ip_route_input_noref(skb, iph->daddr, iph->saddr, 339 int err = ip_route_input_noref(skb, iph->daddr, iph->saddr,
328 iph->tos, skb->dev); 340 iph->tos, skb->dev);
329 if (unlikely(err)) { 341 if (unlikely(err)) {
330 if (err == -EHOSTUNREACH) 342 if (err == -EXDEV)
331 IP_INC_STATS_BH(dev_net(skb->dev),
332 IPSTATS_MIB_INADDRERRORS);
333 else if (err == -ENETUNREACH)
334 IP_INC_STATS_BH(dev_net(skb->dev),
335 IPSTATS_MIB_INNOROUTES);
336 else if (err == -EXDEV)
337 NET_INC_STATS_BH(dev_net(skb->dev), 343 NET_INC_STATS_BH(dev_net(skb->dev),
338 LINUX_MIB_IPRPFILTER); 344 LINUX_MIB_IPRPFILTER);
339 goto drop; 345 goto drop;
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
index 708b99494e2..a19d6471a31 100644
--- a/net/ipv4/ip_options.c
+++ b/net/ipv4/ip_options.c
@@ -27,6 +27,7 @@
27#include <net/icmp.h> 27#include <net/icmp.h>
28#include <net/route.h> 28#include <net/route.h>
29#include <net/cipso_ipv4.h> 29#include <net/cipso_ipv4.h>
30#include <net/ip_fib.h>
30 31
31/* 32/*
32 * Write options to IP header, record destination address to 33 * Write options to IP header, record destination address to
@@ -104,7 +105,7 @@ int ip_options_echo(struct ip_options *dopt, struct sk_buff *skb)
104 sptr = skb_network_header(skb); 105 sptr = skb_network_header(skb);
105 dptr = dopt->__data; 106 dptr = dopt->__data;
106 107
107 daddr = skb_rtable(skb)->rt_spec_dst; 108 daddr = fib_compute_spec_dst(skb);
108 109
109 if (sopt->rr) { 110 if (sopt->rr) {
110 optlen = sptr[sopt->rr+1]; 111 optlen = sptr[sopt->rr+1];
@@ -241,6 +242,15 @@ void ip_options_fragment(struct sk_buff *skb)
241 opt->ts_needtime = 0; 242 opt->ts_needtime = 0;
242} 243}
243 244
245/* helper used by ip_options_compile() to call fib_compute_spec_dst()
246 * at most one time.
247 */
248static void spec_dst_fill(__be32 *spec_dst, struct sk_buff *skb)
249{
250 if (*spec_dst == htonl(INADDR_ANY))
251 *spec_dst = fib_compute_spec_dst(skb);
252}
253
244/* 254/*
245 * Verify options and fill pointers in struct options. 255 * Verify options and fill pointers in struct options.
246 * Caller should clear *opt, and set opt->data. 256 * Caller should clear *opt, and set opt->data.
@@ -250,12 +260,12 @@ void ip_options_fragment(struct sk_buff *skb)
250int ip_options_compile(struct net *net, 260int ip_options_compile(struct net *net,
251 struct ip_options *opt, struct sk_buff *skb) 261 struct ip_options *opt, struct sk_buff *skb)
252{ 262{
253 int l; 263 __be32 spec_dst = htonl(INADDR_ANY);
254 unsigned char *iph;
255 unsigned char *optptr;
256 int optlen;
257 unsigned char *pp_ptr = NULL; 264 unsigned char *pp_ptr = NULL;
258 struct rtable *rt = NULL; 265 struct rtable *rt = NULL;
266 unsigned char *optptr;
267 unsigned char *iph;
268 int optlen, l;
259 269
260 if (skb != NULL) { 270 if (skb != NULL) {
261 rt = skb_rtable(skb); 271 rt = skb_rtable(skb);
@@ -331,7 +341,8 @@ int ip_options_compile(struct net *net,
331 goto error; 341 goto error;
332 } 342 }
333 if (rt) { 343 if (rt) {
334 memcpy(&optptr[optptr[2]-1], &rt->rt_spec_dst, 4); 344 spec_dst_fill(&spec_dst, skb);
345 memcpy(&optptr[optptr[2]-1], &spec_dst, 4);
335 opt->is_changed = 1; 346 opt->is_changed = 1;
336 } 347 }
337 optptr[2] += 4; 348 optptr[2] += 4;
@@ -373,7 +384,8 @@ int ip_options_compile(struct net *net,
373 } 384 }
374 opt->ts = optptr - iph; 385 opt->ts = optptr - iph;
375 if (rt) { 386 if (rt) {
376 memcpy(&optptr[optptr[2]-1], &rt->rt_spec_dst, 4); 387 spec_dst_fill(&spec_dst, skb);
388 memcpy(&optptr[optptr[2]-1], &spec_dst, 4);
377 timeptr = &optptr[optptr[2]+3]; 389 timeptr = &optptr[optptr[2]+3];
378 } 390 }
379 opt->ts_needaddr = 1; 391 opt->ts_needaddr = 1;
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 451f97c42eb..cc52679790b 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -113,19 +113,6 @@ int ip_local_out(struct sk_buff *skb)
113} 113}
114EXPORT_SYMBOL_GPL(ip_local_out); 114EXPORT_SYMBOL_GPL(ip_local_out);
115 115
116/* dev_loopback_xmit for use with netfilter. */
117static int ip_dev_loopback_xmit(struct sk_buff *newskb)
118{
119 skb_reset_mac_header(newskb);
120 __skb_pull(newskb, skb_network_offset(newskb));
121 newskb->pkt_type = PACKET_LOOPBACK;
122 newskb->ip_summed = CHECKSUM_UNNECESSARY;
123 WARN_ON(!skb_dst(newskb));
124 skb_dst_force(newskb);
125 netif_rx_ni(newskb);
126 return 0;
127}
128
129static inline int ip_select_ttl(struct inet_sock *inet, struct dst_entry *dst) 116static inline int ip_select_ttl(struct inet_sock *inet, struct dst_entry *dst)
130{ 117{
131 int ttl = inet->uc_ttl; 118 int ttl = inet->uc_ttl;
@@ -183,6 +170,7 @@ static inline int ip_finish_output2(struct sk_buff *skb)
183 struct net_device *dev = dst->dev; 170 struct net_device *dev = dst->dev;
184 unsigned int hh_len = LL_RESERVED_SPACE(dev); 171 unsigned int hh_len = LL_RESERVED_SPACE(dev);
185 struct neighbour *neigh; 172 struct neighbour *neigh;
173 u32 nexthop;
186 174
187 if (rt->rt_type == RTN_MULTICAST) { 175 if (rt->rt_type == RTN_MULTICAST) {
188 IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUTMCAST, skb->len); 176 IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUTMCAST, skb->len);
@@ -200,19 +188,22 @@ static inline int ip_finish_output2(struct sk_buff *skb)
200 } 188 }
201 if (skb->sk) 189 if (skb->sk)
202 skb_set_owner_w(skb2, skb->sk); 190 skb_set_owner_w(skb2, skb->sk);
203 kfree_skb(skb); 191 consume_skb(skb);
204 skb = skb2; 192 skb = skb2;
205 } 193 }
206 194
207 rcu_read_lock(); 195 rcu_read_lock_bh();
208 neigh = dst_get_neighbour_noref(dst); 196 nexthop = rt->rt_gateway ? rt->rt_gateway : ip_hdr(skb)->daddr;
197 neigh = __ipv4_neigh_lookup_noref(dev, nexthop);
198 if (unlikely(!neigh))
199 neigh = __neigh_create(&arp_tbl, &nexthop, dev, false);
209 if (neigh) { 200 if (neigh) {
210 int res = neigh_output(neigh, skb); 201 int res = dst_neigh_output(dst, neigh, skb);
211 202
212 rcu_read_unlock(); 203 rcu_read_unlock_bh();
213 return res; 204 return res;
214 } 205 }
215 rcu_read_unlock(); 206 rcu_read_unlock_bh();
216 207
217 net_dbg_ratelimited("%s: No header cache and no neighbour!\n", 208 net_dbg_ratelimited("%s: No header cache and no neighbour!\n",
218 __func__); 209 __func__);
@@ -281,7 +272,7 @@ int ip_mc_output(struct sk_buff *skb)
281 if (newskb) 272 if (newskb)
282 NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING, 273 NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING,
283 newskb, NULL, newskb->dev, 274 newskb, NULL, newskb->dev,
284 ip_dev_loopback_xmit); 275 dev_loopback_xmit);
285 } 276 }
286 277
287 /* Multicasts with ttl 0 must not go beyond the host */ 278 /* Multicasts with ttl 0 must not go beyond the host */
@@ -296,7 +287,7 @@ int ip_mc_output(struct sk_buff *skb)
296 struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC); 287 struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
297 if (newskb) 288 if (newskb)
298 NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING, newskb, 289 NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING, newskb,
299 NULL, newskb->dev, ip_dev_loopback_xmit); 290 NULL, newskb->dev, dev_loopback_xmit);
300 } 291 }
301 292
302 return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, skb, NULL, 293 return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, skb, NULL,
@@ -709,7 +700,7 @@ slow_path:
709 700
710 IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGCREATES); 701 IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGCREATES);
711 } 702 }
712 kfree_skb(skb); 703 consume_skb(skb);
713 IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGOKS); 704 IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGOKS);
714 return err; 705 return err;
715 706
@@ -1472,13 +1463,14 @@ static int ip_reply_glue_bits(void *dptr, char *to, int offset,
1472 1463
1473/* 1464/*
1474 * Generic function to send a packet as reply to another packet. 1465 * Generic function to send a packet as reply to another packet.
1475 * Used to send TCP resets so far. ICMP should use this function too. 1466 * Used to send TCP resets so far.
1476 * 1467 *
1477 * Should run single threaded per socket because it uses the sock 1468 * Should run single threaded per socket because it uses the sock
1478 * structure to pass arguments. 1469 * structure to pass arguments.
1479 */ 1470 */
1480void ip_send_reply(struct sock *sk, struct sk_buff *skb, __be32 daddr, 1471void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb, __be32 daddr,
1481 const struct ip_reply_arg *arg, unsigned int len) 1472 __be32 saddr, const struct ip_reply_arg *arg,
1473 unsigned int len)
1482{ 1474{
1483 struct inet_sock *inet = inet_sk(sk); 1475 struct inet_sock *inet = inet_sk(sk);
1484 struct ip_options_data replyopts; 1476 struct ip_options_data replyopts;
@@ -1504,7 +1496,7 @@ void ip_send_reply(struct sock *sk, struct sk_buff *skb, __be32 daddr,
1504 RT_TOS(arg->tos), 1496 RT_TOS(arg->tos),
1505 RT_SCOPE_UNIVERSE, sk->sk_protocol, 1497 RT_SCOPE_UNIVERSE, sk->sk_protocol,
1506 ip_reply_arg_flowi_flags(arg), 1498 ip_reply_arg_flowi_flags(arg),
1507 daddr, rt->rt_spec_dst, 1499 daddr, saddr,
1508 tcp_hdr(skb)->source, tcp_hdr(skb)->dest); 1500 tcp_hdr(skb)->source, tcp_hdr(skb)->dest);
1509 security_skb_classify_flow(skb, flowi4_to_flowi(&fl4)); 1501 security_skb_classify_flow(skb, flowi4_to_flowi(&fl4));
1510 rt = ip_route_output_key(sock_net(sk), &fl4); 1502 rt = ip_route_output_key(sock_net(sk), &fl4);
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 0d11f234d61..de29f46f68b 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -40,6 +40,7 @@
40#if IS_ENABLED(CONFIG_IPV6) 40#if IS_ENABLED(CONFIG_IPV6)
41#include <net/transp_v6.h> 41#include <net/transp_v6.h>
42#endif 42#endif
43#include <net/ip_fib.h>
43 44
44#include <linux/errqueue.h> 45#include <linux/errqueue.h>
45#include <asm/uaccess.h> 46#include <asm/uaccess.h>
@@ -1019,8 +1020,8 @@ e_inval:
1019 * @sk: socket 1020 * @sk: socket
1020 * @skb: buffer 1021 * @skb: buffer
1021 * 1022 *
1022 * To support IP_CMSG_PKTINFO option, we store rt_iif and rt_spec_dst 1023 * To support IP_CMSG_PKTINFO option, we store rt_iif and specific
1023 * in skb->cb[] before dst drop. 1024 * destination in skb->cb[] before dst drop.
1024 * This way, receiver doesnt make cache line misses to read rtable. 1025 * This way, receiver doesnt make cache line misses to read rtable.
1025 */ 1026 */
1026void ipv4_pktinfo_prepare(struct sk_buff *skb) 1027void ipv4_pktinfo_prepare(struct sk_buff *skb)
@@ -1030,7 +1031,7 @@ void ipv4_pktinfo_prepare(struct sk_buff *skb)
1030 1031
1031 if (rt) { 1032 if (rt) {
1032 pktinfo->ipi_ifindex = rt->rt_iif; 1033 pktinfo->ipi_ifindex = rt->rt_iif;
1033 pktinfo->ipi_spec_dst.s_addr = rt->rt_spec_dst; 1034 pktinfo->ipi_spec_dst.s_addr = fib_compute_spec_dst(skb);
1034 } else { 1035 } else {
1035 pktinfo->ipi_ifindex = 0; 1036 pktinfo->ipi_ifindex = 0;
1036 pktinfo->ipi_spec_dst.s_addr = 0; 1037 pktinfo->ipi_spec_dst.s_addr = 0;
diff --git a/net/ipv4/ipcomp.c b/net/ipv4/ipcomp.c
index 63b64c45a82..b91375482d8 100644
--- a/net/ipv4/ipcomp.c
+++ b/net/ipv4/ipcomp.c
@@ -42,6 +42,7 @@ static void ipcomp4_err(struct sk_buff *skb, u32 info)
42 return; 42 return;
43 NETDEBUG(KERN_DEBUG "pmtu discovery on SA IPCOMP/%08x/%pI4\n", 43 NETDEBUG(KERN_DEBUG "pmtu discovery on SA IPCOMP/%08x/%pI4\n",
44 spi, &iph->daddr); 44 spi, &iph->daddr);
45 ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_COMP, 0);
45 xfrm_state_put(x); 46 xfrm_state_put(x);
46} 47}
47 48
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index 2d0f99bf61b..715338a1b20 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -348,9 +348,6 @@ static int ipip_err(struct sk_buff *skb, u32 info)
348 case ICMP_PORT_UNREACH: 348 case ICMP_PORT_UNREACH:
349 /* Impossible event. */ 349 /* Impossible event. */
350 return 0; 350 return 0;
351 case ICMP_FRAG_NEEDED:
352 /* Soft state for pmtu is maintained by IP core. */
353 return 0;
354 default: 351 default:
355 /* All others are translated to HOST_UNREACH. 352 /* All others are translated to HOST_UNREACH.
356 rfc2003 contains "deep thoughts" about NET_UNREACH, 353 rfc2003 contains "deep thoughts" about NET_UNREACH,
@@ -369,7 +366,17 @@ static int ipip_err(struct sk_buff *skb, u32 info)
369 366
370 rcu_read_lock(); 367 rcu_read_lock();
371 t = ipip_tunnel_lookup(dev_net(skb->dev), iph->daddr, iph->saddr); 368 t = ipip_tunnel_lookup(dev_net(skb->dev), iph->daddr, iph->saddr);
372 if (t == NULL || t->parms.iph.daddr == 0) 369 if (t == NULL)
370 goto out;
371
372 if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
373 ipv4_update_pmtu(skb, dev_net(skb->dev), info,
374 t->dev->ifindex, 0, IPPROTO_IPIP, 0);
375 err = 0;
376 goto out;
377 }
378
379 if (t->parms.iph.daddr == 0)
373 goto out; 380 goto out;
374 381
375 err = 0; 382 err = 0;
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index c94bbc6f2ba..5716c6b808d 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -524,8 +524,8 @@ failure:
524} 524}
525#endif 525#endif
526 526
527/* 527/**
528 * Delete a VIF entry 528 * vif_delete - Delete a VIF entry
529 * @notify: Set to 1, if the caller is a notifier_call 529 * @notify: Set to 1, if the caller is a notifier_call
530 */ 530 */
531 531
@@ -2006,37 +2006,37 @@ static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
2006{ 2006{
2007 int ct; 2007 int ct;
2008 struct rtnexthop *nhp; 2008 struct rtnexthop *nhp;
2009 u8 *b = skb_tail_pointer(skb); 2009 struct nlattr *mp_attr;
2010 struct rtattr *mp_head;
2011 2010
2012 /* If cache is unresolved, don't try to parse IIF and OIF */ 2011 /* If cache is unresolved, don't try to parse IIF and OIF */
2013 if (c->mfc_parent >= MAXVIFS) 2012 if (c->mfc_parent >= MAXVIFS)
2014 return -ENOENT; 2013 return -ENOENT;
2015 2014
2016 if (VIF_EXISTS(mrt, c->mfc_parent)) 2015 if (VIF_EXISTS(mrt, c->mfc_parent) &&
2017 RTA_PUT(skb, RTA_IIF, 4, &mrt->vif_table[c->mfc_parent].dev->ifindex); 2016 nla_put_u32(skb, RTA_IIF, mrt->vif_table[c->mfc_parent].dev->ifindex) < 0)
2017 return -EMSGSIZE;
2018 2018
2019 mp_head = (struct rtattr *)skb_put(skb, RTA_LENGTH(0)); 2019 if (!(mp_attr = nla_nest_start(skb, RTA_MULTIPATH)))
2020 return -EMSGSIZE;
2020 2021
2021 for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) { 2022 for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
2022 if (VIF_EXISTS(mrt, ct) && c->mfc_un.res.ttls[ct] < 255) { 2023 if (VIF_EXISTS(mrt, ct) && c->mfc_un.res.ttls[ct] < 255) {
2023 if (skb_tailroom(skb) < RTA_ALIGN(RTA_ALIGN(sizeof(*nhp)) + 4)) 2024 if (!(nhp = nla_reserve_nohdr(skb, sizeof(*nhp)))) {
2024 goto rtattr_failure; 2025 nla_nest_cancel(skb, mp_attr);
2025 nhp = (struct rtnexthop *)skb_put(skb, RTA_ALIGN(sizeof(*nhp))); 2026 return -EMSGSIZE;
2027 }
2028
2026 nhp->rtnh_flags = 0; 2029 nhp->rtnh_flags = 0;
2027 nhp->rtnh_hops = c->mfc_un.res.ttls[ct]; 2030 nhp->rtnh_hops = c->mfc_un.res.ttls[ct];
2028 nhp->rtnh_ifindex = mrt->vif_table[ct].dev->ifindex; 2031 nhp->rtnh_ifindex = mrt->vif_table[ct].dev->ifindex;
2029 nhp->rtnh_len = sizeof(*nhp); 2032 nhp->rtnh_len = sizeof(*nhp);
2030 } 2033 }
2031 } 2034 }
2032 mp_head->rta_type = RTA_MULTIPATH; 2035
2033 mp_head->rta_len = skb_tail_pointer(skb) - (u8 *)mp_head; 2036 nla_nest_end(skb, mp_attr);
2037
2034 rtm->rtm_type = RTN_MULTICAST; 2038 rtm->rtm_type = RTN_MULTICAST;
2035 return 1; 2039 return 1;
2036
2037rtattr_failure:
2038 nlmsg_trim(skb, b);
2039 return -EMSGSIZE;
2040} 2040}
2041 2041
2042int ipmr_get_route(struct net *net, struct sk_buff *skb, 2042int ipmr_get_route(struct net *net, struct sk_buff *skb,
diff --git a/net/ipv4/netfilter/ipt_ULOG.c b/net/ipv4/netfilter/ipt_ULOG.c
index ba5756d2016..1109f7f6c25 100644
--- a/net/ipv4/netfilter/ipt_ULOG.c
+++ b/net/ipv4/netfilter/ipt_ULOG.c
@@ -196,12 +196,15 @@ static void ipt_ulog_packet(unsigned int hooknum,
196 196
197 pr_debug("qlen %d, qthreshold %Zu\n", ub->qlen, loginfo->qthreshold); 197 pr_debug("qlen %d, qthreshold %Zu\n", ub->qlen, loginfo->qthreshold);
198 198
199 /* NLMSG_PUT contains a hidden goto nlmsg_failure !!! */ 199 nlh = nlmsg_put(ub->skb, 0, ub->qlen, ULOG_NL_EVENT,
200 nlh = NLMSG_PUT(ub->skb, 0, ub->qlen, ULOG_NL_EVENT, 200 sizeof(*pm)+copy_len, 0);
201 sizeof(*pm)+copy_len); 201 if (!nlh) {
202 pr_debug("error during nlmsg_put\n");
203 goto out_unlock;
204 }
202 ub->qlen++; 205 ub->qlen++;
203 206
204 pm = NLMSG_DATA(nlh); 207 pm = nlmsg_data(nlh);
205 208
206 /* We might not have a timestamp, get one */ 209 /* We might not have a timestamp, get one */
207 if (skb->tstamp.tv64 == 0) 210 if (skb->tstamp.tv64 == 0)
@@ -261,13 +264,11 @@ static void ipt_ulog_packet(unsigned int hooknum,
261 nlh->nlmsg_type = NLMSG_DONE; 264 nlh->nlmsg_type = NLMSG_DONE;
262 ulog_send(groupnum); 265 ulog_send(groupnum);
263 } 266 }
264 267out_unlock:
265 spin_unlock_bh(&ulog_lock); 268 spin_unlock_bh(&ulog_lock);
266 269
267 return; 270 return;
268 271
269nlmsg_failure:
270 pr_debug("error during NLMSG_PUT\n");
271alloc_failure: 272alloc_failure:
272 pr_debug("Error building netlink message\n"); 273 pr_debug("Error building netlink message\n");
273 spin_unlock_bh(&ulog_lock); 274 spin_unlock_bh(&ulog_lock);
@@ -380,6 +381,9 @@ static struct nf_logger ipt_ulog_logger __read_mostly = {
380static int __init ulog_tg_init(void) 381static int __init ulog_tg_init(void)
381{ 382{
382 int ret, i; 383 int ret, i;
384 struct netlink_kernel_cfg cfg = {
385 .groups = ULOG_MAXNLGROUPS,
386 };
383 387
384 pr_debug("init module\n"); 388 pr_debug("init module\n");
385 389
@@ -392,9 +396,8 @@ static int __init ulog_tg_init(void)
392 for (i = 0; i < ULOG_MAXNLGROUPS; i++) 396 for (i = 0; i < ULOG_MAXNLGROUPS; i++)
393 setup_timer(&ulog_buffers[i].timer, ulog_timer, i); 397 setup_timer(&ulog_buffers[i].timer, ulog_timer, i);
394 398
395 nflognl = netlink_kernel_create(&init_net, 399 nflognl = netlink_kernel_create(&init_net, NETLINK_NFLOG,
396 NETLINK_NFLOG, ULOG_MAXNLGROUPS, NULL, 400 THIS_MODULE, &cfg);
397 NULL, THIS_MODULE);
398 if (!nflognl) 401 if (!nflognl)
399 return -ENOMEM; 402 return -ENOMEM;
400 403
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
index 91747d4ebc2..e7ff2dcab6c 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
@@ -95,11 +95,11 @@ static int ipv4_get_l4proto(const struct sk_buff *skb, unsigned int nhoff,
95 return NF_ACCEPT; 95 return NF_ACCEPT;
96} 96}
97 97
98static unsigned int ipv4_confirm(unsigned int hooknum, 98static unsigned int ipv4_helper(unsigned int hooknum,
99 struct sk_buff *skb, 99 struct sk_buff *skb,
100 const struct net_device *in, 100 const struct net_device *in,
101 const struct net_device *out, 101 const struct net_device *out,
102 int (*okfn)(struct sk_buff *)) 102 int (*okfn)(struct sk_buff *))
103{ 103{
104 struct nf_conn *ct; 104 struct nf_conn *ct;
105 enum ip_conntrack_info ctinfo; 105 enum ip_conntrack_info ctinfo;
@@ -110,24 +110,38 @@ static unsigned int ipv4_confirm(unsigned int hooknum,
110 /* This is where we call the helper: as the packet goes out. */ 110 /* This is where we call the helper: as the packet goes out. */
111 ct = nf_ct_get(skb, &ctinfo); 111 ct = nf_ct_get(skb, &ctinfo);
112 if (!ct || ctinfo == IP_CT_RELATED_REPLY) 112 if (!ct || ctinfo == IP_CT_RELATED_REPLY)
113 goto out; 113 return NF_ACCEPT;
114 114
115 help = nfct_help(ct); 115 help = nfct_help(ct);
116 if (!help) 116 if (!help)
117 goto out; 117 return NF_ACCEPT;
118 118
119 /* rcu_read_lock()ed by nf_hook_slow */ 119 /* rcu_read_lock()ed by nf_hook_slow */
120 helper = rcu_dereference(help->helper); 120 helper = rcu_dereference(help->helper);
121 if (!helper) 121 if (!helper)
122 goto out; 122 return NF_ACCEPT;
123 123
124 ret = helper->help(skb, skb_network_offset(skb) + ip_hdrlen(skb), 124 ret = helper->help(skb, skb_network_offset(skb) + ip_hdrlen(skb),
125 ct, ctinfo); 125 ct, ctinfo);
126 if (ret != NF_ACCEPT) { 126 if (ret != NF_ACCEPT && (ret & NF_VERDICT_MASK) != NF_QUEUE) {
127 nf_log_packet(NFPROTO_IPV4, hooknum, skb, in, out, NULL, 127 nf_log_packet(NFPROTO_IPV4, hooknum, skb, in, out, NULL,
128 "nf_ct_%s: dropping packet", helper->name); 128 "nf_ct_%s: dropping packet", helper->name);
129 return ret;
130 } 129 }
130 return ret;
131}
132
133static unsigned int ipv4_confirm(unsigned int hooknum,
134 struct sk_buff *skb,
135 const struct net_device *in,
136 const struct net_device *out,
137 int (*okfn)(struct sk_buff *))
138{
139 struct nf_conn *ct;
140 enum ip_conntrack_info ctinfo;
141
142 ct = nf_ct_get(skb, &ctinfo);
143 if (!ct || ctinfo == IP_CT_RELATED_REPLY)
144 goto out;
131 145
132 /* adjust seqs for loopback traffic only in outgoing direction */ 146 /* adjust seqs for loopback traffic only in outgoing direction */
133 if (test_bit(IPS_SEQ_ADJUST_BIT, &ct->status) && 147 if (test_bit(IPS_SEQ_ADJUST_BIT, &ct->status) &&
@@ -185,6 +199,13 @@ static struct nf_hook_ops ipv4_conntrack_ops[] __read_mostly = {
185 .priority = NF_IP_PRI_CONNTRACK, 199 .priority = NF_IP_PRI_CONNTRACK,
186 }, 200 },
187 { 201 {
202 .hook = ipv4_helper,
203 .owner = THIS_MODULE,
204 .pf = NFPROTO_IPV4,
205 .hooknum = NF_INET_POST_ROUTING,
206 .priority = NF_IP_PRI_CONNTRACK_HELPER,
207 },
208 {
188 .hook = ipv4_confirm, 209 .hook = ipv4_confirm,
189 .owner = THIS_MODULE, 210 .owner = THIS_MODULE,
190 .pf = NFPROTO_IPV4, 211 .pf = NFPROTO_IPV4,
@@ -192,6 +213,13 @@ static struct nf_hook_ops ipv4_conntrack_ops[] __read_mostly = {
192 .priority = NF_IP_PRI_CONNTRACK_CONFIRM, 213 .priority = NF_IP_PRI_CONNTRACK_CONFIRM,
193 }, 214 },
194 { 215 {
216 .hook = ipv4_helper,
217 .owner = THIS_MODULE,
218 .pf = NFPROTO_IPV4,
219 .hooknum = NF_INET_LOCAL_IN,
220 .priority = NF_IP_PRI_CONNTRACK_HELPER,
221 },
222 {
195 .hook = ipv4_confirm, 223 .hook = ipv4_confirm,
196 .owner = THIS_MODULE, 224 .owner = THIS_MODULE,
197 .pf = NFPROTO_IPV4, 225 .pf = NFPROTO_IPV4,
@@ -207,35 +235,30 @@ static int log_invalid_proto_max = 255;
207static ctl_table ip_ct_sysctl_table[] = { 235static ctl_table ip_ct_sysctl_table[] = {
208 { 236 {
209 .procname = "ip_conntrack_max", 237 .procname = "ip_conntrack_max",
210 .data = &nf_conntrack_max,
211 .maxlen = sizeof(int), 238 .maxlen = sizeof(int),
212 .mode = 0644, 239 .mode = 0644,
213 .proc_handler = proc_dointvec, 240 .proc_handler = proc_dointvec,
214 }, 241 },
215 { 242 {
216 .procname = "ip_conntrack_count", 243 .procname = "ip_conntrack_count",
217 .data = &init_net.ct.count,
218 .maxlen = sizeof(int), 244 .maxlen = sizeof(int),
219 .mode = 0444, 245 .mode = 0444,
220 .proc_handler = proc_dointvec, 246 .proc_handler = proc_dointvec,
221 }, 247 },
222 { 248 {
223 .procname = "ip_conntrack_buckets", 249 .procname = "ip_conntrack_buckets",
224 .data = &init_net.ct.htable_size,
225 .maxlen = sizeof(unsigned int), 250 .maxlen = sizeof(unsigned int),
226 .mode = 0444, 251 .mode = 0444,
227 .proc_handler = proc_dointvec, 252 .proc_handler = proc_dointvec,
228 }, 253 },
229 { 254 {
230 .procname = "ip_conntrack_checksum", 255 .procname = "ip_conntrack_checksum",
231 .data = &init_net.ct.sysctl_checksum,
232 .maxlen = sizeof(int), 256 .maxlen = sizeof(int),
233 .mode = 0644, 257 .mode = 0644,
234 .proc_handler = proc_dointvec, 258 .proc_handler = proc_dointvec,
235 }, 259 },
236 { 260 {
237 .procname = "ip_conntrack_log_invalid", 261 .procname = "ip_conntrack_log_invalid",
238 .data = &init_net.ct.sysctl_log_invalid,
239 .maxlen = sizeof(unsigned int), 262 .maxlen = sizeof(unsigned int),
240 .mode = 0644, 263 .mode = 0644,
241 .proc_handler = proc_dointvec_minmax, 264 .proc_handler = proc_dointvec_minmax,
@@ -351,6 +374,25 @@ static struct nf_sockopt_ops so_getorigdst = {
351 .owner = THIS_MODULE, 374 .owner = THIS_MODULE,
352}; 375};
353 376
377static int ipv4_init_net(struct net *net)
378{
379#if defined(CONFIG_SYSCTL) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT)
380 struct nf_ip_net *in = &net->ct.nf_ct_proto;
381 in->ctl_table = kmemdup(ip_ct_sysctl_table,
382 sizeof(ip_ct_sysctl_table),
383 GFP_KERNEL);
384 if (!in->ctl_table)
385 return -ENOMEM;
386
387 in->ctl_table[0].data = &nf_conntrack_max;
388 in->ctl_table[1].data = &net->ct.count;
389 in->ctl_table[2].data = &net->ct.htable_size;
390 in->ctl_table[3].data = &net->ct.sysctl_checksum;
391 in->ctl_table[4].data = &net->ct.sysctl_log_invalid;
392#endif
393 return 0;
394}
395
354struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv4 __read_mostly = { 396struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv4 __read_mostly = {
355 .l3proto = PF_INET, 397 .l3proto = PF_INET,
356 .name = "ipv4", 398 .name = "ipv4",
@@ -366,8 +408,8 @@ struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv4 __read_mostly = {
366#endif 408#endif
367#if defined(CONFIG_SYSCTL) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT) 409#if defined(CONFIG_SYSCTL) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT)
368 .ctl_table_path = "net/ipv4/netfilter", 410 .ctl_table_path = "net/ipv4/netfilter",
369 .ctl_table = ip_ct_sysctl_table,
370#endif 411#endif
412 .init_net = ipv4_init_net,
371 .me = THIS_MODULE, 413 .me = THIS_MODULE,
372}; 414};
373 415
@@ -378,6 +420,65 @@ MODULE_ALIAS("nf_conntrack-" __stringify(AF_INET));
378MODULE_ALIAS("ip_conntrack"); 420MODULE_ALIAS("ip_conntrack");
379MODULE_LICENSE("GPL"); 421MODULE_LICENSE("GPL");
380 422
423static int ipv4_net_init(struct net *net)
424{
425 int ret = 0;
426
427 ret = nf_conntrack_l4proto_register(net,
428 &nf_conntrack_l4proto_tcp4);
429 if (ret < 0) {
430 pr_err("nf_conntrack_l4proto_tcp4 :protocol register failed\n");
431 goto out_tcp;
432 }
433 ret = nf_conntrack_l4proto_register(net,
434 &nf_conntrack_l4proto_udp4);
435 if (ret < 0) {
436 pr_err("nf_conntrack_l4proto_udp4 :protocol register failed\n");
437 goto out_udp;
438 }
439 ret = nf_conntrack_l4proto_register(net,
440 &nf_conntrack_l4proto_icmp);
441 if (ret < 0) {
442 pr_err("nf_conntrack_l4proto_icmp4 :protocol register failed\n");
443 goto out_icmp;
444 }
445 ret = nf_conntrack_l3proto_register(net,
446 &nf_conntrack_l3proto_ipv4);
447 if (ret < 0) {
448 pr_err("nf_conntrack_l3proto_ipv4 :protocol register failed\n");
449 goto out_ipv4;
450 }
451 return 0;
452out_ipv4:
453 nf_conntrack_l4proto_unregister(net,
454 &nf_conntrack_l4proto_icmp);
455out_icmp:
456 nf_conntrack_l4proto_unregister(net,
457 &nf_conntrack_l4proto_udp4);
458out_udp:
459 nf_conntrack_l4proto_unregister(net,
460 &nf_conntrack_l4proto_tcp4);
461out_tcp:
462 return ret;
463}
464
465static void ipv4_net_exit(struct net *net)
466{
467 nf_conntrack_l3proto_unregister(net,
468 &nf_conntrack_l3proto_ipv4);
469 nf_conntrack_l4proto_unregister(net,
470 &nf_conntrack_l4proto_icmp);
471 nf_conntrack_l4proto_unregister(net,
472 &nf_conntrack_l4proto_udp4);
473 nf_conntrack_l4proto_unregister(net,
474 &nf_conntrack_l4proto_tcp4);
475}
476
477static struct pernet_operations ipv4_net_ops = {
478 .init = ipv4_net_init,
479 .exit = ipv4_net_exit,
480};
481
381static int __init nf_conntrack_l3proto_ipv4_init(void) 482static int __init nf_conntrack_l3proto_ipv4_init(void)
382{ 483{
383 int ret = 0; 484 int ret = 0;
@@ -391,35 +492,17 @@ static int __init nf_conntrack_l3proto_ipv4_init(void)
391 return ret; 492 return ret;
392 } 493 }
393 494
394 ret = nf_conntrack_l4proto_register(&nf_conntrack_l4proto_tcp4); 495 ret = register_pernet_subsys(&ipv4_net_ops);
395 if (ret < 0) { 496 if (ret < 0) {
396 pr_err("nf_conntrack_ipv4: can't register tcp.\n"); 497 pr_err("nf_conntrack_ipv4: can't register pernet ops\n");
397 goto cleanup_sockopt; 498 goto cleanup_sockopt;
398 } 499 }
399 500
400 ret = nf_conntrack_l4proto_register(&nf_conntrack_l4proto_udp4);
401 if (ret < 0) {
402 pr_err("nf_conntrack_ipv4: can't register udp.\n");
403 goto cleanup_tcp;
404 }
405
406 ret = nf_conntrack_l4proto_register(&nf_conntrack_l4proto_icmp);
407 if (ret < 0) {
408 pr_err("nf_conntrack_ipv4: can't register icmp.\n");
409 goto cleanup_udp;
410 }
411
412 ret = nf_conntrack_l3proto_register(&nf_conntrack_l3proto_ipv4);
413 if (ret < 0) {
414 pr_err("nf_conntrack_ipv4: can't register ipv4\n");
415 goto cleanup_icmp;
416 }
417
418 ret = nf_register_hooks(ipv4_conntrack_ops, 501 ret = nf_register_hooks(ipv4_conntrack_ops,
419 ARRAY_SIZE(ipv4_conntrack_ops)); 502 ARRAY_SIZE(ipv4_conntrack_ops));
420 if (ret < 0) { 503 if (ret < 0) {
421 pr_err("nf_conntrack_ipv4: can't register hooks.\n"); 504 pr_err("nf_conntrack_ipv4: can't register hooks.\n");
422 goto cleanup_ipv4; 505 goto cleanup_pernet;
423 } 506 }
424#if defined(CONFIG_PROC_FS) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT) 507#if defined(CONFIG_PROC_FS) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT)
425 ret = nf_conntrack_ipv4_compat_init(); 508 ret = nf_conntrack_ipv4_compat_init();
@@ -431,14 +514,8 @@ static int __init nf_conntrack_l3proto_ipv4_init(void)
431 cleanup_hooks: 514 cleanup_hooks:
432 nf_unregister_hooks(ipv4_conntrack_ops, ARRAY_SIZE(ipv4_conntrack_ops)); 515 nf_unregister_hooks(ipv4_conntrack_ops, ARRAY_SIZE(ipv4_conntrack_ops));
433#endif 516#endif
434 cleanup_ipv4: 517 cleanup_pernet:
435 nf_conntrack_l3proto_unregister(&nf_conntrack_l3proto_ipv4); 518 unregister_pernet_subsys(&ipv4_net_ops);
436 cleanup_icmp:
437 nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_icmp);
438 cleanup_udp:
439 nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_udp4);
440 cleanup_tcp:
441 nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_tcp4);
442 cleanup_sockopt: 519 cleanup_sockopt:
443 nf_unregister_sockopt(&so_getorigdst); 520 nf_unregister_sockopt(&so_getorigdst);
444 return ret; 521 return ret;
@@ -451,10 +528,7 @@ static void __exit nf_conntrack_l3proto_ipv4_fini(void)
451 nf_conntrack_ipv4_compat_fini(); 528 nf_conntrack_ipv4_compat_fini();
452#endif 529#endif
453 nf_unregister_hooks(ipv4_conntrack_ops, ARRAY_SIZE(ipv4_conntrack_ops)); 530 nf_unregister_hooks(ipv4_conntrack_ops, ARRAY_SIZE(ipv4_conntrack_ops));
454 nf_conntrack_l3proto_unregister(&nf_conntrack_l3proto_ipv4); 531 unregister_pernet_subsys(&ipv4_net_ops);
455 nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_icmp);
456 nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_udp4);
457 nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_tcp4);
458 nf_unregister_sockopt(&so_getorigdst); 532 nf_unregister_sockopt(&so_getorigdst);
459} 533}
460 534
diff --git a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
index 0847e373d33..5241d997ab7 100644
--- a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
+++ b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
@@ -23,6 +23,11 @@
23 23
24static unsigned int nf_ct_icmp_timeout __read_mostly = 30*HZ; 24static unsigned int nf_ct_icmp_timeout __read_mostly = 30*HZ;
25 25
26static inline struct nf_icmp_net *icmp_pernet(struct net *net)
27{
28 return &net->ct.nf_ct_proto.icmp;
29}
30
26static bool icmp_pkt_to_tuple(const struct sk_buff *skb, unsigned int dataoff, 31static bool icmp_pkt_to_tuple(const struct sk_buff *skb, unsigned int dataoff,
27 struct nf_conntrack_tuple *tuple) 32 struct nf_conntrack_tuple *tuple)
28{ 33{
@@ -77,7 +82,7 @@ static int icmp_print_tuple(struct seq_file *s,
77 82
78static unsigned int *icmp_get_timeouts(struct net *net) 83static unsigned int *icmp_get_timeouts(struct net *net)
79{ 84{
80 return &nf_ct_icmp_timeout; 85 return &icmp_pernet(net)->timeout;
81} 86}
82 87
83/* Returns verdict for packet, or -1 for invalid. */ 88/* Returns verdict for packet, or -1 for invalid. */
@@ -274,16 +279,18 @@ static int icmp_nlattr_tuple_size(void)
274#include <linux/netfilter/nfnetlink.h> 279#include <linux/netfilter/nfnetlink.h>
275#include <linux/netfilter/nfnetlink_cttimeout.h> 280#include <linux/netfilter/nfnetlink_cttimeout.h>
276 281
277static int icmp_timeout_nlattr_to_obj(struct nlattr *tb[], void *data) 282static int icmp_timeout_nlattr_to_obj(struct nlattr *tb[],
283 struct net *net, void *data)
278{ 284{
279 unsigned int *timeout = data; 285 unsigned int *timeout = data;
286 struct nf_icmp_net *in = icmp_pernet(net);
280 287
281 if (tb[CTA_TIMEOUT_ICMP_TIMEOUT]) { 288 if (tb[CTA_TIMEOUT_ICMP_TIMEOUT]) {
282 *timeout = 289 *timeout =
283 ntohl(nla_get_be32(tb[CTA_TIMEOUT_ICMP_TIMEOUT])) * HZ; 290 ntohl(nla_get_be32(tb[CTA_TIMEOUT_ICMP_TIMEOUT])) * HZ;
284 } else { 291 } else {
285 /* Set default ICMP timeout. */ 292 /* Set default ICMP timeout. */
286 *timeout = nf_ct_icmp_timeout; 293 *timeout = in->timeout;
287 } 294 }
288 return 0; 295 return 0;
289} 296}
@@ -308,11 +315,9 @@ icmp_timeout_nla_policy[CTA_TIMEOUT_ICMP_MAX+1] = {
308#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ 315#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
309 316
310#ifdef CONFIG_SYSCTL 317#ifdef CONFIG_SYSCTL
311static struct ctl_table_header *icmp_sysctl_header;
312static struct ctl_table icmp_sysctl_table[] = { 318static struct ctl_table icmp_sysctl_table[] = {
313 { 319 {
314 .procname = "nf_conntrack_icmp_timeout", 320 .procname = "nf_conntrack_icmp_timeout",
315 .data = &nf_ct_icmp_timeout,
316 .maxlen = sizeof(unsigned int), 321 .maxlen = sizeof(unsigned int),
317 .mode = 0644, 322 .mode = 0644,
318 .proc_handler = proc_dointvec_jiffies, 323 .proc_handler = proc_dointvec_jiffies,
@@ -323,7 +328,6 @@ static struct ctl_table icmp_sysctl_table[] = {
323static struct ctl_table icmp_compat_sysctl_table[] = { 328static struct ctl_table icmp_compat_sysctl_table[] = {
324 { 329 {
325 .procname = "ip_conntrack_icmp_timeout", 330 .procname = "ip_conntrack_icmp_timeout",
326 .data = &nf_ct_icmp_timeout,
327 .maxlen = sizeof(unsigned int), 331 .maxlen = sizeof(unsigned int),
328 .mode = 0644, 332 .mode = 0644,
329 .proc_handler = proc_dointvec_jiffies, 333 .proc_handler = proc_dointvec_jiffies,
@@ -333,6 +337,62 @@ static struct ctl_table icmp_compat_sysctl_table[] = {
333#endif /* CONFIG_NF_CONNTRACK_PROC_COMPAT */ 337#endif /* CONFIG_NF_CONNTRACK_PROC_COMPAT */
334#endif /* CONFIG_SYSCTL */ 338#endif /* CONFIG_SYSCTL */
335 339
340static int icmp_kmemdup_sysctl_table(struct nf_proto_net *pn,
341 struct nf_icmp_net *in)
342{
343#ifdef CONFIG_SYSCTL
344 pn->ctl_table = kmemdup(icmp_sysctl_table,
345 sizeof(icmp_sysctl_table),
346 GFP_KERNEL);
347 if (!pn->ctl_table)
348 return -ENOMEM;
349
350 pn->ctl_table[0].data = &in->timeout;
351#endif
352 return 0;
353}
354
355static int icmp_kmemdup_compat_sysctl_table(struct nf_proto_net *pn,
356 struct nf_icmp_net *in)
357{
358#ifdef CONFIG_SYSCTL
359#ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
360 pn->ctl_compat_table = kmemdup(icmp_compat_sysctl_table,
361 sizeof(icmp_compat_sysctl_table),
362 GFP_KERNEL);
363 if (!pn->ctl_compat_table)
364 return -ENOMEM;
365
366 pn->ctl_compat_table[0].data = &in->timeout;
367#endif
368#endif
369 return 0;
370}
371
372static int icmp_init_net(struct net *net, u_int16_t proto)
373{
374 int ret;
375 struct nf_icmp_net *in = icmp_pernet(net);
376 struct nf_proto_net *pn = &in->pn;
377
378 in->timeout = nf_ct_icmp_timeout;
379
380 ret = icmp_kmemdup_compat_sysctl_table(pn, in);
381 if (ret < 0)
382 return ret;
383
384 ret = icmp_kmemdup_sysctl_table(pn, in);
385 if (ret < 0)
386 nf_ct_kfree_compat_sysctl_table(pn);
387
388 return ret;
389}
390
391static struct nf_proto_net *icmp_get_net_proto(struct net *net)
392{
393 return &net->ct.nf_ct_proto.icmp.pn;
394}
395
336struct nf_conntrack_l4proto nf_conntrack_l4proto_icmp __read_mostly = 396struct nf_conntrack_l4proto nf_conntrack_l4proto_icmp __read_mostly =
337{ 397{
338 .l3proto = PF_INET, 398 .l3proto = PF_INET,
@@ -362,11 +422,6 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_icmp __read_mostly =
362 .nla_policy = icmp_timeout_nla_policy, 422 .nla_policy = icmp_timeout_nla_policy,
363 }, 423 },
364#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ 424#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
365#ifdef CONFIG_SYSCTL 425 .init_net = icmp_init_net,
366 .ctl_table_header = &icmp_sysctl_header, 426 .get_net_proto = icmp_get_net_proto,
367 .ctl_table = icmp_sysctl_table,
368#ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
369 .ctl_compat_table = icmp_compat_sysctl_table,
370#endif
371#endif
372}; 427};
diff --git a/net/ipv4/netfilter/nf_defrag_ipv4.c b/net/ipv4/netfilter/nf_defrag_ipv4.c
index 9bb1b8a37a2..742815518b0 100644
--- a/net/ipv4/netfilter/nf_defrag_ipv4.c
+++ b/net/ipv4/netfilter/nf_defrag_ipv4.c
@@ -94,14 +94,14 @@ static struct nf_hook_ops ipv4_defrag_ops[] = {
94 { 94 {
95 .hook = ipv4_conntrack_defrag, 95 .hook = ipv4_conntrack_defrag,
96 .owner = THIS_MODULE, 96 .owner = THIS_MODULE,
97 .pf = PF_INET, 97 .pf = NFPROTO_IPV4,
98 .hooknum = NF_INET_PRE_ROUTING, 98 .hooknum = NF_INET_PRE_ROUTING,
99 .priority = NF_IP_PRI_CONNTRACK_DEFRAG, 99 .priority = NF_IP_PRI_CONNTRACK_DEFRAG,
100 }, 100 },
101 { 101 {
102 .hook = ipv4_conntrack_defrag, 102 .hook = ipv4_conntrack_defrag,
103 .owner = THIS_MODULE, 103 .owner = THIS_MODULE,
104 .pf = PF_INET, 104 .pf = NFPROTO_IPV4,
105 .hooknum = NF_INET_LOCAL_OUT, 105 .hooknum = NF_INET_LOCAL_OUT,
106 .priority = NF_IP_PRI_CONNTRACK_DEFRAG, 106 .priority = NF_IP_PRI_CONNTRACK_DEFRAG,
107 }, 107 },
diff --git a/net/ipv4/netfilter/nf_nat_amanda.c b/net/ipv4/netfilter/nf_nat_amanda.c
index 7b22382ff0e..3c04d24e297 100644
--- a/net/ipv4/netfilter/nf_nat_amanda.c
+++ b/net/ipv4/netfilter/nf_nat_amanda.c
@@ -13,10 +13,10 @@
13#include <linux/skbuff.h> 13#include <linux/skbuff.h>
14#include <linux/udp.h> 14#include <linux/udp.h>
15 15
16#include <net/netfilter/nf_nat_helper.h>
17#include <net/netfilter/nf_nat_rule.h>
18#include <net/netfilter/nf_conntrack_helper.h> 16#include <net/netfilter/nf_conntrack_helper.h>
19#include <net/netfilter/nf_conntrack_expect.h> 17#include <net/netfilter/nf_conntrack_expect.h>
18#include <net/netfilter/nf_nat_helper.h>
19#include <net/netfilter/nf_nat_rule.h>
20#include <linux/netfilter/nf_conntrack_amanda.h> 20#include <linux/netfilter/nf_conntrack_amanda.h>
21 21
22MODULE_AUTHOR("Brian J. Murrell <netfilter@interlinx.bc.ca>"); 22MODULE_AUTHOR("Brian J. Murrell <netfilter@interlinx.bc.ca>");
diff --git a/net/ipv4/netfilter/nf_nat_core.c b/net/ipv4/netfilter/nf_nat_core.c
index abb52adf5ac..44b082fd48a 100644
--- a/net/ipv4/netfilter/nf_nat_core.c
+++ b/net/ipv4/netfilter/nf_nat_core.c
@@ -691,6 +691,10 @@ static struct nf_ct_helper_expectfn follow_master_nat = {
691 .expectfn = nf_nat_follow_master, 691 .expectfn = nf_nat_follow_master,
692}; 692};
693 693
694static struct nfq_ct_nat_hook nfq_ct_nat = {
695 .seq_adjust = nf_nat_tcp_seq_adjust,
696};
697
694static int __init nf_nat_init(void) 698static int __init nf_nat_init(void)
695{ 699{
696 size_t i; 700 size_t i;
@@ -731,6 +735,7 @@ static int __init nf_nat_init(void)
731 nfnetlink_parse_nat_setup); 735 nfnetlink_parse_nat_setup);
732 BUG_ON(nf_ct_nat_offset != NULL); 736 BUG_ON(nf_ct_nat_offset != NULL);
733 RCU_INIT_POINTER(nf_ct_nat_offset, nf_nat_get_offset); 737 RCU_INIT_POINTER(nf_ct_nat_offset, nf_nat_get_offset);
738 RCU_INIT_POINTER(nfq_ct_nat_hook, &nfq_ct_nat);
734 return 0; 739 return 0;
735 740
736 cleanup_extend: 741 cleanup_extend:
@@ -747,6 +752,7 @@ static void __exit nf_nat_cleanup(void)
747 RCU_INIT_POINTER(nf_nat_seq_adjust_hook, NULL); 752 RCU_INIT_POINTER(nf_nat_seq_adjust_hook, NULL);
748 RCU_INIT_POINTER(nfnetlink_parse_nat_setup_hook, NULL); 753 RCU_INIT_POINTER(nfnetlink_parse_nat_setup_hook, NULL);
749 RCU_INIT_POINTER(nf_ct_nat_offset, NULL); 754 RCU_INIT_POINTER(nf_ct_nat_offset, NULL);
755 RCU_INIT_POINTER(nfq_ct_nat_hook, NULL);
750 synchronize_net(); 756 synchronize_net();
751} 757}
752 758
diff --git a/net/ipv4/netfilter/nf_nat_h323.c b/net/ipv4/netfilter/nf_nat_h323.c
index cad29c12131..c6784a18c1c 100644
--- a/net/ipv4/netfilter/nf_nat_h323.c
+++ b/net/ipv4/netfilter/nf_nat_h323.c
@@ -95,7 +95,7 @@ static int set_sig_addr(struct sk_buff *skb, struct nf_conn *ct,
95 unsigned char **data, 95 unsigned char **data,
96 TransportAddress *taddr, int count) 96 TransportAddress *taddr, int count)
97{ 97{
98 const struct nf_ct_h323_master *info = &nfct_help(ct)->help.ct_h323_info; 98 const struct nf_ct_h323_master *info = nfct_help_data(ct);
99 int dir = CTINFO2DIR(ctinfo); 99 int dir = CTINFO2DIR(ctinfo);
100 int i; 100 int i;
101 __be16 port; 101 __be16 port;
@@ -178,7 +178,7 @@ static int nat_rtp_rtcp(struct sk_buff *skb, struct nf_conn *ct,
178 struct nf_conntrack_expect *rtp_exp, 178 struct nf_conntrack_expect *rtp_exp,
179 struct nf_conntrack_expect *rtcp_exp) 179 struct nf_conntrack_expect *rtcp_exp)
180{ 180{
181 struct nf_ct_h323_master *info = &nfct_help(ct)->help.ct_h323_info; 181 struct nf_ct_h323_master *info = nfct_help_data(ct);
182 int dir = CTINFO2DIR(ctinfo); 182 int dir = CTINFO2DIR(ctinfo);
183 int i; 183 int i;
184 u_int16_t nated_port; 184 u_int16_t nated_port;
@@ -330,7 +330,7 @@ static int nat_h245(struct sk_buff *skb, struct nf_conn *ct,
330 TransportAddress *taddr, __be16 port, 330 TransportAddress *taddr, __be16 port,
331 struct nf_conntrack_expect *exp) 331 struct nf_conntrack_expect *exp)
332{ 332{
333 struct nf_ct_h323_master *info = &nfct_help(ct)->help.ct_h323_info; 333 struct nf_ct_h323_master *info = nfct_help_data(ct);
334 int dir = CTINFO2DIR(ctinfo); 334 int dir = CTINFO2DIR(ctinfo);
335 u_int16_t nated_port = ntohs(port); 335 u_int16_t nated_port = ntohs(port);
336 336
@@ -419,7 +419,7 @@ static int nat_q931(struct sk_buff *skb, struct nf_conn *ct,
419 unsigned char **data, TransportAddress *taddr, int idx, 419 unsigned char **data, TransportAddress *taddr, int idx,
420 __be16 port, struct nf_conntrack_expect *exp) 420 __be16 port, struct nf_conntrack_expect *exp)
421{ 421{
422 struct nf_ct_h323_master *info = &nfct_help(ct)->help.ct_h323_info; 422 struct nf_ct_h323_master *info = nfct_help_data(ct);
423 int dir = CTINFO2DIR(ctinfo); 423 int dir = CTINFO2DIR(ctinfo);
424 u_int16_t nated_port = ntohs(port); 424 u_int16_t nated_port = ntohs(port);
425 union nf_inet_addr addr; 425 union nf_inet_addr addr;
diff --git a/net/ipv4/netfilter/nf_nat_helper.c b/net/ipv4/netfilter/nf_nat_helper.c
index af65958f630..2e59ad0b90c 100644
--- a/net/ipv4/netfilter/nf_nat_helper.c
+++ b/net/ipv4/netfilter/nf_nat_helper.c
@@ -153,6 +153,19 @@ void nf_nat_set_seq_adjust(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
153} 153}
154EXPORT_SYMBOL_GPL(nf_nat_set_seq_adjust); 154EXPORT_SYMBOL_GPL(nf_nat_set_seq_adjust);
155 155
156void nf_nat_tcp_seq_adjust(struct sk_buff *skb, struct nf_conn *ct,
157 u32 ctinfo, int off)
158{
159 const struct tcphdr *th;
160
161 if (nf_ct_protonum(ct) != IPPROTO_TCP)
162 return;
163
164 th = (struct tcphdr *)(skb_network_header(skb)+ ip_hdrlen(skb));
165 nf_nat_set_seq_adjust(ct, ctinfo, th->seq, off);
166}
167EXPORT_SYMBOL_GPL(nf_nat_tcp_seq_adjust);
168
156static void nf_nat_csum(struct sk_buff *skb, const struct iphdr *iph, void *data, 169static void nf_nat_csum(struct sk_buff *skb, const struct iphdr *iph, void *data,
157 int datalen, __sum16 *check, int oldlen) 170 int datalen, __sum16 *check, int oldlen)
158{ 171{
diff --git a/net/ipv4/netfilter/nf_nat_pptp.c b/net/ipv4/netfilter/nf_nat_pptp.c
index c273d58980a..388140881eb 100644
--- a/net/ipv4/netfilter/nf_nat_pptp.c
+++ b/net/ipv4/netfilter/nf_nat_pptp.c
@@ -49,7 +49,7 @@ static void pptp_nat_expected(struct nf_conn *ct,
49 const struct nf_nat_pptp *nat_pptp_info; 49 const struct nf_nat_pptp *nat_pptp_info;
50 struct nf_nat_ipv4_range range; 50 struct nf_nat_ipv4_range range;
51 51
52 ct_pptp_info = &nfct_help(master)->help.ct_pptp_info; 52 ct_pptp_info = nfct_help_data(master);
53 nat_pptp_info = &nfct_nat(master)->help.nat_pptp_info; 53 nat_pptp_info = &nfct_nat(master)->help.nat_pptp_info;
54 54
55 /* And here goes the grand finale of corrosion... */ 55 /* And here goes the grand finale of corrosion... */
@@ -123,7 +123,7 @@ pptp_outbound_pkt(struct sk_buff *skb,
123 __be16 new_callid; 123 __be16 new_callid;
124 unsigned int cid_off; 124 unsigned int cid_off;
125 125
126 ct_pptp_info = &nfct_help(ct)->help.ct_pptp_info; 126 ct_pptp_info = nfct_help_data(ct);
127 nat_pptp_info = &nfct_nat(ct)->help.nat_pptp_info; 127 nat_pptp_info = &nfct_nat(ct)->help.nat_pptp_info;
128 128
129 new_callid = ct_pptp_info->pns_call_id; 129 new_callid = ct_pptp_info->pns_call_id;
@@ -192,7 +192,7 @@ pptp_exp_gre(struct nf_conntrack_expect *expect_orig,
192 struct nf_ct_pptp_master *ct_pptp_info; 192 struct nf_ct_pptp_master *ct_pptp_info;
193 struct nf_nat_pptp *nat_pptp_info; 193 struct nf_nat_pptp *nat_pptp_info;
194 194
195 ct_pptp_info = &nfct_help(ct)->help.ct_pptp_info; 195 ct_pptp_info = nfct_help_data(ct);
196 nat_pptp_info = &nfct_nat(ct)->help.nat_pptp_info; 196 nat_pptp_info = &nfct_nat(ct)->help.nat_pptp_info;
197 197
198 /* save original PAC call ID in nat_info */ 198 /* save original PAC call ID in nat_info */
diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c
index 746edec8b86..bac712293fd 100644
--- a/net/ipv4/netfilter/nf_nat_snmp_basic.c
+++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c
@@ -405,7 +405,7 @@ static unsigned char asn1_octets_decode(struct asn1_ctx *ctx,
405 405
406 ptr = *octets; 406 ptr = *octets;
407 while (ctx->pointer < eoc) { 407 while (ctx->pointer < eoc) {
408 if (!asn1_octet_decode(ctx, (unsigned char *)ptr++)) { 408 if (!asn1_octet_decode(ctx, ptr++)) {
409 kfree(*octets); 409 kfree(*octets);
410 *octets = NULL; 410 *octets = NULL;
411 return 0; 411 return 0;
@@ -759,7 +759,7 @@ static unsigned char snmp_object_decode(struct asn1_ctx *ctx,
759 } 759 }
760 break; 760 break;
761 case SNMP_OBJECTID: 761 case SNMP_OBJECTID:
762 if (!asn1_oid_decode(ctx, end, (unsigned long **)&lp, &len)) { 762 if (!asn1_oid_decode(ctx, end, &lp, &len)) {
763 kfree(id); 763 kfree(id);
764 return 0; 764 return 0;
765 } 765 }
diff --git a/net/ipv4/netfilter/nf_nat_tftp.c b/net/ipv4/netfilter/nf_nat_tftp.c
index a2901bf829c..9dbb8d284f9 100644
--- a/net/ipv4/netfilter/nf_nat_tftp.c
+++ b/net/ipv4/netfilter/nf_nat_tftp.c
@@ -8,10 +8,10 @@
8#include <linux/module.h> 8#include <linux/module.h>
9#include <linux/udp.h> 9#include <linux/udp.h>
10 10
11#include <net/netfilter/nf_nat_helper.h>
12#include <net/netfilter/nf_nat_rule.h>
13#include <net/netfilter/nf_conntrack_helper.h> 11#include <net/netfilter/nf_conntrack_helper.h>
14#include <net/netfilter/nf_conntrack_expect.h> 12#include <net/netfilter/nf_conntrack_expect.h>
13#include <net/netfilter/nf_nat_helper.h>
14#include <net/netfilter/nf_nat_rule.h>
15#include <linux/netfilter/nf_conntrack_tftp.h> 15#include <linux/netfilter/nf_conntrack_tftp.h>
16 16
17MODULE_AUTHOR("Magnus Boden <mb@ozaba.mine.nu>"); 17MODULE_AUTHOR("Magnus Boden <mb@ozaba.mine.nu>");
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index 2c00e8bf684..340fcf29a96 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -371,6 +371,7 @@ void ping_err(struct sk_buff *skb, u32 info)
371 break; 371 break;
372 case ICMP_DEST_UNREACH: 372 case ICMP_DEST_UNREACH:
373 if (code == ICMP_FRAG_NEEDED) { /* Path MTU discovery */ 373 if (code == ICMP_FRAG_NEEDED) { /* Path MTU discovery */
374 ipv4_sk_update_pmtu(skb, sk, info);
374 if (inet_sock->pmtudisc != IP_PMTUDISC_DONT) { 375 if (inet_sock->pmtudisc != IP_PMTUDISC_DONT) {
375 err = EMSGSIZE; 376 err = EMSGSIZE;
376 harderr = 1; 377 harderr = 1;
diff --git a/net/ipv4/protocol.c b/net/ipv4/protocol.c
index 9ae5c01cd0b..8918eff1426 100644
--- a/net/ipv4/protocol.c
+++ b/net/ipv4/protocol.c
@@ -36,9 +36,7 @@ const struct net_protocol __rcu *inet_protos[MAX_INET_PROTOS] __read_mostly;
36 36
37int inet_add_protocol(const struct net_protocol *prot, unsigned char protocol) 37int inet_add_protocol(const struct net_protocol *prot, unsigned char protocol)
38{ 38{
39 int hash = protocol & (MAX_INET_PROTOS - 1); 39 return !cmpxchg((const struct net_protocol **)&inet_protos[protocol],
40
41 return !cmpxchg((const struct net_protocol **)&inet_protos[hash],
42 NULL, prot) ? 0 : -1; 40 NULL, prot) ? 0 : -1;
43} 41}
44EXPORT_SYMBOL(inet_add_protocol); 42EXPORT_SYMBOL(inet_add_protocol);
@@ -49,9 +47,9 @@ EXPORT_SYMBOL(inet_add_protocol);
49 47
50int inet_del_protocol(const struct net_protocol *prot, unsigned char protocol) 48int inet_del_protocol(const struct net_protocol *prot, unsigned char protocol)
51{ 49{
52 int ret, hash = protocol & (MAX_INET_PROTOS - 1); 50 int ret;
53 51
54 ret = (cmpxchg((const struct net_protocol **)&inet_protos[hash], 52 ret = (cmpxchg((const struct net_protocol **)&inet_protos[protocol],
55 prot, NULL) == prot) ? 0 : -1; 53 prot, NULL) == prot) ? 0 : -1;
56 54
57 synchronize_net(); 55 synchronize_net();
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 4032b818f3e..659ddfb1094 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -216,6 +216,9 @@ static void raw_err(struct sock *sk, struct sk_buff *skb, u32 info)
216 int err = 0; 216 int err = 0;
217 int harderr = 0; 217 int harderr = 0;
218 218
219 if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED)
220 ipv4_sk_update_pmtu(skb, sk, info);
221
219 /* Report error on raw socket, if: 222 /* Report error on raw socket, if:
220 1. User requested ip_recverr. 223 1. User requested ip_recverr.
221 2. Socket is connected (otherwise the error indication 224 2. Socket is connected (otherwise the error indication
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 98b30d08efe..95bfa1ba5b2 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -158,40 +158,13 @@ static void ipv4_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
158 158
159static u32 *ipv4_cow_metrics(struct dst_entry *dst, unsigned long old) 159static u32 *ipv4_cow_metrics(struct dst_entry *dst, unsigned long old)
160{ 160{
161 struct rtable *rt = (struct rtable *) dst; 161 WARN_ON(1);
162 struct inet_peer *peer; 162 return NULL;
163 u32 *p = NULL;
164
165 if (!rt->peer)
166 rt_bind_peer(rt, rt->rt_dst, 1);
167
168 peer = rt->peer;
169 if (peer) {
170 u32 *old_p = __DST_METRICS_PTR(old);
171 unsigned long prev, new;
172
173 p = peer->metrics;
174 if (inet_metrics_new(peer))
175 memcpy(p, old_p, sizeof(u32) * RTAX_MAX);
176
177 new = (unsigned long) p;
178 prev = cmpxchg(&dst->_metrics, old, new);
179
180 if (prev != old) {
181 p = __DST_METRICS_PTR(prev);
182 if (prev & DST_METRICS_READ_ONLY)
183 p = NULL;
184 } else {
185 if (rt->fi) {
186 fib_info_put(rt->fi);
187 rt->fi = NULL;
188 }
189 }
190 }
191 return p;
192} 163}
193 164
194static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst, const void *daddr); 165static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
166 struct sk_buff *skb,
167 const void *daddr);
195 168
196static struct dst_ops ipv4_dst_ops = { 169static struct dst_ops ipv4_dst_ops = {
197 .family = AF_INET, 170 .family = AF_INET,
@@ -421,29 +394,19 @@ static int rt_cache_seq_show(struct seq_file *seq, void *v)
421 "HHUptod\tSpecDst"); 394 "HHUptod\tSpecDst");
422 else { 395 else {
423 struct rtable *r = v; 396 struct rtable *r = v;
424 struct neighbour *n; 397 int len;
425 int len, HHUptod;
426
427 rcu_read_lock();
428 n = dst_get_neighbour_noref(&r->dst);
429 HHUptod = (n && (n->nud_state & NUD_CONNECTED)) ? 1 : 0;
430 rcu_read_unlock();
431 398
432 seq_printf(seq, "%s\t%08X\t%08X\t%8X\t%d\t%u\t%d\t" 399 seq_printf(seq, "%s\t%08X\t%08X\t%8X\t%d\t%u\t%d\t"
433 "%08X\t%d\t%u\t%u\t%02X\t%d\t%1d\t%08X%n", 400 "%08X\t%d\t%u\t%u\t%02X\t%d\t%1d\t%08X%n",
434 r->dst.dev ? r->dst.dev->name : "*", 401 r->dst.dev ? r->dst.dev->name : "*",
435 (__force u32)r->rt_dst, 402 (__force u32)r->rt_dst,
436 (__force u32)r->rt_gateway, 403 (__force u32)r->rt_gateway,
437 r->rt_flags, atomic_read(&r->dst.__refcnt), 404 r->rt_flags, atomic_read(&r->dst.__refcnt),
438 r->dst.__use, 0, (__force u32)r->rt_src, 405 r->dst.__use, 0, (__force u32)r->rt_src,
439 dst_metric_advmss(&r->dst) + 40, 406 dst_metric_advmss(&r->dst) + 40,
440 dst_metric(&r->dst, RTAX_WINDOW), 407 dst_metric(&r->dst, RTAX_WINDOW), 0,
441 (int)((dst_metric(&r->dst, RTAX_RTT) >> 3) + 408 r->rt_key_tos,
442 dst_metric(&r->dst, RTAX_RTTVAR)), 409 -1, 0, 0, &len);
443 r->rt_key_tos,
444 -1,
445 HHUptod,
446 r->rt_spec_dst, &len);
447 410
448 seq_printf(seq, "%*s\n", 127 - len, ""); 411 seq_printf(seq, "%*s\n", 127 - len, "");
449 } 412 }
@@ -680,7 +643,7 @@ static inline int rt_fast_clean(struct rtable *rth)
680static inline int rt_valuable(struct rtable *rth) 643static inline int rt_valuable(struct rtable *rth)
681{ 644{
682 return (rth->rt_flags & (RTCF_REDIRECTED | RTCF_NOTIFY)) || 645 return (rth->rt_flags & (RTCF_REDIRECTED | RTCF_NOTIFY)) ||
683 (rth->peer && rth->peer->pmtu_expires); 646 rth->dst.expires;
684} 647}
685 648
686static int rt_may_expire(struct rtable *rth, unsigned long tmo1, unsigned long tmo2) 649static int rt_may_expire(struct rtable *rth, unsigned long tmo1, unsigned long tmo2)
@@ -873,34 +836,22 @@ static void rt_check_expire(void)
873 while ((rth = rcu_dereference_protected(*rthp, 836 while ((rth = rcu_dereference_protected(*rthp,
874 lockdep_is_held(rt_hash_lock_addr(i)))) != NULL) { 837 lockdep_is_held(rt_hash_lock_addr(i)))) != NULL) {
875 prefetch(rth->dst.rt_next); 838 prefetch(rth->dst.rt_next);
876 if (rt_is_expired(rth)) { 839 if (rt_is_expired(rth) ||
840 rt_may_expire(rth, tmo, ip_rt_gc_timeout)) {
877 *rthp = rth->dst.rt_next; 841 *rthp = rth->dst.rt_next;
878 rt_free(rth); 842 rt_free(rth);
879 continue; 843 continue;
880 } 844 }
881 if (rth->dst.expires) {
882 /* Entry is expired even if it is in use */
883 if (time_before_eq(jiffies, rth->dst.expires)) {
884nofree:
885 tmo >>= 1;
886 rthp = &rth->dst.rt_next;
887 /*
888 * We only count entries on
889 * a chain with equal hash inputs once
890 * so that entries for different QOS
891 * levels, and other non-hash input
892 * attributes don't unfairly skew
893 * the length computation
894 */
895 length += has_noalias(rt_hash_table[i].chain, rth);
896 continue;
897 }
898 } else if (!rt_may_expire(rth, tmo, ip_rt_gc_timeout))
899 goto nofree;
900 845
901 /* Cleanup aged off entries. */ 846 /* We only count entries on a chain with equal
902 *rthp = rth->dst.rt_next; 847 * hash inputs once so that entries for
903 rt_free(rth); 848 * different QOS levels, and other non-hash
849 * input attributes don't unfairly skew the
850 * length computation
851 */
852 tmo >>= 1;
853 rthp = &rth->dst.rt_next;
854 length += has_noalias(rt_hash_table[i].chain, rth);
904 } 855 }
905 spin_unlock_bh(rt_hash_lock_addr(i)); 856 spin_unlock_bh(rt_hash_lock_addr(i));
906 sum += length; 857 sum += length;
@@ -938,7 +889,6 @@ static void rt_cache_invalidate(struct net *net)
938 889
939 get_random_bytes(&shuffle, sizeof(shuffle)); 890 get_random_bytes(&shuffle, sizeof(shuffle));
940 atomic_add(shuffle + 1U, &net->ipv4.rt_genid); 891 atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
941 inetpeer_invalidate_tree(AF_INET);
942} 892}
943 893
944/* 894/*
@@ -1111,20 +1061,20 @@ static int slow_chain_length(const struct rtable *head)
1111 return length >> FRACT_BITS; 1061 return length >> FRACT_BITS;
1112} 1062}
1113 1063
1114static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst, const void *daddr) 1064static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
1065 struct sk_buff *skb,
1066 const void *daddr)
1115{ 1067{
1116 static const __be32 inaddr_any = 0;
1117 struct net_device *dev = dst->dev; 1068 struct net_device *dev = dst->dev;
1118 const __be32 *pkey = daddr; 1069 const __be32 *pkey = daddr;
1119 const struct rtable *rt; 1070 const struct rtable *rt;
1120 struct neighbour *n; 1071 struct neighbour *n;
1121 1072
1122 rt = (const struct rtable *) dst; 1073 rt = (const struct rtable *) dst;
1123 1074 if (rt->rt_gateway)
1124 if (dev->flags & (IFF_LOOPBACK | IFF_POINTOPOINT))
1125 pkey = &inaddr_any;
1126 else if (rt->rt_gateway)
1127 pkey = (const __be32 *) &rt->rt_gateway; 1075 pkey = (const __be32 *) &rt->rt_gateway;
1076 else if (skb)
1077 pkey = &ip_hdr(skb)->daddr;
1128 1078
1129 n = __ipv4_neigh_lookup(dev, *(__force u32 *)pkey); 1079 n = __ipv4_neigh_lookup(dev, *(__force u32 *)pkey);
1130 if (n) 1080 if (n)
@@ -1132,16 +1082,6 @@ static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst, const vo
1132 return neigh_create(&arp_tbl, pkey, dev); 1082 return neigh_create(&arp_tbl, pkey, dev);
1133} 1083}
1134 1084
1135static int rt_bind_neighbour(struct rtable *rt)
1136{
1137 struct neighbour *n = ipv4_neigh_lookup(&rt->dst, &rt->rt_gateway);
1138 if (IS_ERR(n))
1139 return PTR_ERR(n);
1140 dst_set_neighbour(&rt->dst, n);
1141
1142 return 0;
1143}
1144
1145static struct rtable *rt_intern_hash(unsigned int hash, struct rtable *rt, 1085static struct rtable *rt_intern_hash(unsigned int hash, struct rtable *rt,
1146 struct sk_buff *skb, int ifindex) 1086 struct sk_buff *skb, int ifindex)
1147{ 1087{
@@ -1150,7 +1090,6 @@ static struct rtable *rt_intern_hash(unsigned int hash, struct rtable *rt,
1150 unsigned long now; 1090 unsigned long now;
1151 u32 min_score; 1091 u32 min_score;
1152 int chain_length; 1092 int chain_length;
1153 int attempts = !in_softirq();
1154 1093
1155restart: 1094restart:
1156 chain_length = 0; 1095 chain_length = 0;
@@ -1159,7 +1098,7 @@ restart:
1159 candp = NULL; 1098 candp = NULL;
1160 now = jiffies; 1099 now = jiffies;
1161 1100
1162 if (!rt_caching(dev_net(rt->dst.dev))) { 1101 if (!rt_caching(dev_net(rt->dst.dev)) || (rt->dst.flags & DST_NOCACHE)) {
1163 /* 1102 /*
1164 * If we're not caching, just tell the caller we 1103 * If we're not caching, just tell the caller we
1165 * were successful and don't touch the route. The 1104 * were successful and don't touch the route. The
@@ -1177,15 +1116,6 @@ restart:
1177 */ 1116 */
1178 1117
1179 rt->dst.flags |= DST_NOCACHE; 1118 rt->dst.flags |= DST_NOCACHE;
1180 if (rt->rt_type == RTN_UNICAST || rt_is_output_route(rt)) {
1181 int err = rt_bind_neighbour(rt);
1182 if (err) {
1183 net_warn_ratelimited("Neighbour table failure & not caching routes\n");
1184 ip_rt_put(rt);
1185 return ERR_PTR(err);
1186 }
1187 }
1188
1189 goto skip_hashing; 1119 goto skip_hashing;
1190 } 1120 }
1191 1121
@@ -1268,40 +1198,6 @@ restart:
1268 } 1198 }
1269 } 1199 }
1270 1200
1271 /* Try to bind route to arp only if it is output
1272 route or unicast forwarding path.
1273 */
1274 if (rt->rt_type == RTN_UNICAST || rt_is_output_route(rt)) {
1275 int err = rt_bind_neighbour(rt);
1276 if (err) {
1277 spin_unlock_bh(rt_hash_lock_addr(hash));
1278
1279 if (err != -ENOBUFS) {
1280 rt_drop(rt);
1281 return ERR_PTR(err);
1282 }
1283
1284 /* Neighbour tables are full and nothing
1285 can be released. Try to shrink route cache,
1286 it is most likely it holds some neighbour records.
1287 */
1288 if (attempts-- > 0) {
1289 int saved_elasticity = ip_rt_gc_elasticity;
1290 int saved_int = ip_rt_gc_min_interval;
1291 ip_rt_gc_elasticity = 1;
1292 ip_rt_gc_min_interval = 0;
1293 rt_garbage_collect(&ipv4_dst_ops);
1294 ip_rt_gc_min_interval = saved_int;
1295 ip_rt_gc_elasticity = saved_elasticity;
1296 goto restart;
1297 }
1298
1299 net_warn_ratelimited("Neighbour table overflow\n");
1300 rt_drop(rt);
1301 return ERR_PTR(-ENOBUFS);
1302 }
1303 }
1304
1305 rt->dst.rt_next = rt_hash_table[hash].chain; 1201 rt->dst.rt_next = rt_hash_table[hash].chain;
1306 1202
1307 /* 1203 /*
@@ -1319,25 +1215,6 @@ skip_hashing:
1319 return rt; 1215 return rt;
1320} 1216}
1321 1217
1322static atomic_t __rt_peer_genid = ATOMIC_INIT(0);
1323
1324static u32 rt_peer_genid(void)
1325{
1326 return atomic_read(&__rt_peer_genid);
1327}
1328
1329void rt_bind_peer(struct rtable *rt, __be32 daddr, int create)
1330{
1331 struct inet_peer *peer;
1332
1333 peer = inet_getpeer_v4(daddr, create);
1334
1335 if (peer && cmpxchg(&rt->peer, NULL, peer) != NULL)
1336 inet_putpeer(peer);
1337 else
1338 rt->rt_peer_genid = rt_peer_genid();
1339}
1340
1341/* 1218/*
1342 * Peer allocation may fail only in serious out-of-memory conditions. However 1219 * Peer allocation may fail only in serious out-of-memory conditions. However
1343 * we still can generate some output. 1220 * we still can generate some output.
@@ -1360,21 +1237,15 @@ static void ip_select_fb_ident(struct iphdr *iph)
1360 1237
1361void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more) 1238void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more)
1362{ 1239{
1363 struct rtable *rt = (struct rtable *) dst; 1240 struct net *net = dev_net(dst->dev);
1364 1241 struct inet_peer *peer;
1365 if (rt && !(rt->dst.flags & DST_NOPEER)) {
1366 if (rt->peer == NULL)
1367 rt_bind_peer(rt, rt->rt_dst, 1);
1368 1242
1369 /* If peer is attached to destination, it is never detached, 1243 peer = inet_getpeer_v4(net->ipv4.peers, iph->daddr, 1);
1370 so that we need not to grab a lock to dereference it. 1244 if (peer) {
1371 */ 1245 iph->id = htons(inet_getid(peer, more));
1372 if (rt->peer) { 1246 inet_putpeer(peer);
1373 iph->id = htons(inet_getid(rt->peer, more)); 1247 return;
1374 return; 1248 }
1375 }
1376 } else if (!rt)
1377 pr_debug("rt_bind_peer(0) @%p\n", __builtin_return_address(0));
1378 1249
1379 ip_select_fb_ident(iph); 1250 ip_select_fb_ident(iph);
1380} 1251}
@@ -1400,32 +1271,6 @@ static void rt_del(unsigned int hash, struct rtable *rt)
1400 spin_unlock_bh(rt_hash_lock_addr(hash)); 1271 spin_unlock_bh(rt_hash_lock_addr(hash));
1401} 1272}
1402 1273
1403static void check_peer_redir(struct dst_entry *dst, struct inet_peer *peer)
1404{
1405 struct rtable *rt = (struct rtable *) dst;
1406 __be32 orig_gw = rt->rt_gateway;
1407 struct neighbour *n, *old_n;
1408
1409 dst_confirm(&rt->dst);
1410
1411 rt->rt_gateway = peer->redirect_learned.a4;
1412
1413 n = ipv4_neigh_lookup(&rt->dst, &rt->rt_gateway);
1414 if (IS_ERR(n)) {
1415 rt->rt_gateway = orig_gw;
1416 return;
1417 }
1418 old_n = xchg(&rt->dst._neighbour, n);
1419 if (old_n)
1420 neigh_release(old_n);
1421 if (!(n->nud_state & NUD_VALID)) {
1422 neigh_event_send(n, NULL);
1423 } else {
1424 rt->rt_flags |= RTCF_REDIRECTED;
1425 call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, n);
1426 }
1427}
1428
1429/* called in rcu_read_lock() section */ 1274/* called in rcu_read_lock() section */
1430void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw, 1275void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
1431 __be32 saddr, struct net_device *dev) 1276 __be32 saddr, struct net_device *dev)
@@ -1434,7 +1279,6 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
1434 struct in_device *in_dev = __in_dev_get_rcu(dev); 1279 struct in_device *in_dev = __in_dev_get_rcu(dev);
1435 __be32 skeys[2] = { saddr, 0 }; 1280 __be32 skeys[2] = { saddr, 0 };
1436 int ikeys[2] = { dev->ifindex, 0 }; 1281 int ikeys[2] = { dev->ifindex, 0 };
1437 struct inet_peer *peer;
1438 struct net *net; 1282 struct net *net;
1439 1283
1440 if (!in_dev) 1284 if (!in_dev)
@@ -1467,6 +1311,8 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
1467 rthp = &rt_hash_table[hash].chain; 1311 rthp = &rt_hash_table[hash].chain;
1468 1312
1469 while ((rt = rcu_dereference(*rthp)) != NULL) { 1313 while ((rt = rcu_dereference(*rthp)) != NULL) {
1314 struct neighbour *n;
1315
1470 rthp = &rt->dst.rt_next; 1316 rthp = &rt->dst.rt_next;
1471 1317
1472 if (rt->rt_key_dst != daddr || 1318 if (rt->rt_key_dst != daddr ||
@@ -1480,16 +1326,16 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
1480 rt->rt_gateway != old_gw) 1326 rt->rt_gateway != old_gw)
1481 continue; 1327 continue;
1482 1328
1483 if (!rt->peer) 1329 n = ipv4_neigh_lookup(&rt->dst, NULL, &new_gw);
1484 rt_bind_peer(rt, rt->rt_dst, 1); 1330 if (n) {
1485 1331 if (!(n->nud_state & NUD_VALID)) {
1486 peer = rt->peer; 1332 neigh_event_send(n, NULL);
1487 if (peer) { 1333 } else {
1488 if (peer->redirect_learned.a4 != new_gw) { 1334 rt->rt_gateway = new_gw;
1489 peer->redirect_learned.a4 = new_gw; 1335 rt->rt_flags |= RTCF_REDIRECTED;
1490 atomic_inc(&__rt_peer_genid); 1336 call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, n);
1491 } 1337 }
1492 check_peer_redir(&rt->dst, peer); 1338 neigh_release(n);
1493 } 1339 }
1494 } 1340 }
1495 } 1341 }
@@ -1507,23 +1353,6 @@ reject_redirect:
1507 ; 1353 ;
1508} 1354}
1509 1355
1510static bool peer_pmtu_expired(struct inet_peer *peer)
1511{
1512 unsigned long orig = ACCESS_ONCE(peer->pmtu_expires);
1513
1514 return orig &&
1515 time_after_eq(jiffies, orig) &&
1516 cmpxchg(&peer->pmtu_expires, orig, 0) == orig;
1517}
1518
1519static bool peer_pmtu_cleaned(struct inet_peer *peer)
1520{
1521 unsigned long orig = ACCESS_ONCE(peer->pmtu_expires);
1522
1523 return orig &&
1524 cmpxchg(&peer->pmtu_expires, orig, 0) == orig;
1525}
1526
1527static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst) 1356static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
1528{ 1357{
1529 struct rtable *rt = (struct rtable *)dst; 1358 struct rtable *rt = (struct rtable *)dst;
@@ -1533,14 +1362,13 @@ static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
1533 if (dst->obsolete > 0) { 1362 if (dst->obsolete > 0) {
1534 ip_rt_put(rt); 1363 ip_rt_put(rt);
1535 ret = NULL; 1364 ret = NULL;
1536 } else if (rt->rt_flags & RTCF_REDIRECTED) { 1365 } else if ((rt->rt_flags & RTCF_REDIRECTED) ||
1366 rt->dst.expires) {
1537 unsigned int hash = rt_hash(rt->rt_key_dst, rt->rt_key_src, 1367 unsigned int hash = rt_hash(rt->rt_key_dst, rt->rt_key_src,
1538 rt->rt_oif, 1368 rt->rt_oif,
1539 rt_genid(dev_net(dst->dev))); 1369 rt_genid(dev_net(dst->dev)));
1540 rt_del(hash, rt); 1370 rt_del(hash, rt);
1541 ret = NULL; 1371 ret = NULL;
1542 } else if (rt->peer && peer_pmtu_expired(rt->peer)) {
1543 dst_metric_set(dst, RTAX_MTU, rt->peer->pmtu_orig);
1544 } 1372 }
1545 } 1373 }
1546 return ret; 1374 return ret;
@@ -1567,6 +1395,7 @@ void ip_rt_send_redirect(struct sk_buff *skb)
1567 struct rtable *rt = skb_rtable(skb); 1395 struct rtable *rt = skb_rtable(skb);
1568 struct in_device *in_dev; 1396 struct in_device *in_dev;
1569 struct inet_peer *peer; 1397 struct inet_peer *peer;
1398 struct net *net;
1570 int log_martians; 1399 int log_martians;
1571 1400
1572 rcu_read_lock(); 1401 rcu_read_lock();
@@ -1578,9 +1407,8 @@ void ip_rt_send_redirect(struct sk_buff *skb)
1578 log_martians = IN_DEV_LOG_MARTIANS(in_dev); 1407 log_martians = IN_DEV_LOG_MARTIANS(in_dev);
1579 rcu_read_unlock(); 1408 rcu_read_unlock();
1580 1409
1581 if (!rt->peer) 1410 net = dev_net(rt->dst.dev);
1582 rt_bind_peer(rt, rt->rt_dst, 1); 1411 peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr, 1);
1583 peer = rt->peer;
1584 if (!peer) { 1412 if (!peer) {
1585 icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, rt->rt_gateway); 1413 icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, rt->rt_gateway);
1586 return; 1414 return;
@@ -1597,7 +1425,7 @@ void ip_rt_send_redirect(struct sk_buff *skb)
1597 */ 1425 */
1598 if (peer->rate_tokens >= ip_rt_redirect_number) { 1426 if (peer->rate_tokens >= ip_rt_redirect_number) {
1599 peer->rate_last = jiffies; 1427 peer->rate_last = jiffies;
1600 return; 1428 goto out_put_peer;
1601 } 1429 }
1602 1430
1603 /* Check for load limit; set rate_last to the latest sent 1431 /* Check for load limit; set rate_last to the latest sent
@@ -1618,16 +1446,34 @@ void ip_rt_send_redirect(struct sk_buff *skb)
1618 &rt->rt_dst, &rt->rt_gateway); 1446 &rt->rt_dst, &rt->rt_gateway);
1619#endif 1447#endif
1620 } 1448 }
1449out_put_peer:
1450 inet_putpeer(peer);
1621} 1451}
1622 1452
1623static int ip_error(struct sk_buff *skb) 1453static int ip_error(struct sk_buff *skb)
1624{ 1454{
1455 struct in_device *in_dev = __in_dev_get_rcu(skb->dev);
1625 struct rtable *rt = skb_rtable(skb); 1456 struct rtable *rt = skb_rtable(skb);
1626 struct inet_peer *peer; 1457 struct inet_peer *peer;
1627 unsigned long now; 1458 unsigned long now;
1459 struct net *net;
1628 bool send; 1460 bool send;
1629 int code; 1461 int code;
1630 1462
1463 net = dev_net(rt->dst.dev);
1464 if (!IN_DEV_FORWARD(in_dev)) {
1465 switch (rt->dst.error) {
1466 case EHOSTUNREACH:
1467 IP_INC_STATS_BH(net, IPSTATS_MIB_INADDRERRORS);
1468 break;
1469
1470 case ENETUNREACH:
1471 IP_INC_STATS_BH(net, IPSTATS_MIB_INNOROUTES);
1472 break;
1473 }
1474 goto out;
1475 }
1476
1631 switch (rt->dst.error) { 1477 switch (rt->dst.error) {
1632 case EINVAL: 1478 case EINVAL:
1633 default: 1479 default:
@@ -1637,17 +1483,14 @@ static int ip_error(struct sk_buff *skb)
1637 break; 1483 break;
1638 case ENETUNREACH: 1484 case ENETUNREACH:
1639 code = ICMP_NET_UNREACH; 1485 code = ICMP_NET_UNREACH;
1640 IP_INC_STATS_BH(dev_net(rt->dst.dev), 1486 IP_INC_STATS_BH(net, IPSTATS_MIB_INNOROUTES);
1641 IPSTATS_MIB_INNOROUTES);
1642 break; 1487 break;
1643 case EACCES: 1488 case EACCES:
1644 code = ICMP_PKT_FILTERED; 1489 code = ICMP_PKT_FILTERED;
1645 break; 1490 break;
1646 } 1491 }
1647 1492
1648 if (!rt->peer) 1493 peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr, 1);
1649 rt_bind_peer(rt, rt->rt_dst, 1);
1650 peer = rt->peer;
1651 1494
1652 send = true; 1495 send = true;
1653 if (peer) { 1496 if (peer) {
@@ -1660,6 +1503,7 @@ static int ip_error(struct sk_buff *skb)
1660 peer->rate_tokens -= ip_rt_error_cost; 1503 peer->rate_tokens -= ip_rt_error_cost;
1661 else 1504 else
1662 send = false; 1505 send = false;
1506 inet_putpeer(peer);
1663 } 1507 }
1664 if (send) 1508 if (send)
1665 icmp_send(skb, ICMP_DEST_UNREACH, code, 0); 1509 icmp_send(skb, ICMP_DEST_UNREACH, code, 0);
@@ -1668,136 +1512,47 @@ out: kfree_skb(skb);
1668 return 0; 1512 return 0;
1669} 1513}
1670 1514
1671/*
1672 * The last two values are not from the RFC but
1673 * are needed for AMPRnet AX.25 paths.
1674 */
1675
1676static const unsigned short mtu_plateau[] =
1677{32000, 17914, 8166, 4352, 2002, 1492, 576, 296, 216, 128 };
1678
1679static inline unsigned short guess_mtu(unsigned short old_mtu)
1680{
1681 int i;
1682
1683 for (i = 0; i < ARRAY_SIZE(mtu_plateau); i++)
1684 if (old_mtu > mtu_plateau[i])
1685 return mtu_plateau[i];
1686 return 68;
1687}
1688
1689unsigned short ip_rt_frag_needed(struct net *net, const struct iphdr *iph,
1690 unsigned short new_mtu,
1691 struct net_device *dev)
1692{
1693 unsigned short old_mtu = ntohs(iph->tot_len);
1694 unsigned short est_mtu = 0;
1695 struct inet_peer *peer;
1696
1697 peer = inet_getpeer_v4(iph->daddr, 1);
1698 if (peer) {
1699 unsigned short mtu = new_mtu;
1700
1701 if (new_mtu < 68 || new_mtu >= old_mtu) {
1702 /* BSD 4.2 derived systems incorrectly adjust
1703 * tot_len by the IP header length, and report
1704 * a zero MTU in the ICMP message.
1705 */
1706 if (mtu == 0 &&
1707 old_mtu >= 68 + (iph->ihl << 2))
1708 old_mtu -= iph->ihl << 2;
1709 mtu = guess_mtu(old_mtu);
1710 }
1711
1712 if (mtu < ip_rt_min_pmtu)
1713 mtu = ip_rt_min_pmtu;
1714 if (!peer->pmtu_expires || mtu < peer->pmtu_learned) {
1715 unsigned long pmtu_expires;
1716
1717 pmtu_expires = jiffies + ip_rt_mtu_expires;
1718 if (!pmtu_expires)
1719 pmtu_expires = 1UL;
1720
1721 est_mtu = mtu;
1722 peer->pmtu_learned = mtu;
1723 peer->pmtu_expires = pmtu_expires;
1724 atomic_inc(&__rt_peer_genid);
1725 }
1726
1727 inet_putpeer(peer);
1728 }
1729 return est_mtu ? : new_mtu;
1730}
1731
1732static void check_peer_pmtu(struct dst_entry *dst, struct inet_peer *peer)
1733{
1734 unsigned long expires = ACCESS_ONCE(peer->pmtu_expires);
1735
1736 if (!expires)
1737 return;
1738 if (time_before(jiffies, expires)) {
1739 u32 orig_dst_mtu = dst_mtu(dst);
1740 if (peer->pmtu_learned < orig_dst_mtu) {
1741 if (!peer->pmtu_orig)
1742 peer->pmtu_orig = dst_metric_raw(dst, RTAX_MTU);
1743 dst_metric_set(dst, RTAX_MTU, peer->pmtu_learned);
1744 }
1745 } else if (cmpxchg(&peer->pmtu_expires, expires, 0) == expires)
1746 dst_metric_set(dst, RTAX_MTU, peer->pmtu_orig);
1747}
1748
1749static void ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu) 1515static void ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu)
1750{ 1516{
1751 struct rtable *rt = (struct rtable *) dst; 1517 struct rtable *rt = (struct rtable *) dst;
1752 struct inet_peer *peer;
1753 1518
1754 dst_confirm(dst); 1519 dst_confirm(dst);
1755 1520
1756 if (!rt->peer) 1521 if (mtu < ip_rt_min_pmtu)
1757 rt_bind_peer(rt, rt->rt_dst, 1); 1522 mtu = ip_rt_min_pmtu;
1758 peer = rt->peer;
1759 if (peer) {
1760 unsigned long pmtu_expires = ACCESS_ONCE(peer->pmtu_expires);
1761
1762 if (mtu < ip_rt_min_pmtu)
1763 mtu = ip_rt_min_pmtu;
1764 if (!pmtu_expires || mtu < peer->pmtu_learned) {
1765 1523
1766 pmtu_expires = jiffies + ip_rt_mtu_expires; 1524 rt->rt_pmtu = mtu;
1767 if (!pmtu_expires) 1525 dst_set_expires(&rt->dst, ip_rt_mtu_expires);
1768 pmtu_expires = 1UL; 1526}
1769 1527
1770 peer->pmtu_learned = mtu; 1528void ipv4_update_pmtu(struct sk_buff *skb, struct net *net, u32 mtu,
1771 peer->pmtu_expires = pmtu_expires; 1529 int oif, u32 mark, u8 protocol, int flow_flags)
1530{
1531 const struct iphdr *iph = (const struct iphdr *)skb->data;
1532 struct flowi4 fl4;
1533 struct rtable *rt;
1772 1534
1773 atomic_inc(&__rt_peer_genid); 1535 flowi4_init_output(&fl4, oif, mark, RT_TOS(iph->tos), RT_SCOPE_UNIVERSE,
1774 rt->rt_peer_genid = rt_peer_genid(); 1536 protocol, flow_flags,
1775 } 1537 iph->daddr, iph->saddr, 0, 0);
1776 check_peer_pmtu(dst, peer); 1538 rt = __ip_route_output_key(net, &fl4);
1539 if (!IS_ERR(rt)) {
1540 ip_rt_update_pmtu(&rt->dst, mtu);
1541 ip_rt_put(rt);
1777 } 1542 }
1778} 1543}
1544EXPORT_SYMBOL_GPL(ipv4_update_pmtu);
1779 1545
1780 1546void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
1781static void ipv4_validate_peer(struct rtable *rt)
1782{ 1547{
1783 if (rt->rt_peer_genid != rt_peer_genid()) { 1548 const struct inet_sock *inet = inet_sk(sk);
1784 struct inet_peer *peer;
1785
1786 if (!rt->peer)
1787 rt_bind_peer(rt, rt->rt_dst, 0);
1788
1789 peer = rt->peer;
1790 if (peer) {
1791 check_peer_pmtu(&rt->dst, peer);
1792 1549
1793 if (peer->redirect_learned.a4 && 1550 return ipv4_update_pmtu(skb, sock_net(sk), mtu,
1794 peer->redirect_learned.a4 != rt->rt_gateway) 1551 sk->sk_bound_dev_if, sk->sk_mark,
1795 check_peer_redir(&rt->dst, peer); 1552 inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol,
1796 } 1553 inet_sk_flowi_flags(sk));
1797
1798 rt->rt_peer_genid = rt_peer_genid();
1799 }
1800} 1554}
1555EXPORT_SYMBOL_GPL(ipv4_sk_update_pmtu);
1801 1556
1802static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie) 1557static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
1803{ 1558{
@@ -1805,23 +1560,17 @@ static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
1805 1560
1806 if (rt_is_expired(rt)) 1561 if (rt_is_expired(rt))
1807 return NULL; 1562 return NULL;
1808 ipv4_validate_peer(rt);
1809 return dst; 1563 return dst;
1810} 1564}
1811 1565
1812static void ipv4_dst_destroy(struct dst_entry *dst) 1566static void ipv4_dst_destroy(struct dst_entry *dst)
1813{ 1567{
1814 struct rtable *rt = (struct rtable *) dst; 1568 struct rtable *rt = (struct rtable *) dst;
1815 struct inet_peer *peer = rt->peer;
1816 1569
1817 if (rt->fi) { 1570 if (rt->fi) {
1818 fib_info_put(rt->fi); 1571 fib_info_put(rt->fi);
1819 rt->fi = NULL; 1572 rt->fi = NULL;
1820 } 1573 }
1821 if (peer) {
1822 rt->peer = NULL;
1823 inet_putpeer(peer);
1824 }
1825} 1574}
1826 1575
1827 1576
@@ -1832,8 +1581,8 @@ static void ipv4_link_failure(struct sk_buff *skb)
1832 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0); 1581 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);
1833 1582
1834 rt = skb_rtable(skb); 1583 rt = skb_rtable(skb);
1835 if (rt && rt->peer && peer_pmtu_cleaned(rt->peer)) 1584 if (rt)
1836 dst_metric_set(&rt->dst, RTAX_MTU, rt->peer->pmtu_orig); 1585 dst_set_expires(&rt->dst, 0);
1837} 1586}
1838 1587
1839static int ip_rt_bug(struct sk_buff *skb) 1588static int ip_rt_bug(struct sk_buff *skb)
@@ -1913,7 +1662,13 @@ static unsigned int ipv4_default_advmss(const struct dst_entry *dst)
1913static unsigned int ipv4_mtu(const struct dst_entry *dst) 1662static unsigned int ipv4_mtu(const struct dst_entry *dst)
1914{ 1663{
1915 const struct rtable *rt = (const struct rtable *) dst; 1664 const struct rtable *rt = (const struct rtable *) dst;
1916 unsigned int mtu = dst_metric_raw(dst, RTAX_MTU); 1665 unsigned int mtu = rt->rt_pmtu;
1666
1667 if (mtu && time_after_eq(jiffies, rt->dst.expires))
1668 mtu = 0;
1669
1670 if (!mtu)
1671 mtu = dst_metric_raw(dst, RTAX_MTU);
1917 1672
1918 if (mtu && rt_is_output_route(rt)) 1673 if (mtu && rt_is_output_route(rt))
1919 return mtu; 1674 return mtu;
@@ -1935,60 +1690,27 @@ static unsigned int ipv4_mtu(const struct dst_entry *dst)
1935static void rt_init_metrics(struct rtable *rt, const struct flowi4 *fl4, 1690static void rt_init_metrics(struct rtable *rt, const struct flowi4 *fl4,
1936 struct fib_info *fi) 1691 struct fib_info *fi)
1937{ 1692{
1938 struct inet_peer *peer; 1693 if (fi->fib_metrics != (u32 *) dst_default_metrics) {
1939 int create = 0; 1694 rt->fi = fi;
1940 1695 atomic_inc(&fi->fib_clntref);
1941 /* If a peer entry exists for this destination, we must hook
1942 * it up in order to get at cached metrics.
1943 */
1944 if (fl4 && (fl4->flowi4_flags & FLOWI_FLAG_PRECOW_METRICS))
1945 create = 1;
1946
1947 rt->peer = peer = inet_getpeer_v4(rt->rt_dst, create);
1948 if (peer) {
1949 rt->rt_peer_genid = rt_peer_genid();
1950 if (inet_metrics_new(peer))
1951 memcpy(peer->metrics, fi->fib_metrics,
1952 sizeof(u32) * RTAX_MAX);
1953 dst_init_metrics(&rt->dst, peer->metrics, false);
1954
1955 check_peer_pmtu(&rt->dst, peer);
1956
1957 if (peer->redirect_learned.a4 &&
1958 peer->redirect_learned.a4 != rt->rt_gateway) {
1959 rt->rt_gateway = peer->redirect_learned.a4;
1960 rt->rt_flags |= RTCF_REDIRECTED;
1961 }
1962 } else {
1963 if (fi->fib_metrics != (u32 *) dst_default_metrics) {
1964 rt->fi = fi;
1965 atomic_inc(&fi->fib_clntref);
1966 }
1967 dst_init_metrics(&rt->dst, fi->fib_metrics, true);
1968 } 1696 }
1697 dst_init_metrics(&rt->dst, fi->fib_metrics, true);
1969} 1698}
1970 1699
1971static void rt_set_nexthop(struct rtable *rt, const struct flowi4 *fl4, 1700static void rt_set_nexthop(struct rtable *rt, const struct flowi4 *fl4,
1972 const struct fib_result *res, 1701 const struct fib_result *res,
1973 struct fib_info *fi, u16 type, u32 itag) 1702 struct fib_info *fi, u16 type, u32 itag)
1974{ 1703{
1975 struct dst_entry *dst = &rt->dst;
1976
1977 if (fi) { 1704 if (fi) {
1978 if (FIB_RES_GW(*res) && 1705 if (FIB_RES_GW(*res) &&
1979 FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK) 1706 FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK)
1980 rt->rt_gateway = FIB_RES_GW(*res); 1707 rt->rt_gateway = FIB_RES_GW(*res);
1981 rt_init_metrics(rt, fl4, fi); 1708 rt_init_metrics(rt, fl4, fi);
1982#ifdef CONFIG_IP_ROUTE_CLASSID 1709#ifdef CONFIG_IP_ROUTE_CLASSID
1983 dst->tclassid = FIB_RES_NH(*res).nh_tclassid; 1710 rt->dst.tclassid = FIB_RES_NH(*res).nh_tclassid;
1984#endif 1711#endif
1985 } 1712 }
1986 1713
1987 if (dst_mtu(dst) > IP_MAX_MTU)
1988 dst_metric_set(dst, RTAX_MTU, IP_MAX_MTU);
1989 if (dst_metric_raw(dst, RTAX_ADVMSS) > 65535 - 40)
1990 dst_metric_set(dst, RTAX_ADVMSS, 65535 - 40);
1991
1992#ifdef CONFIG_IP_ROUTE_CLASSID 1714#ifdef CONFIG_IP_ROUTE_CLASSID
1993#ifdef CONFIG_IP_MULTIPLE_TABLES 1715#ifdef CONFIG_IP_MULTIPLE_TABLES
1994 set_class_tag(rt, fib_rules_tclass(res)); 1716 set_class_tag(rt, fib_rules_tclass(res));
@@ -2012,7 +1734,6 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
2012{ 1734{
2013 unsigned int hash; 1735 unsigned int hash;
2014 struct rtable *rth; 1736 struct rtable *rth;
2015 __be32 spec_dst;
2016 struct in_device *in_dev = __in_dev_get_rcu(dev); 1737 struct in_device *in_dev = __in_dev_get_rcu(dev);
2017 u32 itag = 0; 1738 u32 itag = 0;
2018 int err; 1739 int err;
@@ -2023,16 +1744,19 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
2023 return -EINVAL; 1744 return -EINVAL;
2024 1745
2025 if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) || 1746 if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
2026 ipv4_is_loopback(saddr) || skb->protocol != htons(ETH_P_IP)) 1747 skb->protocol != htons(ETH_P_IP))
2027 goto e_inval; 1748 goto e_inval;
2028 1749
1750 if (likely(!IN_DEV_ROUTE_LOCALNET(in_dev)))
1751 if (ipv4_is_loopback(saddr))
1752 goto e_inval;
1753
2029 if (ipv4_is_zeronet(saddr)) { 1754 if (ipv4_is_zeronet(saddr)) {
2030 if (!ipv4_is_local_multicast(daddr)) 1755 if (!ipv4_is_local_multicast(daddr))
2031 goto e_inval; 1756 goto e_inval;
2032 spec_dst = inet_select_addr(dev, 0, RT_SCOPE_LINK);
2033 } else { 1757 } else {
2034 err = fib_validate_source(skb, saddr, 0, tos, 0, dev, &spec_dst, 1758 err = fib_validate_source(skb, saddr, 0, tos, 0, dev,
2035 &itag); 1759 in_dev, &itag);
2036 if (err < 0) 1760 if (err < 0)
2037 goto e_err; 1761 goto e_err;
2038 } 1762 }
@@ -2058,10 +1782,8 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
2058 rth->rt_iif = dev->ifindex; 1782 rth->rt_iif = dev->ifindex;
2059 rth->rt_oif = 0; 1783 rth->rt_oif = 0;
2060 rth->rt_mark = skb->mark; 1784 rth->rt_mark = skb->mark;
1785 rth->rt_pmtu = 0;
2061 rth->rt_gateway = daddr; 1786 rth->rt_gateway = daddr;
2062 rth->rt_spec_dst= spec_dst;
2063 rth->rt_peer_genid = 0;
2064 rth->peer = NULL;
2065 rth->fi = NULL; 1787 rth->fi = NULL;
2066 if (our) { 1788 if (our) {
2067 rth->dst.input= ip_local_deliver; 1789 rth->dst.input= ip_local_deliver;
@@ -2123,7 +1845,6 @@ static int __mkroute_input(struct sk_buff *skb,
2123 int err; 1845 int err;
2124 struct in_device *out_dev; 1846 struct in_device *out_dev;
2125 unsigned int flags = 0; 1847 unsigned int flags = 0;
2126 __be32 spec_dst;
2127 u32 itag; 1848 u32 itag;
2128 1849
2129 /* get a working reference to the output device */ 1850 /* get a working reference to the output device */
@@ -2135,7 +1856,7 @@ static int __mkroute_input(struct sk_buff *skb,
2135 1856
2136 1857
2137 err = fib_validate_source(skb, saddr, daddr, tos, FIB_RES_OIF(*res), 1858 err = fib_validate_source(skb, saddr, daddr, tos, FIB_RES_OIF(*res),
2138 in_dev->dev, &spec_dst, &itag); 1859 in_dev->dev, in_dev, &itag);
2139 if (err < 0) { 1860 if (err < 0) {
2140 ip_handle_martian_source(in_dev->dev, in_dev, skb, daddr, 1861 ip_handle_martian_source(in_dev->dev, in_dev, skb, daddr,
2141 saddr); 1862 saddr);
@@ -2186,10 +1907,8 @@ static int __mkroute_input(struct sk_buff *skb,
2186 rth->rt_iif = in_dev->dev->ifindex; 1907 rth->rt_iif = in_dev->dev->ifindex;
2187 rth->rt_oif = 0; 1908 rth->rt_oif = 0;
2188 rth->rt_mark = skb->mark; 1909 rth->rt_mark = skb->mark;
1910 rth->rt_pmtu = 0;
2189 rth->rt_gateway = daddr; 1911 rth->rt_gateway = daddr;
2190 rth->rt_spec_dst= spec_dst;
2191 rth->rt_peer_genid = 0;
2192 rth->peer = NULL;
2193 rth->fi = NULL; 1912 rth->fi = NULL;
2194 1913
2195 rth->dst.input = ip_forward; 1914 rth->dst.input = ip_forward;
@@ -2253,7 +1972,6 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
2253 u32 itag = 0; 1972 u32 itag = 0;
2254 struct rtable *rth; 1973 struct rtable *rth;
2255 unsigned int hash; 1974 unsigned int hash;
2256 __be32 spec_dst;
2257 int err = -EINVAL; 1975 int err = -EINVAL;
2258 struct net *net = dev_net(dev); 1976 struct net *net = dev_net(dev);
2259 1977
@@ -2266,8 +1984,7 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
2266 by fib_lookup. 1984 by fib_lookup.
2267 */ 1985 */
2268 1986
2269 if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) || 1987 if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr))
2270 ipv4_is_loopback(saddr))
2271 goto martian_source; 1988 goto martian_source;
2272 1989
2273 if (ipv4_is_lbcast(daddr) || (saddr == 0 && daddr == 0)) 1990 if (ipv4_is_lbcast(daddr) || (saddr == 0 && daddr == 0))
@@ -2279,9 +1996,17 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
2279 if (ipv4_is_zeronet(saddr)) 1996 if (ipv4_is_zeronet(saddr))
2280 goto martian_source; 1997 goto martian_source;
2281 1998
2282 if (ipv4_is_zeronet(daddr) || ipv4_is_loopback(daddr)) 1999 if (ipv4_is_zeronet(daddr))
2283 goto martian_destination; 2000 goto martian_destination;
2284 2001
2002 if (likely(!IN_DEV_ROUTE_LOCALNET(in_dev))) {
2003 if (ipv4_is_loopback(daddr))
2004 goto martian_destination;
2005
2006 if (ipv4_is_loopback(saddr))
2007 goto martian_source;
2008 }
2009
2285 /* 2010 /*
2286 * Now we are ready to route packet. 2011 * Now we are ready to route packet.
2287 */ 2012 */
@@ -2293,11 +2018,8 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
2293 fl4.daddr = daddr; 2018 fl4.daddr = daddr;
2294 fl4.saddr = saddr; 2019 fl4.saddr = saddr;
2295 err = fib_lookup(net, &fl4, &res); 2020 err = fib_lookup(net, &fl4, &res);
2296 if (err != 0) { 2021 if (err != 0)
2297 if (!IN_DEV_FORWARD(in_dev))
2298 goto e_hostunreach;
2299 goto no_route; 2022 goto no_route;
2300 }
2301 2023
2302 RT_CACHE_STAT_INC(in_slow_tot); 2024 RT_CACHE_STAT_INC(in_slow_tot);
2303 2025
@@ -2307,17 +2029,16 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
2307 if (res.type == RTN_LOCAL) { 2029 if (res.type == RTN_LOCAL) {
2308 err = fib_validate_source(skb, saddr, daddr, tos, 2030 err = fib_validate_source(skb, saddr, daddr, tos,
2309 net->loopback_dev->ifindex, 2031 net->loopback_dev->ifindex,
2310 dev, &spec_dst, &itag); 2032 dev, in_dev, &itag);
2311 if (err < 0) 2033 if (err < 0)
2312 goto martian_source_keep_err; 2034 goto martian_source_keep_err;
2313 if (err) 2035 if (err)
2314 flags |= RTCF_DIRECTSRC; 2036 flags |= RTCF_DIRECTSRC;
2315 spec_dst = daddr;
2316 goto local_input; 2037 goto local_input;
2317 } 2038 }
2318 2039
2319 if (!IN_DEV_FORWARD(in_dev)) 2040 if (!IN_DEV_FORWARD(in_dev))
2320 goto e_hostunreach; 2041 goto no_route;
2321 if (res.type != RTN_UNICAST) 2042 if (res.type != RTN_UNICAST)
2322 goto martian_destination; 2043 goto martian_destination;
2323 2044
@@ -2328,11 +2049,9 @@ brd_input:
2328 if (skb->protocol != htons(ETH_P_IP)) 2049 if (skb->protocol != htons(ETH_P_IP))
2329 goto e_inval; 2050 goto e_inval;
2330 2051
2331 if (ipv4_is_zeronet(saddr)) 2052 if (!ipv4_is_zeronet(saddr)) {
2332 spec_dst = inet_select_addr(dev, 0, RT_SCOPE_LINK); 2053 err = fib_validate_source(skb, saddr, 0, tos, 0, dev,
2333 else { 2054 in_dev, &itag);
2334 err = fib_validate_source(skb, saddr, 0, tos, 0, dev, &spec_dst,
2335 &itag);
2336 if (err < 0) 2055 if (err < 0)
2337 goto martian_source_keep_err; 2056 goto martian_source_keep_err;
2338 if (err) 2057 if (err)
@@ -2362,17 +2081,12 @@ local_input:
2362 rth->rt_key_tos = tos; 2081 rth->rt_key_tos = tos;
2363 rth->rt_dst = daddr; 2082 rth->rt_dst = daddr;
2364 rth->rt_src = saddr; 2083 rth->rt_src = saddr;
2365#ifdef CONFIG_IP_ROUTE_CLASSID
2366 rth->dst.tclassid = itag;
2367#endif
2368 rth->rt_route_iif = dev->ifindex; 2084 rth->rt_route_iif = dev->ifindex;
2369 rth->rt_iif = dev->ifindex; 2085 rth->rt_iif = dev->ifindex;
2370 rth->rt_oif = 0; 2086 rth->rt_oif = 0;
2371 rth->rt_mark = skb->mark; 2087 rth->rt_mark = skb->mark;
2088 rth->rt_pmtu = 0;
2372 rth->rt_gateway = daddr; 2089 rth->rt_gateway = daddr;
2373 rth->rt_spec_dst= spec_dst;
2374 rth->rt_peer_genid = 0;
2375 rth->peer = NULL;
2376 rth->fi = NULL; 2090 rth->fi = NULL;
2377 if (res.type == RTN_UNREACHABLE) { 2091 if (res.type == RTN_UNREACHABLE) {
2378 rth->dst.input= ip_error; 2092 rth->dst.input= ip_error;
@@ -2388,7 +2102,6 @@ local_input:
2388 2102
2389no_route: 2103no_route:
2390 RT_CACHE_STAT_INC(in_no_route); 2104 RT_CACHE_STAT_INC(in_no_route);
2391 spec_dst = inet_select_addr(dev, 0, RT_SCOPE_UNIVERSE);
2392 res.type = RTN_UNREACHABLE; 2105 res.type = RTN_UNREACHABLE;
2393 if (err == -ESRCH) 2106 if (err == -ESRCH)
2394 err = -ENETUNREACH; 2107 err = -ENETUNREACH;
@@ -2405,10 +2118,6 @@ martian_destination:
2405 &daddr, &saddr, dev->name); 2118 &daddr, &saddr, dev->name);
2406#endif 2119#endif
2407 2120
2408e_hostunreach:
2409 err = -EHOSTUNREACH;
2410 goto out;
2411
2412e_inval: 2121e_inval:
2413 err = -EINVAL; 2122 err = -EINVAL;
2414 goto out; 2123 goto out;
@@ -2452,7 +2161,6 @@ int ip_route_input_common(struct sk_buff *skb, __be32 daddr, __be32 saddr,
2452 rth->rt_mark == skb->mark && 2161 rth->rt_mark == skb->mark &&
2453 net_eq(dev_net(rth->dst.dev), net) && 2162 net_eq(dev_net(rth->dst.dev), net) &&
2454 !rt_is_expired(rth)) { 2163 !rt_is_expired(rth)) {
2455 ipv4_validate_peer(rth);
2456 if (noref) { 2164 if (noref) {
2457 dst_use_noref(&rth->dst, jiffies); 2165 dst_use_noref(&rth->dst, jiffies);
2458 skb_dst_set_noref(skb, &rth->dst); 2166 skb_dst_set_noref(skb, &rth->dst);
@@ -2520,9 +2228,14 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
2520 u16 type = res->type; 2228 u16 type = res->type;
2521 struct rtable *rth; 2229 struct rtable *rth;
2522 2230
2523 if (ipv4_is_loopback(fl4->saddr) && !(dev_out->flags & IFF_LOOPBACK)) 2231 in_dev = __in_dev_get_rcu(dev_out);
2232 if (!in_dev)
2524 return ERR_PTR(-EINVAL); 2233 return ERR_PTR(-EINVAL);
2525 2234
2235 if (likely(!IN_DEV_ROUTE_LOCALNET(in_dev)))
2236 if (ipv4_is_loopback(fl4->saddr) && !(dev_out->flags & IFF_LOOPBACK))
2237 return ERR_PTR(-EINVAL);
2238
2526 if (ipv4_is_lbcast(fl4->daddr)) 2239 if (ipv4_is_lbcast(fl4->daddr))
2527 type = RTN_BROADCAST; 2240 type = RTN_BROADCAST;
2528 else if (ipv4_is_multicast(fl4->daddr)) 2241 else if (ipv4_is_multicast(fl4->daddr))
@@ -2533,10 +2246,6 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
2533 if (dev_out->flags & IFF_LOOPBACK) 2246 if (dev_out->flags & IFF_LOOPBACK)
2534 flags |= RTCF_LOCAL; 2247 flags |= RTCF_LOCAL;
2535 2248
2536 in_dev = __in_dev_get_rcu(dev_out);
2537 if (!in_dev)
2538 return ERR_PTR(-EINVAL);
2539
2540 if (type == RTN_BROADCAST) { 2249 if (type == RTN_BROADCAST) {
2541 flags |= RTCF_BROADCAST | RTCF_LOCAL; 2250 flags |= RTCF_BROADCAST | RTCF_LOCAL;
2542 fi = NULL; 2251 fi = NULL;
@@ -2573,20 +2282,15 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
2573 rth->rt_iif = orig_oif ? : dev_out->ifindex; 2282 rth->rt_iif = orig_oif ? : dev_out->ifindex;
2574 rth->rt_oif = orig_oif; 2283 rth->rt_oif = orig_oif;
2575 rth->rt_mark = fl4->flowi4_mark; 2284 rth->rt_mark = fl4->flowi4_mark;
2285 rth->rt_pmtu = 0;
2576 rth->rt_gateway = fl4->daddr; 2286 rth->rt_gateway = fl4->daddr;
2577 rth->rt_spec_dst= fl4->saddr;
2578 rth->rt_peer_genid = 0;
2579 rth->peer = NULL;
2580 rth->fi = NULL; 2287 rth->fi = NULL;
2581 2288
2582 RT_CACHE_STAT_INC(out_slow_tot); 2289 RT_CACHE_STAT_INC(out_slow_tot);
2583 2290
2584 if (flags & RTCF_LOCAL) { 2291 if (flags & RTCF_LOCAL)
2585 rth->dst.input = ip_local_deliver; 2292 rth->dst.input = ip_local_deliver;
2586 rth->rt_spec_dst = fl4->daddr;
2587 }
2588 if (flags & (RTCF_BROADCAST | RTCF_MULTICAST)) { 2293 if (flags & (RTCF_BROADCAST | RTCF_MULTICAST)) {
2589 rth->rt_spec_dst = fl4->saddr;
2590 if (flags & RTCF_LOCAL && 2294 if (flags & RTCF_LOCAL &&
2591 !(dev_out->flags & IFF_LOOPBACK)) { 2295 !(dev_out->flags & IFF_LOOPBACK)) {
2592 rth->dst.output = ip_mc_output; 2296 rth->dst.output = ip_mc_output;
@@ -2605,6 +2309,9 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
2605 2309
2606 rt_set_nexthop(rth, fl4, res, fi, type, 0); 2310 rt_set_nexthop(rth, fl4, res, fi, type, 0);
2607 2311
2312 if (fl4->flowi4_flags & FLOWI_FLAG_RT_NOCACHE)
2313 rth->dst.flags |= DST_NOCACHE;
2314
2608 return rth; 2315 return rth;
2609} 2316}
2610 2317
@@ -2625,6 +2332,7 @@ static struct rtable *ip_route_output_slow(struct net *net, struct flowi4 *fl4)
2625 int orig_oif; 2332 int orig_oif;
2626 2333
2627 res.fi = NULL; 2334 res.fi = NULL;
2335 res.table = NULL;
2628#ifdef CONFIG_IP_MULTIPLE_TABLES 2336#ifdef CONFIG_IP_MULTIPLE_TABLES
2629 res.r = NULL; 2337 res.r = NULL;
2630#endif 2338#endif
@@ -2730,6 +2438,7 @@ static struct rtable *ip_route_output_slow(struct net *net, struct flowi4 *fl4)
2730 2438
2731 if (fib_lookup(net, fl4, &res)) { 2439 if (fib_lookup(net, fl4, &res)) {
2732 res.fi = NULL; 2440 res.fi = NULL;
2441 res.table = NULL;
2733 if (fl4->flowi4_oif) { 2442 if (fl4->flowi4_oif) {
2734 /* Apparently, routing tables are wrong. Assume, 2443 /* Apparently, routing tables are wrong. Assume,
2735 that the destination is on link. 2444 that the destination is on link.
@@ -2828,7 +2537,6 @@ struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *flp4)
2828 (IPTOS_RT_MASK | RTO_ONLINK)) && 2537 (IPTOS_RT_MASK | RTO_ONLINK)) &&
2829 net_eq(dev_net(rth->dst.dev), net) && 2538 net_eq(dev_net(rth->dst.dev), net) &&
2830 !rt_is_expired(rth)) { 2539 !rt_is_expired(rth)) {
2831 ipv4_validate_peer(rth);
2832 dst_use(&rth->dst, jiffies); 2540 dst_use(&rth->dst, jiffies);
2833 RT_CACHE_STAT_INC(out_hit); 2541 RT_CACHE_STAT_INC(out_hit);
2834 rcu_read_unlock_bh(); 2542 rcu_read_unlock_bh();
@@ -2892,7 +2600,6 @@ struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_or
2892 new->__use = 1; 2600 new->__use = 1;
2893 new->input = dst_discard; 2601 new->input = dst_discard;
2894 new->output = dst_discard; 2602 new->output = dst_discard;
2895 dst_copy_metrics(new, &ort->dst);
2896 2603
2897 new->dev = ort->dst.dev; 2604 new->dev = ort->dst.dev;
2898 if (new->dev) 2605 if (new->dev)
@@ -2905,6 +2612,7 @@ struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_or
2905 rt->rt_iif = ort->rt_iif; 2612 rt->rt_iif = ort->rt_iif;
2906 rt->rt_oif = ort->rt_oif; 2613 rt->rt_oif = ort->rt_oif;
2907 rt->rt_mark = ort->rt_mark; 2614 rt->rt_mark = ort->rt_mark;
2615 rt->rt_pmtu = ort->rt_pmtu;
2908 2616
2909 rt->rt_genid = rt_genid(net); 2617 rt->rt_genid = rt_genid(net);
2910 rt->rt_flags = ort->rt_flags; 2618 rt->rt_flags = ort->rt_flags;
@@ -2912,10 +2620,6 @@ struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_or
2912 rt->rt_dst = ort->rt_dst; 2620 rt->rt_dst = ort->rt_dst;
2913 rt->rt_src = ort->rt_src; 2621 rt->rt_src = ort->rt_src;
2914 rt->rt_gateway = ort->rt_gateway; 2622 rt->rt_gateway = ort->rt_gateway;
2915 rt->rt_spec_dst = ort->rt_spec_dst;
2916 rt->peer = ort->peer;
2917 if (rt->peer)
2918 atomic_inc(&rt->peer->refcnt);
2919 rt->fi = ort->fi; 2623 rt->fi = ort->fi;
2920 if (rt->fi) 2624 if (rt->fi)
2921 atomic_inc(&rt->fi->fib_clntref); 2625 atomic_inc(&rt->fi->fib_clntref);
@@ -2953,8 +2657,7 @@ static int rt_fill_info(struct net *net,
2953 struct rtmsg *r; 2657 struct rtmsg *r;
2954 struct nlmsghdr *nlh; 2658 struct nlmsghdr *nlh;
2955 unsigned long expires = 0; 2659 unsigned long expires = 0;
2956 const struct inet_peer *peer = rt->peer; 2660 u32 error;
2957 u32 id = 0, ts = 0, tsage = 0, error;
2958 2661
2959 nlh = nlmsg_put(skb, pid, seq, event, sizeof(*r), flags); 2662 nlh = nlmsg_put(skb, pid, seq, event, sizeof(*r), flags);
2960 if (nlh == NULL) 2663 if (nlh == NULL)
@@ -2990,10 +2693,8 @@ static int rt_fill_info(struct net *net,
2990 nla_put_u32(skb, RTA_FLOW, rt->dst.tclassid)) 2693 nla_put_u32(skb, RTA_FLOW, rt->dst.tclassid))
2991 goto nla_put_failure; 2694 goto nla_put_failure;
2992#endif 2695#endif
2993 if (rt_is_input_route(rt)) { 2696 if (!rt_is_input_route(rt) &&
2994 if (nla_put_be32(skb, RTA_PREFSRC, rt->rt_spec_dst)) 2697 rt->rt_src != rt->rt_key_src) {
2995 goto nla_put_failure;
2996 } else if (rt->rt_src != rt->rt_key_src) {
2997 if (nla_put_be32(skb, RTA_PREFSRC, rt->rt_src)) 2698 if (nla_put_be32(skb, RTA_PREFSRC, rt->rt_src))
2998 goto nla_put_failure; 2699 goto nla_put_failure;
2999 } 2700 }
@@ -3009,20 +2710,12 @@ static int rt_fill_info(struct net *net,
3009 goto nla_put_failure; 2710 goto nla_put_failure;
3010 2711
3011 error = rt->dst.error; 2712 error = rt->dst.error;
3012 if (peer) { 2713 expires = rt->dst.expires;
3013 inet_peer_refcheck(rt->peer); 2714 if (expires) {
3014 id = atomic_read(&peer->ip_id_count) & 0xffff; 2715 if (time_before(jiffies, expires))
3015 if (peer->tcp_ts_stamp) { 2716 expires -= jiffies;
3016 ts = peer->tcp_ts; 2717 else
3017 tsage = get_seconds() - peer->tcp_ts_stamp; 2718 expires = 0;
3018 }
3019 expires = ACCESS_ONCE(peer->pmtu_expires);
3020 if (expires) {
3021 if (time_before(jiffies, expires))
3022 expires -= jiffies;
3023 else
3024 expires = 0;
3025 }
3026 } 2719 }
3027 2720
3028 if (rt_is_input_route(rt)) { 2721 if (rt_is_input_route(rt)) {
@@ -3051,8 +2744,7 @@ static int rt_fill_info(struct net *net,
3051 goto nla_put_failure; 2744 goto nla_put_failure;
3052 } 2745 }
3053 2746
3054 if (rtnl_put_cacheinfo(skb, &rt->dst, id, ts, tsage, 2747 if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires, error) < 0)
3055 expires, error) < 0)
3056 goto nla_put_failure; 2748 goto nla_put_failure;
3057 2749
3058 return nlmsg_end(skb, nlh); 2750 return nlmsg_end(skb, nlh);
@@ -3400,6 +3092,30 @@ static __net_initdata struct pernet_operations rt_genid_ops = {
3400 .init = rt_genid_init, 3092 .init = rt_genid_init,
3401}; 3093};
3402 3094
3095static int __net_init ipv4_inetpeer_init(struct net *net)
3096{
3097 struct inet_peer_base *bp = kmalloc(sizeof(*bp), GFP_KERNEL);
3098
3099 if (!bp)
3100 return -ENOMEM;
3101 inet_peer_base_init(bp);
3102 net->ipv4.peers = bp;
3103 return 0;
3104}
3105
3106static void __net_exit ipv4_inetpeer_exit(struct net *net)
3107{
3108 struct inet_peer_base *bp = net->ipv4.peers;
3109
3110 net->ipv4.peers = NULL;
3111 inetpeer_invalidate_tree(bp);
3112 kfree(bp);
3113}
3114
3115static __net_initdata struct pernet_operations ipv4_inetpeer_ops = {
3116 .init = ipv4_inetpeer_init,
3117 .exit = ipv4_inetpeer_exit,
3118};
3403 3119
3404#ifdef CONFIG_IP_ROUTE_CLASSID 3120#ifdef CONFIG_IP_ROUTE_CLASSID
3405struct ip_rt_acct __percpu *ip_rt_acct __read_mostly; 3121struct ip_rt_acct __percpu *ip_rt_acct __read_mostly;
@@ -3480,6 +3196,7 @@ int __init ip_rt_init(void)
3480 register_pernet_subsys(&sysctl_route_ops); 3196 register_pernet_subsys(&sysctl_route_ops);
3481#endif 3197#endif
3482 register_pernet_subsys(&rt_genid_ops); 3198 register_pernet_subsys(&rt_genid_ops);
3199 register_pernet_subsys(&ipv4_inetpeer_ops);
3483 return rc; 3200 return rc;
3484} 3201}
3485 3202
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index ef32956ed65..12aa0c5867c 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -301,6 +301,13 @@ static struct ctl_table ipv4_table[] = {
301 .proc_handler = proc_dointvec 301 .proc_handler = proc_dointvec
302 }, 302 },
303 { 303 {
304 .procname = "ip_early_demux",
305 .data = &sysctl_ip_early_demux,
306 .maxlen = sizeof(int),
307 .mode = 0644,
308 .proc_handler = proc_dointvec
309 },
310 {
304 .procname = "ip_dynaddr", 311 .procname = "ip_dynaddr",
305 .data = &sysctl_ip_dynaddr, 312 .data = &sysctl_ip_dynaddr,
306 .maxlen = sizeof(int), 313 .maxlen = sizeof(int),
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 3ba605f60e4..d902da96d15 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -3310,8 +3310,7 @@ EXPORT_SYMBOL(tcp_md5_hash_key);
3310 3310
3311#endif 3311#endif
3312 3312
3313/** 3313/* Each Responder maintains up to two secret values concurrently for
3314 * Each Responder maintains up to two secret values concurrently for
3315 * efficient secret rollover. Each secret value has 4 states: 3314 * efficient secret rollover. Each secret value has 4 states:
3316 * 3315 *
3317 * Generating. (tcp_secret_generating != tcp_secret_primary) 3316 * Generating. (tcp_secret_generating != tcp_secret_primary)
@@ -3563,6 +3562,8 @@ void __init tcp_init(void)
3563 pr_info("Hash tables configured (established %u bind %u)\n", 3562 pr_info("Hash tables configured (established %u bind %u)\n",
3564 tcp_hashinfo.ehash_mask + 1, tcp_hashinfo.bhash_size); 3563 tcp_hashinfo.ehash_mask + 1, tcp_hashinfo.bhash_size);
3565 3564
3565 tcp_metrics_init();
3566
3566 tcp_register_congestion_control(&tcp_reno); 3567 tcp_register_congestion_control(&tcp_reno);
3567 3568
3568 memset(&tcp_secret_one.secrets[0], 0, sizeof(tcp_secret_one.secrets)); 3569 memset(&tcp_secret_one.secrets[0], 0, sizeof(tcp_secret_one.secrets));
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index b224eb8bce8..055ac49b8b4 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -93,7 +93,6 @@ int sysctl_tcp_rfc1337 __read_mostly;
93int sysctl_tcp_max_orphans __read_mostly = NR_FILE; 93int sysctl_tcp_max_orphans __read_mostly = NR_FILE;
94int sysctl_tcp_frto __read_mostly = 2; 94int sysctl_tcp_frto __read_mostly = 2;
95int sysctl_tcp_frto_response __read_mostly; 95int sysctl_tcp_frto_response __read_mostly;
96int sysctl_tcp_nometrics_save __read_mostly;
97 96
98int sysctl_tcp_thin_dupack __read_mostly; 97int sysctl_tcp_thin_dupack __read_mostly;
99 98
@@ -701,7 +700,7 @@ static void tcp_rtt_estimator(struct sock *sk, const __u32 mrtt)
701/* Calculate rto without backoff. This is the second half of Van Jacobson's 700/* Calculate rto without backoff. This is the second half of Van Jacobson's
702 * routine referred to above. 701 * routine referred to above.
703 */ 702 */
704static inline void tcp_set_rto(struct sock *sk) 703void tcp_set_rto(struct sock *sk)
705{ 704{
706 const struct tcp_sock *tp = tcp_sk(sk); 705 const struct tcp_sock *tp = tcp_sk(sk);
707 /* Old crap is replaced with new one. 8) 706 /* Old crap is replaced with new one. 8)
@@ -728,109 +727,6 @@ static inline void tcp_set_rto(struct sock *sk)
728 tcp_bound_rto(sk); 727 tcp_bound_rto(sk);
729} 728}
730 729
731/* Save metrics learned by this TCP session.
732 This function is called only, when TCP finishes successfully
733 i.e. when it enters TIME-WAIT or goes from LAST-ACK to CLOSE.
734 */
735void tcp_update_metrics(struct sock *sk)
736{
737 struct tcp_sock *tp = tcp_sk(sk);
738 struct dst_entry *dst = __sk_dst_get(sk);
739
740 if (sysctl_tcp_nometrics_save)
741 return;
742
743 dst_confirm(dst);
744
745 if (dst && (dst->flags & DST_HOST)) {
746 const struct inet_connection_sock *icsk = inet_csk(sk);
747 int m;
748 unsigned long rtt;
749
750 if (icsk->icsk_backoff || !tp->srtt) {
751 /* This session failed to estimate rtt. Why?
752 * Probably, no packets returned in time.
753 * Reset our results.
754 */
755 if (!(dst_metric_locked(dst, RTAX_RTT)))
756 dst_metric_set(dst, RTAX_RTT, 0);
757 return;
758 }
759
760 rtt = dst_metric_rtt(dst, RTAX_RTT);
761 m = rtt - tp->srtt;
762
763 /* If newly calculated rtt larger than stored one,
764 * store new one. Otherwise, use EWMA. Remember,
765 * rtt overestimation is always better than underestimation.
766 */
767 if (!(dst_metric_locked(dst, RTAX_RTT))) {
768 if (m <= 0)
769 set_dst_metric_rtt(dst, RTAX_RTT, tp->srtt);
770 else
771 set_dst_metric_rtt(dst, RTAX_RTT, rtt - (m >> 3));
772 }
773
774 if (!(dst_metric_locked(dst, RTAX_RTTVAR))) {
775 unsigned long var;
776 if (m < 0)
777 m = -m;
778
779 /* Scale deviation to rttvar fixed point */
780 m >>= 1;
781 if (m < tp->mdev)
782 m = tp->mdev;
783
784 var = dst_metric_rtt(dst, RTAX_RTTVAR);
785 if (m >= var)
786 var = m;
787 else
788 var -= (var - m) >> 2;
789
790 set_dst_metric_rtt(dst, RTAX_RTTVAR, var);
791 }
792
793 if (tcp_in_initial_slowstart(tp)) {
794 /* Slow start still did not finish. */
795 if (dst_metric(dst, RTAX_SSTHRESH) &&
796 !dst_metric_locked(dst, RTAX_SSTHRESH) &&
797 (tp->snd_cwnd >> 1) > dst_metric(dst, RTAX_SSTHRESH))
798 dst_metric_set(dst, RTAX_SSTHRESH, tp->snd_cwnd >> 1);
799 if (!dst_metric_locked(dst, RTAX_CWND) &&
800 tp->snd_cwnd > dst_metric(dst, RTAX_CWND))
801 dst_metric_set(dst, RTAX_CWND, tp->snd_cwnd);
802 } else if (tp->snd_cwnd > tp->snd_ssthresh &&
803 icsk->icsk_ca_state == TCP_CA_Open) {
804 /* Cong. avoidance phase, cwnd is reliable. */
805 if (!dst_metric_locked(dst, RTAX_SSTHRESH))
806 dst_metric_set(dst, RTAX_SSTHRESH,
807 max(tp->snd_cwnd >> 1, tp->snd_ssthresh));
808 if (!dst_metric_locked(dst, RTAX_CWND))
809 dst_metric_set(dst, RTAX_CWND,
810 (dst_metric(dst, RTAX_CWND) +
811 tp->snd_cwnd) >> 1);
812 } else {
813 /* Else slow start did not finish, cwnd is non-sense,
814 ssthresh may be also invalid.
815 */
816 if (!dst_metric_locked(dst, RTAX_CWND))
817 dst_metric_set(dst, RTAX_CWND,
818 (dst_metric(dst, RTAX_CWND) +
819 tp->snd_ssthresh) >> 1);
820 if (dst_metric(dst, RTAX_SSTHRESH) &&
821 !dst_metric_locked(dst, RTAX_SSTHRESH) &&
822 tp->snd_ssthresh > dst_metric(dst, RTAX_SSTHRESH))
823 dst_metric_set(dst, RTAX_SSTHRESH, tp->snd_ssthresh);
824 }
825
826 if (!dst_metric_locked(dst, RTAX_REORDERING)) {
827 if (dst_metric(dst, RTAX_REORDERING) < tp->reordering &&
828 tp->reordering != sysctl_tcp_reordering)
829 dst_metric_set(dst, RTAX_REORDERING, tp->reordering);
830 }
831 }
832}
833
834__u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst) 730__u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst)
835{ 731{
836 __u32 cwnd = (dst ? dst_metric(dst, RTAX_INITCWND) : 0); 732 __u32 cwnd = (dst ? dst_metric(dst, RTAX_INITCWND) : 0);
@@ -867,7 +763,7 @@ void tcp_enter_cwr(struct sock *sk, const int set_ssthresh)
867 * Packet counting of FACK is based on in-order assumptions, therefore TCP 763 * Packet counting of FACK is based on in-order assumptions, therefore TCP
868 * disables it when reordering is detected 764 * disables it when reordering is detected
869 */ 765 */
870static void tcp_disable_fack(struct tcp_sock *tp) 766void tcp_disable_fack(struct tcp_sock *tp)
871{ 767{
872 /* RFC3517 uses different metric in lost marker => reset on change */ 768 /* RFC3517 uses different metric in lost marker => reset on change */
873 if (tcp_is_fack(tp)) 769 if (tcp_is_fack(tp))
@@ -881,86 +777,6 @@ static void tcp_dsack_seen(struct tcp_sock *tp)
881 tp->rx_opt.sack_ok |= TCP_DSACK_SEEN; 777 tp->rx_opt.sack_ok |= TCP_DSACK_SEEN;
882} 778}
883 779
884/* Initialize metrics on socket. */
885
886static void tcp_init_metrics(struct sock *sk)
887{
888 struct tcp_sock *tp = tcp_sk(sk);
889 struct dst_entry *dst = __sk_dst_get(sk);
890
891 if (dst == NULL)
892 goto reset;
893
894 dst_confirm(dst);
895
896 if (dst_metric_locked(dst, RTAX_CWND))
897 tp->snd_cwnd_clamp = dst_metric(dst, RTAX_CWND);
898 if (dst_metric(dst, RTAX_SSTHRESH)) {
899 tp->snd_ssthresh = dst_metric(dst, RTAX_SSTHRESH);
900 if (tp->snd_ssthresh > tp->snd_cwnd_clamp)
901 tp->snd_ssthresh = tp->snd_cwnd_clamp;
902 } else {
903 /* ssthresh may have been reduced unnecessarily during.
904 * 3WHS. Restore it back to its initial default.
905 */
906 tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
907 }
908 if (dst_metric(dst, RTAX_REORDERING) &&
909 tp->reordering != dst_metric(dst, RTAX_REORDERING)) {
910 tcp_disable_fack(tp);
911 tcp_disable_early_retrans(tp);
912 tp->reordering = dst_metric(dst, RTAX_REORDERING);
913 }
914
915 if (dst_metric(dst, RTAX_RTT) == 0 || tp->srtt == 0)
916 goto reset;
917
918 /* Initial rtt is determined from SYN,SYN-ACK.
919 * The segment is small and rtt may appear much
920 * less than real one. Use per-dst memory
921 * to make it more realistic.
922 *
923 * A bit of theory. RTT is time passed after "normal" sized packet
924 * is sent until it is ACKed. In normal circumstances sending small
925 * packets force peer to delay ACKs and calculation is correct too.
926 * The algorithm is adaptive and, provided we follow specs, it
927 * NEVER underestimate RTT. BUT! If peer tries to make some clever
928 * tricks sort of "quick acks" for time long enough to decrease RTT
929 * to low value, and then abruptly stops to do it and starts to delay
930 * ACKs, wait for troubles.
931 */
932 if (dst_metric_rtt(dst, RTAX_RTT) > tp->srtt) {
933 tp->srtt = dst_metric_rtt(dst, RTAX_RTT);
934 tp->rtt_seq = tp->snd_nxt;
935 }
936 if (dst_metric_rtt(dst, RTAX_RTTVAR) > tp->mdev) {
937 tp->mdev = dst_metric_rtt(dst, RTAX_RTTVAR);
938 tp->mdev_max = tp->rttvar = max(tp->mdev, tcp_rto_min(sk));
939 }
940 tcp_set_rto(sk);
941reset:
942 if (tp->srtt == 0) {
943 /* RFC6298: 5.7 We've failed to get a valid RTT sample from
944 * 3WHS. This is most likely due to retransmission,
945 * including spurious one. Reset the RTO back to 3secs
946 * from the more aggressive 1sec to avoid more spurious
947 * retransmission.
948 */
949 tp->mdev = tp->mdev_max = tp->rttvar = TCP_TIMEOUT_FALLBACK;
950 inet_csk(sk)->icsk_rto = TCP_TIMEOUT_FALLBACK;
951 }
952 /* Cut cwnd down to 1 per RFC5681 if SYN or SYN-ACK has been
953 * retransmitted. In light of RFC6298 more aggressive 1sec
954 * initRTO, we only reset cwnd when more than 1 SYN/SYN-ACK
955 * retransmission has occurred.
956 */
957 if (tp->total_retrans > 1)
958 tp->snd_cwnd = 1;
959 else
960 tp->snd_cwnd = tcp_init_cwnd(tp, dst);
961 tp->snd_cwnd_stamp = tcp_time_stamp;
962}
963
964static void tcp_update_reordering(struct sock *sk, const int metric, 780static void tcp_update_reordering(struct sock *sk, const int metric,
965 const int ts) 781 const int ts)
966{ 782{
@@ -3869,9 +3685,11 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
3869 tcp_cong_avoid(sk, ack, prior_in_flight); 3685 tcp_cong_avoid(sk, ack, prior_in_flight);
3870 } 3686 }
3871 3687
3872 if ((flag & FLAG_FORWARD_PROGRESS) || !(flag & FLAG_NOT_DUP)) 3688 if ((flag & FLAG_FORWARD_PROGRESS) || !(flag & FLAG_NOT_DUP)) {
3873 dst_confirm(__sk_dst_get(sk)); 3689 struct dst_entry *dst = __sk_dst_get(sk);
3874 3690 if (dst)
3691 dst_confirm(dst);
3692 }
3875 return 1; 3693 return 1;
3876 3694
3877no_queue: 3695no_queue:
@@ -5518,6 +5336,18 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
5518 struct tcp_sock *tp = tcp_sk(sk); 5336 struct tcp_sock *tp = tcp_sk(sk);
5519 int res; 5337 int res;
5520 5338
5339 if (sk->sk_rx_dst) {
5340 struct dst_entry *dst = sk->sk_rx_dst;
5341 if (unlikely(dst->obsolete)) {
5342 if (dst->ops->check(dst, 0) == NULL) {
5343 dst_release(dst);
5344 sk->sk_rx_dst = NULL;
5345 }
5346 }
5347 }
5348 if (unlikely(sk->sk_rx_dst == NULL))
5349 sk->sk_rx_dst = dst_clone(skb_dst(skb));
5350
5521 /* 5351 /*
5522 * Header prediction. 5352 * Header prediction.
5523 * The code loosely follows the one in the famous 5353 * The code loosely follows the one in the famous
@@ -5729,8 +5559,10 @@ void tcp_finish_connect(struct sock *sk, struct sk_buff *skb)
5729 5559
5730 tcp_set_state(sk, TCP_ESTABLISHED); 5560 tcp_set_state(sk, TCP_ESTABLISHED);
5731 5561
5732 if (skb != NULL) 5562 if (skb != NULL) {
5563 sk->sk_rx_dst = dst_clone(skb_dst(skb));
5733 security_inet_conn_established(sk, skb); 5564 security_inet_conn_established(sk, skb);
5565 }
5734 5566
5735 /* Make sure socket is routed, for correct metrics. */ 5567 /* Make sure socket is routed, for correct metrics. */
5736 icsk->icsk_af_ops->rebuild_header(sk); 5568 icsk->icsk_af_ops->rebuild_header(sk);
@@ -6126,9 +5958,14 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
6126 5958
6127 case TCP_FIN_WAIT1: 5959 case TCP_FIN_WAIT1:
6128 if (tp->snd_una == tp->write_seq) { 5960 if (tp->snd_una == tp->write_seq) {
5961 struct dst_entry *dst;
5962
6129 tcp_set_state(sk, TCP_FIN_WAIT2); 5963 tcp_set_state(sk, TCP_FIN_WAIT2);
6130 sk->sk_shutdown |= SEND_SHUTDOWN; 5964 sk->sk_shutdown |= SEND_SHUTDOWN;
6131 dst_confirm(__sk_dst_get(sk)); 5965
5966 dst = __sk_dst_get(sk);
5967 if (dst)
5968 dst_confirm(dst);
6132 5969
6133 if (!sock_flag(sk, SOCK_DEAD)) 5970 if (!sock_flag(sk, SOCK_DEAD))
6134 /* Wake up lingering close() */ 5971 /* Wake up lingering close() */
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index c8d28c433b2..ddefd39ac0c 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -209,22 +209,8 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
209 } 209 }
210 210
211 if (tcp_death_row.sysctl_tw_recycle && 211 if (tcp_death_row.sysctl_tw_recycle &&
212 !tp->rx_opt.ts_recent_stamp && fl4->daddr == daddr) { 212 !tp->rx_opt.ts_recent_stamp && fl4->daddr == daddr)
213 struct inet_peer *peer = rt_get_peer(rt, fl4->daddr); 213 tcp_fetch_timewait_stamp(sk, &rt->dst);
214 /*
215 * VJ's idea. We save last timestamp seen from
216 * the destination in peer table, when entering state
217 * TIME-WAIT * and initialize rx_opt.ts_recent from it,
218 * when trying new connection.
219 */
220 if (peer) {
221 inet_peer_refcheck(peer);
222 if ((u32)get_seconds() - peer->tcp_ts_stamp <= TCP_PAWS_MSL) {
223 tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp;
224 tp->rx_opt.ts_recent = peer->tcp_ts;
225 }
226 }
227 }
228 214
229 inet->inet_dport = usin->sin_port; 215 inet->inet_dport = usin->sin_port;
230 inet->inet_daddr = daddr; 216 inet->inet_daddr = daddr;
@@ -698,8 +684,8 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
698 684
699 net = dev_net(skb_dst(skb)->dev); 685 net = dev_net(skb_dst(skb)->dev);
700 arg.tos = ip_hdr(skb)->tos; 686 arg.tos = ip_hdr(skb)->tos;
701 ip_send_reply(net->ipv4.tcp_sock, skb, ip_hdr(skb)->saddr, 687 ip_send_unicast_reply(net->ipv4.tcp_sock, skb, ip_hdr(skb)->saddr,
702 &arg, arg.iov[0].iov_len); 688 ip_hdr(skb)->daddr, &arg, arg.iov[0].iov_len);
703 689
704 TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS); 690 TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
705 TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS); 691 TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
@@ -781,8 +767,8 @@ static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
781 if (oif) 767 if (oif)
782 arg.bound_dev_if = oif; 768 arg.bound_dev_if = oif;
783 arg.tos = tos; 769 arg.tos = tos;
784 ip_send_reply(net->ipv4.tcp_sock, skb, ip_hdr(skb)->saddr, 770 ip_send_unicast_reply(net->ipv4.tcp_sock, skb, ip_hdr(skb)->saddr,
785 &arg, arg.iov[0].iov_len); 771 ip_hdr(skb)->daddr, &arg, arg.iov[0].iov_len);
786 772
787 TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS); 773 TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
788} 774}
@@ -825,7 +811,8 @@ static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
825static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst, 811static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
826 struct request_sock *req, 812 struct request_sock *req,
827 struct request_values *rvp, 813 struct request_values *rvp,
828 u16 queue_mapping) 814 u16 queue_mapping,
815 bool nocache)
829{ 816{
830 const struct inet_request_sock *ireq = inet_rsk(req); 817 const struct inet_request_sock *ireq = inet_rsk(req);
831 struct flowi4 fl4; 818 struct flowi4 fl4;
@@ -833,7 +820,7 @@ static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
833 struct sk_buff * skb; 820 struct sk_buff * skb;
834 821
835 /* First, grab a route. */ 822 /* First, grab a route. */
836 if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL) 823 if (!dst && (dst = inet_csk_route_req(sk, &fl4, req, nocache)) == NULL)
837 return -1; 824 return -1;
838 825
839 skb = tcp_make_synack(sk, dst, req, rvp); 826 skb = tcp_make_synack(sk, dst, req, rvp);
@@ -848,7 +835,6 @@ static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
848 err = net_xmit_eval(err); 835 err = net_xmit_eval(err);
849 } 836 }
850 837
851 dst_release(dst);
852 return err; 838 return err;
853} 839}
854 840
@@ -856,7 +842,7 @@ static int tcp_v4_rtx_synack(struct sock *sk, struct request_sock *req,
856 struct request_values *rvp) 842 struct request_values *rvp)
857{ 843{
858 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS); 844 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
859 return tcp_v4_send_synack(sk, NULL, req, rvp, 0); 845 return tcp_v4_send_synack(sk, NULL, req, rvp, 0, false);
860} 846}
861 847
862/* 848/*
@@ -1375,7 +1361,6 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1375 isn = cookie_v4_init_sequence(sk, skb, &req->mss); 1361 isn = cookie_v4_init_sequence(sk, skb, &req->mss);
1376 req->cookie_ts = tmp_opt.tstamp_ok; 1362 req->cookie_ts = tmp_opt.tstamp_ok;
1377 } else if (!isn) { 1363 } else if (!isn) {
1378 struct inet_peer *peer = NULL;
1379 struct flowi4 fl4; 1364 struct flowi4 fl4;
1380 1365
1381 /* VJ's idea. We save last timestamp seen 1366 /* VJ's idea. We save last timestamp seen
@@ -1389,13 +1374,9 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1389 */ 1374 */
1390 if (tmp_opt.saw_tstamp && 1375 if (tmp_opt.saw_tstamp &&
1391 tcp_death_row.sysctl_tw_recycle && 1376 tcp_death_row.sysctl_tw_recycle &&
1392 (dst = inet_csk_route_req(sk, &fl4, req)) != NULL && 1377 (dst = inet_csk_route_req(sk, &fl4, req, want_cookie)) != NULL &&
1393 fl4.daddr == saddr && 1378 fl4.daddr == saddr) {
1394 (peer = rt_get_peer((struct rtable *)dst, fl4.daddr)) != NULL) { 1379 if (!tcp_peer_is_proven(req, dst, true)) {
1395 inet_peer_refcheck(peer);
1396 if ((u32)get_seconds() - peer->tcp_ts_stamp < TCP_PAWS_MSL &&
1397 (s32)(peer->tcp_ts - req->ts_recent) >
1398 TCP_PAWS_WINDOW) {
1399 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED); 1380 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
1400 goto drop_and_release; 1381 goto drop_and_release;
1401 } 1382 }
@@ -1404,8 +1385,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1404 else if (!sysctl_tcp_syncookies && 1385 else if (!sysctl_tcp_syncookies &&
1405 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) < 1386 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
1406 (sysctl_max_syn_backlog >> 2)) && 1387 (sysctl_max_syn_backlog >> 2)) &&
1407 (!peer || !peer->tcp_ts_stamp) && 1388 !tcp_peer_is_proven(req, dst, false)) {
1408 (!dst || !dst_metric(dst, RTAX_RTT))) {
1409 /* Without syncookies last quarter of 1389 /* Without syncookies last quarter of
1410 * backlog is filled with destinations, 1390 * backlog is filled with destinations,
1411 * proven to be alive. 1391 * proven to be alive.
@@ -1425,7 +1405,8 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1425 1405
1426 if (tcp_v4_send_synack(sk, dst, req, 1406 if (tcp_v4_send_synack(sk, dst, req,
1427 (struct request_values *)&tmp_ext, 1407 (struct request_values *)&tmp_ext,
1428 skb_get_queue_mapping(skb)) || 1408 skb_get_queue_mapping(skb),
1409 want_cookie) ||
1429 want_cookie) 1410 want_cookie)
1430 goto drop_and_free; 1411 goto drop_and_free;
1431 1412
@@ -1672,6 +1653,51 @@ csum_err:
1672} 1653}
1673EXPORT_SYMBOL(tcp_v4_do_rcv); 1654EXPORT_SYMBOL(tcp_v4_do_rcv);
1674 1655
1656void tcp_v4_early_demux(struct sk_buff *skb)
1657{
1658 struct net *net = dev_net(skb->dev);
1659 const struct iphdr *iph;
1660 const struct tcphdr *th;
1661 struct net_device *dev;
1662 struct sock *sk;
1663
1664 if (skb->pkt_type != PACKET_HOST)
1665 return;
1666
1667 if (!pskb_may_pull(skb, ip_hdrlen(skb) + sizeof(struct tcphdr)))
1668 return;
1669
1670 iph = ip_hdr(skb);
1671 th = (struct tcphdr *) ((char *)iph + ip_hdrlen(skb));
1672
1673 if (th->doff < sizeof(struct tcphdr) / 4)
1674 return;
1675
1676 if (!pskb_may_pull(skb, ip_hdrlen(skb) + th->doff * 4))
1677 return;
1678
1679 dev = skb->dev;
1680 sk = __inet_lookup_established(net, &tcp_hashinfo,
1681 iph->saddr, th->source,
1682 iph->daddr, ntohs(th->dest),
1683 dev->ifindex);
1684 if (sk) {
1685 skb->sk = sk;
1686 skb->destructor = sock_edemux;
1687 if (sk->sk_state != TCP_TIME_WAIT) {
1688 struct dst_entry *dst = sk->sk_rx_dst;
1689 if (dst)
1690 dst = dst_check(dst, 0);
1691 if (dst) {
1692 struct rtable *rt = (struct rtable *) dst;
1693
1694 if (rt->rt_iif == dev->ifindex)
1695 skb_dst_set_noref(skb, dst);
1696 }
1697 }
1698 }
1699}
1700
1675/* 1701/*
1676 * From tcp_input.c 1702 * From tcp_input.c
1677 */ 1703 */
@@ -1821,40 +1847,10 @@ do_time_wait:
1821 goto discard_it; 1847 goto discard_it;
1822} 1848}
1823 1849
1824struct inet_peer *tcp_v4_get_peer(struct sock *sk, bool *release_it)
1825{
1826 struct rtable *rt = (struct rtable *) __sk_dst_get(sk);
1827 struct inet_sock *inet = inet_sk(sk);
1828 struct inet_peer *peer;
1829
1830 if (!rt ||
1831 inet->cork.fl.u.ip4.daddr != inet->inet_daddr) {
1832 peer = inet_getpeer_v4(inet->inet_daddr, 1);
1833 *release_it = true;
1834 } else {
1835 if (!rt->peer)
1836 rt_bind_peer(rt, inet->inet_daddr, 1);
1837 peer = rt->peer;
1838 *release_it = false;
1839 }
1840
1841 return peer;
1842}
1843EXPORT_SYMBOL(tcp_v4_get_peer);
1844
1845void *tcp_v4_tw_get_peer(struct sock *sk)
1846{
1847 const struct inet_timewait_sock *tw = inet_twsk(sk);
1848
1849 return inet_getpeer_v4(tw->tw_daddr, 1);
1850}
1851EXPORT_SYMBOL(tcp_v4_tw_get_peer);
1852
1853static struct timewait_sock_ops tcp_timewait_sock_ops = { 1850static struct timewait_sock_ops tcp_timewait_sock_ops = {
1854 .twsk_obj_size = sizeof(struct tcp_timewait_sock), 1851 .twsk_obj_size = sizeof(struct tcp_timewait_sock),
1855 .twsk_unique = tcp_twsk_unique, 1852 .twsk_unique = tcp_twsk_unique,
1856 .twsk_destructor= tcp_twsk_destructor, 1853 .twsk_destructor= tcp_twsk_destructor,
1857 .twsk_getpeer = tcp_v4_tw_get_peer,
1858}; 1854};
1859 1855
1860const struct inet_connection_sock_af_ops ipv4_specific = { 1856const struct inet_connection_sock_af_ops ipv4_specific = {
@@ -1863,7 +1859,6 @@ const struct inet_connection_sock_af_ops ipv4_specific = {
1863 .rebuild_header = inet_sk_rebuild_header, 1859 .rebuild_header = inet_sk_rebuild_header,
1864 .conn_request = tcp_v4_conn_request, 1860 .conn_request = tcp_v4_conn_request,
1865 .syn_recv_sock = tcp_v4_syn_recv_sock, 1861 .syn_recv_sock = tcp_v4_syn_recv_sock,
1866 .get_peer = tcp_v4_get_peer,
1867 .net_header_len = sizeof(struct iphdr), 1862 .net_header_len = sizeof(struct iphdr),
1868 .setsockopt = ip_setsockopt, 1863 .setsockopt = ip_setsockopt,
1869 .getsockopt = ip_getsockopt, 1864 .getsockopt = ip_getsockopt,
diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c
new file mode 100644
index 00000000000..1fd83d3118f
--- /dev/null
+++ b/net/ipv4/tcp_metrics.c
@@ -0,0 +1,697 @@
1#include <linux/rcupdate.h>
2#include <linux/spinlock.h>
3#include <linux/jiffies.h>
4#include <linux/bootmem.h>
5#include <linux/module.h>
6#include <linux/cache.h>
7#include <linux/slab.h>
8#include <linux/init.h>
9#include <linux/tcp.h>
10
11#include <net/inet_connection_sock.h>
12#include <net/net_namespace.h>
13#include <net/request_sock.h>
14#include <net/inetpeer.h>
15#include <net/sock.h>
16#include <net/ipv6.h>
17#include <net/dst.h>
18#include <net/tcp.h>
19
20int sysctl_tcp_nometrics_save __read_mostly;
21
22enum tcp_metric_index {
23 TCP_METRIC_RTT,
24 TCP_METRIC_RTTVAR,
25 TCP_METRIC_SSTHRESH,
26 TCP_METRIC_CWND,
27 TCP_METRIC_REORDERING,
28
29 /* Always last. */
30 TCP_METRIC_MAX,
31};
32
33struct tcp_metrics_block {
34 struct tcp_metrics_block __rcu *tcpm_next;
35 struct inetpeer_addr tcpm_addr;
36 unsigned long tcpm_stamp;
37 u32 tcpm_ts;
38 u32 tcpm_ts_stamp;
39 u32 tcpm_lock;
40 u32 tcpm_vals[TCP_METRIC_MAX];
41};
42
43static bool tcp_metric_locked(struct tcp_metrics_block *tm,
44 enum tcp_metric_index idx)
45{
46 return tm->tcpm_lock & (1 << idx);
47}
48
49static u32 tcp_metric_get(struct tcp_metrics_block *tm,
50 enum tcp_metric_index idx)
51{
52 return tm->tcpm_vals[idx];
53}
54
55static u32 tcp_metric_get_jiffies(struct tcp_metrics_block *tm,
56 enum tcp_metric_index idx)
57{
58 return msecs_to_jiffies(tm->tcpm_vals[idx]);
59}
60
61static void tcp_metric_set(struct tcp_metrics_block *tm,
62 enum tcp_metric_index idx,
63 u32 val)
64{
65 tm->tcpm_vals[idx] = val;
66}
67
68static void tcp_metric_set_msecs(struct tcp_metrics_block *tm,
69 enum tcp_metric_index idx,
70 u32 val)
71{
72 tm->tcpm_vals[idx] = jiffies_to_msecs(val);
73}
74
75static bool addr_same(const struct inetpeer_addr *a,
76 const struct inetpeer_addr *b)
77{
78 const struct in6_addr *a6, *b6;
79
80 if (a->family != b->family)
81 return false;
82 if (a->family == AF_INET)
83 return a->addr.a4 == b->addr.a4;
84
85 a6 = (const struct in6_addr *) &a->addr.a6[0];
86 b6 = (const struct in6_addr *) &b->addr.a6[0];
87
88 return ipv6_addr_equal(a6, b6);
89}
90
91struct tcpm_hash_bucket {
92 struct tcp_metrics_block __rcu *chain;
93};
94
95static DEFINE_SPINLOCK(tcp_metrics_lock);
96
97static void tcpm_suck_dst(struct tcp_metrics_block *tm, struct dst_entry *dst)
98{
99 u32 val;
100
101 val = 0;
102 if (dst_metric_locked(dst, RTAX_RTT))
103 val |= 1 << TCP_METRIC_RTT;
104 if (dst_metric_locked(dst, RTAX_RTTVAR))
105 val |= 1 << TCP_METRIC_RTTVAR;
106 if (dst_metric_locked(dst, RTAX_SSTHRESH))
107 val |= 1 << TCP_METRIC_SSTHRESH;
108 if (dst_metric_locked(dst, RTAX_CWND))
109 val |= 1 << TCP_METRIC_CWND;
110 if (dst_metric_locked(dst, RTAX_REORDERING))
111 val |= 1 << TCP_METRIC_REORDERING;
112 tm->tcpm_lock = val;
113
114 tm->tcpm_vals[TCP_METRIC_RTT] = dst_metric_raw(dst, RTAX_RTT);
115 tm->tcpm_vals[TCP_METRIC_RTTVAR] = dst_metric_raw(dst, RTAX_RTTVAR);
116 tm->tcpm_vals[TCP_METRIC_SSTHRESH] = dst_metric_raw(dst, RTAX_SSTHRESH);
117 tm->tcpm_vals[TCP_METRIC_CWND] = dst_metric_raw(dst, RTAX_CWND);
118 tm->tcpm_vals[TCP_METRIC_REORDERING] = dst_metric_raw(dst, RTAX_REORDERING);
119 tm->tcpm_ts = 0;
120 tm->tcpm_ts_stamp = 0;
121}
122
123static struct tcp_metrics_block *tcpm_new(struct dst_entry *dst,
124 struct inetpeer_addr *addr,
125 unsigned int hash,
126 bool reclaim)
127{
128 struct tcp_metrics_block *tm;
129 struct net *net;
130
131 spin_lock_bh(&tcp_metrics_lock);
132 net = dev_net(dst->dev);
133 if (unlikely(reclaim)) {
134 struct tcp_metrics_block *oldest;
135
136 oldest = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain);
137 for (tm = rcu_dereference(oldest->tcpm_next); tm;
138 tm = rcu_dereference(tm->tcpm_next)) {
139 if (time_before(tm->tcpm_stamp, oldest->tcpm_stamp))
140 oldest = tm;
141 }
142 tm = oldest;
143 } else {
144 tm = kmalloc(sizeof(*tm), GFP_ATOMIC);
145 if (!tm)
146 goto out_unlock;
147 }
148 tm->tcpm_addr = *addr;
149 tm->tcpm_stamp = jiffies;
150
151 tcpm_suck_dst(tm, dst);
152
153 if (likely(!reclaim)) {
154 tm->tcpm_next = net->ipv4.tcp_metrics_hash[hash].chain;
155 rcu_assign_pointer(net->ipv4.tcp_metrics_hash[hash].chain, tm);
156 }
157
158out_unlock:
159 spin_unlock_bh(&tcp_metrics_lock);
160 return tm;
161}
162
163#define TCP_METRICS_TIMEOUT (60 * 60 * HZ)
164
165static void tcpm_check_stamp(struct tcp_metrics_block *tm, struct dst_entry *dst)
166{
167 if (tm && unlikely(time_after(jiffies, tm->tcpm_stamp + TCP_METRICS_TIMEOUT)))
168 tcpm_suck_dst(tm, dst);
169}
170
171#define TCP_METRICS_RECLAIM_DEPTH 5
172#define TCP_METRICS_RECLAIM_PTR (struct tcp_metrics_block *) 0x1UL
173
174static struct tcp_metrics_block *tcp_get_encode(struct tcp_metrics_block *tm, int depth)
175{
176 if (tm)
177 return tm;
178 if (depth > TCP_METRICS_RECLAIM_DEPTH)
179 return TCP_METRICS_RECLAIM_PTR;
180 return NULL;
181}
182
183static struct tcp_metrics_block *__tcp_get_metrics(const struct inetpeer_addr *addr,
184 struct net *net, unsigned int hash)
185{
186 struct tcp_metrics_block *tm;
187 int depth = 0;
188
189 for (tm = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain); tm;
190 tm = rcu_dereference(tm->tcpm_next)) {
191 if (addr_same(&tm->tcpm_addr, addr))
192 break;
193 depth++;
194 }
195 return tcp_get_encode(tm, depth);
196}
197
198static struct tcp_metrics_block *__tcp_get_metrics_req(struct request_sock *req,
199 struct dst_entry *dst)
200{
201 struct tcp_metrics_block *tm;
202 struct inetpeer_addr addr;
203 unsigned int hash;
204 struct net *net;
205
206 addr.family = req->rsk_ops->family;
207 switch (addr.family) {
208 case AF_INET:
209 addr.addr.a4 = inet_rsk(req)->rmt_addr;
210 hash = (__force unsigned int) addr.addr.a4;
211 break;
212 case AF_INET6:
213 *(struct in6_addr *)addr.addr.a6 = inet6_rsk(req)->rmt_addr;
214 hash = ((__force unsigned int) addr.addr.a6[0] ^
215 (__force unsigned int) addr.addr.a6[1] ^
216 (__force unsigned int) addr.addr.a6[2] ^
217 (__force unsigned int) addr.addr.a6[3]);
218 break;
219 default:
220 return NULL;
221 }
222
223 hash ^= (hash >> 24) ^ (hash >> 16) ^ (hash >> 8);
224
225 net = dev_net(dst->dev);
226 hash &= net->ipv4.tcp_metrics_hash_mask;
227
228 for (tm = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain); tm;
229 tm = rcu_dereference(tm->tcpm_next)) {
230 if (addr_same(&tm->tcpm_addr, &addr))
231 break;
232 }
233 tcpm_check_stamp(tm, dst);
234 return tm;
235}
236
237static struct tcp_metrics_block *__tcp_get_metrics_tw(struct inet_timewait_sock *tw)
238{
239 struct inet6_timewait_sock *tw6;
240 struct tcp_metrics_block *tm;
241 struct inetpeer_addr addr;
242 unsigned int hash;
243 struct net *net;
244
245 addr.family = tw->tw_family;
246 switch (addr.family) {
247 case AF_INET:
248 addr.addr.a4 = tw->tw_daddr;
249 hash = (__force unsigned int) addr.addr.a4;
250 break;
251 case AF_INET6:
252 tw6 = inet6_twsk((struct sock *)tw);
253 *(struct in6_addr *)addr.addr.a6 = tw6->tw_v6_daddr;
254 hash = ((__force unsigned int) addr.addr.a6[0] ^
255 (__force unsigned int) addr.addr.a6[1] ^
256 (__force unsigned int) addr.addr.a6[2] ^
257 (__force unsigned int) addr.addr.a6[3]);
258 break;
259 default:
260 return NULL;
261 }
262
263 hash ^= (hash >> 24) ^ (hash >> 16) ^ (hash >> 8);
264
265 net = twsk_net(tw);
266 hash &= net->ipv4.tcp_metrics_hash_mask;
267
268 for (tm = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain); tm;
269 tm = rcu_dereference(tm->tcpm_next)) {
270 if (addr_same(&tm->tcpm_addr, &addr))
271 break;
272 }
273 return tm;
274}
275
276static struct tcp_metrics_block *tcp_get_metrics(struct sock *sk,
277 struct dst_entry *dst,
278 bool create)
279{
280 struct tcp_metrics_block *tm;
281 struct inetpeer_addr addr;
282 unsigned int hash;
283 struct net *net;
284 bool reclaim;
285
286 addr.family = sk->sk_family;
287 switch (addr.family) {
288 case AF_INET:
289 addr.addr.a4 = inet_sk(sk)->inet_daddr;
290 hash = (__force unsigned int) addr.addr.a4;
291 break;
292 case AF_INET6:
293 *(struct in6_addr *)addr.addr.a6 = inet6_sk(sk)->daddr;
294 hash = ((__force unsigned int) addr.addr.a6[0] ^
295 (__force unsigned int) addr.addr.a6[1] ^
296 (__force unsigned int) addr.addr.a6[2] ^
297 (__force unsigned int) addr.addr.a6[3]);
298 break;
299 default:
300 return NULL;
301 }
302
303 hash ^= (hash >> 24) ^ (hash >> 16) ^ (hash >> 8);
304
305 net = dev_net(dst->dev);
306 hash &= net->ipv4.tcp_metrics_hash_mask;
307
308 tm = __tcp_get_metrics(&addr, net, hash);
309 reclaim = false;
310 if (tm == TCP_METRICS_RECLAIM_PTR) {
311 reclaim = true;
312 tm = NULL;
313 }
314 if (!tm && create)
315 tm = tcpm_new(dst, &addr, hash, reclaim);
316 else
317 tcpm_check_stamp(tm, dst);
318
319 return tm;
320}
321
322/* Save metrics learned by this TCP session. This function is called
323 * only, when TCP finishes successfully i.e. when it enters TIME-WAIT
324 * or goes from LAST-ACK to CLOSE.
325 */
326void tcp_update_metrics(struct sock *sk)
327{
328 const struct inet_connection_sock *icsk = inet_csk(sk);
329 struct dst_entry *dst = __sk_dst_get(sk);
330 struct tcp_sock *tp = tcp_sk(sk);
331 struct tcp_metrics_block *tm;
332 unsigned long rtt;
333 u32 val;
334 int m;
335
336 if (sysctl_tcp_nometrics_save || !dst)
337 return;
338
339 if (dst->flags & DST_HOST)
340 dst_confirm(dst);
341
342 rcu_read_lock();
343 if (icsk->icsk_backoff || !tp->srtt) {
344 /* This session failed to estimate rtt. Why?
345 * Probably, no packets returned in time. Reset our
346 * results.
347 */
348 tm = tcp_get_metrics(sk, dst, false);
349 if (tm && !tcp_metric_locked(tm, TCP_METRIC_RTT))
350 tcp_metric_set(tm, TCP_METRIC_RTT, 0);
351 goto out_unlock;
352 } else
353 tm = tcp_get_metrics(sk, dst, true);
354
355 if (!tm)
356 goto out_unlock;
357
358 rtt = tcp_metric_get_jiffies(tm, TCP_METRIC_RTT);
359 m = rtt - tp->srtt;
360
361 /* If newly calculated rtt larger than stored one, store new
362 * one. Otherwise, use EWMA. Remember, rtt overestimation is
363 * always better than underestimation.
364 */
365 if (!tcp_metric_locked(tm, TCP_METRIC_RTT)) {
366 if (m <= 0)
367 rtt = tp->srtt;
368 else
369 rtt -= (m >> 3);
370 tcp_metric_set_msecs(tm, TCP_METRIC_RTT, rtt);
371 }
372
373 if (!tcp_metric_locked(tm, TCP_METRIC_RTTVAR)) {
374 unsigned long var;
375
376 if (m < 0)
377 m = -m;
378
379 /* Scale deviation to rttvar fixed point */
380 m >>= 1;
381 if (m < tp->mdev)
382 m = tp->mdev;
383
384 var = tcp_metric_get_jiffies(tm, TCP_METRIC_RTTVAR);
385 if (m >= var)
386 var = m;
387 else
388 var -= (var - m) >> 2;
389
390 tcp_metric_set_msecs(tm, TCP_METRIC_RTTVAR, var);
391 }
392
393 if (tcp_in_initial_slowstart(tp)) {
394 /* Slow start still did not finish. */
395 if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
396 val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
397 if (val && (tp->snd_cwnd >> 1) > val)
398 tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
399 tp->snd_cwnd >> 1);
400 }
401 if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
402 val = tcp_metric_get(tm, TCP_METRIC_CWND);
403 if (tp->snd_cwnd > val)
404 tcp_metric_set(tm, TCP_METRIC_CWND,
405 tp->snd_cwnd);
406 }
407 } else if (tp->snd_cwnd > tp->snd_ssthresh &&
408 icsk->icsk_ca_state == TCP_CA_Open) {
409 /* Cong. avoidance phase, cwnd is reliable. */
410 if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH))
411 tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
412 max(tp->snd_cwnd >> 1, tp->snd_ssthresh));
413 if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
414 val = tcp_metric_get(tm, TCP_METRIC_CWND);
415 tcp_metric_set(tm, RTAX_CWND, (val + tp->snd_cwnd) >> 1);
416 }
417 } else {
418 /* Else slow start did not finish, cwnd is non-sense,
419 * ssthresh may be also invalid.
420 */
421 if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
422 val = tcp_metric_get(tm, TCP_METRIC_CWND);
423 tcp_metric_set(tm, TCP_METRIC_CWND,
424 (val + tp->snd_ssthresh) >> 1);
425 }
426 if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
427 val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
428 if (val && tp->snd_ssthresh > val)
429 tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
430 tp->snd_ssthresh);
431 }
432 if (!tcp_metric_locked(tm, TCP_METRIC_REORDERING)) {
433 val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
434 if (val < tp->reordering &&
435 tp->reordering != sysctl_tcp_reordering)
436 tcp_metric_set(tm, TCP_METRIC_REORDERING,
437 tp->reordering);
438 }
439 }
440 tm->tcpm_stamp = jiffies;
441out_unlock:
442 rcu_read_unlock();
443}
444
/* Initialize metrics on socket.
 *
 * Seed this socket's congestion/RTT state from the cached per-destination
 * metrics, if any.  Called once the connection is established.  If no
 * destination or no metrics entry is available (or no valid RTT can be
 * derived), fall through to "reset" and install conservative defaults.
 */

void tcp_init_metrics(struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_get(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_metrics_block *tm;
	u32 val;

	if (dst == NULL)
		goto reset;

	dst_confirm(dst);

	rcu_read_lock();
	tm = tcp_get_metrics(sk, dst, true);
	if (!tm) {
		rcu_read_unlock();
		goto reset;
	}

	/* A locked CWND metric acts as a hard upper bound on cwnd. */
	if (tcp_metric_locked(tm, TCP_METRIC_CWND))
		tp->snd_cwnd_clamp = tcp_metric_get(tm, TCP_METRIC_CWND);

	val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
	if (val) {
		tp->snd_ssthresh = val;
		if (tp->snd_ssthresh > tp->snd_cwnd_clamp)
			tp->snd_ssthresh = tp->snd_cwnd_clamp;
	} else {
		/* ssthresh may have been reduced unnecessarily during
		 * 3WHS. Restore it back to its initial default.
		 */
		tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
	}
	val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
	if (val && tp->reordering != val) {
		/* A cached reordering degree makes FACK and early
		 * retransmit unreliable; disable both.
		 */
		tcp_disable_fack(tp);
		tcp_disable_early_retrans(tp);
		tp->reordering = val;
	}

	val = tcp_metric_get(tm, TCP_METRIC_RTT);
	if (val == 0 || tp->srtt == 0) {
		rcu_read_unlock();
		goto reset;
	}
	/* Initial rtt is determined from SYN,SYN-ACK.
	 * The segment is small and rtt may appear much
	 * less than real one. Use per-dst memory
	 * to make it more realistic.
	 *
	 * A bit of theory. RTT is time passed after "normal" sized packet
	 * is sent until it is ACKed. In normal circumstances sending small
	 * packets force peer to delay ACKs and calculation is correct too.
	 * The algorithm is adaptive and, provided we follow specs, it
	 * NEVER underestimate RTT. BUT! If peer tries to make some clever
	 * tricks sort of "quick acks" for time long enough to decrease RTT
	 * to low value, and then abruptly stops to do it and starts to delay
	 * ACKs, wait for troubles.
	 */
	val = msecs_to_jiffies(val);
	if (val > tp->srtt) {
		tp->srtt = val;
		tp->rtt_seq = tp->snd_nxt;
	}
	val = tcp_metric_get_jiffies(tm, TCP_METRIC_RTTVAR);
	if (val > tp->mdev) {
		tp->mdev = val;
		tp->mdev_max = tp->rttvar = max(tp->mdev, tcp_rto_min(sk));
	}
	rcu_read_unlock();

	tcp_set_rto(sk);
reset:
	/* Also reached by straight-line fall-through after a successful
	 * metric load; the srtt test below distinguishes the two cases.
	 * NOTE(review): dst may still be NULL here (goto on the first
	 * check) — presumably tcp_init_cwnd() tolerates a NULL dst;
	 * verify against its definition.
	 */
	if (tp->srtt == 0) {
		/* RFC6298: 5.7 We've failed to get a valid RTT sample from
		 * 3WHS. This is most likely due to retransmission,
		 * including spurious one. Reset the RTO back to 3secs
		 * from the more aggressive 1sec to avoid more spurious
		 * retransmission.
		 */
		tp->mdev = tp->mdev_max = tp->rttvar = TCP_TIMEOUT_FALLBACK;
		inet_csk(sk)->icsk_rto = TCP_TIMEOUT_FALLBACK;
	}
	/* Cut cwnd down to 1 per RFC5681 if SYN or SYN-ACK has been
	 * retransmitted. In light of RFC6298 more aggressive 1sec
	 * initRTO, we only reset cwnd when more than 1 SYN/SYN-ACK
	 * retransmission has occurred.
	 */
	if (tp->total_retrans > 1)
		tp->snd_cwnd = 1;
	else
		tp->snd_cwnd = tcp_init_cwnd(tp, dst);
	tp->snd_cwnd_stamp = tcp_time_stamp;
}
541
542bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst, bool paws_check)
543{
544 struct tcp_metrics_block *tm;
545 bool ret;
546
547 if (!dst)
548 return false;
549
550 rcu_read_lock();
551 tm = __tcp_get_metrics_req(req, dst);
552 if (paws_check) {
553 if (tm &&
554 (u32)get_seconds() - tm->tcpm_ts_stamp < TCP_PAWS_MSL &&
555 (s32)(tm->tcpm_ts - req->ts_recent) > TCP_PAWS_WINDOW)
556 ret = false;
557 else
558 ret = true;
559 } else {
560 if (tm && tcp_metric_get(tm, TCP_METRIC_RTT) && tm->tcpm_ts_stamp)
561 ret = true;
562 else
563 ret = false;
564 }
565 rcu_read_unlock();
566
567 return ret;
568}
569EXPORT_SYMBOL_GPL(tcp_peer_is_proven);
570
571void tcp_fetch_timewait_stamp(struct sock *sk, struct dst_entry *dst)
572{
573 struct tcp_metrics_block *tm;
574
575 rcu_read_lock();
576 tm = tcp_get_metrics(sk, dst, true);
577 if (tm) {
578 struct tcp_sock *tp = tcp_sk(sk);
579
580 if ((u32)get_seconds() - tm->tcpm_ts_stamp <= TCP_PAWS_MSL) {
581 tp->rx_opt.ts_recent_stamp = tm->tcpm_ts_stamp;
582 tp->rx_opt.ts_recent = tm->tcpm_ts;
583 }
584 }
585 rcu_read_unlock();
586}
587EXPORT_SYMBOL_GPL(tcp_fetch_timewait_stamp);
588
589/* VJ's idea. Save last timestamp seen from this destination and hold
590 * it at least for normal timewait interval to use for duplicate
591 * segment detection in subsequent connections, before they enter
592 * synchronized state.
593 */
594bool tcp_remember_stamp(struct sock *sk)
595{
596 struct dst_entry *dst = __sk_dst_get(sk);
597 bool ret = false;
598
599 if (dst) {
600 struct tcp_metrics_block *tm;
601
602 rcu_read_lock();
603 tm = tcp_get_metrics(sk, dst, true);
604 if (tm) {
605 struct tcp_sock *tp = tcp_sk(sk);
606
607 if ((s32)(tm->tcpm_ts - tp->rx_opt.ts_recent) <= 0 ||
608 ((u32)get_seconds() - tm->tcpm_ts_stamp > TCP_PAWS_MSL &&
609 tm->tcpm_ts_stamp <= (u32)tp->rx_opt.ts_recent_stamp)) {
610 tm->tcpm_ts_stamp = (u32)tp->rx_opt.ts_recent_stamp;
611 tm->tcpm_ts = tp->rx_opt.ts_recent;
612 }
613 ret = true;
614 }
615 rcu_read_unlock();
616 }
617 return ret;
618}
619
620bool tcp_tw_remember_stamp(struct inet_timewait_sock *tw)
621{
622 struct tcp_metrics_block *tm;
623 bool ret = false;
624
625 rcu_read_lock();
626 tm = __tcp_get_metrics_tw(tw);
627 if (tw) {
628 const struct tcp_timewait_sock *tcptw;
629 struct sock *sk = (struct sock *) tw;
630
631 tcptw = tcp_twsk(sk);
632 if ((s32)(tm->tcpm_ts - tcptw->tw_ts_recent) <= 0 ||
633 ((u32)get_seconds() - tm->tcpm_ts_stamp > TCP_PAWS_MSL &&
634 tm->tcpm_ts_stamp <= (u32)tcptw->tw_ts_recent_stamp)) {
635 tm->tcpm_ts_stamp = (u32)tcptw->tw_ts_recent_stamp;
636 tm->tcpm_ts = tcptw->tw_ts_recent;
637 }
638 ret = true;
639 }
640 rcu_read_unlock();
641
642 return ret;
643}
644
645static unsigned long tcpmhash_entries;
646static int __init set_tcpmhash_entries(char *str)
647{
648 ssize_t ret;
649
650 if (!str)
651 return 0;
652
653 ret = kstrtoul(str, 0, &tcpmhash_entries);
654 if (ret)
655 return 0;
656
657 return 1;
658}
659__setup("tcpmhash_entries=", set_tcpmhash_entries);
660
661static int __net_init tcp_net_metrics_init(struct net *net)
662{
663 int slots, size;
664
665 slots = tcpmhash_entries;
666 if (!slots) {
667 if (totalram_pages >= 128 * 1024)
668 slots = 16 * 1024;
669 else
670 slots = 8 * 1024;
671 }
672
673 size = slots * sizeof(struct tcpm_hash_bucket);
674
675 net->ipv4.tcp_metrics_hash = kzalloc(size, GFP_KERNEL);
676 if (!net->ipv4.tcp_metrics_hash)
677 return -ENOMEM;
678
679 net->ipv4.tcp_metrics_hash_mask = (slots - 1);
680
681 return 0;
682}
683
684static void __net_exit tcp_net_metrics_exit(struct net *net)
685{
686 kfree(net->ipv4.tcp_metrics_hash);
687}
688
/* Per-network-namespace lifecycle hooks for the TCP metrics cache:
 * allocate the hash table when a namespace is created, release it when
 * the namespace is torn down.
 */
static __net_initdata struct pernet_operations tcp_net_metrics_ops = {
	.init = tcp_net_metrics_init,
	.exit = tcp_net_metrics_exit,
};
693
694void __init tcp_metrics_init(void)
695{
696 register_pernet_subsys(&tcp_net_metrics_ops);
697}
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index b85d9fe7d66..65608863fde 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -49,56 +49,6 @@ struct inet_timewait_death_row tcp_death_row = {
49}; 49};
50EXPORT_SYMBOL_GPL(tcp_death_row); 50EXPORT_SYMBOL_GPL(tcp_death_row);
51 51
52/* VJ's idea. Save last timestamp seen from this destination
53 * and hold it at least for normal timewait interval to use for duplicate
54 * segment detection in subsequent connections, before they enter synchronized
55 * state.
56 */
57
58static bool tcp_remember_stamp(struct sock *sk)
59{
60 const struct inet_connection_sock *icsk = inet_csk(sk);
61 struct tcp_sock *tp = tcp_sk(sk);
62 struct inet_peer *peer;
63 bool release_it;
64
65 peer = icsk->icsk_af_ops->get_peer(sk, &release_it);
66 if (peer) {
67 if ((s32)(peer->tcp_ts - tp->rx_opt.ts_recent) <= 0 ||
68 ((u32)get_seconds() - peer->tcp_ts_stamp > TCP_PAWS_MSL &&
69 peer->tcp_ts_stamp <= (u32)tp->rx_opt.ts_recent_stamp)) {
70 peer->tcp_ts_stamp = (u32)tp->rx_opt.ts_recent_stamp;
71 peer->tcp_ts = tp->rx_opt.ts_recent;
72 }
73 if (release_it)
74 inet_putpeer(peer);
75 return true;
76 }
77
78 return false;
79}
80
81static bool tcp_tw_remember_stamp(struct inet_timewait_sock *tw)
82{
83 struct sock *sk = (struct sock *) tw;
84 struct inet_peer *peer;
85
86 peer = twsk_getpeer(sk);
87 if (peer) {
88 const struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
89
90 if ((s32)(peer->tcp_ts - tcptw->tw_ts_recent) <= 0 ||
91 ((u32)get_seconds() - peer->tcp_ts_stamp > TCP_PAWS_MSL &&
92 peer->tcp_ts_stamp <= (u32)tcptw->tw_ts_recent_stamp)) {
93 peer->tcp_ts_stamp = (u32)tcptw->tw_ts_recent_stamp;
94 peer->tcp_ts = tcptw->tw_ts_recent;
95 }
96 inet_putpeer(peer);
97 return true;
98 }
99 return false;
100}
101
102static bool tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win) 52static bool tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
103{ 53{
104 if (seq == s_win) 54 if (seq == s_win)
@@ -327,8 +277,9 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
327 if (tw != NULL) { 277 if (tw != NULL) {
328 struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw); 278 struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
329 const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1); 279 const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);
280 struct inet_sock *inet = inet_sk(sk);
330 281
331 tw->tw_transparent = inet_sk(sk)->transparent; 282 tw->tw_transparent = inet->transparent;
332 tw->tw_rcv_wscale = tp->rx_opt.rcv_wscale; 283 tw->tw_rcv_wscale = tp->rx_opt.rcv_wscale;
333 tcptw->tw_rcv_nxt = tp->rcv_nxt; 284 tcptw->tw_rcv_nxt = tp->rcv_nxt;
334 tcptw->tw_snd_nxt = tp->snd_nxt; 285 tcptw->tw_snd_nxt = tp->snd_nxt;
@@ -403,6 +354,7 @@ void tcp_twsk_destructor(struct sock *sk)
403{ 354{
404#ifdef CONFIG_TCP_MD5SIG 355#ifdef CONFIG_TCP_MD5SIG
405 struct tcp_timewait_sock *twsk = tcp_twsk(sk); 356 struct tcp_timewait_sock *twsk = tcp_twsk(sk);
357
406 if (twsk->tw_md5_key) { 358 if (twsk->tw_md5_key) {
407 tcp_free_md5sig_pool(); 359 tcp_free_md5sig_pool();
408 kfree_rcu(twsk->tw_md5_key, rcu); 360 kfree_rcu(twsk->tw_md5_key, rcu);
@@ -435,6 +387,8 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
435 struct tcp_sock *oldtp = tcp_sk(sk); 387 struct tcp_sock *oldtp = tcp_sk(sk);
436 struct tcp_cookie_values *oldcvp = oldtp->cookie_values; 388 struct tcp_cookie_values *oldcvp = oldtp->cookie_values;
437 389
390 newsk->sk_rx_dst = dst_clone(skb_dst(skb));
391
438 /* TCP Cookie Transactions require space for the cookie pair, 392 /* TCP Cookie Transactions require space for the cookie pair,
439 * as it differs for each connection. There is no need to 393 * as it differs for each connection. There is no need to
440 * copy any s_data_payload stored at the original socket. 394 * copy any s_data_payload stored at the original socket.
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 803cbfe82fb..c465d3e51e2 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2442,7 +2442,16 @@ int tcp_send_synack(struct sock *sk)
2442 return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC); 2442 return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
2443} 2443}
2444 2444
2445/* Prepare a SYN-ACK. */ 2445/**
2446 * tcp_make_synack - Prepare a SYN-ACK.
2447 * sk: listener socket
2448 * dst: dst entry attached to the SYNACK
2449 * req: request_sock pointer
2450 * rvp: request_values pointer
2451 *
2452 * Allocate one skb and build a SYNACK packet.
2453 * @dst is consumed : Caller should not use it again.
2454 */
2446struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst, 2455struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
2447 struct request_sock *req, 2456 struct request_sock *req,
2448 struct request_values *rvp) 2457 struct request_values *rvp)
@@ -2461,14 +2470,15 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
2461 2470
2462 if (cvp != NULL && cvp->s_data_constant && cvp->s_data_desired) 2471 if (cvp != NULL && cvp->s_data_constant && cvp->s_data_desired)
2463 s_data_desired = cvp->s_data_desired; 2472 s_data_desired = cvp->s_data_desired;
2464 skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15 + s_data_desired, 1, GFP_ATOMIC); 2473 skb = alloc_skb(MAX_TCP_HEADER + 15 + s_data_desired, GFP_ATOMIC);
2465 if (skb == NULL) 2474 if (unlikely(!skb)) {
2475 dst_release(dst);
2466 return NULL; 2476 return NULL;
2467 2477 }
2468 /* Reserve space for headers. */ 2478 /* Reserve space for headers. */
2469 skb_reserve(skb, MAX_TCP_HEADER); 2479 skb_reserve(skb, MAX_TCP_HEADER);
2470 2480
2471 skb_dst_set(skb, dst_clone(dst)); 2481 skb_dst_set(skb, dst);
2472 2482
2473 mss = dst_metric_advmss(dst); 2483 mss = dst_metric_advmss(dst);
2474 if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < mss) 2484 if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < mss)
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index eaca73644e7..ee37d47d472 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -108,6 +108,7 @@
108#include <net/xfrm.h> 108#include <net/xfrm.h>
109#include <trace/events/udp.h> 109#include <trace/events/udp.h>
110#include <linux/static_key.h> 110#include <linux/static_key.h>
111#include <trace/events/skb.h>
111#include "udp_impl.h" 112#include "udp_impl.h"
112 113
113struct udp_table udp_table __read_mostly; 114struct udp_table udp_table __read_mostly;
@@ -615,6 +616,7 @@ void __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable)
615 break; 616 break;
616 case ICMP_DEST_UNREACH: 617 case ICMP_DEST_UNREACH:
617 if (code == ICMP_FRAG_NEEDED) { /* Path MTU discovery */ 618 if (code == ICMP_FRAG_NEEDED) { /* Path MTU discovery */
619 ipv4_sk_update_pmtu(skb, sk, info);
618 if (inet->pmtudisc != IP_PMTUDISC_DONT) { 620 if (inet->pmtudisc != IP_PMTUDISC_DONT) {
619 err = EMSGSIZE; 621 err = EMSGSIZE;
620 harderr = 1; 622 harderr = 1;
@@ -1219,8 +1221,10 @@ try_again:
1219 goto csum_copy_err; 1221 goto csum_copy_err;
1220 } 1222 }
1221 1223
1222 if (err) 1224 if (unlikely(err)) {
1225 trace_kfree_skb(skb, udp_recvmsg);
1223 goto out_free; 1226 goto out_free;
1227 }
1224 1228
1225 if (!peeked) 1229 if (!peeked)
1226 UDP_INC_STATS_USER(sock_net(sk), 1230 UDP_INC_STATS_USER(sock_net(sk),
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index 0d3426cb5c4..87d3fcc302d 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -90,10 +90,6 @@ static int xfrm4_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
90 xdst->u.dst.dev = dev; 90 xdst->u.dst.dev = dev;
91 dev_hold(dev); 91 dev_hold(dev);
92 92
93 xdst->u.rt.peer = rt->peer;
94 if (rt->peer)
95 atomic_inc(&rt->peer->refcnt);
96
97 /* Sheit... I remember I did this right. Apparently, 93 /* Sheit... I remember I did this right. Apparently,
98 * it was magically lost, so this code needs audit */ 94 * it was magically lost, so this code needs audit */
99 xdst->u.rt.rt_flags = rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST | 95 xdst->u.rt.rt_flags = rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST |
@@ -102,7 +98,7 @@ static int xfrm4_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
102 xdst->u.rt.rt_src = rt->rt_src; 98 xdst->u.rt.rt_src = rt->rt_src;
103 xdst->u.rt.rt_dst = rt->rt_dst; 99 xdst->u.rt.rt_dst = rt->rt_dst;
104 xdst->u.rt.rt_gateway = rt->rt_gateway; 100 xdst->u.rt.rt_gateway = rt->rt_gateway;
105 xdst->u.rt.rt_spec_dst = rt->rt_spec_dst; 101 xdst->u.rt.rt_pmtu = rt->rt_pmtu;
106 102
107 return 0; 103 return 0;
108} 104}
@@ -212,9 +208,6 @@ static void xfrm4_dst_destroy(struct dst_entry *dst)
212 208
213 dst_destroy_metrics_generic(dst); 209 dst_destroy_metrics_generic(dst);
214 210
215 if (likely(xdst->u.rt.peer))
216 inet_putpeer(xdst->u.rt.peer);
217
218 xfrm_dst_destroy(xdst); 211 xfrm_dst_destroy(xdst);
219} 212}
220 213
diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c
index f1a4a2c28ed..49d4d26bda8 100644
--- a/net/ipv6/ah6.c
+++ b/net/ipv6/ah6.c
@@ -35,6 +35,7 @@
35#include <linux/pfkeyv2.h> 35#include <linux/pfkeyv2.h>
36#include <linux/string.h> 36#include <linux/string.h>
37#include <linux/scatterlist.h> 37#include <linux/scatterlist.h>
38#include <net/ip6_route.h>
38#include <net/icmp.h> 39#include <net/icmp.h>
39#include <net/ipv6.h> 40#include <net/ipv6.h>
40#include <net/protocol.h> 41#include <net/protocol.h>
@@ -621,7 +622,7 @@ static void ah6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
621 622
622 NETDEBUG(KERN_DEBUG "pmtu discovery on SA AH/%08x/%pI6\n", 623 NETDEBUG(KERN_DEBUG "pmtu discovery on SA AH/%08x/%pI6\n",
623 ntohl(ah->spi), &iph->daddr); 624 ntohl(ah->spi), &iph->daddr);
624 625 ip6_update_pmtu(skb, net, info, 0, 0);
625 xfrm_state_put(x); 626 xfrm_state_put(x);
626} 627}
627 628
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index db1521fcda5..89a615ba84f 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -39,6 +39,7 @@
39#include <linux/random.h> 39#include <linux/random.h>
40#include <linux/slab.h> 40#include <linux/slab.h>
41#include <linux/spinlock.h> 41#include <linux/spinlock.h>
42#include <net/ip6_route.h>
42#include <net/icmp.h> 43#include <net/icmp.h>
43#include <net/ipv6.h> 44#include <net/ipv6.h>
44#include <net/protocol.h> 45#include <net/protocol.h>
@@ -442,6 +443,7 @@ static void esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
442 return; 443 return;
443 pr_debug("pmtu discovery on SA ESP/%08x/%pI6\n", 444 pr_debug("pmtu discovery on SA ESP/%08x/%pI6\n",
444 ntohl(esph->spi), &iph->daddr); 445 ntohl(esph->spi), &iph->daddr);
446 ip6_update_pmtu(skb, net, info, 0, 0);
445 xfrm_state_put(x); 447 xfrm_state_put(x);
446} 448}
447 449
diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c
index 6447dc49429..fa3d9c32809 100644
--- a/net/ipv6/exthdrs.c
+++ b/net/ipv6/exthdrs.c
@@ -791,14 +791,14 @@ static int ipv6_renew_option(void *ohdr,
791 if (ohdr) { 791 if (ohdr) {
792 memcpy(*p, ohdr, ipv6_optlen((struct ipv6_opt_hdr *)ohdr)); 792 memcpy(*p, ohdr, ipv6_optlen((struct ipv6_opt_hdr *)ohdr));
793 *hdr = (struct ipv6_opt_hdr *)*p; 793 *hdr = (struct ipv6_opt_hdr *)*p;
794 *p += CMSG_ALIGN(ipv6_optlen(*(struct ipv6_opt_hdr **)hdr)); 794 *p += CMSG_ALIGN(ipv6_optlen(*hdr));
795 } 795 }
796 } else { 796 } else {
797 if (newopt) { 797 if (newopt) {
798 if (copy_from_user(*p, newopt, newoptlen)) 798 if (copy_from_user(*p, newopt, newoptlen))
799 return -EFAULT; 799 return -EFAULT;
800 *hdr = (struct ipv6_opt_hdr *)*p; 800 *hdr = (struct ipv6_opt_hdr *)*p;
801 if (ipv6_optlen(*(struct ipv6_opt_hdr **)hdr) > newoptlen) 801 if (ipv6_optlen(*hdr) > newoptlen)
802 return -EINVAL; 802 return -EINVAL;
803 *p += CMSG_ALIGN(newoptlen); 803 *p += CMSG_ALIGN(newoptlen);
804 } 804 }
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index 091a2971c7b..a113f7d7e93 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -188,14 +188,16 @@ static inline bool icmpv6_xrlim_allow(struct sock *sk, u8 type,
188 } else { 188 } else {
189 struct rt6_info *rt = (struct rt6_info *)dst; 189 struct rt6_info *rt = (struct rt6_info *)dst;
190 int tmo = net->ipv6.sysctl.icmpv6_time; 190 int tmo = net->ipv6.sysctl.icmpv6_time;
191 struct inet_peer *peer;
191 192
192 /* Give more bandwidth to wider prefixes. */ 193 /* Give more bandwidth to wider prefixes. */
193 if (rt->rt6i_dst.plen < 128) 194 if (rt->rt6i_dst.plen < 128)
194 tmo >>= ((128 - rt->rt6i_dst.plen)>>5); 195 tmo >>= ((128 - rt->rt6i_dst.plen)>>5);
195 196
196 if (!rt->rt6i_peer) 197 peer = inet_getpeer_v6(net->ipv6.peers, &rt->rt6i_dst.addr, 1);
197 rt6_bind_peer(rt, 1); 198 res = inet_peer_xrlim_allow(peer, tmo);
198 res = inet_peer_xrlim_allow(rt->rt6i_peer, tmo); 199 if (peer)
200 inet_putpeer(peer);
199 } 201 }
200 dst_release(dst); 202 dst_release(dst);
201 return res; 203 return res;
@@ -600,9 +602,8 @@ static void icmpv6_notify(struct sk_buff *skb, u8 type, u8 code, __be32 info)
600{ 602{
601 const struct inet6_protocol *ipprot; 603 const struct inet6_protocol *ipprot;
602 int inner_offset; 604 int inner_offset;
603 int hash;
604 u8 nexthdr;
605 __be16 frag_off; 605 __be16 frag_off;
606 u8 nexthdr;
606 607
607 if (!pskb_may_pull(skb, sizeof(struct ipv6hdr))) 608 if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
608 return; 609 return;
@@ -629,10 +630,8 @@ static void icmpv6_notify(struct sk_buff *skb, u8 type, u8 code, __be32 info)
629 --ANK (980726) 630 --ANK (980726)
630 */ 631 */
631 632
632 hash = nexthdr & (MAX_INET_PROTOS - 1);
633
634 rcu_read_lock(); 633 rcu_read_lock();
635 ipprot = rcu_dereference(inet6_protos[hash]); 634 ipprot = rcu_dereference(inet6_protos[nexthdr]);
636 if (ipprot && ipprot->err_handler) 635 if (ipprot && ipprot->err_handler)
637 ipprot->err_handler(skb, NULL, type, code, inner_offset, info); 636 ipprot->err_handler(skb, NULL, type, code, inner_offset, info);
638 rcu_read_unlock(); 637 rcu_read_unlock();
@@ -649,7 +648,6 @@ static int icmpv6_rcv(struct sk_buff *skb)
649 struct net_device *dev = skb->dev; 648 struct net_device *dev = skb->dev;
650 struct inet6_dev *idev = __in6_dev_get(dev); 649 struct inet6_dev *idev = __in6_dev_get(dev);
651 const struct in6_addr *saddr, *daddr; 650 const struct in6_addr *saddr, *daddr;
652 const struct ipv6hdr *orig_hdr;
653 struct icmp6hdr *hdr; 651 struct icmp6hdr *hdr;
654 u8 type; 652 u8 type;
655 653
@@ -661,7 +659,7 @@ static int icmpv6_rcv(struct sk_buff *skb)
661 XFRM_STATE_ICMP)) 659 XFRM_STATE_ICMP))
662 goto drop_no_count; 660 goto drop_no_count;
663 661
664 if (!pskb_may_pull(skb, sizeof(*hdr) + sizeof(*orig_hdr))) 662 if (!pskb_may_pull(skb, sizeof(*hdr) + sizeof(struct ipv6hdr)))
665 goto drop_no_count; 663 goto drop_no_count;
666 664
667 nh = skb_network_offset(skb); 665 nh = skb_network_offset(skb);
@@ -722,9 +720,6 @@ static int icmpv6_rcv(struct sk_buff *skb)
722 if (!pskb_may_pull(skb, sizeof(struct ipv6hdr))) 720 if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
723 goto discard_it; 721 goto discard_it;
724 hdr = icmp6_hdr(skb); 722 hdr = icmp6_hdr(skb);
725 orig_hdr = (struct ipv6hdr *) (hdr + 1);
726 rt6_pmtu_discovery(&orig_hdr->daddr, &orig_hdr->saddr, dev,
727 ntohl(hdr->icmp6_mtu));
728 723
729 /* 724 /*
730 * Drop through to notify 725 * Drop through to notify
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
index e6cee5292a0..bceb14450a1 100644
--- a/net/ipv6/inet6_connection_sock.c
+++ b/net/ipv6/inet6_connection_sock.c
@@ -55,26 +55,26 @@ int inet6_csk_bind_conflict(const struct sock *sk,
55EXPORT_SYMBOL_GPL(inet6_csk_bind_conflict); 55EXPORT_SYMBOL_GPL(inet6_csk_bind_conflict);
56 56
57struct dst_entry *inet6_csk_route_req(struct sock *sk, 57struct dst_entry *inet6_csk_route_req(struct sock *sk,
58 struct flowi6 *fl6,
58 const struct request_sock *req) 59 const struct request_sock *req)
59{ 60{
60 struct inet6_request_sock *treq = inet6_rsk(req); 61 struct inet6_request_sock *treq = inet6_rsk(req);
61 struct ipv6_pinfo *np = inet6_sk(sk); 62 struct ipv6_pinfo *np = inet6_sk(sk);
62 struct in6_addr *final_p, final; 63 struct in6_addr *final_p, final;
63 struct dst_entry *dst; 64 struct dst_entry *dst;
64 struct flowi6 fl6;
65
66 memset(&fl6, 0, sizeof(fl6));
67 fl6.flowi6_proto = IPPROTO_TCP;
68 fl6.daddr = treq->rmt_addr;
69 final_p = fl6_update_dst(&fl6, np->opt, &final);
70 fl6.saddr = treq->loc_addr;
71 fl6.flowi6_oif = sk->sk_bound_dev_if;
72 fl6.flowi6_mark = sk->sk_mark;
73 fl6.fl6_dport = inet_rsk(req)->rmt_port;
74 fl6.fl6_sport = inet_rsk(req)->loc_port;
75 security_req_classify_flow(req, flowi6_to_flowi(&fl6));
76 65
77 dst = ip6_dst_lookup_flow(sk, &fl6, final_p, false); 66 memset(fl6, 0, sizeof(*fl6));
67 fl6->flowi6_proto = IPPROTO_TCP;
68 fl6->daddr = treq->rmt_addr;
69 final_p = fl6_update_dst(fl6, np->opt, &final);
70 fl6->saddr = treq->loc_addr;
71 fl6->flowi6_oif = treq->iif;
72 fl6->flowi6_mark = sk->sk_mark;
73 fl6->fl6_dport = inet_rsk(req)->rmt_port;
74 fl6->fl6_sport = inet_rsk(req)->loc_port;
75 security_req_classify_flow(req, flowi6_to_flowi(fl6));
76
77 dst = ip6_dst_lookup_flow(sk, fl6, final_p, false);
78 if (IS_ERR(dst)) 78 if (IS_ERR(dst))
79 return NULL; 79 return NULL;
80 80
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 60832766196..13690d650c3 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -197,6 +197,7 @@ static struct fib6_table *fib6_alloc_table(struct net *net, u32 id)
197 table->tb6_id = id; 197 table->tb6_id = id;
198 table->tb6_root.leaf = net->ipv6.ip6_null_entry; 198 table->tb6_root.leaf = net->ipv6.ip6_null_entry;
199 table->tb6_root.fn_flags = RTN_ROOT | RTN_TL_ROOT | RTN_RTINFO; 199 table->tb6_root.fn_flags = RTN_ROOT | RTN_TL_ROOT | RTN_RTINFO;
200 inet_peer_base_init(&table->tb6_peers);
200 } 201 }
201 202
202 return table; 203 return table;
@@ -1633,6 +1634,7 @@ static int __net_init fib6_net_init(struct net *net)
1633 net->ipv6.fib6_main_tbl->tb6_root.leaf = net->ipv6.ip6_null_entry; 1634 net->ipv6.fib6_main_tbl->tb6_root.leaf = net->ipv6.ip6_null_entry;
1634 net->ipv6.fib6_main_tbl->tb6_root.fn_flags = 1635 net->ipv6.fib6_main_tbl->tb6_root.fn_flags =
1635 RTN_ROOT | RTN_TL_ROOT | RTN_RTINFO; 1636 RTN_ROOT | RTN_TL_ROOT | RTN_RTINFO;
1637 inet_peer_base_init(&net->ipv6.fib6_main_tbl->tb6_peers);
1636 1638
1637#ifdef CONFIG_IPV6_MULTIPLE_TABLES 1639#ifdef CONFIG_IPV6_MULTIPLE_TABLES
1638 net->ipv6.fib6_local_tbl = kzalloc(sizeof(*net->ipv6.fib6_local_tbl), 1640 net->ipv6.fib6_local_tbl = kzalloc(sizeof(*net->ipv6.fib6_local_tbl),
@@ -1643,6 +1645,7 @@ static int __net_init fib6_net_init(struct net *net)
1643 net->ipv6.fib6_local_tbl->tb6_root.leaf = net->ipv6.ip6_null_entry; 1645 net->ipv6.fib6_local_tbl->tb6_root.leaf = net->ipv6.ip6_null_entry;
1644 net->ipv6.fib6_local_tbl->tb6_root.fn_flags = 1646 net->ipv6.fib6_local_tbl->tb6_root.fn_flags =
1645 RTN_ROOT | RTN_TL_ROOT | RTN_RTINFO; 1647 RTN_ROOT | RTN_TL_ROOT | RTN_RTINFO;
1648 inet_peer_base_init(&net->ipv6.fib6_local_tbl->tb6_peers);
1646#endif 1649#endif
1647 fib6_tables_init(net); 1650 fib6_tables_init(net);
1648 1651
@@ -1666,8 +1669,10 @@ static void fib6_net_exit(struct net *net)
1666 del_timer_sync(&net->ipv6.ip6_fib_timer); 1669 del_timer_sync(&net->ipv6.ip6_fib_timer);
1667 1670
1668#ifdef CONFIG_IPV6_MULTIPLE_TABLES 1671#ifdef CONFIG_IPV6_MULTIPLE_TABLES
1672 inetpeer_invalidate_tree(&net->ipv6.fib6_local_tbl->tb6_peers);
1669 kfree(net->ipv6.fib6_local_tbl); 1673 kfree(net->ipv6.fib6_local_tbl);
1670#endif 1674#endif
1675 inetpeer_invalidate_tree(&net->ipv6.fib6_main_tbl->tb6_peers);
1671 kfree(net->ipv6.fib6_main_tbl); 1676 kfree(net->ipv6.fib6_main_tbl);
1672 kfree(net->ipv6.fib_table_hash); 1677 kfree(net->ipv6.fib_table_hash);
1673 kfree(net->ipv6.rt6_stats); 1678 kfree(net->ipv6.rt6_stats);
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
index 21a15dfe4a9..5ab923e51af 100644
--- a/net/ipv6/ip6_input.c
+++ b/net/ipv6/ip6_input.c
@@ -168,13 +168,12 @@ drop:
168 168
169static int ip6_input_finish(struct sk_buff *skb) 169static int ip6_input_finish(struct sk_buff *skb)
170{ 170{
171 struct net *net = dev_net(skb_dst(skb)->dev);
171 const struct inet6_protocol *ipprot; 172 const struct inet6_protocol *ipprot;
173 struct inet6_dev *idev;
172 unsigned int nhoff; 174 unsigned int nhoff;
173 int nexthdr; 175 int nexthdr;
174 bool raw; 176 bool raw;
175 u8 hash;
176 struct inet6_dev *idev;
177 struct net *net = dev_net(skb_dst(skb)->dev);
178 177
179 /* 178 /*
180 * Parse extension headers 179 * Parse extension headers
@@ -189,9 +188,7 @@ resubmit:
189 nexthdr = skb_network_header(skb)[nhoff]; 188 nexthdr = skb_network_header(skb)[nhoff];
190 189
191 raw = raw6_local_deliver(skb, nexthdr); 190 raw = raw6_local_deliver(skb, nexthdr);
192 191 if ((ipprot = rcu_dereference(inet6_protos[nexthdr])) != NULL) {
193 hash = nexthdr & (MAX_INET_PROTOS - 1);
194 if ((ipprot = rcu_dereference(inet6_protos[hash])) != NULL) {
195 int ret; 192 int ret;
196 193
197 if (ipprot->flags & INET6_PROTO_FINAL) { 194 if (ipprot->flags & INET6_PROTO_FINAL) {
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index decc21d19c5..5b2d63ed793 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -83,24 +83,12 @@ int ip6_local_out(struct sk_buff *skb)
83} 83}
84EXPORT_SYMBOL_GPL(ip6_local_out); 84EXPORT_SYMBOL_GPL(ip6_local_out);
85 85
86/* dev_loopback_xmit for use with netfilter. */
87static int ip6_dev_loopback_xmit(struct sk_buff *newskb)
88{
89 skb_reset_mac_header(newskb);
90 __skb_pull(newskb, skb_network_offset(newskb));
91 newskb->pkt_type = PACKET_LOOPBACK;
92 newskb->ip_summed = CHECKSUM_UNNECESSARY;
93 WARN_ON(!skb_dst(newskb));
94
95 netif_rx_ni(newskb);
96 return 0;
97}
98
99static int ip6_finish_output2(struct sk_buff *skb) 86static int ip6_finish_output2(struct sk_buff *skb)
100{ 87{
101 struct dst_entry *dst = skb_dst(skb); 88 struct dst_entry *dst = skb_dst(skb);
102 struct net_device *dev = dst->dev; 89 struct net_device *dev = dst->dev;
103 struct neighbour *neigh; 90 struct neighbour *neigh;
91 struct rt6_info *rt;
104 92
105 skb->protocol = htons(ETH_P_IPV6); 93 skb->protocol = htons(ETH_P_IPV6);
106 skb->dev = dev; 94 skb->dev = dev;
@@ -121,7 +109,7 @@ static int ip6_finish_output2(struct sk_buff *skb)
121 if (newskb) 109 if (newskb)
122 NF_HOOK(NFPROTO_IPV6, NF_INET_POST_ROUTING, 110 NF_HOOK(NFPROTO_IPV6, NF_INET_POST_ROUTING,
123 newskb, NULL, newskb->dev, 111 newskb, NULL, newskb->dev,
124 ip6_dev_loopback_xmit); 112 dev_loopback_xmit);
125 113
126 if (ipv6_hdr(skb)->hop_limit == 0) { 114 if (ipv6_hdr(skb)->hop_limit == 0) {
127 IP6_INC_STATS(dev_net(dev), idev, 115 IP6_INC_STATS(dev_net(dev), idev,
@@ -136,9 +124,10 @@ static int ip6_finish_output2(struct sk_buff *skb)
136 } 124 }
137 125
138 rcu_read_lock(); 126 rcu_read_lock();
139 neigh = dst_get_neighbour_noref(dst); 127 rt = (struct rt6_info *) dst;
128 neigh = rt->n;
140 if (neigh) { 129 if (neigh) {
141 int res = neigh_output(neigh, skb); 130 int res = dst_neigh_output(dst, neigh, skb);
142 131
143 rcu_read_unlock(); 132 rcu_read_unlock();
144 return res; 133 return res;
@@ -463,6 +452,7 @@ int ip6_forward(struct sk_buff *skb)
463 */ 452 */
464 if (skb->dev == dst->dev && opt->srcrt == 0 && !skb_sec_path(skb)) { 453 if (skb->dev == dst->dev && opt->srcrt == 0 && !skb_sec_path(skb)) {
465 struct in6_addr *target = NULL; 454 struct in6_addr *target = NULL;
455 struct inet_peer *peer;
466 struct rt6_info *rt; 456 struct rt6_info *rt;
467 457
468 /* 458 /*
@@ -476,14 +466,15 @@ int ip6_forward(struct sk_buff *skb)
476 else 466 else
477 target = &hdr->daddr; 467 target = &hdr->daddr;
478 468
479 if (!rt->rt6i_peer) 469 peer = inet_getpeer_v6(net->ipv6.peers, &rt->rt6i_dst.addr, 1);
480 rt6_bind_peer(rt, 1);
481 470
482 /* Limit redirects both by destination (here) 471 /* Limit redirects both by destination (here)
483 and by source (inside ndisc_send_redirect) 472 and by source (inside ndisc_send_redirect)
484 */ 473 */
485 if (inet_peer_xrlim_allow(rt->rt6i_peer, 1*HZ)) 474 if (inet_peer_xrlim_allow(peer, 1*HZ))
486 ndisc_send_redirect(skb, target); 475 ndisc_send_redirect(skb, target);
476 if (peer)
477 inet_putpeer(peer);
487 } else { 478 } else {
488 int addrtype = ipv6_addr_type(&hdr->saddr); 479 int addrtype = ipv6_addr_type(&hdr->saddr);
489 480
@@ -604,12 +595,13 @@ void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt)
604 595
605 if (rt && !(rt->dst.flags & DST_NOPEER)) { 596 if (rt && !(rt->dst.flags & DST_NOPEER)) {
606 struct inet_peer *peer; 597 struct inet_peer *peer;
598 struct net *net;
607 599
608 if (!rt->rt6i_peer) 600 net = dev_net(rt->dst.dev);
609 rt6_bind_peer(rt, 1); 601 peer = inet_getpeer_v6(net->ipv6.peers, &rt->rt6i_dst.addr, 1);
610 peer = rt->rt6i_peer;
611 if (peer) { 602 if (peer) {
612 fhdr->identification = htonl(inet_getid(peer, 0)); 603 fhdr->identification = htonl(inet_getid(peer, 0));
604 inet_putpeer(peer);
613 return; 605 return;
614 } 606 }
615 } 607 }
@@ -960,6 +952,7 @@ static int ip6_dst_lookup_tail(struct sock *sk,
960 struct net *net = sock_net(sk); 952 struct net *net = sock_net(sk);
961#ifdef CONFIG_IPV6_OPTIMISTIC_DAD 953#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
962 struct neighbour *n; 954 struct neighbour *n;
955 struct rt6_info *rt;
963#endif 956#endif
964 int err; 957 int err;
965 958
@@ -988,7 +981,8 @@ static int ip6_dst_lookup_tail(struct sock *sk,
988 * dst entry of the nexthop router 981 * dst entry of the nexthop router
989 */ 982 */
990 rcu_read_lock(); 983 rcu_read_lock();
991 n = dst_get_neighbour_noref(*dst); 984 rt = (struct rt6_info *) *dst;
985 n = rt->n;
992 if (n && !(n->nud_state & NUD_VALID)) { 986 if (n && !(n->nud_state & NUD_VALID)) {
993 struct inet6_ifaddr *ifp; 987 struct inet6_ifaddr *ifp;
994 struct flowi6 fl_gw6; 988 struct flowi6 fl_gw6;
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index c9015fad8d6..6af3fcfdcbb 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -252,7 +252,7 @@ static void ip6_dev_free(struct net_device *dev)
252} 252}
253 253
254/** 254/**
255 * ip6_tnl_create() - create a new tunnel 255 * ip6_tnl_create - create a new tunnel
256 * @p: tunnel parameters 256 * @p: tunnel parameters
257 * @pt: pointer to new tunnel 257 * @pt: pointer to new tunnel
258 * 258 *
@@ -684,24 +684,50 @@ static void ip6ip6_dscp_ecn_decapsulate(const struct ip6_tnl *t,
684 IP6_ECN_set_ce(ipv6_hdr(skb)); 684 IP6_ECN_set_ce(ipv6_hdr(skb));
685} 685}
686 686
687static __u32 ip6_tnl_get_cap(struct ip6_tnl *t,
688 const struct in6_addr *laddr,
689 const struct in6_addr *raddr)
690{
691 struct ip6_tnl_parm *p = &t->parms;
692 int ltype = ipv6_addr_type(laddr);
693 int rtype = ipv6_addr_type(raddr);
694 __u32 flags = 0;
695
696 if (ltype == IPV6_ADDR_ANY || rtype == IPV6_ADDR_ANY) {
697 flags = IP6_TNL_F_CAP_PER_PACKET;
698 } else if (ltype & (IPV6_ADDR_UNICAST|IPV6_ADDR_MULTICAST) &&
699 rtype & (IPV6_ADDR_UNICAST|IPV6_ADDR_MULTICAST) &&
700 !((ltype|rtype) & IPV6_ADDR_LOOPBACK) &&
701 (!((ltype|rtype) & IPV6_ADDR_LINKLOCAL) || p->link)) {
702 if (ltype&IPV6_ADDR_UNICAST)
703 flags |= IP6_TNL_F_CAP_XMIT;
704 if (rtype&IPV6_ADDR_UNICAST)
705 flags |= IP6_TNL_F_CAP_RCV;
706 }
707 return flags;
708}
709
687/* called with rcu_read_lock() */ 710/* called with rcu_read_lock() */
688static inline int ip6_tnl_rcv_ctl(struct ip6_tnl *t) 711static inline int ip6_tnl_rcv_ctl(struct ip6_tnl *t,
712 const struct in6_addr *laddr,
713 const struct in6_addr *raddr)
689{ 714{
690 struct ip6_tnl_parm *p = &t->parms; 715 struct ip6_tnl_parm *p = &t->parms;
691 int ret = 0; 716 int ret = 0;
692 struct net *net = dev_net(t->dev); 717 struct net *net = dev_net(t->dev);
693 718
694 if (p->flags & IP6_TNL_F_CAP_RCV) { 719 if ((p->flags & IP6_TNL_F_CAP_RCV) ||
720 ((p->flags & IP6_TNL_F_CAP_PER_PACKET) &&
721 (ip6_tnl_get_cap(t, laddr, raddr) & IP6_TNL_F_CAP_RCV))) {
695 struct net_device *ldev = NULL; 722 struct net_device *ldev = NULL;
696 723
697 if (p->link) 724 if (p->link)
698 ldev = dev_get_by_index_rcu(net, p->link); 725 ldev = dev_get_by_index_rcu(net, p->link);
699 726
700 if ((ipv6_addr_is_multicast(&p->laddr) || 727 if ((ipv6_addr_is_multicast(laddr) ||
701 likely(ipv6_chk_addr(net, &p->laddr, ldev, 0))) && 728 likely(ipv6_chk_addr(net, laddr, ldev, 0))) &&
702 likely(!ipv6_chk_addr(net, &p->raddr, NULL, 0))) 729 likely(!ipv6_chk_addr(net, raddr, NULL, 0)))
703 ret = 1; 730 ret = 1;
704
705 } 731 }
706 return ret; 732 return ret;
707} 733}
@@ -740,7 +766,7 @@ static int ip6_tnl_rcv(struct sk_buff *skb, __u16 protocol,
740 goto discard; 766 goto discard;
741 } 767 }
742 768
743 if (!ip6_tnl_rcv_ctl(t)) { 769 if (!ip6_tnl_rcv_ctl(t, &ipv6h->daddr, &ipv6h->saddr)) {
744 t->dev->stats.rx_dropped++; 770 t->dev->stats.rx_dropped++;
745 rcu_read_unlock(); 771 rcu_read_unlock();
746 goto discard; 772 goto discard;
@@ -1114,25 +1140,6 @@ tx_err:
1114 return NETDEV_TX_OK; 1140 return NETDEV_TX_OK;
1115} 1141}
1116 1142
1117static void ip6_tnl_set_cap(struct ip6_tnl *t)
1118{
1119 struct ip6_tnl_parm *p = &t->parms;
1120 int ltype = ipv6_addr_type(&p->laddr);
1121 int rtype = ipv6_addr_type(&p->raddr);
1122
1123 p->flags &= ~(IP6_TNL_F_CAP_XMIT|IP6_TNL_F_CAP_RCV);
1124
1125 if (ltype & (IPV6_ADDR_UNICAST|IPV6_ADDR_MULTICAST) &&
1126 rtype & (IPV6_ADDR_UNICAST|IPV6_ADDR_MULTICAST) &&
1127 !((ltype|rtype) & IPV6_ADDR_LOOPBACK) &&
1128 (!((ltype|rtype) & IPV6_ADDR_LINKLOCAL) || p->link)) {
1129 if (ltype&IPV6_ADDR_UNICAST)
1130 p->flags |= IP6_TNL_F_CAP_XMIT;
1131 if (rtype&IPV6_ADDR_UNICAST)
1132 p->flags |= IP6_TNL_F_CAP_RCV;
1133 }
1134}
1135
1136static void ip6_tnl_link_config(struct ip6_tnl *t) 1143static void ip6_tnl_link_config(struct ip6_tnl *t)
1137{ 1144{
1138 struct net_device *dev = t->dev; 1145 struct net_device *dev = t->dev;
@@ -1153,7 +1160,8 @@ static void ip6_tnl_link_config(struct ip6_tnl *t)
1153 if (!(p->flags&IP6_TNL_F_USE_ORIG_FLOWLABEL)) 1160 if (!(p->flags&IP6_TNL_F_USE_ORIG_FLOWLABEL))
1154 fl6->flowlabel |= IPV6_FLOWLABEL_MASK & p->flowinfo; 1161 fl6->flowlabel |= IPV6_FLOWLABEL_MASK & p->flowinfo;
1155 1162
1156 ip6_tnl_set_cap(t); 1163 p->flags &= ~(IP6_TNL_F_CAP_XMIT|IP6_TNL_F_CAP_RCV|IP6_TNL_F_CAP_PER_PACKET);
1164 p->flags |= ip6_tnl_get_cap(t, &p->laddr, &p->raddr);
1157 1165
1158 if (p->flags&IP6_TNL_F_CAP_XMIT && p->flags&IP6_TNL_F_CAP_RCV) 1166 if (p->flags&IP6_TNL_F_CAP_XMIT && p->flags&IP6_TNL_F_CAP_RCV)
1159 dev->flags |= IFF_POINTOPOINT; 1167 dev->flags |= IFF_POINTOPOINT;
@@ -1438,6 +1446,9 @@ static int __net_init ip6_fb_tnl_dev_init(struct net_device *dev)
1438 1446
1439 t->parms.proto = IPPROTO_IPV6; 1447 t->parms.proto = IPPROTO_IPV6;
1440 dev_hold(dev); 1448 dev_hold(dev);
1449
1450 ip6_tnl_link_config(t);
1451
1441 rcu_assign_pointer(ip6n->tnls_wc[0], t); 1452 rcu_assign_pointer(ip6n->tnls_wc[0], t);
1442 return 0; 1453 return 0;
1443} 1454}
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 461e47c8e95..4532973f0dd 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -2104,8 +2104,9 @@ static int __ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
2104 if (c->mf6c_parent >= MAXMIFS) 2104 if (c->mf6c_parent >= MAXMIFS)
2105 return -ENOENT; 2105 return -ENOENT;
2106 2106
2107 if (MIF_EXISTS(mrt, c->mf6c_parent)) 2107 if (MIF_EXISTS(mrt, c->mf6c_parent) &&
2108 RTA_PUT(skb, RTA_IIF, 4, &mrt->vif6_table[c->mf6c_parent].dev->ifindex); 2108 nla_put_u32(skb, RTA_IIF, mrt->vif6_table[c->mf6c_parent].dev->ifindex) < 0)
2109 return -EMSGSIZE;
2109 2110
2110 mp_head = (struct rtattr *)skb_put(skb, RTA_LENGTH(0)); 2111 mp_head = (struct rtattr *)skb_put(skb, RTA_LENGTH(0));
2111 2112
diff --git a/net/ipv6/ipcomp6.c b/net/ipv6/ipcomp6.c
index 5cb75bfe45b..92832385a8e 100644
--- a/net/ipv6/ipcomp6.c
+++ b/net/ipv6/ipcomp6.c
@@ -46,6 +46,7 @@
46#include <linux/list.h> 46#include <linux/list.h>
47#include <linux/vmalloc.h> 47#include <linux/vmalloc.h>
48#include <linux/rtnetlink.h> 48#include <linux/rtnetlink.h>
49#include <net/ip6_route.h>
49#include <net/icmp.h> 50#include <net/icmp.h>
50#include <net/ipv6.h> 51#include <net/ipv6.h>
51#include <net/protocol.h> 52#include <net/protocol.h>
@@ -74,6 +75,7 @@ static void ipcomp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
74 75
75 pr_debug("pmtu discovery on SA IPCOMP/%08x/%pI6\n", 76 pr_debug("pmtu discovery on SA IPCOMP/%08x/%pI6\n",
76 spi, &iph->daddr); 77 spi, &iph->daddr);
78 ip6_update_pmtu(skb, net, info, 0, 0);
77 xfrm_state_put(x); 79 xfrm_state_put(x);
78} 80}
79 81
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 54f62d3b8dd..0fddd571400 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -1472,6 +1472,7 @@ void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target)
1472 struct net *net = dev_net(dev); 1472 struct net *net = dev_net(dev);
1473 struct sock *sk = net->ipv6.ndisc_sk; 1473 struct sock *sk = net->ipv6.ndisc_sk;
1474 int len = sizeof(struct icmp6hdr) + 2 * sizeof(struct in6_addr); 1474 int len = sizeof(struct icmp6hdr) + 2 * sizeof(struct in6_addr);
1475 struct inet_peer *peer;
1475 struct sk_buff *buff; 1476 struct sk_buff *buff;
1476 struct icmp6hdr *icmph; 1477 struct icmp6hdr *icmph;
1477 struct in6_addr saddr_buf; 1478 struct in6_addr saddr_buf;
@@ -1485,6 +1486,7 @@ void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target)
1485 int rd_len; 1486 int rd_len;
1486 int err; 1487 int err;
1487 u8 ha_buf[MAX_ADDR_LEN], *ha = NULL; 1488 u8 ha_buf[MAX_ADDR_LEN], *ha = NULL;
1489 bool ret;
1488 1490
1489 if (ipv6_get_lladdr(dev, &saddr_buf, IFA_F_TENTATIVE)) { 1491 if (ipv6_get_lladdr(dev, &saddr_buf, IFA_F_TENTATIVE)) {
1490 ND_PRINTK(2, warn, "Redirect: no link-local address on %s\n", 1492 ND_PRINTK(2, warn, "Redirect: no link-local address on %s\n",
@@ -1518,9 +1520,11 @@ void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target)
1518 "Redirect: destination is not a neighbour\n"); 1520 "Redirect: destination is not a neighbour\n");
1519 goto release; 1521 goto release;
1520 } 1522 }
1521 if (!rt->rt6i_peer) 1523 peer = inet_getpeer_v6(net->ipv6.peers, &rt->rt6i_dst.addr, 1);
1522 rt6_bind_peer(rt, 1); 1524 ret = inet_peer_xrlim_allow(peer, 1*HZ);
1523 if (!inet_peer_xrlim_allow(rt->rt6i_peer, 1*HZ)) 1525 if (peer)
1526 inet_putpeer(peer);
1527 if (!ret)
1524 goto release; 1528 goto release;
1525 1529
1526 if (dev->addr_len) { 1530 if (dev->addr_len) {
diff --git a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
index 3224ef90a21..4794f96cf2e 100644
--- a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
@@ -143,11 +143,11 @@ static int ipv6_get_l4proto(const struct sk_buff *skb, unsigned int nhoff,
143 return NF_ACCEPT; 143 return NF_ACCEPT;
144} 144}
145 145
146static unsigned int ipv6_confirm(unsigned int hooknum, 146static unsigned int ipv6_helper(unsigned int hooknum,
147 struct sk_buff *skb, 147 struct sk_buff *skb,
148 const struct net_device *in, 148 const struct net_device *in,
149 const struct net_device *out, 149 const struct net_device *out,
150 int (*okfn)(struct sk_buff *)) 150 int (*okfn)(struct sk_buff *))
151{ 151{
152 struct nf_conn *ct; 152 struct nf_conn *ct;
153 const struct nf_conn_help *help; 153 const struct nf_conn_help *help;
@@ -161,15 +161,15 @@ static unsigned int ipv6_confirm(unsigned int hooknum,
161 /* This is where we call the helper: as the packet goes out. */ 161 /* This is where we call the helper: as the packet goes out. */
162 ct = nf_ct_get(skb, &ctinfo); 162 ct = nf_ct_get(skb, &ctinfo);
163 if (!ct || ctinfo == IP_CT_RELATED_REPLY) 163 if (!ct || ctinfo == IP_CT_RELATED_REPLY)
164 goto out; 164 return NF_ACCEPT;
165 165
166 help = nfct_help(ct); 166 help = nfct_help(ct);
167 if (!help) 167 if (!help)
168 goto out; 168 return NF_ACCEPT;
169 /* rcu_read_lock()ed by nf_hook_slow */ 169 /* rcu_read_lock()ed by nf_hook_slow */
170 helper = rcu_dereference(help->helper); 170 helper = rcu_dereference(help->helper);
171 if (!helper) 171 if (!helper)
172 goto out; 172 return NF_ACCEPT;
173 173
174 protoff = nf_ct_ipv6_skip_exthdr(skb, extoff, &pnum, 174 protoff = nf_ct_ipv6_skip_exthdr(skb, extoff, &pnum,
175 skb->len - extoff); 175 skb->len - extoff);
@@ -179,12 +179,19 @@ static unsigned int ipv6_confirm(unsigned int hooknum,
179 } 179 }
180 180
181 ret = helper->help(skb, protoff, ct, ctinfo); 181 ret = helper->help(skb, protoff, ct, ctinfo);
182 if (ret != NF_ACCEPT) { 182 if (ret != NF_ACCEPT && (ret & NF_VERDICT_MASK) != NF_QUEUE) {
183 nf_log_packet(NFPROTO_IPV6, hooknum, skb, in, out, NULL, 183 nf_log_packet(NFPROTO_IPV6, hooknum, skb, in, out, NULL,
184 "nf_ct_%s: dropping packet", helper->name); 184 "nf_ct_%s: dropping packet", helper->name);
185 return ret;
186 } 185 }
187out: 186 return ret;
187}
188
189static unsigned int ipv6_confirm(unsigned int hooknum,
190 struct sk_buff *skb,
191 const struct net_device *in,
192 const struct net_device *out,
193 int (*okfn)(struct sk_buff *))
194{
188 /* We've seen it coming out the other side: confirm it */ 195 /* We've seen it coming out the other side: confirm it */
189 return nf_conntrack_confirm(skb); 196 return nf_conntrack_confirm(skb);
190} 197}
@@ -254,6 +261,13 @@ static struct nf_hook_ops ipv6_conntrack_ops[] __read_mostly = {
254 .priority = NF_IP6_PRI_CONNTRACK, 261 .priority = NF_IP6_PRI_CONNTRACK,
255 }, 262 },
256 { 263 {
264 .hook = ipv6_helper,
265 .owner = THIS_MODULE,
266 .pf = NFPROTO_IPV6,
267 .hooknum = NF_INET_POST_ROUTING,
268 .priority = NF_IP6_PRI_CONNTRACK_HELPER,
269 },
270 {
257 .hook = ipv6_confirm, 271 .hook = ipv6_confirm,
258 .owner = THIS_MODULE, 272 .owner = THIS_MODULE,
259 .pf = NFPROTO_IPV6, 273 .pf = NFPROTO_IPV6,
@@ -261,6 +275,13 @@ static struct nf_hook_ops ipv6_conntrack_ops[] __read_mostly = {
261 .priority = NF_IP6_PRI_LAST, 275 .priority = NF_IP6_PRI_LAST,
262 }, 276 },
263 { 277 {
278 .hook = ipv6_helper,
279 .owner = THIS_MODULE,
280 .pf = NFPROTO_IPV6,
281 .hooknum = NF_INET_LOCAL_IN,
282 .priority = NF_IP6_PRI_CONNTRACK_HELPER,
283 },
284 {
264 .hook = ipv6_confirm, 285 .hook = ipv6_confirm,
265 .owner = THIS_MODULE, 286 .owner = THIS_MODULE,
266 .pf = NFPROTO_IPV6, 287 .pf = NFPROTO_IPV6,
@@ -333,37 +354,75 @@ MODULE_ALIAS("nf_conntrack-" __stringify(AF_INET6));
333MODULE_LICENSE("GPL"); 354MODULE_LICENSE("GPL");
334MODULE_AUTHOR("Yasuyuki KOZAKAI @USAGI <yasuyuki.kozakai@toshiba.co.jp>"); 355MODULE_AUTHOR("Yasuyuki KOZAKAI @USAGI <yasuyuki.kozakai@toshiba.co.jp>");
335 356
336static int __init nf_conntrack_l3proto_ipv6_init(void) 357static int ipv6_net_init(struct net *net)
337{ 358{
338 int ret = 0; 359 int ret = 0;
339 360
340 need_conntrack(); 361 ret = nf_conntrack_l4proto_register(net,
341 nf_defrag_ipv6_enable(); 362 &nf_conntrack_l4proto_tcp6);
342
343 ret = nf_conntrack_l4proto_register(&nf_conntrack_l4proto_tcp6);
344 if (ret < 0) { 363 if (ret < 0) {
345 pr_err("nf_conntrack_ipv6: can't register tcp.\n"); 364 printk(KERN_ERR "nf_conntrack_l4proto_tcp6: protocol register failed\n");
346 return ret; 365 goto out;
347 } 366 }
348 367 ret = nf_conntrack_l4proto_register(net,
349 ret = nf_conntrack_l4proto_register(&nf_conntrack_l4proto_udp6); 368 &nf_conntrack_l4proto_udp6);
350 if (ret < 0) { 369 if (ret < 0) {
351 pr_err("nf_conntrack_ipv6: can't register udp.\n"); 370 printk(KERN_ERR "nf_conntrack_l4proto_udp6: protocol register failed\n");
352 goto cleanup_tcp; 371 goto cleanup_tcp6;
353 } 372 }
354 373 ret = nf_conntrack_l4proto_register(net,
355 ret = nf_conntrack_l4proto_register(&nf_conntrack_l4proto_icmpv6); 374 &nf_conntrack_l4proto_icmpv6);
356 if (ret < 0) { 375 if (ret < 0) {
357 pr_err("nf_conntrack_ipv6: can't register icmpv6.\n"); 376 printk(KERN_ERR "nf_conntrack_l4proto_icmp6: protocol register failed\n");
358 goto cleanup_udp; 377 goto cleanup_udp6;
359 } 378 }
360 379 ret = nf_conntrack_l3proto_register(net,
361 ret = nf_conntrack_l3proto_register(&nf_conntrack_l3proto_ipv6); 380 &nf_conntrack_l3proto_ipv6);
362 if (ret < 0) { 381 if (ret < 0) {
363 pr_err("nf_conntrack_ipv6: can't register ipv6\n"); 382 printk(KERN_ERR "nf_conntrack_l3proto_ipv6: protocol register failed\n");
364 goto cleanup_icmpv6; 383 goto cleanup_icmpv6;
365 } 384 }
385 return 0;
386 cleanup_icmpv6:
387 nf_conntrack_l4proto_unregister(net,
388 &nf_conntrack_l4proto_icmpv6);
389 cleanup_udp6:
390 nf_conntrack_l4proto_unregister(net,
391 &nf_conntrack_l4proto_udp6);
392 cleanup_tcp6:
393 nf_conntrack_l4proto_unregister(net,
394 &nf_conntrack_l4proto_tcp6);
395 out:
396 return ret;
397}
366 398
399static void ipv6_net_exit(struct net *net)
400{
401 nf_conntrack_l3proto_unregister(net,
402 &nf_conntrack_l3proto_ipv6);
403 nf_conntrack_l4proto_unregister(net,
404 &nf_conntrack_l4proto_icmpv6);
405 nf_conntrack_l4proto_unregister(net,
406 &nf_conntrack_l4proto_udp6);
407 nf_conntrack_l4proto_unregister(net,
408 &nf_conntrack_l4proto_tcp6);
409}
410
411static struct pernet_operations ipv6_net_ops = {
412 .init = ipv6_net_init,
413 .exit = ipv6_net_exit,
414};
415
416static int __init nf_conntrack_l3proto_ipv6_init(void)
417{
418 int ret = 0;
419
420 need_conntrack();
421 nf_defrag_ipv6_enable();
422
423 ret = register_pernet_subsys(&ipv6_net_ops);
424 if (ret < 0)
425 goto cleanup_pernet;
367 ret = nf_register_hooks(ipv6_conntrack_ops, 426 ret = nf_register_hooks(ipv6_conntrack_ops,
368 ARRAY_SIZE(ipv6_conntrack_ops)); 427 ARRAY_SIZE(ipv6_conntrack_ops));
369 if (ret < 0) { 428 if (ret < 0) {
@@ -374,13 +433,8 @@ static int __init nf_conntrack_l3proto_ipv6_init(void)
374 return ret; 433 return ret;
375 434
376 cleanup_ipv6: 435 cleanup_ipv6:
377 nf_conntrack_l3proto_unregister(&nf_conntrack_l3proto_ipv6); 436 unregister_pernet_subsys(&ipv6_net_ops);
378 cleanup_icmpv6: 437 cleanup_pernet:
379 nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_icmpv6);
380 cleanup_udp:
381 nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_udp6);
382 cleanup_tcp:
383 nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_tcp6);
384 return ret; 438 return ret;
385} 439}
386 440
@@ -388,10 +442,7 @@ static void __exit nf_conntrack_l3proto_ipv6_fini(void)
388{ 442{
389 synchronize_net(); 443 synchronize_net();
390 nf_unregister_hooks(ipv6_conntrack_ops, ARRAY_SIZE(ipv6_conntrack_ops)); 444 nf_unregister_hooks(ipv6_conntrack_ops, ARRAY_SIZE(ipv6_conntrack_ops));
391 nf_conntrack_l3proto_unregister(&nf_conntrack_l3proto_ipv6); 445 unregister_pernet_subsys(&ipv6_net_ops);
392 nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_icmpv6);
393 nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_udp6);
394 nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_tcp6);
395} 446}
396 447
397module_init(nf_conntrack_l3proto_ipv6_init); 448module_init(nf_conntrack_l3proto_ipv6_init);
diff --git a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
index 3e81904fbbc..2d54b2061d6 100644
--- a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
@@ -29,6 +29,11 @@
29 29
30static unsigned int nf_ct_icmpv6_timeout __read_mostly = 30*HZ; 30static unsigned int nf_ct_icmpv6_timeout __read_mostly = 30*HZ;
31 31
32static inline struct nf_icmp_net *icmpv6_pernet(struct net *net)
33{
34 return &net->ct.nf_ct_proto.icmpv6;
35}
36
32static bool icmpv6_pkt_to_tuple(const struct sk_buff *skb, 37static bool icmpv6_pkt_to_tuple(const struct sk_buff *skb,
33 unsigned int dataoff, 38 unsigned int dataoff,
34 struct nf_conntrack_tuple *tuple) 39 struct nf_conntrack_tuple *tuple)
@@ -90,7 +95,7 @@ static int icmpv6_print_tuple(struct seq_file *s,
90 95
91static unsigned int *icmpv6_get_timeouts(struct net *net) 96static unsigned int *icmpv6_get_timeouts(struct net *net)
92{ 97{
93 return &nf_ct_icmpv6_timeout; 98 return &icmpv6_pernet(net)->timeout;
94} 99}
95 100
96/* Returns verdict for packet, or -1 for invalid. */ 101/* Returns verdict for packet, or -1 for invalid. */
@@ -281,16 +286,18 @@ static int icmpv6_nlattr_tuple_size(void)
281#include <linux/netfilter/nfnetlink.h> 286#include <linux/netfilter/nfnetlink.h>
282#include <linux/netfilter/nfnetlink_cttimeout.h> 287#include <linux/netfilter/nfnetlink_cttimeout.h>
283 288
284static int icmpv6_timeout_nlattr_to_obj(struct nlattr *tb[], void *data) 289static int icmpv6_timeout_nlattr_to_obj(struct nlattr *tb[],
290 struct net *net, void *data)
285{ 291{
286 unsigned int *timeout = data; 292 unsigned int *timeout = data;
293 struct nf_icmp_net *in = icmpv6_pernet(net);
287 294
288 if (tb[CTA_TIMEOUT_ICMPV6_TIMEOUT]) { 295 if (tb[CTA_TIMEOUT_ICMPV6_TIMEOUT]) {
289 *timeout = 296 *timeout =
290 ntohl(nla_get_be32(tb[CTA_TIMEOUT_ICMPV6_TIMEOUT])) * HZ; 297 ntohl(nla_get_be32(tb[CTA_TIMEOUT_ICMPV6_TIMEOUT])) * HZ;
291 } else { 298 } else {
292 /* Set default ICMPv6 timeout. */ 299 /* Set default ICMPv6 timeout. */
293 *timeout = nf_ct_icmpv6_timeout; 300 *timeout = in->timeout;
294 } 301 }
295 return 0; 302 return 0;
296} 303}
@@ -315,11 +322,9 @@ icmpv6_timeout_nla_policy[CTA_TIMEOUT_ICMPV6_MAX+1] = {
315#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ 322#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
316 323
317#ifdef CONFIG_SYSCTL 324#ifdef CONFIG_SYSCTL
318static struct ctl_table_header *icmpv6_sysctl_header;
319static struct ctl_table icmpv6_sysctl_table[] = { 325static struct ctl_table icmpv6_sysctl_table[] = {
320 { 326 {
321 .procname = "nf_conntrack_icmpv6_timeout", 327 .procname = "nf_conntrack_icmpv6_timeout",
322 .data = &nf_ct_icmpv6_timeout,
323 .maxlen = sizeof(unsigned int), 328 .maxlen = sizeof(unsigned int),
324 .mode = 0644, 329 .mode = 0644,
325 .proc_handler = proc_dointvec_jiffies, 330 .proc_handler = proc_dointvec_jiffies,
@@ -328,6 +333,36 @@ static struct ctl_table icmpv6_sysctl_table[] = {
328}; 333};
329#endif /* CONFIG_SYSCTL */ 334#endif /* CONFIG_SYSCTL */
330 335
336static int icmpv6_kmemdup_sysctl_table(struct nf_proto_net *pn,
337 struct nf_icmp_net *in)
338{
339#ifdef CONFIG_SYSCTL
340 pn->ctl_table = kmemdup(icmpv6_sysctl_table,
341 sizeof(icmpv6_sysctl_table),
342 GFP_KERNEL);
343 if (!pn->ctl_table)
344 return -ENOMEM;
345
346 pn->ctl_table[0].data = &in->timeout;
347#endif
348 return 0;
349}
350
351static int icmpv6_init_net(struct net *net, u_int16_t proto)
352{
353 struct nf_icmp_net *in = icmpv6_pernet(net);
354 struct nf_proto_net *pn = &in->pn;
355
356 in->timeout = nf_ct_icmpv6_timeout;
357
358 return icmpv6_kmemdup_sysctl_table(pn, in);
359}
360
361static struct nf_proto_net *icmpv6_get_net_proto(struct net *net)
362{
363 return &net->ct.nf_ct_proto.icmpv6.pn;
364}
365
331struct nf_conntrack_l4proto nf_conntrack_l4proto_icmpv6 __read_mostly = 366struct nf_conntrack_l4proto nf_conntrack_l4proto_icmpv6 __read_mostly =
332{ 367{
333 .l3proto = PF_INET6, 368 .l3proto = PF_INET6,
@@ -355,8 +390,6 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_icmpv6 __read_mostly =
355 .nla_policy = icmpv6_timeout_nla_policy, 390 .nla_policy = icmpv6_timeout_nla_policy,
356 }, 391 },
357#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ 392#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
358#ifdef CONFIG_SYSCTL 393 .init_net = icmpv6_init_net,
359 .ctl_table_header = &icmpv6_sysctl_header, 394 .get_net_proto = icmpv6_get_net_proto,
360 .ctl_table = icmpv6_sysctl_table,
361#endif
362}; 395};
diff --git a/net/ipv6/protocol.c b/net/ipv6/protocol.c
index 9a7978fdc02..053082dfc93 100644
--- a/net/ipv6/protocol.c
+++ b/net/ipv6/protocol.c
@@ -29,9 +29,7 @@ const struct inet6_protocol __rcu *inet6_protos[MAX_INET_PROTOS] __read_mostly;
29 29
30int inet6_add_protocol(const struct inet6_protocol *prot, unsigned char protocol) 30int inet6_add_protocol(const struct inet6_protocol *prot, unsigned char protocol)
31{ 31{
32 int hash = protocol & (MAX_INET_PROTOS - 1); 32 return !cmpxchg((const struct inet6_protocol **)&inet6_protos[protocol],
33
34 return !cmpxchg((const struct inet6_protocol **)&inet6_protos[hash],
35 NULL, prot) ? 0 : -1; 33 NULL, prot) ? 0 : -1;
36} 34}
37EXPORT_SYMBOL(inet6_add_protocol); 35EXPORT_SYMBOL(inet6_add_protocol);
@@ -42,9 +40,9 @@ EXPORT_SYMBOL(inet6_add_protocol);
42 40
43int inet6_del_protocol(const struct inet6_protocol *prot, unsigned char protocol) 41int inet6_del_protocol(const struct inet6_protocol *prot, unsigned char protocol)
44{ 42{
45 int ret, hash = protocol & (MAX_INET_PROTOS - 1); 43 int ret;
46 44
47 ret = (cmpxchg((const struct inet6_protocol **)&inet6_protos[hash], 45 ret = (cmpxchg((const struct inet6_protocol **)&inet6_protos[protocol],
48 prot, NULL) == prot) ? 0 : -1; 46 prot, NULL) == prot) ? 0 : -1;
49 47
50 synchronize_net(); 48 synchronize_net();
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 93d69836fde..b5c1dcb2773 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -165,7 +165,7 @@ static bool ipv6_raw_deliver(struct sk_buff *skb, int nexthdr)
165 saddr = &ipv6_hdr(skb)->saddr; 165 saddr = &ipv6_hdr(skb)->saddr;
166 daddr = saddr + 1; 166 daddr = saddr + 1;
167 167
168 hash = nexthdr & (MAX_INET_PROTOS - 1); 168 hash = nexthdr & (RAW_HTABLE_SIZE - 1);
169 169
170 read_lock(&raw_v6_hashinfo.lock); 170 read_lock(&raw_v6_hashinfo.lock);
171 sk = sk_head(&raw_v6_hashinfo.ht[hash]); 171 sk = sk_head(&raw_v6_hashinfo.ht[hash]);
@@ -229,7 +229,7 @@ bool raw6_local_deliver(struct sk_buff *skb, int nexthdr)
229{ 229{
230 struct sock *raw_sk; 230 struct sock *raw_sk;
231 231
232 raw_sk = sk_head(&raw_v6_hashinfo.ht[nexthdr & (MAX_INET_PROTOS - 1)]); 232 raw_sk = sk_head(&raw_v6_hashinfo.ht[nexthdr & (RAW_HTABLE_SIZE - 1)]);
233 if (raw_sk && !ipv6_raw_deliver(skb, nexthdr)) 233 if (raw_sk && !ipv6_raw_deliver(skb, nexthdr))
234 raw_sk = NULL; 234 raw_sk = NULL;
235 235
@@ -328,9 +328,10 @@ static void rawv6_err(struct sock *sk, struct sk_buff *skb,
328 return; 328 return;
329 329
330 harderr = icmpv6_err_convert(type, code, &err); 330 harderr = icmpv6_err_convert(type, code, &err);
331 if (type == ICMPV6_PKT_TOOBIG) 331 if (type == ICMPV6_PKT_TOOBIG) {
332 ip6_sk_update_pmtu(skb, sk, info);
332 harderr = (np->pmtudisc == IPV6_PMTUDISC_DO); 333 harderr = (np->pmtudisc == IPV6_PMTUDISC_DO);
333 334 }
334 if (np->recverr) { 335 if (np->recverr) {
335 u8 *payload = skb->data; 336 u8 *payload = skb->data;
336 if (!inet->hdrincl) 337 if (!inet->hdrincl)
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index becb048d18d..563f12c1c99 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -99,10 +99,7 @@ static u32 *ipv6_cow_metrics(struct dst_entry *dst, unsigned long old)
99 if (!(rt->dst.flags & DST_HOST)) 99 if (!(rt->dst.flags & DST_HOST))
100 return NULL; 100 return NULL;
101 101
102 if (!rt->rt6i_peer) 102 peer = rt6_get_peer_create(rt);
103 rt6_bind_peer(rt, 1);
104
105 peer = rt->rt6i_peer;
106 if (peer) { 103 if (peer) {
107 u32 *old_p = __DST_METRICS_PTR(old); 104 u32 *old_p = __DST_METRICS_PTR(old);
108 unsigned long prev, new; 105 unsigned long prev, new;
@@ -123,21 +120,27 @@ static u32 *ipv6_cow_metrics(struct dst_entry *dst, unsigned long old)
123 return p; 120 return p;
124} 121}
125 122
126static inline const void *choose_neigh_daddr(struct rt6_info *rt, const void *daddr) 123static inline const void *choose_neigh_daddr(struct rt6_info *rt,
124 struct sk_buff *skb,
125 const void *daddr)
127{ 126{
128 struct in6_addr *p = &rt->rt6i_gateway; 127 struct in6_addr *p = &rt->rt6i_gateway;
129 128
130 if (!ipv6_addr_any(p)) 129 if (!ipv6_addr_any(p))
131 return (const void *) p; 130 return (const void *) p;
131 else if (skb)
132 return &ipv6_hdr(skb)->daddr;
132 return daddr; 133 return daddr;
133} 134}
134 135
135static struct neighbour *ip6_neigh_lookup(const struct dst_entry *dst, const void *daddr) 136static struct neighbour *ip6_neigh_lookup(const struct dst_entry *dst,
137 struct sk_buff *skb,
138 const void *daddr)
136{ 139{
137 struct rt6_info *rt = (struct rt6_info *) dst; 140 struct rt6_info *rt = (struct rt6_info *) dst;
138 struct neighbour *n; 141 struct neighbour *n;
139 142
140 daddr = choose_neigh_daddr(rt, daddr); 143 daddr = choose_neigh_daddr(rt, skb, daddr);
141 n = __ipv6_neigh_lookup(&nd_tbl, dst->dev, daddr); 144 n = __ipv6_neigh_lookup(&nd_tbl, dst->dev, daddr);
142 if (n) 145 if (n)
143 return n; 146 return n;
@@ -152,7 +155,7 @@ static int rt6_bind_neighbour(struct rt6_info *rt, struct net_device *dev)
152 if (IS_ERR(n)) 155 if (IS_ERR(n))
153 return PTR_ERR(n); 156 return PTR_ERR(n);
154 } 157 }
155 dst_set_neighbour(&rt->dst, n); 158 rt->n = n;
156 159
157 return 0; 160 return 0;
158} 161}
@@ -261,16 +264,19 @@ static struct rt6_info ip6_blk_hole_entry_template = {
261#endif 264#endif
262 265
263/* allocate dst with ip6_dst_ops */ 266/* allocate dst with ip6_dst_ops */
264static inline struct rt6_info *ip6_dst_alloc(struct dst_ops *ops, 267static inline struct rt6_info *ip6_dst_alloc(struct net *net,
265 struct net_device *dev, 268 struct net_device *dev,
266 int flags) 269 int flags,
270 struct fib6_table *table)
267{ 271{
268 struct rt6_info *rt = dst_alloc(ops, dev, 0, 0, flags); 272 struct rt6_info *rt = dst_alloc(&net->ipv6.ip6_dst_ops, dev,
273 0, 0, flags);
269 274
270 if (rt) 275 if (rt) {
271 memset(&rt->rt6i_table, 0, 276 memset(&rt->n, 0,
272 sizeof(*rt) - sizeof(struct dst_entry)); 277 sizeof(*rt) - sizeof(struct dst_entry));
273 278 rt6_init_peer(rt, table ? &table->tb6_peers : net->ipv6.peers);
279 }
274 return rt; 280 return rt;
275} 281}
276 282
@@ -278,7 +284,9 @@ static void ip6_dst_destroy(struct dst_entry *dst)
278{ 284{
279 struct rt6_info *rt = (struct rt6_info *)dst; 285 struct rt6_info *rt = (struct rt6_info *)dst;
280 struct inet6_dev *idev = rt->rt6i_idev; 286 struct inet6_dev *idev = rt->rt6i_idev;
281 struct inet_peer *peer = rt->rt6i_peer; 287
288 if (rt->n)
289 neigh_release(rt->n);
282 290
283 if (!(rt->dst.flags & DST_HOST)) 291 if (!(rt->dst.flags & DST_HOST))
284 dst_destroy_metrics_generic(dst); 292 dst_destroy_metrics_generic(dst);
@@ -291,8 +299,8 @@ static void ip6_dst_destroy(struct dst_entry *dst)
291 if (!(rt->rt6i_flags & RTF_EXPIRES) && dst->from) 299 if (!(rt->rt6i_flags & RTF_EXPIRES) && dst->from)
292 dst_release(dst->from); 300 dst_release(dst->from);
293 301
294 if (peer) { 302 if (rt6_has_peer(rt)) {
295 rt->rt6i_peer = NULL; 303 struct inet_peer *peer = rt6_peer_ptr(rt);
296 inet_putpeer(peer); 304 inet_putpeer(peer);
297 } 305 }
298} 306}
@@ -306,13 +314,20 @@ static u32 rt6_peer_genid(void)
306 314
307void rt6_bind_peer(struct rt6_info *rt, int create) 315void rt6_bind_peer(struct rt6_info *rt, int create)
308{ 316{
317 struct inet_peer_base *base;
309 struct inet_peer *peer; 318 struct inet_peer *peer;
310 319
311 peer = inet_getpeer_v6(&rt->rt6i_dst.addr, create); 320 base = inetpeer_base_ptr(rt->_rt6i_peer);
312 if (peer && cmpxchg(&rt->rt6i_peer, NULL, peer) != NULL) 321 if (!base)
313 inet_putpeer(peer); 322 return;
314 else 323
315 rt->rt6i_peer_genid = rt6_peer_genid(); 324 peer = inet_getpeer_v6(base, &rt->rt6i_dst.addr, create);
325 if (peer) {
326 if (!rt6_set_peer(rt, peer))
327 inet_putpeer(peer);
328 else
329 rt->rt6i_peer_genid = rt6_peer_genid();
330 }
316} 331}
317 332
318static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev, 333static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
@@ -323,12 +338,19 @@ static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
323 struct net_device *loopback_dev = 338 struct net_device *loopback_dev =
324 dev_net(dev)->loopback_dev; 339 dev_net(dev)->loopback_dev;
325 340
326 if (dev != loopback_dev && idev && idev->dev == dev) { 341 if (dev != loopback_dev) {
327 struct inet6_dev *loopback_idev = 342 if (idev && idev->dev == dev) {
328 in6_dev_get(loopback_dev); 343 struct inet6_dev *loopback_idev =
329 if (loopback_idev) { 344 in6_dev_get(loopback_dev);
330 rt->rt6i_idev = loopback_idev; 345 if (loopback_idev) {
331 in6_dev_put(idev); 346 rt->rt6i_idev = loopback_idev;
347 in6_dev_put(idev);
348 }
349 }
350 if (rt->n && rt->n->dev == dev) {
351 rt->n->dev = loopback_dev;
352 dev_hold(loopback_dev);
353 dev_put(dev);
332 } 354 }
333 } 355 }
334} 356}
@@ -418,7 +440,7 @@ static void rt6_probe(struct rt6_info *rt)
418 * to no more than one per minute. 440 * to no more than one per minute.
419 */ 441 */
420 rcu_read_lock(); 442 rcu_read_lock();
421 neigh = rt ? dst_get_neighbour_noref(&rt->dst) : NULL; 443 neigh = rt ? rt->n : NULL;
422 if (!neigh || (neigh->nud_state & NUD_VALID)) 444 if (!neigh || (neigh->nud_state & NUD_VALID))
423 goto out; 445 goto out;
424 read_lock_bh(&neigh->lock); 446 read_lock_bh(&neigh->lock);
@@ -465,7 +487,7 @@ static inline int rt6_check_neigh(struct rt6_info *rt)
465 int m; 487 int m;
466 488
467 rcu_read_lock(); 489 rcu_read_lock();
468 neigh = dst_get_neighbour_noref(&rt->dst); 490 neigh = rt->n;
469 if (rt->rt6i_flags & RTF_NONEXTHOP || 491 if (rt->rt6i_flags & RTF_NONEXTHOP ||
470 !(rt->rt6i_flags & RTF_GATEWAY)) 492 !(rt->rt6i_flags & RTF_GATEWAY))
471 m = 1; 493 m = 1;
@@ -812,7 +834,7 @@ static struct rt6_info *rt6_alloc_clone(struct rt6_info *ort,
812 834
813 if (rt) { 835 if (rt) {
814 rt->rt6i_flags |= RTF_CACHE; 836 rt->rt6i_flags |= RTF_CACHE;
815 dst_set_neighbour(&rt->dst, neigh_clone(dst_get_neighbour_noref_raw(&ort->dst))); 837 rt->n = neigh_clone(ort->n);
816 } 838 }
817 return rt; 839 return rt;
818} 840}
@@ -846,7 +868,7 @@ restart:
846 dst_hold(&rt->dst); 868 dst_hold(&rt->dst);
847 read_unlock_bh(&table->tb6_lock); 869 read_unlock_bh(&table->tb6_lock);
848 870
849 if (!dst_get_neighbour_noref_raw(&rt->dst) && !(rt->rt6i_flags & RTF_NONEXTHOP)) 871 if (!rt->n && !(rt->rt6i_flags & RTF_NONEXTHOP))
850 nrt = rt6_alloc_cow(rt, &fl6->daddr, &fl6->saddr); 872 nrt = rt6_alloc_cow(rt, &fl6->daddr, &fl6->saddr);
851 else if (!(rt->dst.flags & DST_HOST)) 873 else if (!(rt->dst.flags & DST_HOST))
852 nrt = rt6_alloc_clone(rt, &fl6->daddr); 874 nrt = rt6_alloc_clone(rt, &fl6->daddr);
@@ -931,6 +953,8 @@ struct dst_entry * ip6_route_output(struct net *net, const struct sock *sk,
931{ 953{
932 int flags = 0; 954 int flags = 0;
933 955
956 fl6->flowi6_iif = net->loopback_dev->ifindex;
957
934 if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl6->daddr)) 958 if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl6->daddr))
935 flags |= RT6_LOOKUP_F_IFACE; 959 flags |= RT6_LOOKUP_F_IFACE;
936 960
@@ -952,6 +976,7 @@ struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_ori
952 rt = dst_alloc(&ip6_dst_blackhole_ops, ort->dst.dev, 1, 0, 0); 976 rt = dst_alloc(&ip6_dst_blackhole_ops, ort->dst.dev, 1, 0, 0);
953 if (rt) { 977 if (rt) {
954 memset(&rt->rt6i_table, 0, sizeof(*rt) - sizeof(struct dst_entry)); 978 memset(&rt->rt6i_table, 0, sizeof(*rt) - sizeof(struct dst_entry));
979 rt6_init_peer(rt, net->ipv6.peers);
955 980
956 new = &rt->dst; 981 new = &rt->dst;
957 982
@@ -996,7 +1021,7 @@ static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie)
996 1021
997 if (rt->rt6i_node && (rt->rt6i_node->fn_sernum == cookie)) { 1022 if (rt->rt6i_node && (rt->rt6i_node->fn_sernum == cookie)) {
998 if (rt->rt6i_peer_genid != rt6_peer_genid()) { 1023 if (rt->rt6i_peer_genid != rt6_peer_genid()) {
999 if (!rt->rt6i_peer) 1024 if (!rt6_has_peer(rt))
1000 rt6_bind_peer(rt, 0); 1025 rt6_bind_peer(rt, 0);
1001 rt->rt6i_peer_genid = rt6_peer_genid(); 1026 rt->rt6i_peer_genid = rt6_peer_genid();
1002 } 1027 }
@@ -1042,7 +1067,10 @@ static void ip6_rt_update_pmtu(struct dst_entry *dst, u32 mtu)
1042{ 1067{
1043 struct rt6_info *rt6 = (struct rt6_info*)dst; 1068 struct rt6_info *rt6 = (struct rt6_info*)dst;
1044 1069
1070 dst_confirm(dst);
1045 if (mtu < dst_mtu(dst) && rt6->rt6i_dst.plen == 128) { 1071 if (mtu < dst_mtu(dst) && rt6->rt6i_dst.plen == 128) {
1072 struct net *net = dev_net(dst->dev);
1073
1046 rt6->rt6i_flags |= RTF_MODIFIED; 1074 rt6->rt6i_flags |= RTF_MODIFIED;
1047 if (mtu < IPV6_MIN_MTU) { 1075 if (mtu < IPV6_MIN_MTU) {
1048 u32 features = dst_metric(dst, RTAX_FEATURES); 1076 u32 features = dst_metric(dst, RTAX_FEATURES);
@@ -1051,9 +1079,39 @@ static void ip6_rt_update_pmtu(struct dst_entry *dst, u32 mtu)
1051 dst_metric_set(dst, RTAX_FEATURES, features); 1079 dst_metric_set(dst, RTAX_FEATURES, features);
1052 } 1080 }
1053 dst_metric_set(dst, RTAX_MTU, mtu); 1081 dst_metric_set(dst, RTAX_MTU, mtu);
1082 rt6_update_expires(rt6, net->ipv6.sysctl.ip6_rt_mtu_expires);
1054 } 1083 }
1055} 1084}
1056 1085
1086void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu,
1087 int oif, u32 mark)
1088{
1089 const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data;
1090 struct dst_entry *dst;
1091 struct flowi6 fl6;
1092
1093 memset(&fl6, 0, sizeof(fl6));
1094 fl6.flowi6_oif = oif;
1095 fl6.flowi6_mark = mark;
1096 fl6.flowi6_flags = 0;
1097 fl6.daddr = iph->daddr;
1098 fl6.saddr = iph->saddr;
1099 fl6.flowlabel = (*(__be32 *) iph) & IPV6_FLOWINFO_MASK;
1100
1101 dst = ip6_route_output(net, NULL, &fl6);
1102 if (!dst->error)
1103 ip6_rt_update_pmtu(dst, ntohl(mtu));
1104 dst_release(dst);
1105}
1106EXPORT_SYMBOL_GPL(ip6_update_pmtu);
1107
1108void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu)
1109{
1110 ip6_update_pmtu(skb, sock_net(sk), mtu,
1111 sk->sk_bound_dev_if, sk->sk_mark);
1112}
1113EXPORT_SYMBOL_GPL(ip6_sk_update_pmtu);
1114
1057static unsigned int ip6_default_advmss(const struct dst_entry *dst) 1115static unsigned int ip6_default_advmss(const struct dst_entry *dst)
1058{ 1116{
1059 struct net_device *dev = dst->dev; 1117 struct net_device *dev = dst->dev;
@@ -1110,7 +1168,7 @@ struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
1110 if (unlikely(!idev)) 1168 if (unlikely(!idev))
1111 return ERR_PTR(-ENODEV); 1169 return ERR_PTR(-ENODEV);
1112 1170
1113 rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops, dev, 0); 1171 rt = ip6_dst_alloc(net, dev, 0, NULL);
1114 if (unlikely(!rt)) { 1172 if (unlikely(!rt)) {
1115 in6_dev_put(idev); 1173 in6_dev_put(idev);
1116 dst = ERR_PTR(-ENOMEM); 1174 dst = ERR_PTR(-ENOMEM);
@@ -1120,7 +1178,7 @@ struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
1120 if (neigh) 1178 if (neigh)
1121 neigh_hold(neigh); 1179 neigh_hold(neigh);
1122 else { 1180 else {
1123 neigh = ip6_neigh_lookup(&rt->dst, &fl6->daddr); 1181 neigh = ip6_neigh_lookup(&rt->dst, NULL, &fl6->daddr);
1124 if (IS_ERR(neigh)) { 1182 if (IS_ERR(neigh)) {
1125 in6_dev_put(idev); 1183 in6_dev_put(idev);
1126 dst_free(&rt->dst); 1184 dst_free(&rt->dst);
@@ -1130,7 +1188,7 @@ struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
1130 1188
1131 rt->dst.flags |= DST_HOST; 1189 rt->dst.flags |= DST_HOST;
1132 rt->dst.output = ip6_output; 1190 rt->dst.output = ip6_output;
1133 dst_set_neighbour(&rt->dst, neigh); 1191 rt->n = neigh;
1134 atomic_set(&rt->dst.__refcnt, 1); 1192 atomic_set(&rt->dst.__refcnt, 1);
1135 rt->rt6i_dst.addr = fl6->daddr; 1193 rt->rt6i_dst.addr = fl6->daddr;
1136 rt->rt6i_dst.plen = 128; 1194 rt->rt6i_dst.plen = 128;
@@ -1292,7 +1350,7 @@ int ip6_route_add(struct fib6_config *cfg)
1292 if (!table) 1350 if (!table)
1293 goto out; 1351 goto out;
1294 1352
1295 rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops, NULL, DST_NOCOUNT); 1353 rt = ip6_dst_alloc(net, NULL, DST_NOCOUNT, table);
1296 1354
1297 if (!rt) { 1355 if (!rt) {
1298 err = -ENOMEM; 1356 err = -ENOMEM;
@@ -1639,6 +1697,7 @@ void rt6_redirect(const struct in6_addr *dest, const struct in6_addr *src,
1639 struct rt6_info *rt, *nrt = NULL; 1697 struct rt6_info *rt, *nrt = NULL;
1640 struct netevent_redirect netevent; 1698 struct netevent_redirect netevent;
1641 struct net *net = dev_net(neigh->dev); 1699 struct net *net = dev_net(neigh->dev);
1700 struct neighbour *old_neigh;
1642 1701
1643 rt = ip6_route_redirect(dest, src, saddr, neigh->dev); 1702 rt = ip6_route_redirect(dest, src, saddr, neigh->dev);
1644 1703
@@ -1666,7 +1725,8 @@ void rt6_redirect(const struct in6_addr *dest, const struct in6_addr *src,
1666 dst_confirm(&rt->dst); 1725 dst_confirm(&rt->dst);
1667 1726
1668 /* Duplicate redirect: silently ignore. */ 1727 /* Duplicate redirect: silently ignore. */
1669 if (neigh == dst_get_neighbour_noref_raw(&rt->dst)) 1728 old_neigh = rt->n;
1729 if (neigh == old_neigh)
1670 goto out; 1730 goto out;
1671 1731
1672 nrt = ip6_rt_copy(rt, dest); 1732 nrt = ip6_rt_copy(rt, dest);
@@ -1678,13 +1738,16 @@ void rt6_redirect(const struct in6_addr *dest, const struct in6_addr *src,
1678 nrt->rt6i_flags &= ~RTF_GATEWAY; 1738 nrt->rt6i_flags &= ~RTF_GATEWAY;
1679 1739
1680 nrt->rt6i_gateway = *(struct in6_addr *)neigh->primary_key; 1740 nrt->rt6i_gateway = *(struct in6_addr *)neigh->primary_key;
1681 dst_set_neighbour(&nrt->dst, neigh_clone(neigh)); 1741 nrt->n = neigh_clone(neigh);
1682 1742
1683 if (ip6_ins_rt(nrt)) 1743 if (ip6_ins_rt(nrt))
1684 goto out; 1744 goto out;
1685 1745
1686 netevent.old = &rt->dst; 1746 netevent.old = &rt->dst;
1747 netevent.old_neigh = old_neigh;
1687 netevent.new = &nrt->dst; 1748 netevent.new = &nrt->dst;
1749 netevent.new_neigh = neigh;
1750 netevent.daddr = dest;
1688 call_netevent_notifiers(NETEVENT_REDIRECT, &netevent); 1751 call_netevent_notifiers(NETEVENT_REDIRECT, &netevent);
1689 1752
1690 if (rt->rt6i_flags & RTF_CACHE) { 1753 if (rt->rt6i_flags & RTF_CACHE) {
@@ -1697,116 +1760,6 @@ out:
1697} 1760}
1698 1761
1699/* 1762/*
1700 * Handle ICMP "packet too big" messages
1701 * i.e. Path MTU discovery
1702 */
1703
1704static void rt6_do_pmtu_disc(const struct in6_addr *daddr, const struct in6_addr *saddr,
1705 struct net *net, u32 pmtu, int ifindex)
1706{
1707 struct rt6_info *rt, *nrt;
1708 int allfrag = 0;
1709again:
1710 rt = rt6_lookup(net, daddr, saddr, ifindex, 0);
1711 if (!rt)
1712 return;
1713
1714 if (rt6_check_expired(rt)) {
1715 ip6_del_rt(rt);
1716 goto again;
1717 }
1718
1719 if (pmtu >= dst_mtu(&rt->dst))
1720 goto out;
1721
1722 if (pmtu < IPV6_MIN_MTU) {
1723 /*
1724 * According to RFC2460, PMTU is set to the IPv6 Minimum Link
1725 * MTU (1280) and a fragment header should always be included
1726 * after a node receiving Too Big message reporting PMTU is
1727 * less than the IPv6 Minimum Link MTU.
1728 */
1729 pmtu = IPV6_MIN_MTU;
1730 allfrag = 1;
1731 }
1732
1733 /* New mtu received -> path was valid.
1734 They are sent only in response to data packets,
1735 so that this nexthop apparently is reachable. --ANK
1736 */
1737 dst_confirm(&rt->dst);
1738
1739 /* Host route. If it is static, it would be better
1740 not to override it, but add new one, so that
1741 when cache entry will expire old pmtu
1742 would return automatically.
1743 */
1744 if (rt->rt6i_flags & RTF_CACHE) {
1745 dst_metric_set(&rt->dst, RTAX_MTU, pmtu);
1746 if (allfrag) {
1747 u32 features = dst_metric(&rt->dst, RTAX_FEATURES);
1748 features |= RTAX_FEATURE_ALLFRAG;
1749 dst_metric_set(&rt->dst, RTAX_FEATURES, features);
1750 }
1751 rt6_update_expires(rt, net->ipv6.sysctl.ip6_rt_mtu_expires);
1752 rt->rt6i_flags |= RTF_MODIFIED;
1753 goto out;
1754 }
1755
1756 /* Network route.
1757 Two cases are possible:
1758 1. It is connected route. Action: COW
1759 2. It is gatewayed route or NONEXTHOP route. Action: clone it.
1760 */
1761 if (!dst_get_neighbour_noref_raw(&rt->dst) && !(rt->rt6i_flags & RTF_NONEXTHOP))
1762 nrt = rt6_alloc_cow(rt, daddr, saddr);
1763 else
1764 nrt = rt6_alloc_clone(rt, daddr);
1765
1766 if (nrt) {
1767 dst_metric_set(&nrt->dst, RTAX_MTU, pmtu);
1768 if (allfrag) {
1769 u32 features = dst_metric(&nrt->dst, RTAX_FEATURES);
1770 features |= RTAX_FEATURE_ALLFRAG;
1771 dst_metric_set(&nrt->dst, RTAX_FEATURES, features);
1772 }
1773
1774 /* According to RFC 1981, detecting PMTU increase shouldn't be
1775 * happened within 5 mins, the recommended timer is 10 mins.
1776 * Here this route expiration time is set to ip6_rt_mtu_expires
1777 * which is 10 mins. After 10 mins the decreased pmtu is expired
1778 * and detecting PMTU increase will be automatically happened.
1779 */
1780 rt6_update_expires(nrt, net->ipv6.sysctl.ip6_rt_mtu_expires);
1781 nrt->rt6i_flags |= RTF_DYNAMIC;
1782 ip6_ins_rt(nrt);
1783 }
1784out:
1785 dst_release(&rt->dst);
1786}
1787
1788void rt6_pmtu_discovery(const struct in6_addr *daddr, const struct in6_addr *saddr,
1789 struct net_device *dev, u32 pmtu)
1790{
1791 struct net *net = dev_net(dev);
1792
1793 /*
1794 * RFC 1981 states that a node "MUST reduce the size of the packets it
1795 * is sending along the path" that caused the Packet Too Big message.
1796 * Since it's not possible in the general case to determine which
1797 * interface was used to send the original packet, we update the MTU
1798 * on the interface that will be used to send future packets. We also
1799 * update the MTU on the interface that received the Packet Too Big in
1800 * case the original packet was forced out that interface with
1801 * SO_BINDTODEVICE or similar. This is the next best thing to the
1802 * correct behaviour, which would be to update the MTU on all
1803 * interfaces.
1804 */
1805 rt6_do_pmtu_disc(daddr, saddr, net, pmtu, 0);
1806 rt6_do_pmtu_disc(daddr, saddr, net, pmtu, dev->ifindex);
1807}
1808
1809/*
1810 * Misc support functions 1763 * Misc support functions
1811 */ 1764 */
1812 1765
@@ -1814,8 +1767,8 @@ static struct rt6_info *ip6_rt_copy(struct rt6_info *ort,
1814 const struct in6_addr *dest) 1767 const struct in6_addr *dest)
1815{ 1768{
1816 struct net *net = dev_net(ort->dst.dev); 1769 struct net *net = dev_net(ort->dst.dev);
1817 struct rt6_info *rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops, 1770 struct rt6_info *rt = ip6_dst_alloc(net, ort->dst.dev, 0,
1818 ort->dst.dev, 0); 1771 ort->rt6i_table);
1819 1772
1820 if (rt) { 1773 if (rt) {
1821 rt->dst.input = ort->dst.input; 1774 rt->dst.input = ort->dst.input;
@@ -2099,8 +2052,7 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
2099 bool anycast) 2052 bool anycast)
2100{ 2053{
2101 struct net *net = dev_net(idev->dev); 2054 struct net *net = dev_net(idev->dev);
2102 struct rt6_info *rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops, 2055 struct rt6_info *rt = ip6_dst_alloc(net, net->loopback_dev, 0, NULL);
2103 net->loopback_dev, 0);
2104 int err; 2056 int err;
2105 2057
2106 if (!rt) { 2058 if (!rt) {
@@ -2396,13 +2348,11 @@ static int rt6_fill_node(struct net *net,
2396 int iif, int type, u32 pid, u32 seq, 2348 int iif, int type, u32 pid, u32 seq,
2397 int prefix, int nowait, unsigned int flags) 2349 int prefix, int nowait, unsigned int flags)
2398{ 2350{
2399 const struct inet_peer *peer;
2400 struct rtmsg *rtm; 2351 struct rtmsg *rtm;
2401 struct nlmsghdr *nlh; 2352 struct nlmsghdr *nlh;
2402 long expires; 2353 long expires;
2403 u32 table; 2354 u32 table;
2404 struct neighbour *n; 2355 struct neighbour *n;
2405 u32 ts, tsage;
2406 2356
2407 if (prefix) { /* user wants prefix routes only */ 2357 if (prefix) { /* user wants prefix routes only */
2408 if (!(rt->rt6i_flags & RTF_PREFIX_RT)) { 2358 if (!(rt->rt6i_flags & RTF_PREFIX_RT)) {
@@ -2500,7 +2450,7 @@ static int rt6_fill_node(struct net *net,
2500 goto nla_put_failure; 2450 goto nla_put_failure;
2501 2451
2502 rcu_read_lock(); 2452 rcu_read_lock();
2503 n = dst_get_neighbour_noref(&rt->dst); 2453 n = rt->n;
2504 if (n) { 2454 if (n) {
2505 if (nla_put(skb, RTA_GATEWAY, 16, &n->primary_key) < 0) { 2455 if (nla_put(skb, RTA_GATEWAY, 16, &n->primary_key) < 0) {
2506 rcu_read_unlock(); 2456 rcu_read_unlock();
@@ -2521,15 +2471,7 @@ static int rt6_fill_node(struct net *net,
2521 else 2471 else
2522 expires = INT_MAX; 2472 expires = INT_MAX;
2523 2473
2524 peer = rt->rt6i_peer; 2474 if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires, rt->dst.error) < 0)
2525 ts = tsage = 0;
2526 if (peer && peer->tcp_ts_stamp) {
2527 ts = peer->tcp_ts;
2528 tsage = get_seconds() - peer->tcp_ts_stamp;
2529 }
2530
2531 if (rtnl_put_cacheinfo(skb, &rt->dst, 0, ts, tsage,
2532 expires, rt->dst.error) < 0)
2533 goto nla_put_failure; 2475 goto nla_put_failure;
2534 2476
2535 return nlmsg_end(skb, nlh); 2477 return nlmsg_end(skb, nlh);
@@ -2722,7 +2664,7 @@ static int rt6_info_route(struct rt6_info *rt, void *p_arg)
2722 seq_puts(m, "00000000000000000000000000000000 00 "); 2664 seq_puts(m, "00000000000000000000000000000000 00 ");
2723#endif 2665#endif
2724 rcu_read_lock(); 2666 rcu_read_lock();
2725 n = dst_get_neighbour_noref(&rt->dst); 2667 n = rt->n;
2726 if (n) { 2668 if (n) {
2727 seq_printf(m, "%pi6", n->primary_key); 2669 seq_printf(m, "%pi6", n->primary_key);
2728 } else { 2670 } else {
@@ -3007,6 +2949,31 @@ static struct pernet_operations ip6_route_net_ops = {
3007 .exit = ip6_route_net_exit, 2949 .exit = ip6_route_net_exit,
3008}; 2950};
3009 2951
2952static int __net_init ipv6_inetpeer_init(struct net *net)
2953{
2954 struct inet_peer_base *bp = kmalloc(sizeof(*bp), GFP_KERNEL);
2955
2956 if (!bp)
2957 return -ENOMEM;
2958 inet_peer_base_init(bp);
2959 net->ipv6.peers = bp;
2960 return 0;
2961}
2962
2963static void __net_exit ipv6_inetpeer_exit(struct net *net)
2964{
2965 struct inet_peer_base *bp = net->ipv6.peers;
2966
2967 net->ipv6.peers = NULL;
2968 inetpeer_invalidate_tree(bp);
2969 kfree(bp);
2970}
2971
2972static struct pernet_operations ipv6_inetpeer_ops = {
2973 .init = ipv6_inetpeer_init,
2974 .exit = ipv6_inetpeer_exit,
2975};
2976
3010static struct pernet_operations ip6_route_net_late_ops = { 2977static struct pernet_operations ip6_route_net_late_ops = {
3011 .init = ip6_route_net_init_late, 2978 .init = ip6_route_net_init_late,
3012 .exit = ip6_route_net_exit_late, 2979 .exit = ip6_route_net_exit_late,
@@ -3032,10 +2999,14 @@ int __init ip6_route_init(void)
3032 if (ret) 2999 if (ret)
3033 goto out_kmem_cache; 3000 goto out_kmem_cache;
3034 3001
3035 ret = register_pernet_subsys(&ip6_route_net_ops); 3002 ret = register_pernet_subsys(&ipv6_inetpeer_ops);
3036 if (ret) 3003 if (ret)
3037 goto out_dst_entries; 3004 goto out_dst_entries;
3038 3005
3006 ret = register_pernet_subsys(&ip6_route_net_ops);
3007 if (ret)
3008 goto out_register_inetpeer;
3009
3039 ip6_dst_blackhole_ops.kmem_cachep = ip6_dst_ops_template.kmem_cachep; 3010 ip6_dst_blackhole_ops.kmem_cachep = ip6_dst_ops_template.kmem_cachep;
3040 3011
3041 /* Registering of the loopback is done before this portion of code, 3012 /* Registering of the loopback is done before this portion of code,
@@ -3088,6 +3059,8 @@ out_fib6_init:
3088 fib6_gc_cleanup(); 3059 fib6_gc_cleanup();
3089out_register_subsys: 3060out_register_subsys:
3090 unregister_pernet_subsys(&ip6_route_net_ops); 3061 unregister_pernet_subsys(&ip6_route_net_ops);
3062out_register_inetpeer:
3063 unregister_pernet_subsys(&ipv6_inetpeer_ops);
3091out_dst_entries: 3064out_dst_entries:
3092 dst_entries_destroy(&ip6_dst_blackhole_ops); 3065 dst_entries_destroy(&ip6_dst_blackhole_ops);
3093out_kmem_cache: 3066out_kmem_cache:
@@ -3102,6 +3075,7 @@ void ip6_route_cleanup(void)
3102 fib6_rules_cleanup(); 3075 fib6_rules_cleanup();
3103 xfrm6_fini(); 3076 xfrm6_fini();
3104 fib6_gc_cleanup(); 3077 fib6_gc_cleanup();
3078 unregister_pernet_subsys(&ipv6_inetpeer_ops);
3105 unregister_pernet_subsys(&ip6_route_net_ops); 3079 unregister_pernet_subsys(&ip6_route_net_ops);
3106 dst_entries_destroy(&ip6_dst_blackhole_ops); 3080 dst_entries_destroy(&ip6_dst_blackhole_ops);
3107 kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep); 3081 kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 60415711563..49aea94c9be 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -527,9 +527,6 @@ static int ipip6_err(struct sk_buff *skb, u32 info)
527 case ICMP_PORT_UNREACH: 527 case ICMP_PORT_UNREACH:
528 /* Impossible event. */ 528 /* Impossible event. */
529 return 0; 529 return 0;
530 case ICMP_FRAG_NEEDED:
531 /* Soft state for pmtu is maintained by IP core. */
532 return 0;
533 default: 530 default:
534 /* All others are translated to HOST_UNREACH. 531 /* All others are translated to HOST_UNREACH.
535 rfc2003 contains "deep thoughts" about NET_UNREACH, 532 rfc2003 contains "deep thoughts" about NET_UNREACH,
@@ -551,7 +548,17 @@ static int ipip6_err(struct sk_buff *skb, u32 info)
551 skb->dev, 548 skb->dev,
552 iph->daddr, 549 iph->daddr,
553 iph->saddr); 550 iph->saddr);
554 if (t == NULL || t->parms.iph.daddr == 0) 551 if (t == NULL)
552 goto out;
553
554 if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
555 ipv4_update_pmtu(skb, dev_net(skb->dev), info,
556 t->dev->ifindex, 0, IPPROTO_IPV6, 0);
557 err = 0;
558 goto out;
559 }
560
561 if (t->parms.iph.daddr == 0)
555 goto out; 562 goto out;
556 563
557 err = 0; 564 err = 0;
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index 8e951d8d3b8..7bf3cc427c2 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -21,9 +21,6 @@
21#include <net/ipv6.h> 21#include <net/ipv6.h>
22#include <net/tcp.h> 22#include <net/tcp.h>
23 23
24extern int sysctl_tcp_syncookies;
25extern __u32 syncookie_secret[2][16-4+SHA_DIGEST_WORDS];
26
27#define COOKIEBITS 24 /* Upper bits store count */ 24#define COOKIEBITS 24 /* Upper bits store count */
28#define COOKIEMASK (((__u32)1 << COOKIEBITS) - 1) 25#define COOKIEMASK (((__u32)1 << COOKIEBITS) - 1)
29 26
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 9df64a50b07..61175cb2478 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -277,22 +277,8 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
277 rt = (struct rt6_info *) dst; 277 rt = (struct rt6_info *) dst;
278 if (tcp_death_row.sysctl_tw_recycle && 278 if (tcp_death_row.sysctl_tw_recycle &&
279 !tp->rx_opt.ts_recent_stamp && 279 !tp->rx_opt.ts_recent_stamp &&
280 ipv6_addr_equal(&rt->rt6i_dst.addr, &np->daddr)) { 280 ipv6_addr_equal(&rt->rt6i_dst.addr, &np->daddr))
281 struct inet_peer *peer = rt6_get_peer(rt); 281 tcp_fetch_timewait_stamp(sk, dst);
282 /*
283 * VJ's idea. We save last timestamp seen from
284 * the destination in peer table, when entering state
285 * TIME-WAIT * and initialize rx_opt.ts_recent from it,
286 * when trying new connection.
287 */
288 if (peer) {
289 inet_peer_refcheck(peer);
290 if ((u32)get_seconds() - peer->tcp_ts_stamp <= TCP_PAWS_MSL) {
291 tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp;
292 tp->rx_opt.ts_recent = peer->tcp_ts;
293 }
294 }
295 }
296 282
297 icsk->icsk_ext_hdr_len = 0; 283 icsk->icsk_ext_hdr_len = 0;
298 if (np->opt) 284 if (np->opt)
@@ -415,6 +401,8 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
415 } else 401 } else
416 dst_hold(dst); 402 dst_hold(dst);
417 403
404 dst->ops->update_pmtu(dst, ntohl(info));
405
418 if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) { 406 if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
419 tcp_sync_mss(sk, dst_mtu(dst)); 407 tcp_sync_mss(sk, dst_mtu(dst));
420 tcp_simple_retransmit(sk); 408 tcp_simple_retransmit(sk);
@@ -475,62 +463,43 @@ out:
475} 463}
476 464
477 465
478static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req, 466static int tcp_v6_send_synack(struct sock *sk, struct dst_entry *dst,
467 struct flowi6 *fl6,
468 struct request_sock *req,
479 struct request_values *rvp, 469 struct request_values *rvp,
480 u16 queue_mapping) 470 u16 queue_mapping)
481{ 471{
482 struct inet6_request_sock *treq = inet6_rsk(req); 472 struct inet6_request_sock *treq = inet6_rsk(req);
483 struct ipv6_pinfo *np = inet6_sk(sk); 473 struct ipv6_pinfo *np = inet6_sk(sk);
484 struct sk_buff * skb; 474 struct sk_buff * skb;
485 struct ipv6_txoptions *opt = NULL; 475 int err = -ENOMEM;
486 struct in6_addr * final_p, final;
487 struct flowi6 fl6;
488 struct dst_entry *dst;
489 int err;
490
491 memset(&fl6, 0, sizeof(fl6));
492 fl6.flowi6_proto = IPPROTO_TCP;
493 fl6.daddr = treq->rmt_addr;
494 fl6.saddr = treq->loc_addr;
495 fl6.flowlabel = 0;
496 fl6.flowi6_oif = treq->iif;
497 fl6.flowi6_mark = sk->sk_mark;
498 fl6.fl6_dport = inet_rsk(req)->rmt_port;
499 fl6.fl6_sport = inet_rsk(req)->loc_port;
500 security_req_classify_flow(req, flowi6_to_flowi(&fl6));
501 476
502 opt = np->opt; 477 /* First, grab a route. */
503 final_p = fl6_update_dst(&fl6, opt, &final); 478 if (!dst && (dst = inet6_csk_route_req(sk, fl6, req)) == NULL)
504
505 dst = ip6_dst_lookup_flow(sk, &fl6, final_p, false);
506 if (IS_ERR(dst)) {
507 err = PTR_ERR(dst);
508 dst = NULL;
509 goto done; 479 goto done;
510 } 480
511 skb = tcp_make_synack(sk, dst, req, rvp); 481 skb = tcp_make_synack(sk, dst, req, rvp);
512 err = -ENOMEM; 482
513 if (skb) { 483 if (skb) {
514 __tcp_v6_send_check(skb, &treq->loc_addr, &treq->rmt_addr); 484 __tcp_v6_send_check(skb, &treq->loc_addr, &treq->rmt_addr);
515 485
516 fl6.daddr = treq->rmt_addr; 486 fl6->daddr = treq->rmt_addr;
517 skb_set_queue_mapping(skb, queue_mapping); 487 skb_set_queue_mapping(skb, queue_mapping);
518 err = ip6_xmit(sk, skb, &fl6, opt, np->tclass); 488 err = ip6_xmit(sk, skb, fl6, np->opt, np->tclass);
519 err = net_xmit_eval(err); 489 err = net_xmit_eval(err);
520 } 490 }
521 491
522done: 492done:
523 if (opt && opt != np->opt)
524 sock_kfree_s(sk, opt, opt->tot_len);
525 dst_release(dst);
526 return err; 493 return err;
527} 494}
528 495
529static int tcp_v6_rtx_synack(struct sock *sk, struct request_sock *req, 496static int tcp_v6_rtx_synack(struct sock *sk, struct request_sock *req,
530 struct request_values *rvp) 497 struct request_values *rvp)
531{ 498{
499 struct flowi6 fl6;
500
532 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS); 501 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
533 return tcp_v6_send_synack(sk, req, rvp, 0); 502 return tcp_v6_send_synack(sk, NULL, &fl6, req, rvp, 0);
534} 503}
535 504
536static void tcp_v6_reqsk_destructor(struct request_sock *req) 505static void tcp_v6_reqsk_destructor(struct request_sock *req)
@@ -1057,6 +1026,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
1057 struct tcp_sock *tp = tcp_sk(sk); 1026 struct tcp_sock *tp = tcp_sk(sk);
1058 __u32 isn = TCP_SKB_CB(skb)->when; 1027 __u32 isn = TCP_SKB_CB(skb)->when;
1059 struct dst_entry *dst = NULL; 1028 struct dst_entry *dst = NULL;
1029 struct flowi6 fl6;
1060 bool want_cookie = false; 1030 bool want_cookie = false;
1061 1031
1062 if (skb->protocol == htons(ETH_P_IP)) 1032 if (skb->protocol == htons(ETH_P_IP))
@@ -1150,8 +1120,6 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
1150 treq->iif = inet6_iif(skb); 1120 treq->iif = inet6_iif(skb);
1151 1121
1152 if (!isn) { 1122 if (!isn) {
1153 struct inet_peer *peer = NULL;
1154
1155 if (ipv6_opt_accepted(sk, skb) || 1123 if (ipv6_opt_accepted(sk, skb) ||
1156 np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo || 1124 np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
1157 np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) { 1125 np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
@@ -1176,14 +1144,8 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
1176 */ 1144 */
1177 if (tmp_opt.saw_tstamp && 1145 if (tmp_opt.saw_tstamp &&
1178 tcp_death_row.sysctl_tw_recycle && 1146 tcp_death_row.sysctl_tw_recycle &&
1179 (dst = inet6_csk_route_req(sk, req)) != NULL && 1147 (dst = inet6_csk_route_req(sk, &fl6, req)) != NULL) {
1180 (peer = rt6_get_peer((struct rt6_info *)dst)) != NULL && 1148 if (!tcp_peer_is_proven(req, dst, true)) {
1181 ipv6_addr_equal((struct in6_addr *)peer->daddr.addr.a6,
1182 &treq->rmt_addr)) {
1183 inet_peer_refcheck(peer);
1184 if ((u32)get_seconds() - peer->tcp_ts_stamp < TCP_PAWS_MSL &&
1185 (s32)(peer->tcp_ts - req->ts_recent) >
1186 TCP_PAWS_WINDOW) {
1187 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED); 1149 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
1188 goto drop_and_release; 1150 goto drop_and_release;
1189 } 1151 }
@@ -1192,8 +1154,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
1192 else if (!sysctl_tcp_syncookies && 1154 else if (!sysctl_tcp_syncookies &&
1193 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) < 1155 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
1194 (sysctl_max_syn_backlog >> 2)) && 1156 (sysctl_max_syn_backlog >> 2)) &&
1195 (!peer || !peer->tcp_ts_stamp) && 1157 !tcp_peer_is_proven(req, dst, false)) {
1196 (!dst || !dst_metric(dst, RTAX_RTT))) {
1197 /* Without syncookies last quarter of 1158 /* Without syncookies last quarter of
1198 * backlog is filled with destinations, 1159 * backlog is filled with destinations,
1199 * proven to be alive. 1160 * proven to be alive.
@@ -1215,7 +1176,7 @@ have_isn:
1215 if (security_inet_conn_request(sk, skb, req)) 1176 if (security_inet_conn_request(sk, skb, req))
1216 goto drop_and_release; 1177 goto drop_and_release;
1217 1178
1218 if (tcp_v6_send_synack(sk, req, 1179 if (tcp_v6_send_synack(sk, dst, &fl6, req,
1219 (struct request_values *)&tmp_ext, 1180 (struct request_values *)&tmp_ext,
1220 skb_get_queue_mapping(skb)) || 1181 skb_get_queue_mapping(skb)) ||
1221 want_cookie) 1182 want_cookie)
@@ -1242,10 +1203,10 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1242 struct inet_sock *newinet; 1203 struct inet_sock *newinet;
1243 struct tcp_sock *newtp; 1204 struct tcp_sock *newtp;
1244 struct sock *newsk; 1205 struct sock *newsk;
1245 struct ipv6_txoptions *opt;
1246#ifdef CONFIG_TCP_MD5SIG 1206#ifdef CONFIG_TCP_MD5SIG
1247 struct tcp_md5sig_key *key; 1207 struct tcp_md5sig_key *key;
1248#endif 1208#endif
1209 struct flowi6 fl6;
1249 1210
1250 if (skb->protocol == htons(ETH_P_IP)) { 1211 if (skb->protocol == htons(ETH_P_IP)) {
1251 /* 1212 /*
@@ -1302,13 +1263,12 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1302 } 1263 }
1303 1264
1304 treq = inet6_rsk(req); 1265 treq = inet6_rsk(req);
1305 opt = np->opt;
1306 1266
1307 if (sk_acceptq_is_full(sk)) 1267 if (sk_acceptq_is_full(sk))
1308 goto out_overflow; 1268 goto out_overflow;
1309 1269
1310 if (!dst) { 1270 if (!dst) {
1311 dst = inet6_csk_route_req(sk, req); 1271 dst = inet6_csk_route_req(sk, &fl6, req);
1312 if (!dst) 1272 if (!dst)
1313 goto out; 1273 goto out;
1314 } 1274 }
@@ -1371,11 +1331,8 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1371 but we make one more one thing there: reattach optmem 1331 but we make one more one thing there: reattach optmem
1372 to newsk. 1332 to newsk.
1373 */ 1333 */
1374 if (opt) { 1334 if (np->opt)
1375 newnp->opt = ipv6_dup_options(newsk, opt); 1335 newnp->opt = ipv6_dup_options(newsk, np->opt);
1376 if (opt != np->opt)
1377 sock_kfree_s(sk, opt, opt->tot_len);
1378 }
1379 1336
1380 inet_csk(newsk)->icsk_ext_hdr_len = 0; 1337 inet_csk(newsk)->icsk_ext_hdr_len = 0;
1381 if (newnp->opt) 1338 if (newnp->opt)
@@ -1422,8 +1379,6 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1422out_overflow: 1379out_overflow:
1423 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS); 1380 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1424out_nonewsk: 1381out_nonewsk:
1425 if (opt && opt != np->opt)
1426 sock_kfree_s(sk, opt, opt->tot_len);
1427 dst_release(dst); 1382 dst_release(dst);
1428out: 1383out:
1429 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS); 1384 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
@@ -1734,42 +1689,10 @@ do_time_wait:
1734 goto discard_it; 1689 goto discard_it;
1735} 1690}
1736 1691
1737static struct inet_peer *tcp_v6_get_peer(struct sock *sk, bool *release_it)
1738{
1739 struct rt6_info *rt = (struct rt6_info *) __sk_dst_get(sk);
1740 struct ipv6_pinfo *np = inet6_sk(sk);
1741 struct inet_peer *peer;
1742
1743 if (!rt ||
1744 !ipv6_addr_equal(&np->daddr, &rt->rt6i_dst.addr)) {
1745 peer = inet_getpeer_v6(&np->daddr, 1);
1746 *release_it = true;
1747 } else {
1748 if (!rt->rt6i_peer)
1749 rt6_bind_peer(rt, 1);
1750 peer = rt->rt6i_peer;
1751 *release_it = false;
1752 }
1753
1754 return peer;
1755}
1756
1757static void *tcp_v6_tw_get_peer(struct sock *sk)
1758{
1759 const struct inet6_timewait_sock *tw6 = inet6_twsk(sk);
1760 const struct inet_timewait_sock *tw = inet_twsk(sk);
1761
1762 if (tw->tw_family == AF_INET)
1763 return tcp_v4_tw_get_peer(sk);
1764
1765 return inet_getpeer_v6(&tw6->tw_v6_daddr, 1);
1766}
1767
1768static struct timewait_sock_ops tcp6_timewait_sock_ops = { 1692static struct timewait_sock_ops tcp6_timewait_sock_ops = {
1769 .twsk_obj_size = sizeof(struct tcp6_timewait_sock), 1693 .twsk_obj_size = sizeof(struct tcp6_timewait_sock),
1770 .twsk_unique = tcp_twsk_unique, 1694 .twsk_unique = tcp_twsk_unique,
1771 .twsk_destructor= tcp_twsk_destructor, 1695 .twsk_destructor= tcp_twsk_destructor,
1772 .twsk_getpeer = tcp_v6_tw_get_peer,
1773}; 1696};
1774 1697
1775static const struct inet_connection_sock_af_ops ipv6_specific = { 1698static const struct inet_connection_sock_af_ops ipv6_specific = {
@@ -1778,7 +1701,6 @@ static const struct inet_connection_sock_af_ops ipv6_specific = {
1778 .rebuild_header = inet6_sk_rebuild_header, 1701 .rebuild_header = inet6_sk_rebuild_header,
1779 .conn_request = tcp_v6_conn_request, 1702 .conn_request = tcp_v6_conn_request,
1780 .syn_recv_sock = tcp_v6_syn_recv_sock, 1703 .syn_recv_sock = tcp_v6_syn_recv_sock,
1781 .get_peer = tcp_v6_get_peer,
1782 .net_header_len = sizeof(struct ipv6hdr), 1704 .net_header_len = sizeof(struct ipv6hdr),
1783 .net_frag_header_len = sizeof(struct frag_hdr), 1705 .net_frag_header_len = sizeof(struct frag_hdr),
1784 .setsockopt = ipv6_setsockopt, 1706 .setsockopt = ipv6_setsockopt,
@@ -1810,7 +1732,6 @@ static const struct inet_connection_sock_af_ops ipv6_mapped = {
1810 .rebuild_header = inet_sk_rebuild_header, 1732 .rebuild_header = inet_sk_rebuild_header,
1811 .conn_request = tcp_v6_conn_request, 1733 .conn_request = tcp_v6_conn_request,
1812 .syn_recv_sock = tcp_v6_syn_recv_sock, 1734 .syn_recv_sock = tcp_v6_syn_recv_sock,
1813 .get_peer = tcp_v4_get_peer,
1814 .net_header_len = sizeof(struct iphdr), 1735 .net_header_len = sizeof(struct iphdr),
1815 .setsockopt = ipv6_setsockopt, 1736 .setsockopt = ipv6_setsockopt,
1816 .getsockopt = ipv6_getsockopt, 1737 .getsockopt = ipv6_getsockopt,
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index f05099fc590..1ecd1024948 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -48,6 +48,7 @@
48 48
49#include <linux/proc_fs.h> 49#include <linux/proc_fs.h>
50#include <linux/seq_file.h> 50#include <linux/seq_file.h>
51#include <trace/events/skb.h>
51#include "udp_impl.h" 52#include "udp_impl.h"
52 53
53int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2) 54int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
@@ -385,15 +386,16 @@ try_again:
385 386
386 if (skb_csum_unnecessary(skb)) 387 if (skb_csum_unnecessary(skb))
387 err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr), 388 err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr),
388 msg->msg_iov, copied ); 389 msg->msg_iov, copied);
389 else { 390 else {
390 err = skb_copy_and_csum_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov); 391 err = skb_copy_and_csum_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov);
391 if (err == -EINVAL) 392 if (err == -EINVAL)
392 goto csum_copy_err; 393 goto csum_copy_err;
393 } 394 }
394 if (err) 395 if (unlikely(err)) {
396 trace_kfree_skb(skb, udpv6_recvmsg);
395 goto out_free; 397 goto out_free;
396 398 }
397 if (!peeked) { 399 if (!peeked) {
398 if (is_udp4) 400 if (is_udp4)
399 UDP_INC_STATS_USER(sock_net(sk), 401 UDP_INC_STATS_USER(sock_net(sk),
@@ -479,6 +481,9 @@ void __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
479 if (sk == NULL) 481 if (sk == NULL)
480 return; 482 return;
481 483
484 if (type == ICMPV6_PKT_TOOBIG)
485 ip6_sk_update_pmtu(skb, sk, info);
486
482 np = inet6_sk(sk); 487 np = inet6_sk(sk);
483 488
484 if (!icmpv6_err_convert(type, code, &err) && !np->recverr) 489 if (!icmpv6_err_convert(type, code, &err) && !np->recverr)
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index 8625fba96db..bb02038b822 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -99,12 +99,11 @@ static int xfrm6_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
99 if (!xdst->u.rt6.rt6i_idev) 99 if (!xdst->u.rt6.rt6i_idev)
100 return -ENODEV; 100 return -ENODEV;
101 101
102 xdst->u.rt6.rt6i_peer = rt->rt6i_peer; 102 rt6_transfer_peer(&xdst->u.rt6, rt);
103 if (rt->rt6i_peer)
104 atomic_inc(&rt->rt6i_peer->refcnt);
105 103
106 /* Sheit... I remember I did this right. Apparently, 104 /* Sheit... I remember I did this right. Apparently,
107 * it was magically lost, so this code needs audit */ 105 * it was magically lost, so this code needs audit */
106 xdst->u.rt6.n = neigh_clone(rt->n);
108 xdst->u.rt6.rt6i_flags = rt->rt6i_flags & (RTF_ANYCAST | 107 xdst->u.rt6.rt6i_flags = rt->rt6i_flags & (RTF_ANYCAST |
109 RTF_LOCAL); 108 RTF_LOCAL);
110 xdst->u.rt6.rt6i_metric = rt->rt6i_metric; 109 xdst->u.rt6.rt6i_metric = rt->rt6i_metric;
@@ -223,8 +222,10 @@ static void xfrm6_dst_destroy(struct dst_entry *dst)
223 if (likely(xdst->u.rt6.rt6i_idev)) 222 if (likely(xdst->u.rt6.rt6i_idev))
224 in6_dev_put(xdst->u.rt6.rt6i_idev); 223 in6_dev_put(xdst->u.rt6.rt6i_idev);
225 dst_destroy_metrics_generic(dst); 224 dst_destroy_metrics_generic(dst);
226 if (likely(xdst->u.rt6.rt6i_peer)) 225 if (rt6_has_peer(&xdst->u.rt6)) {
227 inet_putpeer(xdst->u.rt6.rt6i_peer); 226 struct inet_peer *peer = rt6_peer_ptr(&xdst->u.rt6);
227 inet_putpeer(peer);
228 }
228 xfrm_dst_destroy(xdst); 229 xfrm_dst_destroy(xdst);
229} 230}
230 231
diff --git a/net/irda/irqueue.c b/net/irda/irqueue.c
index f06947c4fa8..7152624ed5f 100644
--- a/net/irda/irqueue.c
+++ b/net/irda/irqueue.c
@@ -523,7 +523,7 @@ void *hashbin_remove_first( hashbin_t *hashbin)
523 * Dequeue the entry... 523 * Dequeue the entry...
524 */ 524 */
525 dequeue_general( (irda_queue_t**) &hashbin->hb_queue[ bin ], 525 dequeue_general( (irda_queue_t**) &hashbin->hb_queue[ bin ],
526 (irda_queue_t*) entry ); 526 entry);
527 hashbin->hb_size--; 527 hashbin->hb_size--;
528 entry->q_next = NULL; 528 entry->q_next = NULL;
529 entry->q_prev = NULL; 529 entry->q_prev = NULL;
@@ -615,7 +615,7 @@ void* hashbin_remove( hashbin_t* hashbin, long hashv, const char* name)
615 */ 615 */
616 if ( found ) { 616 if ( found ) {
617 dequeue_general( (irda_queue_t**) &hashbin->hb_queue[ bin ], 617 dequeue_general( (irda_queue_t**) &hashbin->hb_queue[ bin ],
618 (irda_queue_t*) entry ); 618 entry);
619 hashbin->hb_size--; 619 hashbin->hb_size--;
620 620
621 /* 621 /*
@@ -685,7 +685,7 @@ void* hashbin_remove_this( hashbin_t* hashbin, irda_queue_t* entry)
685 * Dequeue the entry... 685 * Dequeue the entry...
686 */ 686 */
687 dequeue_general( (irda_queue_t**) &hashbin->hb_queue[ bin ], 687 dequeue_general( (irda_queue_t**) &hashbin->hb_queue[ bin ],
688 (irda_queue_t*) entry ); 688 entry);
689 hashbin->hb_size--; 689 hashbin->hb_size--;
690 entry->q_next = NULL; 690 entry->q_next = NULL;
691 entry->q_prev = NULL; 691 entry->q_prev = NULL;
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index 32b2155e7ab..393355d37b4 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -1128,6 +1128,7 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len
1128 int headroom; 1128 int headroom;
1129 int uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(struct udphdr) : 0; 1129 int uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(struct udphdr) : 0;
1130 int udp_len; 1130 int udp_len;
1131 int ret = NET_XMIT_SUCCESS;
1131 1132
1132 /* Check that there's enough headroom in the skb to insert IP, 1133 /* Check that there's enough headroom in the skb to insert IP,
1133 * UDP and L2TP headers. If not enough, expand it to 1134 * UDP and L2TP headers. If not enough, expand it to
@@ -1137,8 +1138,8 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len
1137 uhlen + hdr_len; 1138 uhlen + hdr_len;
1138 old_headroom = skb_headroom(skb); 1139 old_headroom = skb_headroom(skb);
1139 if (skb_cow_head(skb, headroom)) { 1140 if (skb_cow_head(skb, headroom)) {
1140 dev_kfree_skb(skb); 1141 kfree_skb(skb);
1141 goto abort; 1142 return NET_XMIT_DROP;
1142 } 1143 }
1143 1144
1144 new_headroom = skb_headroom(skb); 1145 new_headroom = skb_headroom(skb);
@@ -1156,7 +1157,8 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len
1156 1157
1157 bh_lock_sock(sk); 1158 bh_lock_sock(sk);
1158 if (sock_owned_by_user(sk)) { 1159 if (sock_owned_by_user(sk)) {
1159 dev_kfree_skb(skb); 1160 kfree_skb(skb);
1161 ret = NET_XMIT_DROP;
1160 goto out_unlock; 1162 goto out_unlock;
1161 } 1163 }
1162 1164
@@ -1215,8 +1217,7 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len
1215out_unlock: 1217out_unlock:
1216 bh_unlock_sock(sk); 1218 bh_unlock_sock(sk);
1217 1219
1218abort: 1220 return ret;
1219 return 0;
1220} 1221}
1221EXPORT_SYMBOL_GPL(l2tp_xmit_skb); 1222EXPORT_SYMBOL_GPL(l2tp_xmit_skb);
1222 1223
diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c
index 47b259fccd2..f9ee74deeac 100644
--- a/net/l2tp/l2tp_eth.c
+++ b/net/l2tp/l2tp_eth.c
@@ -44,6 +44,7 @@ struct l2tp_eth {
44 struct list_head list; 44 struct list_head list;
45 atomic_long_t tx_bytes; 45 atomic_long_t tx_bytes;
46 atomic_long_t tx_packets; 46 atomic_long_t tx_packets;
47 atomic_long_t tx_dropped;
47 atomic_long_t rx_bytes; 48 atomic_long_t rx_bytes;
48 atomic_long_t rx_packets; 49 atomic_long_t rx_packets;
49 atomic_long_t rx_errors; 50 atomic_long_t rx_errors;
@@ -92,12 +93,15 @@ static int l2tp_eth_dev_xmit(struct sk_buff *skb, struct net_device *dev)
92{ 93{
93 struct l2tp_eth *priv = netdev_priv(dev); 94 struct l2tp_eth *priv = netdev_priv(dev);
94 struct l2tp_session *session = priv->session; 95 struct l2tp_session *session = priv->session;
96 unsigned int len = skb->len;
97 int ret = l2tp_xmit_skb(session, skb, session->hdr_len);
95 98
96 atomic_long_add(skb->len, &priv->tx_bytes); 99 if (likely(ret == NET_XMIT_SUCCESS)) {
97 atomic_long_inc(&priv->tx_packets); 100 atomic_long_add(len, &priv->tx_bytes);
98 101 atomic_long_inc(&priv->tx_packets);
99 l2tp_xmit_skb(session, skb, session->hdr_len); 102 } else {
100 103 atomic_long_inc(&priv->tx_dropped);
104 }
101 return NETDEV_TX_OK; 105 return NETDEV_TX_OK;
102} 106}
103 107
@@ -108,6 +112,7 @@ static struct rtnl_link_stats64 *l2tp_eth_get_stats64(struct net_device *dev,
108 112
109 stats->tx_bytes = atomic_long_read(&priv->tx_bytes); 113 stats->tx_bytes = atomic_long_read(&priv->tx_bytes);
110 stats->tx_packets = atomic_long_read(&priv->tx_packets); 114 stats->tx_packets = atomic_long_read(&priv->tx_packets);
115 stats->tx_dropped = atomic_long_read(&priv->tx_dropped);
111 stats->rx_bytes = atomic_long_read(&priv->rx_bytes); 116 stats->rx_bytes = atomic_long_read(&priv->rx_bytes);
112 stats->rx_packets = atomic_long_read(&priv->rx_packets); 117 stats->rx_packets = atomic_long_read(&priv->rx_packets);
113 stats->rx_errors = atomic_long_read(&priv->rx_errors); 118 stats->rx_errors = atomic_long_read(&priv->rx_errors);
diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c
index ddc553e7667..d71cd9229a4 100644
--- a/net/l2tp/l2tp_netlink.c
+++ b/net/l2tp/l2tp_netlink.c
@@ -72,7 +72,7 @@ static int l2tp_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info)
72 void *hdr; 72 void *hdr;
73 int ret = -ENOBUFS; 73 int ret = -ENOBUFS;
74 74
75 msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); 75 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
76 if (!msg) { 76 if (!msg) {
77 ret = -ENOMEM; 77 ret = -ENOMEM;
78 goto out; 78 goto out;
@@ -353,7 +353,7 @@ static int l2tp_nl_cmd_tunnel_get(struct sk_buff *skb, struct genl_info *info)
353 goto out; 353 goto out;
354 } 354 }
355 355
356 msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); 356 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
357 if (!msg) { 357 if (!msg) {
358 ret = -ENOMEM; 358 ret = -ENOMEM;
359 goto out; 359 goto out;
@@ -699,7 +699,7 @@ static int l2tp_nl_cmd_session_get(struct sk_buff *skb, struct genl_info *info)
699 goto out; 699 goto out;
700 } 700 }
701 701
702 msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); 702 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
703 if (!msg) { 703 if (!msg) {
704 ret = -ENOMEM; 704 ret = -ENOMEM;
705 goto out; 705 goto out;
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index 8ef6b9416cb..286366ef893 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -1522,8 +1522,8 @@ static int pppol2tp_session_getsockopt(struct sock *sk,
1522 * handler, according to whether the PPPoX socket is a for a regular session 1522 * handler, according to whether the PPPoX socket is a for a regular session
1523 * or the special tunnel type. 1523 * or the special tunnel type.
1524 */ 1524 */
1525static int pppol2tp_getsockopt(struct socket *sock, int level, 1525static int pppol2tp_getsockopt(struct socket *sock, int level, int optname,
1526 int optname, char __user *optval, int __user *optlen) 1526 char __user *optval, int __user *optlen)
1527{ 1527{
1528 struct sock *sk = sock->sk; 1528 struct sock *sk = sock->sk;
1529 struct l2tp_session *session; 1529 struct l2tp_session *session;
@@ -1535,7 +1535,7 @@ static int pppol2tp_getsockopt(struct socket *sock, int level,
1535 if (level != SOL_PPPOL2TP) 1535 if (level != SOL_PPPOL2TP)
1536 return udp_prot.getsockopt(sk, level, optname, optval, optlen); 1536 return udp_prot.getsockopt(sk, level, optname, optval, optlen);
1537 1537
1538 if (get_user(len, (int __user *) optlen)) 1538 if (get_user(len, optlen))
1539 return -EFAULT; 1539 return -EFAULT;
1540 1540
1541 len = min_t(unsigned int, len, sizeof(int)); 1541 len = min_t(unsigned int, len, sizeof(int));
@@ -1568,7 +1568,7 @@ static int pppol2tp_getsockopt(struct socket *sock, int level,
1568 err = pppol2tp_session_getsockopt(sk, session, optname, &val); 1568 err = pppol2tp_session_getsockopt(sk, session, optname, &val);
1569 1569
1570 err = -EFAULT; 1570 err = -EFAULT;
1571 if (put_user(len, (int __user *) optlen)) 1571 if (put_user(len, optlen))
1572 goto end_put_sess; 1572 goto end_put_sess;
1573 1573
1574 if (copy_to_user((void __user *) optval, &val, len)) 1574 if (copy_to_user((void __user *) optval, &val, len))
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index fe5453c3e71..f6fe4d40050 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -1024,7 +1024,7 @@ static int llc_ui_ioctl(struct socket *sock, unsigned int cmd,
1024 * @sock: Socket to set options on. 1024 * @sock: Socket to set options on.
1025 * @level: Socket level user is requesting operations on. 1025 * @level: Socket level user is requesting operations on.
1026 * @optname: Operation name. 1026 * @optname: Operation name.
1027 * @optval User provided operation data. 1027 * @optval: User provided operation data.
1028 * @optlen: Length of optval. 1028 * @optlen: Length of optval.
1029 * 1029 *
1030 * Set various connection specific parameters. 1030 * Set various connection specific parameters.
diff --git a/net/llc/llc_station.c b/net/llc/llc_station.c
index cf4aea3ba30..39a8d8924b9 100644
--- a/net/llc/llc_station.c
+++ b/net/llc/llc_station.c
@@ -30,12 +30,12 @@
30 * 30 *
31 * SAP and connection resource manager, one per adapter. 31 * SAP and connection resource manager, one per adapter.
32 * 32 *
33 * @state - state of station 33 * @state: state of station
34 * @xid_r_count - XID response PDU counter 34 * @xid_r_count: XID response PDU counter
35 * @mac_sa - MAC source address 35 * @mac_sa: MAC source address
36 * @sap_list - list of related SAPs 36 * @sap_list: list of related SAPs
37 * @ev_q - events entering state mach. 37 * @ev_q: events entering state mach.
38 * @mac_pdu_q - PDUs ready to send to MAC 38 * @mac_pdu_q: PDUs ready to send to MAC
39 */ 39 */
40struct llc_station { 40struct llc_station {
41 u8 state; 41 u8 state;
@@ -646,7 +646,7 @@ static void llc_station_service_events(void)
646} 646}
647 647
648/** 648/**
649 * llc_station_state_process: queue event and try to process queue. 649 * llc_station_state_process - queue event and try to process queue.
650 * @skb: Address of the event 650 * @skb: Address of the event
651 * 651 *
652 * Queues an event (on the station event queue) for handling by the 652 * Queues an event (on the station event queue) for handling by the
@@ -672,7 +672,7 @@ static void llc_station_ack_tmr_cb(unsigned long timeout_data)
672 } 672 }
673} 673}
674 674
675/* 675/**
676 * llc_station_rcv - send received pdu to the station state machine 676 * llc_station_rcv - send received pdu to the station state machine
677 * @skb: received frame. 677 * @skb: received frame.
678 * 678 *
diff --git a/net/mac80211/Kconfig b/net/mac80211/Kconfig
index 8d249d70598..63af25458fd 100644
--- a/net/mac80211/Kconfig
+++ b/net/mac80211/Kconfig
@@ -107,6 +107,19 @@ config MAC80211_DEBUGFS
107 107
108 Say N unless you know you need this. 108 Say N unless you know you need this.
109 109
110config MAC80211_MESSAGE_TRACING
111 bool "Trace all mac80211 debug messages"
112 depends on MAC80211
113 ---help---
114 Select this option to have mac80211 register the
115 mac80211_msg trace subsystem with tracepoints to
116 collect all debugging messages, independent of
117 printing them into the kernel log.
118
119 The overhead in this option is that all the messages
120 need to be present in the binary and formatted at
121 runtime for tracing.
122
110menuconfig MAC80211_DEBUG_MENU 123menuconfig MAC80211_DEBUG_MENU
111 bool "Select mac80211 debugging features" 124 bool "Select mac80211 debugging features"
112 depends on MAC80211 125 depends on MAC80211
@@ -140,26 +153,35 @@ config MAC80211_VERBOSE_DEBUG
140 153
141 Do not select this option. 154 Do not select this option.
142 155
143config MAC80211_HT_DEBUG 156config MAC80211_MLME_DEBUG
144 bool "Verbose HT debugging" 157 bool "Verbose managed MLME output"
145 depends on MAC80211_DEBUG_MENU 158 depends on MAC80211_DEBUG_MENU
146 ---help--- 159 ---help---
147 This option enables 802.11n High Throughput features 160 Selecting this option causes mac80211 to print out
148 debug tracing output. 161 debugging messages for the managed-mode MLME. It
149 162 should not be selected on production systems as some
150 It should not be selected on production systems as some
151 of the messages are remotely triggerable. 163 of the messages are remotely triggerable.
152 164
153 Do not select this option. 165 Do not select this option.
154 166
155config MAC80211_TKIP_DEBUG 167config MAC80211_STA_DEBUG
156 bool "Verbose TKIP debugging" 168 bool "Verbose station debugging"
157 depends on MAC80211_DEBUG_MENU 169 depends on MAC80211_DEBUG_MENU
158 ---help--- 170 ---help---
159 Selecting this option causes mac80211 to print out 171 Selecting this option causes mac80211 to print out
160 very verbose TKIP debugging messages. It should not 172 debugging messages for station addition/removal.
161 be selected on production systems as those messages 173
162 are remotely triggerable. 174 Do not select this option.
175
176config MAC80211_HT_DEBUG
177 bool "Verbose HT debugging"
178 depends on MAC80211_DEBUG_MENU
179 ---help---
180 This option enables 802.11n High Throughput features
181 debug tracing output.
182
183 It should not be selected on production systems as some
184 of the messages are remotely triggerable.
163 185
164 Do not select this option. 186 Do not select this option.
165 187
@@ -174,7 +196,7 @@ config MAC80211_IBSS_DEBUG
174 196
175 Do not select this option. 197 Do not select this option.
176 198
177config MAC80211_VERBOSE_PS_DEBUG 199config MAC80211_PS_DEBUG
178 bool "Verbose powersave mode debugging" 200 bool "Verbose powersave mode debugging"
179 depends on MAC80211_DEBUG_MENU 201 depends on MAC80211_DEBUG_MENU
180 ---help--- 202 ---help---
@@ -186,7 +208,7 @@ config MAC80211_VERBOSE_PS_DEBUG
186 208
187 Do not select this option. 209 Do not select this option.
188 210
189config MAC80211_VERBOSE_MPL_DEBUG 211config MAC80211_MPL_DEBUG
190 bool "Verbose mesh peer link debugging" 212 bool "Verbose mesh peer link debugging"
191 depends on MAC80211_DEBUG_MENU 213 depends on MAC80211_DEBUG_MENU
192 depends on MAC80211_MESH 214 depends on MAC80211_MESH
@@ -199,7 +221,7 @@ config MAC80211_VERBOSE_MPL_DEBUG
199 221
200 Do not select this option. 222 Do not select this option.
201 223
202config MAC80211_VERBOSE_MPATH_DEBUG 224config MAC80211_MPATH_DEBUG
203 bool "Verbose mesh path debugging" 225 bool "Verbose mesh path debugging"
204 depends on MAC80211_DEBUG_MENU 226 depends on MAC80211_DEBUG_MENU
205 depends on MAC80211_MESH 227 depends on MAC80211_MESH
@@ -212,7 +234,7 @@ config MAC80211_VERBOSE_MPATH_DEBUG
212 234
213 Do not select this option. 235 Do not select this option.
214 236
215config MAC80211_VERBOSE_MHWMP_DEBUG 237config MAC80211_MHWMP_DEBUG
216 bool "Verbose mesh HWMP routing debugging" 238 bool "Verbose mesh HWMP routing debugging"
217 depends on MAC80211_DEBUG_MENU 239 depends on MAC80211_DEBUG_MENU
218 depends on MAC80211_MESH 240 depends on MAC80211_MESH
@@ -225,7 +247,7 @@ config MAC80211_VERBOSE_MHWMP_DEBUG
225 247
226 Do not select this option. 248 Do not select this option.
227 249
228config MAC80211_VERBOSE_MESH_SYNC_DEBUG 250config MAC80211_MESH_SYNC_DEBUG
229 bool "Verbose mesh mesh synchronization debugging" 251 bool "Verbose mesh mesh synchronization debugging"
230 depends on MAC80211_DEBUG_MENU 252 depends on MAC80211_DEBUG_MENU
231 depends on MAC80211_MESH 253 depends on MAC80211_MESH
@@ -236,7 +258,7 @@ config MAC80211_VERBOSE_MESH_SYNC_DEBUG
236 258
237 Do not select this option. 259 Do not select this option.
238 260
239config MAC80211_VERBOSE_TDLS_DEBUG 261config MAC80211_TDLS_DEBUG
240 bool "Verbose TDLS debugging" 262 bool "Verbose TDLS debugging"
241 depends on MAC80211_DEBUG_MENU 263 depends on MAC80211_DEBUG_MENU
242 ---help--- 264 ---help---
diff --git a/net/mac80211/Makefile b/net/mac80211/Makefile
index 3e9d931bba3..a7dd110faaf 100644
--- a/net/mac80211/Makefile
+++ b/net/mac80211/Makefile
@@ -9,7 +9,6 @@ mac80211-y := \
9 scan.o offchannel.o \ 9 scan.o offchannel.o \
10 ht.o agg-tx.o agg-rx.o \ 10 ht.o agg-tx.o agg-rx.o \
11 ibss.o \ 11 ibss.o \
12 work.o \
13 iface.o \ 12 iface.o \
14 rate.o \ 13 rate.o \
15 michael.o \ 14 michael.o \
@@ -25,7 +24,7 @@ mac80211-y := \
25 wme.o \ 24 wme.o \
26 event.o \ 25 event.o \
27 chan.o \ 26 chan.o \
28 driver-trace.o mlme.o 27 trace.o mlme.o
29 28
30mac80211-$(CONFIG_MAC80211_LEDS) += led.o 29mac80211-$(CONFIG_MAC80211_LEDS) += led.o
31mac80211-$(CONFIG_MAC80211_DEBUGFS) += \ 30mac80211-$(CONFIG_MAC80211_DEBUGFS) += \
@@ -43,7 +42,7 @@ mac80211-$(CONFIG_MAC80211_MESH) += \
43 42
44mac80211-$(CONFIG_PM) += pm.o 43mac80211-$(CONFIG_PM) += pm.o
45 44
46CFLAGS_driver-trace.o := -I$(src) 45CFLAGS_trace.o := -I$(src)
47 46
48# objects for PID algorithm 47# objects for PID algorithm
49rc80211_pid-y := rc80211_pid_algo.o 48rc80211_pid-y := rc80211_pid_algo.o
@@ -59,4 +58,4 @@ mac80211-$(CONFIG_MAC80211_RC_PID) += $(rc80211_pid-y)
59mac80211-$(CONFIG_MAC80211_RC_MINSTREL) += $(rc80211_minstrel-y) 58mac80211-$(CONFIG_MAC80211_RC_MINSTREL) += $(rc80211_minstrel-y)
60mac80211-$(CONFIG_MAC80211_RC_MINSTREL_HT) += $(rc80211_minstrel_ht-y) 59mac80211-$(CONFIG_MAC80211_RC_MINSTREL_HT) += $(rc80211_minstrel_ht-y)
61 60
62ccflags-y += -D__CHECK_ENDIAN__ 61ccflags-y += -D__CHECK_ENDIAN__ -DDEBUG
diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
index c649188314c..186d9919b04 100644
--- a/net/mac80211/agg-rx.c
+++ b/net/mac80211/agg-rx.c
@@ -74,18 +74,17 @@ void ___ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
74 74
75 RCU_INIT_POINTER(sta->ampdu_mlme.tid_rx[tid], NULL); 75 RCU_INIT_POINTER(sta->ampdu_mlme.tid_rx[tid], NULL);
76 76
77#ifdef CONFIG_MAC80211_HT_DEBUG 77 ht_dbg(sta->sdata,
78 printk(KERN_DEBUG
79 "Rx BA session stop requested for %pM tid %u %s reason: %d\n", 78 "Rx BA session stop requested for %pM tid %u %s reason: %d\n",
80 sta->sta.addr, tid, 79 sta->sta.addr, tid,
81 initiator == WLAN_BACK_RECIPIENT ? "recipient" : "inititator", 80 initiator == WLAN_BACK_RECIPIENT ? "recipient" : "inititator",
82 (int)reason); 81 (int)reason);
83#endif /* CONFIG_MAC80211_HT_DEBUG */
84 82
85 if (drv_ampdu_action(local, sta->sdata, IEEE80211_AMPDU_RX_STOP, 83 if (drv_ampdu_action(local, sta->sdata, IEEE80211_AMPDU_RX_STOP,
86 &sta->sta, tid, NULL, 0)) 84 &sta->sta, tid, NULL, 0))
87 printk(KERN_DEBUG "HW problem - can not stop rx " 85 sdata_info(sta->sdata,
88 "aggregation for tid %d\n", tid); 86 "HW problem - can not stop rx aggregation for tid %d\n",
87 tid);
89 88
90 /* check if this is a self generated aggregation halt */ 89 /* check if this is a self generated aggregation halt */
91 if (initiator == WLAN_BACK_RECIPIENT && tx) 90 if (initiator == WLAN_BACK_RECIPIENT && tx)
@@ -160,9 +159,8 @@ static void sta_rx_agg_session_timer_expired(unsigned long data)
160 } 159 }
161 rcu_read_unlock(); 160 rcu_read_unlock();
162 161
163#ifdef CONFIG_MAC80211_HT_DEBUG 162 ht_dbg(sta->sdata, "rx session timer expired on tid %d\n", (u16)*ptid);
164 printk(KERN_DEBUG "rx session timer expired on tid %d\n", (u16)*ptid); 163
165#endif
166 set_bit(*ptid, sta->ampdu_mlme.tid_rx_timer_expired); 164 set_bit(*ptid, sta->ampdu_mlme.tid_rx_timer_expired);
167 ieee80211_queue_work(&sta->local->hw, &sta->ampdu_mlme.work); 165 ieee80211_queue_work(&sta->local->hw, &sta->ampdu_mlme.work);
168} 166}
@@ -249,10 +247,7 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
249 status = WLAN_STATUS_REQUEST_DECLINED; 247 status = WLAN_STATUS_REQUEST_DECLINED;
250 248
251 if (test_sta_flag(sta, WLAN_STA_BLOCK_BA)) { 249 if (test_sta_flag(sta, WLAN_STA_BLOCK_BA)) {
252#ifdef CONFIG_MAC80211_HT_DEBUG 250 ht_dbg(sta->sdata, "Suspend in progress - Denying ADDBA request\n");
253 printk(KERN_DEBUG "Suspend in progress. "
254 "Denying ADDBA request\n");
255#endif
256 goto end_no_lock; 251 goto end_no_lock;
257 } 252 }
258 253
@@ -264,10 +259,9 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
264 (!(sta->sta.ht_cap.cap & IEEE80211_HT_CAP_DELAY_BA))) || 259 (!(sta->sta.ht_cap.cap & IEEE80211_HT_CAP_DELAY_BA))) ||
265 (buf_size > IEEE80211_MAX_AMPDU_BUF)) { 260 (buf_size > IEEE80211_MAX_AMPDU_BUF)) {
266 status = WLAN_STATUS_INVALID_QOS_PARAM; 261 status = WLAN_STATUS_INVALID_QOS_PARAM;
267#ifdef CONFIG_MAC80211_HT_DEBUG 262 ht_dbg_ratelimited(sta->sdata,
268 net_dbg_ratelimited("AddBA Req with bad params from %pM on tid %u. policy %d, buffer size %d\n", 263 "AddBA Req with bad params from %pM on tid %u. policy %d, buffer size %d\n",
269 mgmt->sa, tid, ba_policy, buf_size); 264 mgmt->sa, tid, ba_policy, buf_size);
270#endif /* CONFIG_MAC80211_HT_DEBUG */
271 goto end_no_lock; 265 goto end_no_lock;
272 } 266 }
273 /* determine default buffer size */ 267 /* determine default buffer size */
@@ -282,10 +276,9 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
282 mutex_lock(&sta->ampdu_mlme.mtx); 276 mutex_lock(&sta->ampdu_mlme.mtx);
283 277
284 if (sta->ampdu_mlme.tid_rx[tid]) { 278 if (sta->ampdu_mlme.tid_rx[tid]) {
285#ifdef CONFIG_MAC80211_HT_DEBUG 279 ht_dbg_ratelimited(sta->sdata,
286 net_dbg_ratelimited("unexpected AddBA Req from %pM on tid %u\n", 280 "unexpected AddBA Req from %pM on tid %u\n",
287 mgmt->sa, tid); 281 mgmt->sa, tid);
288#endif /* CONFIG_MAC80211_HT_DEBUG */
289 282
290 /* delete existing Rx BA session on the same tid */ 283 /* delete existing Rx BA session on the same tid */
291 ___ieee80211_stop_rx_ba_session(sta, tid, WLAN_BACK_RECIPIENT, 284 ___ieee80211_stop_rx_ba_session(sta, tid, WLAN_BACK_RECIPIENT,
@@ -324,10 +317,7 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
324 317
325 ret = drv_ampdu_action(local, sta->sdata, IEEE80211_AMPDU_RX_START, 318 ret = drv_ampdu_action(local, sta->sdata, IEEE80211_AMPDU_RX_START,
326 &sta->sta, tid, &start_seq_num, 0); 319 &sta->sta, tid, &start_seq_num, 0);
327#ifdef CONFIG_MAC80211_HT_DEBUG 320 ht_dbg(sta->sdata, "Rx A-MPDU request on tid %d result %d\n", tid, ret);
328 printk(KERN_DEBUG "Rx A-MPDU request on tid %d result %d\n", tid, ret);
329#endif /* CONFIG_MAC80211_HT_DEBUG */
330
331 if (ret) { 321 if (ret) {
332 kfree(tid_agg_rx->reorder_buf); 322 kfree(tid_agg_rx->reorder_buf);
333 kfree(tid_agg_rx->reorder_time); 323 kfree(tid_agg_rx->reorder_time);
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index 7cf07158805..5cc1bf7d803 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -184,10 +184,8 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
184 184
185 spin_unlock_bh(&sta->lock); 185 spin_unlock_bh(&sta->lock);
186 186
187#ifdef CONFIG_MAC80211_HT_DEBUG 187 ht_dbg(sta->sdata, "Tx BA session stop requested for %pM tid %u\n",
188 printk(KERN_DEBUG "Tx BA session stop requested for %pM tid %u\n",
189 sta->sta.addr, tid); 188 sta->sta.addr, tid);
190#endif /* CONFIG_MAC80211_HT_DEBUG */
191 189
192 del_timer_sync(&tid_tx->addba_resp_timer); 190 del_timer_sync(&tid_tx->addba_resp_timer);
193 del_timer_sync(&tid_tx->session_timer); 191 del_timer_sync(&tid_tx->session_timer);
@@ -253,17 +251,13 @@ static void sta_addba_resp_timer_expired(unsigned long data)
253 if (!tid_tx || 251 if (!tid_tx ||
254 test_bit(HT_AGG_STATE_RESPONSE_RECEIVED, &tid_tx->state)) { 252 test_bit(HT_AGG_STATE_RESPONSE_RECEIVED, &tid_tx->state)) {
255 rcu_read_unlock(); 253 rcu_read_unlock();
256#ifdef CONFIG_MAC80211_HT_DEBUG 254 ht_dbg(sta->sdata,
257 printk(KERN_DEBUG "timer expired on tid %d but we are not " 255 "timer expired on tid %d but we are not (or no longer) expecting addBA response there\n",
258 "(or no longer) expecting addBA response there\n", 256 tid);
259 tid);
260#endif
261 return; 257 return;
262 } 258 }
263 259
264#ifdef CONFIG_MAC80211_HT_DEBUG 260 ht_dbg(sta->sdata, "addBA response timer expired on tid %d\n", tid);
265 printk(KERN_DEBUG "addBA response timer expired on tid %d\n", tid);
266#endif
267 261
268 ieee80211_stop_tx_ba_session(&sta->sta, tid); 262 ieee80211_stop_tx_ba_session(&sta->sta, tid);
269 rcu_read_unlock(); 263 rcu_read_unlock();
@@ -323,8 +317,9 @@ ieee80211_agg_splice_packets(struct ieee80211_sub_if_data *sdata,
323 317
324 ieee80211_stop_queue_agg(sdata, tid); 318 ieee80211_stop_queue_agg(sdata, tid);
325 319
326 if (WARN(!tid_tx, "TID %d gone but expected when splicing aggregates" 320 if (WARN(!tid_tx,
327 " from the pending queue\n", tid)) 321 "TID %d gone but expected when splicing aggregates from the pending queue\n",
322 tid))
328 return; 323 return;
329 324
330 if (!skb_queue_empty(&tid_tx->pending)) { 325 if (!skb_queue_empty(&tid_tx->pending)) {
@@ -372,10 +367,8 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
372 ret = drv_ampdu_action(local, sdata, IEEE80211_AMPDU_TX_START, 367 ret = drv_ampdu_action(local, sdata, IEEE80211_AMPDU_TX_START,
373 &sta->sta, tid, &start_seq_num, 0); 368 &sta->sta, tid, &start_seq_num, 0);
374 if (ret) { 369 if (ret) {
375#ifdef CONFIG_MAC80211_HT_DEBUG 370 ht_dbg(sdata,
376 printk(KERN_DEBUG "BA request denied - HW unavailable for" 371 "BA request denied - HW unavailable for tid %d\n", tid);
377 " tid %d\n", tid);
378#endif
379 spin_lock_bh(&sta->lock); 372 spin_lock_bh(&sta->lock);
380 ieee80211_agg_splice_packets(sdata, tid_tx, tid); 373 ieee80211_agg_splice_packets(sdata, tid_tx, tid);
381 ieee80211_assign_tid_tx(sta, tid, NULL); 374 ieee80211_assign_tid_tx(sta, tid, NULL);
@@ -388,9 +381,7 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
388 381
389 /* activate the timer for the recipient's addBA response */ 382 /* activate the timer for the recipient's addBA response */
390 mod_timer(&tid_tx->addba_resp_timer, jiffies + ADDBA_RESP_INTERVAL); 383 mod_timer(&tid_tx->addba_resp_timer, jiffies + ADDBA_RESP_INTERVAL);
391#ifdef CONFIG_MAC80211_HT_DEBUG 384 ht_dbg(sdata, "activated addBA response timer on tid %d\n", tid);
392 printk(KERN_DEBUG "activated addBA response timer on tid %d\n", tid);
393#endif
394 385
395 spin_lock_bh(&sta->lock); 386 spin_lock_bh(&sta->lock);
396 sta->ampdu_mlme.last_addba_req_time[tid] = jiffies; 387 sta->ampdu_mlme.last_addba_req_time[tid] = jiffies;
@@ -437,9 +428,7 @@ static void sta_tx_agg_session_timer_expired(unsigned long data)
437 428
438 rcu_read_unlock(); 429 rcu_read_unlock();
439 430
440#ifdef CONFIG_MAC80211_HT_DEBUG 431 ht_dbg(sta->sdata, "tx session timer expired on tid %d\n", (u16)*ptid);
441 printk(KERN_DEBUG "tx session timer expired on tid %d\n", (u16)*ptid);
442#endif
443 432
444 ieee80211_stop_tx_ba_session(&sta->sta, *ptid); 433 ieee80211_stop_tx_ba_session(&sta->sta, *ptid);
445} 434}
@@ -463,10 +452,8 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
463 (local->hw.flags & IEEE80211_HW_TX_AMPDU_SETUP_IN_HW)) 452 (local->hw.flags & IEEE80211_HW_TX_AMPDU_SETUP_IN_HW))
464 return -EINVAL; 453 return -EINVAL;
465 454
466#ifdef CONFIG_MAC80211_HT_DEBUG 455 ht_dbg(sdata, "Open BA session requested for %pM tid %u\n",
467 printk(KERN_DEBUG "Open BA session requested for %pM tid %u\n",
468 pubsta->addr, tid); 456 pubsta->addr, tid);
469#endif /* CONFIG_MAC80211_HT_DEBUG */
470 457
471 if (sdata->vif.type != NL80211_IFTYPE_STATION && 458 if (sdata->vif.type != NL80211_IFTYPE_STATION &&
472 sdata->vif.type != NL80211_IFTYPE_MESH_POINT && 459 sdata->vif.type != NL80211_IFTYPE_MESH_POINT &&
@@ -476,10 +463,8 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
476 return -EINVAL; 463 return -EINVAL;
477 464
478 if (test_sta_flag(sta, WLAN_STA_BLOCK_BA)) { 465 if (test_sta_flag(sta, WLAN_STA_BLOCK_BA)) {
479#ifdef CONFIG_MAC80211_HT_DEBUG 466 ht_dbg(sdata,
480 printk(KERN_DEBUG "BA sessions blocked. " 467 "BA sessions blocked - Denying BA session request\n");
481 "Denying BA session request\n");
482#endif
483 return -EINVAL; 468 return -EINVAL;
484 } 469 }
485 470
@@ -497,10 +482,9 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
497 */ 482 */
498 if (sta->sdata->vif.type == NL80211_IFTYPE_ADHOC && 483 if (sta->sdata->vif.type == NL80211_IFTYPE_ADHOC &&
499 !sta->sta.ht_cap.ht_supported) { 484 !sta->sta.ht_cap.ht_supported) {
500#ifdef CONFIG_MAC80211_HT_DEBUG 485 ht_dbg(sdata,
501 printk(KERN_DEBUG "BA request denied - IBSS STA %pM" 486 "BA request denied - IBSS STA %pM does not advertise HT support\n",
502 "does not advertise HT support\n", pubsta->addr); 487 pubsta->addr);
503#endif /* CONFIG_MAC80211_HT_DEBUG */
504 return -EINVAL; 488 return -EINVAL;
505 } 489 }
506 490
@@ -520,12 +504,9 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
520 if (sta->ampdu_mlme.addba_req_num[tid] > HT_AGG_BURST_RETRIES && 504 if (sta->ampdu_mlme.addba_req_num[tid] > HT_AGG_BURST_RETRIES &&
521 time_before(jiffies, sta->ampdu_mlme.last_addba_req_time[tid] + 505 time_before(jiffies, sta->ampdu_mlme.last_addba_req_time[tid] +
522 HT_AGG_RETRIES_PERIOD)) { 506 HT_AGG_RETRIES_PERIOD)) {
523#ifdef CONFIG_MAC80211_HT_DEBUG 507 ht_dbg(sdata,
524 printk(KERN_DEBUG "BA request denied - " 508 "BA request denied - waiting a grace period after %d failed requests on tid %u\n",
525 "waiting a grace period after %d failed requests "
526 "on tid %u\n",
527 sta->ampdu_mlme.addba_req_num[tid], tid); 509 sta->ampdu_mlme.addba_req_num[tid], tid);
528#endif /* CONFIG_MAC80211_HT_DEBUG */
529 ret = -EBUSY; 510 ret = -EBUSY;
530 goto err_unlock_sta; 511 goto err_unlock_sta;
531 } 512 }
@@ -533,10 +514,9 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
533 tid_tx = rcu_dereference_protected_tid_tx(sta, tid); 514 tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
534 /* check if the TID is not in aggregation flow already */ 515 /* check if the TID is not in aggregation flow already */
535 if (tid_tx || sta->ampdu_mlme.tid_start_tx[tid]) { 516 if (tid_tx || sta->ampdu_mlme.tid_start_tx[tid]) {
536#ifdef CONFIG_MAC80211_HT_DEBUG 517 ht_dbg(sdata,
537 printk(KERN_DEBUG "BA request denied - session is not " 518 "BA request denied - session is not idle on tid %u\n",
538 "idle on tid %u\n", tid); 519 tid);
539#endif /* CONFIG_MAC80211_HT_DEBUG */
540 ret = -EAGAIN; 520 ret = -EAGAIN;
541 goto err_unlock_sta; 521 goto err_unlock_sta;
542 } 522 }
@@ -591,9 +571,7 @@ static void ieee80211_agg_tx_operational(struct ieee80211_local *local,
591 571
592 tid_tx = rcu_dereference_protected_tid_tx(sta, tid); 572 tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
593 573
594#ifdef CONFIG_MAC80211_HT_DEBUG 574 ht_dbg(sta->sdata, "Aggregation is on for tid %d\n", tid);
595 printk(KERN_DEBUG "Aggregation is on for tid %d\n", tid);
596#endif
597 575
598 drv_ampdu_action(local, sta->sdata, 576 drv_ampdu_action(local, sta->sdata,
599 IEEE80211_AMPDU_TX_OPERATIONAL, 577 IEEE80211_AMPDU_TX_OPERATIONAL,
@@ -627,10 +605,8 @@ void ieee80211_start_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u16 tid)
627 trace_api_start_tx_ba_cb(sdata, ra, tid); 605 trace_api_start_tx_ba_cb(sdata, ra, tid);
628 606
629 if (tid >= STA_TID_NUM) { 607 if (tid >= STA_TID_NUM) {
630#ifdef CONFIG_MAC80211_HT_DEBUG 608 ht_dbg(sdata, "Bad TID value: tid = %d (>= %d)\n",
631 printk(KERN_DEBUG "Bad TID value: tid = %d (>= %d)\n", 609 tid, STA_TID_NUM);
632 tid, STA_TID_NUM);
633#endif
634 return; 610 return;
635 } 611 }
636 612
@@ -638,9 +614,7 @@ void ieee80211_start_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u16 tid)
638 sta = sta_info_get_bss(sdata, ra); 614 sta = sta_info_get_bss(sdata, ra);
639 if (!sta) { 615 if (!sta) {
640 mutex_unlock(&local->sta_mtx); 616 mutex_unlock(&local->sta_mtx);
641#ifdef CONFIG_MAC80211_HT_DEBUG 617 ht_dbg(sdata, "Could not find station: %pM\n", ra);
642 printk(KERN_DEBUG "Could not find station: %pM\n", ra);
643#endif
644 return; 618 return;
645 } 619 }
646 620
@@ -648,9 +622,7 @@ void ieee80211_start_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u16 tid)
648 tid_tx = rcu_dereference_protected_tid_tx(sta, tid); 622 tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
649 623
650 if (WARN_ON(!tid_tx)) { 624 if (WARN_ON(!tid_tx)) {
651#ifdef CONFIG_MAC80211_HT_DEBUG 625 ht_dbg(sdata, "addBA was not requested!\n");
652 printk(KERN_DEBUG "addBA was not requested!\n");
653#endif
654 goto unlock; 626 goto unlock;
655 } 627 }
656 628
@@ -750,25 +722,18 @@ void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid)
750 trace_api_stop_tx_ba_cb(sdata, ra, tid); 722 trace_api_stop_tx_ba_cb(sdata, ra, tid);
751 723
752 if (tid >= STA_TID_NUM) { 724 if (tid >= STA_TID_NUM) {
753#ifdef CONFIG_MAC80211_HT_DEBUG 725 ht_dbg(sdata, "Bad TID value: tid = %d (>= %d)\n",
754 printk(KERN_DEBUG "Bad TID value: tid = %d (>= %d)\n", 726 tid, STA_TID_NUM);
755 tid, STA_TID_NUM);
756#endif
757 return; 727 return;
758 } 728 }
759 729
760#ifdef CONFIG_MAC80211_HT_DEBUG 730 ht_dbg(sdata, "Stopping Tx BA session for %pM tid %d\n", ra, tid);
761 printk(KERN_DEBUG "Stopping Tx BA session for %pM tid %d\n",
762 ra, tid);
763#endif /* CONFIG_MAC80211_HT_DEBUG */
764 731
765 mutex_lock(&local->sta_mtx); 732 mutex_lock(&local->sta_mtx);
766 733
767 sta = sta_info_get_bss(sdata, ra); 734 sta = sta_info_get_bss(sdata, ra);
768 if (!sta) { 735 if (!sta) {
769#ifdef CONFIG_MAC80211_HT_DEBUG 736 ht_dbg(sdata, "Could not find station: %pM\n", ra);
770 printk(KERN_DEBUG "Could not find station: %pM\n", ra);
771#endif
772 goto unlock; 737 goto unlock;
773 } 738 }
774 739
@@ -777,9 +742,7 @@ void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid)
777 tid_tx = rcu_dereference_protected_tid_tx(sta, tid); 742 tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
778 743
779 if (!tid_tx || !test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) { 744 if (!tid_tx || !test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
780#ifdef CONFIG_MAC80211_HT_DEBUG 745 ht_dbg(sdata, "unexpected callback to A-MPDU stop\n");
781 printk(KERN_DEBUG "unexpected callback to A-MPDU stop\n");
782#endif
783 goto unlock_sta; 746 goto unlock_sta;
784 } 747 }
785 748
@@ -855,17 +818,13 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local,
855 goto out; 818 goto out;
856 819
857 if (mgmt->u.action.u.addba_resp.dialog_token != tid_tx->dialog_token) { 820 if (mgmt->u.action.u.addba_resp.dialog_token != tid_tx->dialog_token) {
858#ifdef CONFIG_MAC80211_HT_DEBUG 821 ht_dbg(sta->sdata, "wrong addBA response token, tid %d\n", tid);
859 printk(KERN_DEBUG "wrong addBA response token, tid %d\n", tid);
860#endif
861 goto out; 822 goto out;
862 } 823 }
863 824
864 del_timer_sync(&tid_tx->addba_resp_timer); 825 del_timer_sync(&tid_tx->addba_resp_timer);
865 826
866#ifdef CONFIG_MAC80211_HT_DEBUG 827 ht_dbg(sta->sdata, "switched off addBA timer for tid %d\n", tid);
867 printk(KERN_DEBUG "switched off addBA timer for tid %d\n", tid);
868#endif
869 828
870 /* 829 /*
871 * addba_resp_timer may have fired before we got here, and 830 * addba_resp_timer may have fired before we got here, and
@@ -874,11 +833,9 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local,
874 */ 833 */
875 if (test_bit(HT_AGG_STATE_WANT_STOP, &tid_tx->state) || 834 if (test_bit(HT_AGG_STATE_WANT_STOP, &tid_tx->state) ||
876 test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) { 835 test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
877#ifdef CONFIG_MAC80211_HT_DEBUG 836 ht_dbg(sta->sdata,
878 printk(KERN_DEBUG
879 "got addBA resp for tid %d but we already gave up\n", 837 "got addBA resp for tid %d but we already gave up\n",
880 tid); 838 tid);
881#endif
882 goto out; 839 goto out;
883 } 840 }
884 841
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 7d5108a867a..c2a2dcbfdf0 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -353,6 +353,7 @@ void sta_set_rate_info_tx(struct sta_info *sta,
353static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo) 353static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
354{ 354{
355 struct ieee80211_sub_if_data *sdata = sta->sdata; 355 struct ieee80211_sub_if_data *sdata = sta->sdata;
356 struct ieee80211_local *local = sdata->local;
356 struct timespec uptime; 357 struct timespec uptime;
357 358
358 sinfo->generation = sdata->local->sta_generation; 359 sinfo->generation = sdata->local->sta_generation;
@@ -388,7 +389,9 @@ static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
388 if ((sta->local->hw.flags & IEEE80211_HW_SIGNAL_DBM) || 389 if ((sta->local->hw.flags & IEEE80211_HW_SIGNAL_DBM) ||
389 (sta->local->hw.flags & IEEE80211_HW_SIGNAL_UNSPEC)) { 390 (sta->local->hw.flags & IEEE80211_HW_SIGNAL_UNSPEC)) {
390 sinfo->filled |= STATION_INFO_SIGNAL | STATION_INFO_SIGNAL_AVG; 391 sinfo->filled |= STATION_INFO_SIGNAL | STATION_INFO_SIGNAL_AVG;
391 sinfo->signal = (s8)sta->last_signal; 392 if (!local->ops->get_rssi ||
393 drv_get_rssi(local, sdata, &sta->sta, &sinfo->signal))
394 sinfo->signal = (s8)sta->last_signal;
392 sinfo->signal_avg = (s8) -ewma_read(&sta->avg_signal); 395 sinfo->signal_avg = (s8) -ewma_read(&sta->avg_signal);
393 } 396 }
394 397
@@ -517,7 +520,7 @@ static void ieee80211_get_et_stats(struct wiphy *wiphy,
517 * network device. 520 * network device.
518 */ 521 */
519 522
520 rcu_read_lock(); 523 mutex_lock(&local->sta_mtx);
521 524
522 if (sdata->vif.type == NL80211_IFTYPE_STATION) { 525 if (sdata->vif.type == NL80211_IFTYPE_STATION) {
523 sta = sta_info_get_bss(sdata, sdata->u.mgd.bssid); 526 sta = sta_info_get_bss(sdata, sdata->u.mgd.bssid);
@@ -546,7 +549,7 @@ static void ieee80211_get_et_stats(struct wiphy *wiphy,
546 data[i] = (u8)sinfo.signal_avg; 549 data[i] = (u8)sinfo.signal_avg;
547 i++; 550 i++;
548 } else { 551 } else {
549 list_for_each_entry_rcu(sta, &local->sta_list, list) { 552 list_for_each_entry(sta, &local->sta_list, list) {
550 /* Make sure this station belongs to the proper dev */ 553 /* Make sure this station belongs to the proper dev */
551 if (sta->sdata->dev != dev) 554 if (sta->sdata->dev != dev)
552 continue; 555 continue;
@@ -603,7 +606,7 @@ do_survey:
603 else 606 else
604 data[i++] = -1LL; 607 data[i++] = -1LL;
605 608
606 rcu_read_unlock(); 609 mutex_unlock(&local->sta_mtx);
607 610
608 if (WARN_ON(i != STA_STATS_LEN)) 611 if (WARN_ON(i != STA_STATS_LEN))
609 return; 612 return;
@@ -629,10 +632,11 @@ static int ieee80211_dump_station(struct wiphy *wiphy, struct net_device *dev,
629 int idx, u8 *mac, struct station_info *sinfo) 632 int idx, u8 *mac, struct station_info *sinfo)
630{ 633{
631 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 634 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
635 struct ieee80211_local *local = sdata->local;
632 struct sta_info *sta; 636 struct sta_info *sta;
633 int ret = -ENOENT; 637 int ret = -ENOENT;
634 638
635 rcu_read_lock(); 639 mutex_lock(&local->sta_mtx);
636 640
637 sta = sta_info_get_by_idx(sdata, idx); 641 sta = sta_info_get_by_idx(sdata, idx);
638 if (sta) { 642 if (sta) {
@@ -641,7 +645,7 @@ static int ieee80211_dump_station(struct wiphy *wiphy, struct net_device *dev,
641 sta_set_sinfo(sta, sinfo); 645 sta_set_sinfo(sta, sinfo);
642 } 646 }
643 647
644 rcu_read_unlock(); 648 mutex_unlock(&local->sta_mtx);
645 649
646 return ret; 650 return ret;
647} 651}
@@ -658,10 +662,11 @@ static int ieee80211_get_station(struct wiphy *wiphy, struct net_device *dev,
658 u8 *mac, struct station_info *sinfo) 662 u8 *mac, struct station_info *sinfo)
659{ 663{
660 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 664 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
665 struct ieee80211_local *local = sdata->local;
661 struct sta_info *sta; 666 struct sta_info *sta;
662 int ret = -ENOENT; 667 int ret = -ENOENT;
663 668
664 rcu_read_lock(); 669 mutex_lock(&local->sta_mtx);
665 670
666 sta = sta_info_get_bss(sdata, mac); 671 sta = sta_info_get_bss(sdata, mac);
667 if (sta) { 672 if (sta) {
@@ -669,11 +674,54 @@ static int ieee80211_get_station(struct wiphy *wiphy, struct net_device *dev,
669 sta_set_sinfo(sta, sinfo); 674 sta_set_sinfo(sta, sinfo);
670 } 675 }
671 676
672 rcu_read_unlock(); 677 mutex_unlock(&local->sta_mtx);
673 678
674 return ret; 679 return ret;
675} 680}
676 681
682static int ieee80211_set_channel(struct wiphy *wiphy,
683 struct net_device *netdev,
684 struct ieee80211_channel *chan,
685 enum nl80211_channel_type channel_type)
686{
687 struct ieee80211_local *local = wiphy_priv(wiphy);
688 struct ieee80211_sub_if_data *sdata = NULL;
689
690 if (netdev)
691 sdata = IEEE80211_DEV_TO_SUB_IF(netdev);
692
693 switch (ieee80211_get_channel_mode(local, NULL)) {
694 case CHAN_MODE_HOPPING:
695 return -EBUSY;
696 case CHAN_MODE_FIXED:
697 if (local->oper_channel != chan ||
698 (!sdata && local->_oper_channel_type != channel_type))
699 return -EBUSY;
700 if (!sdata && local->_oper_channel_type == channel_type)
701 return 0;
702 break;
703 case CHAN_MODE_UNDEFINED:
704 break;
705 }
706
707 if (!ieee80211_set_channel_type(local, sdata, channel_type))
708 return -EBUSY;
709
710 local->oper_channel = chan;
711
712 /* auto-detects changes */
713 ieee80211_hw_config(local, 0);
714
715 return 0;
716}
717
718static int ieee80211_set_monitor_channel(struct wiphy *wiphy,
719 struct ieee80211_channel *chan,
720 enum nl80211_channel_type channel_type)
721{
722 return ieee80211_set_channel(wiphy, NULL, chan, channel_type);
723}
724
677static int ieee80211_set_probe_resp(struct ieee80211_sub_if_data *sdata, 725static int ieee80211_set_probe_resp(struct ieee80211_sub_if_data *sdata,
678 const u8 *resp, size_t resp_len) 726 const u8 *resp, size_t resp_len)
679{ 727{
@@ -788,6 +836,11 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
788 if (old) 836 if (old)
789 return -EALREADY; 837 return -EALREADY;
790 838
839 err = ieee80211_set_channel(wiphy, dev, params->channel,
840 params->channel_type);
841 if (err)
842 return err;
843
791 /* 844 /*
792 * Apply control port protocol, this allows us to 845 * Apply control port protocol, this allows us to
793 * not encrypt dynamic WEP control frames. 846 * not encrypt dynamic WEP control frames.
@@ -1482,7 +1535,7 @@ static int ieee80211_update_mesh_config(struct wiphy *wiphy,
1482 if (_chg_mesh_attr(NL80211_MESHCONF_TTL, mask)) 1535 if (_chg_mesh_attr(NL80211_MESHCONF_TTL, mask))
1483 conf->dot11MeshTTL = nconf->dot11MeshTTL; 1536 conf->dot11MeshTTL = nconf->dot11MeshTTL;
1484 if (_chg_mesh_attr(NL80211_MESHCONF_ELEMENT_TTL, mask)) 1537 if (_chg_mesh_attr(NL80211_MESHCONF_ELEMENT_TTL, mask))
1485 conf->dot11MeshTTL = nconf->element_ttl; 1538 conf->element_ttl = nconf->element_ttl;
1486 if (_chg_mesh_attr(NL80211_MESHCONF_AUTO_OPEN_PLINKS, mask)) 1539 if (_chg_mesh_attr(NL80211_MESHCONF_AUTO_OPEN_PLINKS, mask))
1487 conf->auto_open_plinks = nconf->auto_open_plinks; 1540 conf->auto_open_plinks = nconf->auto_open_plinks;
1488 if (_chg_mesh_attr(NL80211_MESHCONF_SYNC_OFFSET_MAX_NEIGHBOR, mask)) 1541 if (_chg_mesh_attr(NL80211_MESHCONF_SYNC_OFFSET_MAX_NEIGHBOR, mask))
@@ -1517,17 +1570,16 @@ static int ieee80211_update_mesh_config(struct wiphy *wiphy,
1517 * announcements, so require this ifmsh to also be a root node 1570 * announcements, so require this ifmsh to also be a root node
1518 * */ 1571 * */
1519 if (nconf->dot11MeshGateAnnouncementProtocol && 1572 if (nconf->dot11MeshGateAnnouncementProtocol &&
1520 !conf->dot11MeshHWMPRootMode) { 1573 !(conf->dot11MeshHWMPRootMode > IEEE80211_ROOTMODE_ROOT)) {
1521 conf->dot11MeshHWMPRootMode = 1; 1574 conf->dot11MeshHWMPRootMode = IEEE80211_PROACTIVE_RANN;
1522 ieee80211_mesh_root_setup(ifmsh); 1575 ieee80211_mesh_root_setup(ifmsh);
1523 } 1576 }
1524 conf->dot11MeshGateAnnouncementProtocol = 1577 conf->dot11MeshGateAnnouncementProtocol =
1525 nconf->dot11MeshGateAnnouncementProtocol; 1578 nconf->dot11MeshGateAnnouncementProtocol;
1526 } 1579 }
1527 if (_chg_mesh_attr(NL80211_MESHCONF_HWMP_RANN_INTERVAL, mask)) { 1580 if (_chg_mesh_attr(NL80211_MESHCONF_HWMP_RANN_INTERVAL, mask))
1528 conf->dot11MeshHWMPRannInterval = 1581 conf->dot11MeshHWMPRannInterval =
1529 nconf->dot11MeshHWMPRannInterval; 1582 nconf->dot11MeshHWMPRannInterval;
1530 }
1531 if (_chg_mesh_attr(NL80211_MESHCONF_FORWARDING, mask)) 1583 if (_chg_mesh_attr(NL80211_MESHCONF_FORWARDING, mask))
1532 conf->dot11MeshForwarding = nconf->dot11MeshForwarding; 1584 conf->dot11MeshForwarding = nconf->dot11MeshForwarding;
1533 if (_chg_mesh_attr(NL80211_MESHCONF_RSSI_THRESHOLD, mask)) { 1585 if (_chg_mesh_attr(NL80211_MESHCONF_RSSI_THRESHOLD, mask)) {
@@ -1543,6 +1595,15 @@ static int ieee80211_update_mesh_config(struct wiphy *wiphy,
1543 sdata->vif.bss_conf.ht_operation_mode = nconf->ht_opmode; 1595 sdata->vif.bss_conf.ht_operation_mode = nconf->ht_opmode;
1544 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_HT); 1596 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_HT);
1545 } 1597 }
1598 if (_chg_mesh_attr(NL80211_MESHCONF_HWMP_PATH_TO_ROOT_TIMEOUT, mask))
1599 conf->dot11MeshHWMPactivePathToRootTimeout =
1600 nconf->dot11MeshHWMPactivePathToRootTimeout;
1601 if (_chg_mesh_attr(NL80211_MESHCONF_HWMP_ROOT_INTERVAL, mask))
1602 conf->dot11MeshHWMProotInterval =
1603 nconf->dot11MeshHWMProotInterval;
1604 if (_chg_mesh_attr(NL80211_MESHCONF_HWMP_CONFIRMATION_INTERVAL, mask))
1605 conf->dot11MeshHWMPconfirmationInterval =
1606 nconf->dot11MeshHWMPconfirmationInterval;
1546 return 0; 1607 return 0;
1547} 1608}
1548 1609
@@ -1558,6 +1619,12 @@ static int ieee80211_join_mesh(struct wiphy *wiphy, struct net_device *dev,
1558 err = copy_mesh_setup(ifmsh, setup); 1619 err = copy_mesh_setup(ifmsh, setup);
1559 if (err) 1620 if (err)
1560 return err; 1621 return err;
1622
1623 err = ieee80211_set_channel(wiphy, dev, setup->channel,
1624 setup->channel_type);
1625 if (err)
1626 return err;
1627
1561 ieee80211_start_mesh(sdata); 1628 ieee80211_start_mesh(sdata);
1562 1629
1563 return 0; 1630 return 0;
@@ -1677,55 +1744,6 @@ static int ieee80211_set_txq_params(struct wiphy *wiphy,
1677 return 0; 1744 return 0;
1678} 1745}
1679 1746
1680static int ieee80211_set_channel(struct wiphy *wiphy,
1681 struct net_device *netdev,
1682 struct ieee80211_channel *chan,
1683 enum nl80211_channel_type channel_type)
1684{
1685 struct ieee80211_local *local = wiphy_priv(wiphy);
1686 struct ieee80211_sub_if_data *sdata = NULL;
1687 struct ieee80211_channel *old_oper;
1688 enum nl80211_channel_type old_oper_type;
1689 enum nl80211_channel_type old_vif_oper_type= NL80211_CHAN_NO_HT;
1690
1691 if (netdev)
1692 sdata = IEEE80211_DEV_TO_SUB_IF(netdev);
1693
1694 switch (ieee80211_get_channel_mode(local, NULL)) {
1695 case CHAN_MODE_HOPPING:
1696 return -EBUSY;
1697 case CHAN_MODE_FIXED:
1698 if (local->oper_channel != chan)
1699 return -EBUSY;
1700 if (!sdata && local->_oper_channel_type == channel_type)
1701 return 0;
1702 break;
1703 case CHAN_MODE_UNDEFINED:
1704 break;
1705 }
1706
1707 if (sdata)
1708 old_vif_oper_type = sdata->vif.bss_conf.channel_type;
1709 old_oper_type = local->_oper_channel_type;
1710
1711 if (!ieee80211_set_channel_type(local, sdata, channel_type))
1712 return -EBUSY;
1713
1714 old_oper = local->oper_channel;
1715 local->oper_channel = chan;
1716
1717 /* Update driver if changes were actually made. */
1718 if ((old_oper != local->oper_channel) ||
1719 (old_oper_type != local->_oper_channel_type))
1720 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
1721
1722 if (sdata && sdata->vif.type != NL80211_IFTYPE_MONITOR &&
1723 old_vif_oper_type != sdata->vif.bss_conf.channel_type)
1724 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_HT);
1725
1726 return 0;
1727}
1728
1729#ifdef CONFIG_PM 1747#ifdef CONFIG_PM
1730static int ieee80211_suspend(struct wiphy *wiphy, 1748static int ieee80211_suspend(struct wiphy *wiphy,
1731 struct cfg80211_wowlan *wowlan) 1749 struct cfg80211_wowlan *wowlan)
@@ -2111,35 +2129,171 @@ static int ieee80211_set_bitrate_mask(struct wiphy *wiphy,
2111 return 0; 2129 return 0;
2112} 2130}
2113 2131
2114static int ieee80211_remain_on_channel_hw(struct ieee80211_local *local, 2132static int ieee80211_start_roc_work(struct ieee80211_local *local,
2115 struct net_device *dev, 2133 struct ieee80211_sub_if_data *sdata,
2116 struct ieee80211_channel *chan, 2134 struct ieee80211_channel *channel,
2117 enum nl80211_channel_type chantype, 2135 enum nl80211_channel_type channel_type,
2118 unsigned int duration, u64 *cookie) 2136 unsigned int duration, u64 *cookie,
2137 struct sk_buff *txskb)
2119{ 2138{
2139 struct ieee80211_roc_work *roc, *tmp;
2140 bool queued = false;
2120 int ret; 2141 int ret;
2121 u32 random_cookie;
2122 2142
2123 lockdep_assert_held(&local->mtx); 2143 lockdep_assert_held(&local->mtx);
2124 2144
2125 if (local->hw_roc_cookie) 2145 roc = kzalloc(sizeof(*roc), GFP_KERNEL);
2126 return -EBUSY; 2146 if (!roc)
2127 /* must be nonzero */ 2147 return -ENOMEM;
2128 random_cookie = random32() | 1; 2148
2129 2149 roc->chan = channel;
2130 *cookie = random_cookie; 2150 roc->chan_type = channel_type;
2131 local->hw_roc_dev = dev; 2151 roc->duration = duration;
2132 local->hw_roc_cookie = random_cookie; 2152 roc->req_duration = duration;
2133 local->hw_roc_channel = chan; 2153 roc->frame = txskb;
2134 local->hw_roc_channel_type = chantype; 2154 roc->mgmt_tx_cookie = (unsigned long)txskb;
2135 local->hw_roc_duration = duration; 2155 roc->sdata = sdata;
2136 ret = drv_remain_on_channel(local, chan, chantype, duration); 2156 INIT_DELAYED_WORK(&roc->work, ieee80211_sw_roc_work);
2157 INIT_LIST_HEAD(&roc->dependents);
2158
2159 /* if there's one pending or we're scanning, queue this one */
2160 if (!list_empty(&local->roc_list) || local->scanning)
2161 goto out_check_combine;
2162
2163 /* if not HW assist, just queue & schedule work */
2164 if (!local->ops->remain_on_channel) {
2165 ieee80211_queue_delayed_work(&local->hw, &roc->work, 0);
2166 goto out_queue;
2167 }
2168
2169 /* otherwise actually kick it off here (for error handling) */
2170
2171 /*
2172 * If the duration is zero, then the driver
2173 * wouldn't actually do anything. Set it to
2174 * 10 for now.
2175 *
2176 * TODO: cancel the off-channel operation
2177 * when we get the SKB's TX status and
2178 * the wait time was zero before.
2179 */
2180 if (!duration)
2181 duration = 10;
2182
2183 ret = drv_remain_on_channel(local, channel, channel_type, duration);
2137 if (ret) { 2184 if (ret) {
2138 local->hw_roc_channel = NULL; 2185 kfree(roc);
2139 local->hw_roc_cookie = 0; 2186 return ret;
2140 } 2187 }
2141 2188
2142 return ret; 2189 roc->started = true;
2190 goto out_queue;
2191
2192 out_check_combine:
2193 list_for_each_entry(tmp, &local->roc_list, list) {
2194 if (tmp->chan != channel || tmp->chan_type != channel_type)
2195 continue;
2196
2197 /*
2198 * Extend this ROC if possible:
2199 *
2200 * If it hasn't started yet, just increase the duration
2201 * and add the new one to the list of dependents.
2202 */
2203 if (!tmp->started) {
2204 list_add_tail(&roc->list, &tmp->dependents);
2205 tmp->duration = max(tmp->duration, roc->duration);
2206 queued = true;
2207 break;
2208 }
2209
2210 /* If it has already started, it's more difficult ... */
2211 if (local->ops->remain_on_channel) {
2212 unsigned long j = jiffies;
2213
2214 /*
2215 * In the offloaded ROC case, if it hasn't begun, add
2216 * this new one to the dependent list to be handled
2217 * when the the master one begins. If it has begun,
2218 * check that there's still a minimum time left and
2219 * if so, start this one, transmitting the frame, but
2220 * add it to the list directly after this one with a
2221 * a reduced time so we'll ask the driver to execute
2222 * it right after finishing the previous one, in the
2223 * hope that it'll also be executed right afterwards,
2224 * effectively extending the old one.
2225 * If there's no minimum time left, just add it to the
2226 * normal list.
2227 */
2228 if (!tmp->hw_begun) {
2229 list_add_tail(&roc->list, &tmp->dependents);
2230 queued = true;
2231 break;
2232 }
2233
2234 if (time_before(j + IEEE80211_ROC_MIN_LEFT,
2235 tmp->hw_start_time +
2236 msecs_to_jiffies(tmp->duration))) {
2237 int new_dur;
2238
2239 ieee80211_handle_roc_started(roc);
2240
2241 new_dur = roc->duration -
2242 jiffies_to_msecs(tmp->hw_start_time +
2243 msecs_to_jiffies(
2244 tmp->duration) -
2245 j);
2246
2247 if (new_dur > 0) {
2248 /* add right after tmp */
2249 list_add(&roc->list, &tmp->list);
2250 } else {
2251 list_add_tail(&roc->list,
2252 &tmp->dependents);
2253 }
2254 queued = true;
2255 }
2256 } else if (del_timer_sync(&tmp->work.timer)) {
2257 unsigned long new_end;
2258
2259 /*
2260 * In the software ROC case, cancel the timer, if
2261 * that fails then the finish work is already
2262 * queued/pending and thus we queue the new ROC
2263 * normally, if that succeeds then we can extend
2264 * the timer duration and TX the frame (if any.)
2265 */
2266
2267 list_add_tail(&roc->list, &tmp->dependents);
2268 queued = true;
2269
2270 new_end = jiffies + msecs_to_jiffies(roc->duration);
2271
2272 /* ok, it was started & we canceled timer */
2273 if (time_after(new_end, tmp->work.timer.expires))
2274 mod_timer(&tmp->work.timer, new_end);
2275 else
2276 add_timer(&tmp->work.timer);
2277
2278 ieee80211_handle_roc_started(roc);
2279 }
2280 break;
2281 }
2282
2283 out_queue:
2284 if (!queued)
2285 list_add_tail(&roc->list, &local->roc_list);
2286
2287 /*
2288 * cookie is either the roc (for normal roc)
2289 * or the SKB (for mgmt TX)
2290 */
2291 if (txskb)
2292 *cookie = (unsigned long)txskb;
2293 else
2294 *cookie = (unsigned long)roc;
2295
2296 return 0;
2143} 2297}
2144 2298
2145static int ieee80211_remain_on_channel(struct wiphy *wiphy, 2299static int ieee80211_remain_on_channel(struct wiphy *wiphy,
@@ -2151,86 +2305,98 @@ static int ieee80211_remain_on_channel(struct wiphy *wiphy,
2151{ 2305{
2152 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 2306 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
2153 struct ieee80211_local *local = sdata->local; 2307 struct ieee80211_local *local = sdata->local;
2308 int ret;
2154 2309
2155 if (local->ops->remain_on_channel) { 2310 mutex_lock(&local->mtx);
2156 int ret; 2311 ret = ieee80211_start_roc_work(local, sdata, chan, channel_type,
2157 2312 duration, cookie, NULL);
2158 mutex_lock(&local->mtx); 2313 mutex_unlock(&local->mtx);
2159 ret = ieee80211_remain_on_channel_hw(local, dev,
2160 chan, channel_type,
2161 duration, cookie);
2162 local->hw_roc_for_tx = false;
2163 mutex_unlock(&local->mtx);
2164
2165 return ret;
2166 }
2167 2314
2168 return ieee80211_wk_remain_on_channel(sdata, chan, channel_type, 2315 return ret;
2169 duration, cookie);
2170} 2316}
2171 2317
2172static int ieee80211_cancel_remain_on_channel_hw(struct ieee80211_local *local, 2318static int ieee80211_cancel_roc(struct ieee80211_local *local,
2173 u64 cookie) 2319 u64 cookie, bool mgmt_tx)
2174{ 2320{
2321 struct ieee80211_roc_work *roc, *tmp, *found = NULL;
2175 int ret; 2322 int ret;
2176 2323
2177 lockdep_assert_held(&local->mtx); 2324 mutex_lock(&local->mtx);
2325 list_for_each_entry_safe(roc, tmp, &local->roc_list, list) {
2326 struct ieee80211_roc_work *dep, *tmp2;
2178 2327
2179 if (local->hw_roc_cookie != cookie) 2328 list_for_each_entry_safe(dep, tmp2, &roc->dependents, list) {
2180 return -ENOENT; 2329 if (!mgmt_tx && (unsigned long)dep != cookie)
2330 continue;
2331 else if (mgmt_tx && dep->mgmt_tx_cookie != cookie)
2332 continue;
2333 /* found dependent item -- just remove it */
2334 list_del(&dep->list);
2335 mutex_unlock(&local->mtx);
2181 2336
2182 ret = drv_cancel_remain_on_channel(local); 2337 ieee80211_roc_notify_destroy(dep);
2183 if (ret) 2338 return 0;
2184 return ret; 2339 }
2185 2340
2186 local->hw_roc_cookie = 0; 2341 if (!mgmt_tx && (unsigned long)roc != cookie)
2187 local->hw_roc_channel = NULL; 2342 continue;
2343 else if (mgmt_tx && roc->mgmt_tx_cookie != cookie)
2344 continue;
2188 2345
2189 ieee80211_recalc_idle(local); 2346 found = roc;
2347 break;
2348 }
2190 2349
2191 return 0; 2350 if (!found) {
2192} 2351 mutex_unlock(&local->mtx);
2352 return -ENOENT;
2353 }
2193 2354
2194static int ieee80211_cancel_remain_on_channel(struct wiphy *wiphy, 2355 /*
2195 struct net_device *dev, 2356 * We found the item to cancel, so do that. Note that it
2196 u64 cookie) 2357 * may have dependents, which we also cancel (and send
2197{ 2358 * the expired signal for.) Not doing so would be quite
2198 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 2359 * tricky here, but we may need to fix it later.
2199 struct ieee80211_local *local = sdata->local; 2360 */
2200 2361
2201 if (local->ops->cancel_remain_on_channel) { 2362 if (local->ops->remain_on_channel) {
2202 int ret; 2363 if (found->started) {
2364 ret = drv_cancel_remain_on_channel(local);
2365 if (WARN_ON_ONCE(ret)) {
2366 mutex_unlock(&local->mtx);
2367 return ret;
2368 }
2369 }
2203 2370
2204 mutex_lock(&local->mtx); 2371 list_del(&found->list);
2205 ret = ieee80211_cancel_remain_on_channel_hw(local, cookie); 2372
2373 if (found->started)
2374 ieee80211_start_next_roc(local);
2206 mutex_unlock(&local->mtx); 2375 mutex_unlock(&local->mtx);
2207 2376
2208 return ret; 2377 ieee80211_roc_notify_destroy(found);
2378 } else {
2379 /* work may be pending so use it all the time */
2380 found->abort = true;
2381 ieee80211_queue_delayed_work(&local->hw, &found->work, 0);
2382
2383 mutex_unlock(&local->mtx);
2384
2385 /* work will clean up etc */
2386 flush_delayed_work(&found->work);
2209 } 2387 }
2210 2388
2211 return ieee80211_wk_cancel_remain_on_channel(sdata, cookie); 2389 return 0;
2212} 2390}
2213 2391
2214static enum work_done_result 2392static int ieee80211_cancel_remain_on_channel(struct wiphy *wiphy,
2215ieee80211_offchan_tx_done(struct ieee80211_work *wk, struct sk_buff *skb) 2393 struct net_device *dev,
2394 u64 cookie)
2216{ 2395{
2217 /* 2396 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
2218 * Use the data embedded in the work struct for reporting 2397 struct ieee80211_local *local = sdata->local;
2219 * here so if the driver mangled the SKB before dropping
2220 * it (which is the only way we really should get here)
2221 * then we don't report mangled data.
2222 *
2223 * If there was no wait time, then by the time we get here
2224 * the driver will likely not have reported the status yet,
2225 * so in that case userspace will have to deal with it.
2226 */
2227
2228 if (wk->offchan_tx.wait && !wk->offchan_tx.status)
2229 cfg80211_mgmt_tx_status(wk->sdata->dev,
2230 (unsigned long) wk->offchan_tx.frame,
2231 wk->data, wk->data_len, false, GFP_KERNEL);
2232 2398
2233 return WORK_DONE_DESTROY; 2399 return ieee80211_cancel_roc(local, cookie, false);
2234} 2400}
2235 2401
2236static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct net_device *dev, 2402static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct net_device *dev,
@@ -2244,10 +2410,10 @@ static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct net_device *dev,
2244 struct ieee80211_local *local = sdata->local; 2410 struct ieee80211_local *local = sdata->local;
2245 struct sk_buff *skb; 2411 struct sk_buff *skb;
2246 struct sta_info *sta; 2412 struct sta_info *sta;
2247 struct ieee80211_work *wk;
2248 const struct ieee80211_mgmt *mgmt = (void *)buf; 2413 const struct ieee80211_mgmt *mgmt = (void *)buf;
2414 bool need_offchan = false;
2249 u32 flags; 2415 u32 flags;
2250 bool is_offchan = false; 2416 int ret;
2251 2417
2252 if (dont_wait_for_ack) 2418 if (dont_wait_for_ack)
2253 flags = IEEE80211_TX_CTL_NO_ACK; 2419 flags = IEEE80211_TX_CTL_NO_ACK;
@@ -2255,33 +2421,28 @@ static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct net_device *dev,
2255 flags = IEEE80211_TX_INTFL_NL80211_FRAME_TX | 2421 flags = IEEE80211_TX_INTFL_NL80211_FRAME_TX |
2256 IEEE80211_TX_CTL_REQ_TX_STATUS; 2422 IEEE80211_TX_CTL_REQ_TX_STATUS;
2257 2423
2258 /* Check that we are on the requested channel for transmission */
2259 if (chan != local->tmp_channel &&
2260 chan != local->oper_channel)
2261 is_offchan = true;
2262 if (channel_type_valid &&
2263 (channel_type != local->tmp_channel_type &&
2264 channel_type != local->_oper_channel_type))
2265 is_offchan = true;
2266
2267 if (chan == local->hw_roc_channel) {
2268 /* TODO: check channel type? */
2269 is_offchan = false;
2270 flags |= IEEE80211_TX_CTL_TX_OFFCHAN;
2271 }
2272
2273 if (no_cck) 2424 if (no_cck)
2274 flags |= IEEE80211_TX_CTL_NO_CCK_RATE; 2425 flags |= IEEE80211_TX_CTL_NO_CCK_RATE;
2275 2426
2276 if (is_offchan && !offchan)
2277 return -EBUSY;
2278
2279 switch (sdata->vif.type) { 2427 switch (sdata->vif.type) {
2280 case NL80211_IFTYPE_ADHOC: 2428 case NL80211_IFTYPE_ADHOC:
2429 if (!sdata->vif.bss_conf.ibss_joined)
2430 need_offchan = true;
2431 /* fall through */
2432#ifdef CONFIG_MAC80211_MESH
2433 case NL80211_IFTYPE_MESH_POINT:
2434 if (ieee80211_vif_is_mesh(&sdata->vif) &&
2435 !sdata->u.mesh.mesh_id_len)
2436 need_offchan = true;
2437 /* fall through */
2438#endif
2281 case NL80211_IFTYPE_AP: 2439 case NL80211_IFTYPE_AP:
2282 case NL80211_IFTYPE_AP_VLAN: 2440 case NL80211_IFTYPE_AP_VLAN:
2283 case NL80211_IFTYPE_P2P_GO: 2441 case NL80211_IFTYPE_P2P_GO:
2284 case NL80211_IFTYPE_MESH_POINT: 2442 if (sdata->vif.type != NL80211_IFTYPE_ADHOC &&
2443 !ieee80211_vif_is_mesh(&sdata->vif) &&
2444 !rcu_access_pointer(sdata->bss->beacon))
2445 need_offchan = true;
2285 if (!ieee80211_is_action(mgmt->frame_control) || 2446 if (!ieee80211_is_action(mgmt->frame_control) ||
2286 mgmt->u.action.category == WLAN_CATEGORY_PUBLIC) 2447 mgmt->u.action.category == WLAN_CATEGORY_PUBLIC)
2287 break; 2448 break;
@@ -2293,103 +2454,60 @@ static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct net_device *dev,
2293 break; 2454 break;
2294 case NL80211_IFTYPE_STATION: 2455 case NL80211_IFTYPE_STATION:
2295 case NL80211_IFTYPE_P2P_CLIENT: 2456 case NL80211_IFTYPE_P2P_CLIENT:
2457 if (!sdata->u.mgd.associated)
2458 need_offchan = true;
2296 break; 2459 break;
2297 default: 2460 default:
2298 return -EOPNOTSUPP; 2461 return -EOPNOTSUPP;
2299 } 2462 }
2300 2463
2464 mutex_lock(&local->mtx);
2465
2466 /* Check if the operating channel is the requested channel */
2467 if (!need_offchan) {
2468 need_offchan = chan != local->oper_channel;
2469 if (channel_type_valid &&
2470 channel_type != local->_oper_channel_type)
2471 need_offchan = true;
2472 }
2473
2474 if (need_offchan && !offchan) {
2475 ret = -EBUSY;
2476 goto out_unlock;
2477 }
2478
2301 skb = dev_alloc_skb(local->hw.extra_tx_headroom + len); 2479 skb = dev_alloc_skb(local->hw.extra_tx_headroom + len);
2302 if (!skb) 2480 if (!skb) {
2303 return -ENOMEM; 2481 ret = -ENOMEM;
2482 goto out_unlock;
2483 }
2304 skb_reserve(skb, local->hw.extra_tx_headroom); 2484 skb_reserve(skb, local->hw.extra_tx_headroom);
2305 2485
2306 memcpy(skb_put(skb, len), buf, len); 2486 memcpy(skb_put(skb, len), buf, len);
2307 2487
2308 IEEE80211_SKB_CB(skb)->flags = flags; 2488 IEEE80211_SKB_CB(skb)->flags = flags;
2309 2489
2310 if (flags & IEEE80211_TX_CTL_TX_OFFCHAN)
2311 IEEE80211_SKB_CB(skb)->hw_queue =
2312 local->hw.offchannel_tx_hw_queue;
2313
2314 skb->dev = sdata->dev; 2490 skb->dev = sdata->dev;
2315 2491
2316 *cookie = (unsigned long) skb; 2492 if (!need_offchan) {
2317 2493 ieee80211_tx_skb(sdata, skb);
2318 if (is_offchan && local->ops->remain_on_channel) { 2494 ret = 0;
2319 unsigned int duration; 2495 goto out_unlock;
2320 int ret; 2496 }
2321
2322 mutex_lock(&local->mtx);
2323 /*
2324 * If the duration is zero, then the driver
2325 * wouldn't actually do anything. Set it to
2326 * 100 for now.
2327 *
2328 * TODO: cancel the off-channel operation
2329 * when we get the SKB's TX status and
2330 * the wait time was zero before.
2331 */
2332 duration = 100;
2333 if (wait)
2334 duration = wait;
2335 ret = ieee80211_remain_on_channel_hw(local, dev, chan,
2336 channel_type,
2337 duration, cookie);
2338 if (ret) {
2339 kfree_skb(skb);
2340 mutex_unlock(&local->mtx);
2341 return ret;
2342 }
2343
2344 local->hw_roc_for_tx = true;
2345 local->hw_roc_duration = wait;
2346
2347 /*
2348 * queue up frame for transmission after
2349 * ieee80211_ready_on_channel call
2350 */
2351 2497
2352 /* modify cookie to prevent API mismatches */ 2498 IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_TX_OFFCHAN;
2353 *cookie ^= 2; 2499 if (local->hw.flags & IEEE80211_HW_QUEUE_CONTROL)
2354 IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_TX_OFFCHAN;
2355 IEEE80211_SKB_CB(skb)->hw_queue = 2500 IEEE80211_SKB_CB(skb)->hw_queue =
2356 local->hw.offchannel_tx_hw_queue; 2501 local->hw.offchannel_tx_hw_queue;
2357 local->hw_roc_skb = skb;
2358 local->hw_roc_skb_for_status = skb;
2359 mutex_unlock(&local->mtx);
2360 2502
2361 return 0; 2503 /* This will handle all kinds of coalescing and immediate TX */
2362 } 2504 ret = ieee80211_start_roc_work(local, sdata, chan, channel_type,
2363 2505 wait, cookie, skb);
2364 /* 2506 if (ret)
2365 * Can transmit right away if the channel was the
2366 * right one and there's no wait involved... If a
2367 * wait is involved, we might otherwise not be on
2368 * the right channel for long enough!
2369 */
2370 if (!is_offchan && !wait && !sdata->vif.bss_conf.idle) {
2371 ieee80211_tx_skb(sdata, skb);
2372 return 0;
2373 }
2374
2375 wk = kzalloc(sizeof(*wk) + len, GFP_KERNEL);
2376 if (!wk) {
2377 kfree_skb(skb); 2507 kfree_skb(skb);
2378 return -ENOMEM; 2508 out_unlock:
2379 } 2509 mutex_unlock(&local->mtx);
2380 2510 return ret;
2381 wk->type = IEEE80211_WORK_OFFCHANNEL_TX;
2382 wk->chan = chan;
2383 wk->chan_type = channel_type;
2384 wk->sdata = sdata;
2385 wk->done = ieee80211_offchan_tx_done;
2386 wk->offchan_tx.frame = skb;
2387 wk->offchan_tx.wait = wait;
2388 wk->data_len = len;
2389 memcpy(wk->data, buf, len);
2390
2391 ieee80211_add_work(wk);
2392 return 0;
2393} 2511}
2394 2512
2395static int ieee80211_mgmt_tx_cancel_wait(struct wiphy *wiphy, 2513static int ieee80211_mgmt_tx_cancel_wait(struct wiphy *wiphy,
@@ -2398,45 +2516,8 @@ static int ieee80211_mgmt_tx_cancel_wait(struct wiphy *wiphy,
2398{ 2516{
2399 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 2517 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
2400 struct ieee80211_local *local = sdata->local; 2518 struct ieee80211_local *local = sdata->local;
2401 struct ieee80211_work *wk;
2402 int ret = -ENOENT;
2403
2404 mutex_lock(&local->mtx);
2405
2406 if (local->ops->cancel_remain_on_channel) {
2407 cookie ^= 2;
2408 ret = ieee80211_cancel_remain_on_channel_hw(local, cookie);
2409
2410 if (ret == 0) {
2411 kfree_skb(local->hw_roc_skb);
2412 local->hw_roc_skb = NULL;
2413 local->hw_roc_skb_for_status = NULL;
2414 }
2415
2416 mutex_unlock(&local->mtx);
2417 2519
2418 return ret; 2520 return ieee80211_cancel_roc(local, cookie, true);
2419 }
2420
2421 list_for_each_entry(wk, &local->work_list, list) {
2422 if (wk->sdata != sdata)
2423 continue;
2424
2425 if (wk->type != IEEE80211_WORK_OFFCHANNEL_TX)
2426 continue;
2427
2428 if (cookie != (unsigned long) wk->offchan_tx.frame)
2429 continue;
2430
2431 wk->timeout = jiffies;
2432
2433 ieee80211_queue_work(&local->hw, &local->work_work);
2434 ret = 0;
2435 break;
2436 }
2437 mutex_unlock(&local->mtx);
2438
2439 return ret;
2440} 2521}
2441 2522
2442static void ieee80211_mgmt_frame_register(struct wiphy *wiphy, 2523static void ieee80211_mgmt_frame_register(struct wiphy *wiphy,
@@ -2444,16 +2525,30 @@ static void ieee80211_mgmt_frame_register(struct wiphy *wiphy,
2444 u16 frame_type, bool reg) 2525 u16 frame_type, bool reg)
2445{ 2526{
2446 struct ieee80211_local *local = wiphy_priv(wiphy); 2527 struct ieee80211_local *local = wiphy_priv(wiphy);
2528 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
2447 2529
2448 if (frame_type != (IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_PROBE_REQ)) 2530 switch (frame_type) {
2449 return; 2531 case IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_AUTH:
2532 if (sdata->vif.type == NL80211_IFTYPE_ADHOC) {
2533 struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
2450 2534
2451 if (reg) 2535 if (reg)
2452 local->probe_req_reg++; 2536 ifibss->auth_frame_registrations++;
2453 else 2537 else
2454 local->probe_req_reg--; 2538 ifibss->auth_frame_registrations--;
2539 }
2540 break;
2541 case IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_PROBE_REQ:
2542 if (reg)
2543 local->probe_req_reg++;
2544 else
2545 local->probe_req_reg--;
2455 2546
2456 ieee80211_queue_work(&local->hw, &local->reconfig_filter); 2547 ieee80211_queue_work(&local->hw, &local->reconfig_filter);
2548 break;
2549 default:
2550 break;
2551 }
2457} 2552}
2458 2553
2459static int ieee80211_set_antenna(struct wiphy *wiphy, u32 tx_ant, u32 rx_ant) 2554static int ieee80211_set_antenna(struct wiphy *wiphy, u32 tx_ant, u32 rx_ant)
@@ -2679,9 +2774,8 @@ static int ieee80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev,
2679 !sdata->u.mgd.associated) 2774 !sdata->u.mgd.associated)
2680 return -EINVAL; 2775 return -EINVAL;
2681 2776
2682#ifdef CONFIG_MAC80211_VERBOSE_TDLS_DEBUG 2777 tdls_dbg(sdata, "TDLS mgmt action %d peer %pM\n",
2683 printk(KERN_DEBUG "TDLS mgmt action %d peer %pM\n", action_code, peer); 2778 action_code, peer);
2684#endif
2685 2779
2686 skb = dev_alloc_skb(local->hw.extra_tx_headroom + 2780 skb = dev_alloc_skb(local->hw.extra_tx_headroom +
2687 max(sizeof(struct ieee80211_mgmt), 2781 max(sizeof(struct ieee80211_mgmt),
@@ -2790,9 +2884,7 @@ static int ieee80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
2790 if (sdata->vif.type != NL80211_IFTYPE_STATION) 2884 if (sdata->vif.type != NL80211_IFTYPE_STATION)
2791 return -EINVAL; 2885 return -EINVAL;
2792 2886
2793#ifdef CONFIG_MAC80211_VERBOSE_TDLS_DEBUG 2887 tdls_dbg(sdata, "TDLS oper %d peer %pM\n", oper, peer);
2794 printk(KERN_DEBUG "TDLS oper %d peer %pM\n", oper, peer);
2795#endif
2796 2888
2797 switch (oper) { 2889 switch (oper) {
2798 case NL80211_TDLS_ENABLE_LINK: 2890 case NL80211_TDLS_ENABLE_LINK:
@@ -2936,7 +3028,7 @@ struct cfg80211_ops mac80211_config_ops = {
2936#endif 3028#endif
2937 .change_bss = ieee80211_change_bss, 3029 .change_bss = ieee80211_change_bss,
2938 .set_txq_params = ieee80211_set_txq_params, 3030 .set_txq_params = ieee80211_set_txq_params,
2939 .set_channel = ieee80211_set_channel, 3031 .set_monitor_channel = ieee80211_set_monitor_channel,
2940 .suspend = ieee80211_suspend, 3032 .suspend = ieee80211_suspend,
2941 .resume = ieee80211_resume, 3033 .resume = ieee80211_resume,
2942 .scan = ieee80211_scan, 3034 .scan = ieee80211_scan,
diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c
index c76cf7230c7..f0f87e5a1d3 100644
--- a/net/mac80211/chan.c
+++ b/net/mac80211/chan.c
@@ -41,6 +41,10 @@ __ieee80211_get_channel_mode(struct ieee80211_local *local,
41 if (!sdata->u.ap.beacon) 41 if (!sdata->u.ap.beacon)
42 continue; 42 continue;
43 break; 43 break;
44 case NL80211_IFTYPE_MESH_POINT:
45 if (!sdata->wdev.mesh_id_len)
46 continue;
47 break;
44 default: 48 default:
45 break; 49 break;
46 } 50 }
diff --git a/net/mac80211/debug.h b/net/mac80211/debug.h
new file mode 100644
index 00000000000..8f383a57601
--- /dev/null
+++ b/net/mac80211/debug.h
@@ -0,0 +1,170 @@
1#ifndef __MAC80211_DEBUG_H
2#define __MAC80211_DEBUG_H
3#include <net/cfg80211.h>
4
5#ifdef CONFIG_MAC80211_IBSS_DEBUG
6#define MAC80211_IBSS_DEBUG 1
7#else
8#define MAC80211_IBSS_DEBUG 0
9#endif
10
11#ifdef CONFIG_MAC80211_PS_DEBUG
12#define MAC80211_PS_DEBUG 1
13#else
14#define MAC80211_PS_DEBUG 0
15#endif
16
17#ifdef CONFIG_MAC80211_HT_DEBUG
18#define MAC80211_HT_DEBUG 1
19#else
20#define MAC80211_HT_DEBUG 0
21#endif
22
23#ifdef CONFIG_MAC80211_MPL_DEBUG
24#define MAC80211_MPL_DEBUG 1
25#else
26#define MAC80211_MPL_DEBUG 0
27#endif
28
29#ifdef CONFIG_MAC80211_MPATH_DEBUG
30#define MAC80211_MPATH_DEBUG 1
31#else
32#define MAC80211_MPATH_DEBUG 0
33#endif
34
35#ifdef CONFIG_MAC80211_MHWMP_DEBUG
36#define MAC80211_MHWMP_DEBUG 1
37#else
38#define MAC80211_MHWMP_DEBUG 0
39#endif
40
41#ifdef CONFIG_MAC80211_MESH_SYNC_DEBUG
42#define MAC80211_MESH_SYNC_DEBUG 1
43#else
44#define MAC80211_MESH_SYNC_DEBUG 0
45#endif
46
47#ifdef CONFIG_MAC80211_TDLS_DEBUG
48#define MAC80211_TDLS_DEBUG 1
49#else
50#define MAC80211_TDLS_DEBUG 0
51#endif
52
53#ifdef CONFIG_MAC80211_STA_DEBUG
54#define MAC80211_STA_DEBUG 1
55#else
56#define MAC80211_STA_DEBUG 0
57#endif
58
59#ifdef CONFIG_MAC80211_MLME_DEBUG
60#define MAC80211_MLME_DEBUG 1
61#else
62#define MAC80211_MLME_DEBUG 0
63#endif
64
65#ifdef CONFIG_MAC80211_MESSAGE_TRACING
66void __sdata_info(const char *fmt, ...) __printf(1, 2);
67void __sdata_dbg(bool print, const char *fmt, ...) __printf(2, 3);
68void __sdata_err(const char *fmt, ...) __printf(1, 2);
69void __wiphy_dbg(struct wiphy *wiphy, bool print, const char *fmt, ...)
70 __printf(3, 4);
71
72#define _sdata_info(sdata, fmt, ...) \
73 __sdata_info("%s: " fmt, (sdata)->name, ##__VA_ARGS__)
74#define _sdata_dbg(print, sdata, fmt, ...) \
75 __sdata_dbg(print, "%s: " fmt, (sdata)->name, ##__VA_ARGS__)
76#define _sdata_err(sdata, fmt, ...) \
77 __sdata_err("%s: " fmt, (sdata)->name, ##__VA_ARGS__)
78#define _wiphy_dbg(print, wiphy, fmt, ...) \
79 __wiphy_dbg(wiphy, print, fmt, ##__VA_ARGS__)
80#else
81#define _sdata_info(sdata, fmt, ...) \
82do { \
83 pr_info("%s: " fmt, \
84 (sdata)->name, ##__VA_ARGS__); \
85} while (0)
86
87#define _sdata_dbg(print, sdata, fmt, ...) \
88do { \
89 if (print) \
90 pr_debug("%s: " fmt, \
91 (sdata)->name, ##__VA_ARGS__); \
92} while (0)
93
94#define _sdata_err(sdata, fmt, ...) \
95do { \
96 pr_err("%s: " fmt, \
97 (sdata)->name, ##__VA_ARGS__); \
98} while (0)
99
100#define _wiphy_dbg(print, wiphy, fmt, ...) \
101do { \
102 if (print) \
103 wiphy_dbg((wiphy), fmt, ##__VA_ARGS__); \
104} while (0)
105#endif
106
107#define sdata_info(sdata, fmt, ...) \
108 _sdata_info(sdata, fmt, ##__VA_ARGS__)
109#define sdata_err(sdata, fmt, ...) \
110 _sdata_err(sdata, fmt, ##__VA_ARGS__)
111#define sdata_dbg(sdata, fmt, ...) \
112 _sdata_dbg(1, sdata, fmt, ##__VA_ARGS__)
113
114#define ht_dbg(sdata, fmt, ...) \
115 _sdata_dbg(MAC80211_HT_DEBUG, \
116 sdata, fmt, ##__VA_ARGS__)
117
118#define ht_dbg_ratelimited(sdata, fmt, ...) \
119 _sdata_dbg(MAC80211_HT_DEBUG && net_ratelimit(), \
120 sdata, fmt, ##__VA_ARGS__)
121
122#define ibss_dbg(sdata, fmt, ...) \
123 _sdata_dbg(MAC80211_IBSS_DEBUG, \
124 sdata, fmt, ##__VA_ARGS__)
125
126#define ps_dbg(sdata, fmt, ...) \
127 _sdata_dbg(MAC80211_PS_DEBUG, \
128 sdata, fmt, ##__VA_ARGS__)
129
130#define ps_dbg_hw(hw, fmt, ...) \
131 _wiphy_dbg(MAC80211_PS_DEBUG, \
132 (hw)->wiphy, fmt, ##__VA_ARGS__)
133
134#define ps_dbg_ratelimited(sdata, fmt, ...) \
135 _sdata_dbg(MAC80211_PS_DEBUG && net_ratelimit(), \
136 sdata, fmt, ##__VA_ARGS__)
137
138#define mpl_dbg(sdata, fmt, ...) \
139 _sdata_dbg(MAC80211_MPL_DEBUG, \
140 sdata, fmt, ##__VA_ARGS__)
141
142#define mpath_dbg(sdata, fmt, ...) \
143 _sdata_dbg(MAC80211_MPATH_DEBUG, \
144 sdata, fmt, ##__VA_ARGS__)
145
146#define mhwmp_dbg(sdata, fmt, ...) \
147 _sdata_dbg(MAC80211_MHWMP_DEBUG, \
148 sdata, fmt, ##__VA_ARGS__)
149
150#define msync_dbg(sdata, fmt, ...) \
151 _sdata_dbg(MAC80211_MESH_SYNC_DEBUG, \
152 sdata, fmt, ##__VA_ARGS__)
153
154#define tdls_dbg(sdata, fmt, ...) \
155 _sdata_dbg(MAC80211_TDLS_DEBUG, \
156 sdata, fmt, ##__VA_ARGS__)
157
158#define sta_dbg(sdata, fmt, ...) \
159 _sdata_dbg(MAC80211_STA_DEBUG, \
160 sdata, fmt, ##__VA_ARGS__)
161
162#define mlme_dbg(sdata, fmt, ...) \
163 _sdata_dbg(MAC80211_MLME_DEBUG, \
164 sdata, fmt, ##__VA_ARGS__)
165
166#define mlme_dbg_ratelimited(sdata, fmt, ...) \
167 _sdata_dbg(MAC80211_MLME_DEBUG && net_ratelimit(), \
168 sdata, fmt, ##__VA_ARGS__)
169
170#endif /* __MAC80211_DEBUG_H */
diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
index 7ed433c66d6..6d5aec9418e 100644
--- a/net/mac80211/debugfs_netdev.c
+++ b/net/mac80211/debugfs_netdev.c
@@ -468,48 +468,54 @@ IEEE80211_IF_FILE(fwded_unicast, u.mesh.mshstats.fwded_unicast, DEC);
468IEEE80211_IF_FILE(fwded_frames, u.mesh.mshstats.fwded_frames, DEC); 468IEEE80211_IF_FILE(fwded_frames, u.mesh.mshstats.fwded_frames, DEC);
469IEEE80211_IF_FILE(dropped_frames_ttl, u.mesh.mshstats.dropped_frames_ttl, DEC); 469IEEE80211_IF_FILE(dropped_frames_ttl, u.mesh.mshstats.dropped_frames_ttl, DEC);
470IEEE80211_IF_FILE(dropped_frames_congestion, 470IEEE80211_IF_FILE(dropped_frames_congestion,
471 u.mesh.mshstats.dropped_frames_congestion, DEC); 471 u.mesh.mshstats.dropped_frames_congestion, DEC);
472IEEE80211_IF_FILE(dropped_frames_no_route, 472IEEE80211_IF_FILE(dropped_frames_no_route,
473 u.mesh.mshstats.dropped_frames_no_route, DEC); 473 u.mesh.mshstats.dropped_frames_no_route, DEC);
474IEEE80211_IF_FILE(estab_plinks, u.mesh.mshstats.estab_plinks, ATOMIC); 474IEEE80211_IF_FILE(estab_plinks, u.mesh.mshstats.estab_plinks, ATOMIC);
475 475
476/* Mesh parameters */ 476/* Mesh parameters */
477IEEE80211_IF_FILE(dot11MeshMaxRetries, 477IEEE80211_IF_FILE(dot11MeshMaxRetries,
478 u.mesh.mshcfg.dot11MeshMaxRetries, DEC); 478 u.mesh.mshcfg.dot11MeshMaxRetries, DEC);
479IEEE80211_IF_FILE(dot11MeshRetryTimeout, 479IEEE80211_IF_FILE(dot11MeshRetryTimeout,
480 u.mesh.mshcfg.dot11MeshRetryTimeout, DEC); 480 u.mesh.mshcfg.dot11MeshRetryTimeout, DEC);
481IEEE80211_IF_FILE(dot11MeshConfirmTimeout, 481IEEE80211_IF_FILE(dot11MeshConfirmTimeout,
482 u.mesh.mshcfg.dot11MeshConfirmTimeout, DEC); 482 u.mesh.mshcfg.dot11MeshConfirmTimeout, DEC);
483IEEE80211_IF_FILE(dot11MeshHoldingTimeout, 483IEEE80211_IF_FILE(dot11MeshHoldingTimeout,
484 u.mesh.mshcfg.dot11MeshHoldingTimeout, DEC); 484 u.mesh.mshcfg.dot11MeshHoldingTimeout, DEC);
485IEEE80211_IF_FILE(dot11MeshTTL, u.mesh.mshcfg.dot11MeshTTL, DEC); 485IEEE80211_IF_FILE(dot11MeshTTL, u.mesh.mshcfg.dot11MeshTTL, DEC);
486IEEE80211_IF_FILE(element_ttl, u.mesh.mshcfg.element_ttl, DEC); 486IEEE80211_IF_FILE(element_ttl, u.mesh.mshcfg.element_ttl, DEC);
487IEEE80211_IF_FILE(auto_open_plinks, u.mesh.mshcfg.auto_open_plinks, DEC); 487IEEE80211_IF_FILE(auto_open_plinks, u.mesh.mshcfg.auto_open_plinks, DEC);
488IEEE80211_IF_FILE(dot11MeshMaxPeerLinks, 488IEEE80211_IF_FILE(dot11MeshMaxPeerLinks,
489 u.mesh.mshcfg.dot11MeshMaxPeerLinks, DEC); 489 u.mesh.mshcfg.dot11MeshMaxPeerLinks, DEC);
490IEEE80211_IF_FILE(dot11MeshHWMPactivePathTimeout, 490IEEE80211_IF_FILE(dot11MeshHWMPactivePathTimeout,
491 u.mesh.mshcfg.dot11MeshHWMPactivePathTimeout, DEC); 491 u.mesh.mshcfg.dot11MeshHWMPactivePathTimeout, DEC);
492IEEE80211_IF_FILE(dot11MeshHWMPpreqMinInterval, 492IEEE80211_IF_FILE(dot11MeshHWMPpreqMinInterval,
493 u.mesh.mshcfg.dot11MeshHWMPpreqMinInterval, DEC); 493 u.mesh.mshcfg.dot11MeshHWMPpreqMinInterval, DEC);
494IEEE80211_IF_FILE(dot11MeshHWMPperrMinInterval, 494IEEE80211_IF_FILE(dot11MeshHWMPperrMinInterval,
495 u.mesh.mshcfg.dot11MeshHWMPperrMinInterval, DEC); 495 u.mesh.mshcfg.dot11MeshHWMPperrMinInterval, DEC);
496IEEE80211_IF_FILE(dot11MeshHWMPnetDiameterTraversalTime, 496IEEE80211_IF_FILE(dot11MeshHWMPnetDiameterTraversalTime,
497 u.mesh.mshcfg.dot11MeshHWMPnetDiameterTraversalTime, DEC); 497 u.mesh.mshcfg.dot11MeshHWMPnetDiameterTraversalTime, DEC);
498IEEE80211_IF_FILE(dot11MeshHWMPmaxPREQretries, 498IEEE80211_IF_FILE(dot11MeshHWMPmaxPREQretries,
499 u.mesh.mshcfg.dot11MeshHWMPmaxPREQretries, DEC); 499 u.mesh.mshcfg.dot11MeshHWMPmaxPREQretries, DEC);
500IEEE80211_IF_FILE(path_refresh_time, 500IEEE80211_IF_FILE(path_refresh_time,
501 u.mesh.mshcfg.path_refresh_time, DEC); 501 u.mesh.mshcfg.path_refresh_time, DEC);
502IEEE80211_IF_FILE(min_discovery_timeout, 502IEEE80211_IF_FILE(min_discovery_timeout,
503 u.mesh.mshcfg.min_discovery_timeout, DEC); 503 u.mesh.mshcfg.min_discovery_timeout, DEC);
504IEEE80211_IF_FILE(dot11MeshHWMPRootMode, 504IEEE80211_IF_FILE(dot11MeshHWMPRootMode,
505 u.mesh.mshcfg.dot11MeshHWMPRootMode, DEC); 505 u.mesh.mshcfg.dot11MeshHWMPRootMode, DEC);
506IEEE80211_IF_FILE(dot11MeshGateAnnouncementProtocol, 506IEEE80211_IF_FILE(dot11MeshGateAnnouncementProtocol,
507 u.mesh.mshcfg.dot11MeshGateAnnouncementProtocol, DEC); 507 u.mesh.mshcfg.dot11MeshGateAnnouncementProtocol, DEC);
508IEEE80211_IF_FILE(dot11MeshHWMPRannInterval, 508IEEE80211_IF_FILE(dot11MeshHWMPRannInterval,
509 u.mesh.mshcfg.dot11MeshHWMPRannInterval, DEC); 509 u.mesh.mshcfg.dot11MeshHWMPRannInterval, DEC);
510IEEE80211_IF_FILE(dot11MeshForwarding, u.mesh.mshcfg.dot11MeshForwarding, DEC); 510IEEE80211_IF_FILE(dot11MeshForwarding, u.mesh.mshcfg.dot11MeshForwarding, DEC);
511IEEE80211_IF_FILE(rssi_threshold, u.mesh.mshcfg.rssi_threshold, DEC); 511IEEE80211_IF_FILE(rssi_threshold, u.mesh.mshcfg.rssi_threshold, DEC);
512IEEE80211_IF_FILE(ht_opmode, u.mesh.mshcfg.ht_opmode, DEC); 512IEEE80211_IF_FILE(ht_opmode, u.mesh.mshcfg.ht_opmode, DEC);
513IEEE80211_IF_FILE(dot11MeshHWMPactivePathToRootTimeout,
514 u.mesh.mshcfg.dot11MeshHWMPactivePathToRootTimeout, DEC);
515IEEE80211_IF_FILE(dot11MeshHWMProotInterval,
516 u.mesh.mshcfg.dot11MeshHWMProotInterval, DEC);
517IEEE80211_IF_FILE(dot11MeshHWMPconfirmationInterval,
518 u.mesh.mshcfg.dot11MeshHWMPconfirmationInterval, DEC);
513#endif 519#endif
514 520
515#define DEBUGFS_ADD_MODE(name, mode) \ 521#define DEBUGFS_ADD_MODE(name, mode) \
@@ -607,9 +613,13 @@ static void add_mesh_config(struct ieee80211_sub_if_data *sdata)
607 MESHPARAMS_ADD(min_discovery_timeout); 613 MESHPARAMS_ADD(min_discovery_timeout);
608 MESHPARAMS_ADD(dot11MeshHWMPRootMode); 614 MESHPARAMS_ADD(dot11MeshHWMPRootMode);
609 MESHPARAMS_ADD(dot11MeshHWMPRannInterval); 615 MESHPARAMS_ADD(dot11MeshHWMPRannInterval);
616 MESHPARAMS_ADD(dot11MeshForwarding);
610 MESHPARAMS_ADD(dot11MeshGateAnnouncementProtocol); 617 MESHPARAMS_ADD(dot11MeshGateAnnouncementProtocol);
611 MESHPARAMS_ADD(rssi_threshold); 618 MESHPARAMS_ADD(rssi_threshold);
612 MESHPARAMS_ADD(ht_opmode); 619 MESHPARAMS_ADD(ht_opmode);
620 MESHPARAMS_ADD(dot11MeshHWMPactivePathToRootTimeout);
621 MESHPARAMS_ADD(dot11MeshHWMProotInterval);
622 MESHPARAMS_ADD(dot11MeshHWMPconfirmationInterval);
613#undef MESHPARAMS_ADD 623#undef MESHPARAMS_ADD
614} 624}
615#endif 625#endif
@@ -685,6 +695,7 @@ void ieee80211_debugfs_rename_netdev(struct ieee80211_sub_if_data *sdata)
685 695
686 sprintf(buf, "netdev:%s", sdata->name); 696 sprintf(buf, "netdev:%s", sdata->name);
687 if (!debugfs_rename(dir->d_parent, dir, dir->d_parent, buf)) 697 if (!debugfs_rename(dir->d_parent, dir, dir->d_parent, buf))
688 printk(KERN_ERR "mac80211: debugfs: failed to rename debugfs " 698 sdata_err(sdata,
689 "dir to %s\n", buf); 699 "debugfs: failed to rename debugfs dir to %s\n",
700 buf);
690} 701}
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index 6d33a0c743a..44e8c124278 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -3,7 +3,7 @@
3 3
4#include <net/mac80211.h> 4#include <net/mac80211.h>
5#include "ieee80211_i.h" 5#include "ieee80211_i.h"
6#include "driver-trace.h" 6#include "trace.h"
7 7
8static inline void check_sdata_in_driver(struct ieee80211_sub_if_data *sdata) 8static inline void check_sdata_in_driver(struct ieee80211_sub_if_data *sdata)
9{ 9{
@@ -845,4 +845,19 @@ drv_allow_buffered_frames(struct ieee80211_local *local,
845 more_data); 845 more_data);
846 trace_drv_return_void(local); 846 trace_drv_return_void(local);
847} 847}
848
849static inline int drv_get_rssi(struct ieee80211_local *local,
850 struct ieee80211_sub_if_data *sdata,
851 struct ieee80211_sta *sta,
852 s8 *rssi_dbm)
853{
854 int ret;
855
856 might_sleep();
857
858 ret = local->ops->get_rssi(&local->hw, &sdata->vif, sta, rssi_dbm);
859 trace_drv_get_rssi(local, sta, *rssi_dbm, ret);
860
861 return ret;
862}
848#endif /* __MAC80211_DRIVER_OPS */ 863#endif /* __MAC80211_DRIVER_OPS */
diff --git a/net/mac80211/driver-trace.c b/net/mac80211/driver-trace.c
deleted file mode 100644
index 8ed8711b1a6..00000000000
--- a/net/mac80211/driver-trace.c
+++ /dev/null
@@ -1,9 +0,0 @@
1/* bug in tracepoint.h, it should include this */
2#include <linux/module.h>
3
4/* sparse isn't too happy with all macros... */
5#ifndef __CHECKER__
6#include "driver-ops.h"
7#define CREATE_TRACE_POINTS
8#include "driver-trace.h"
9#endif
diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c
index 6f8615c54b2..4b4538d6392 100644
--- a/net/mac80211/ht.c
+++ b/net/mac80211/ht.c
@@ -305,12 +305,10 @@ void ieee80211_process_delba(struct ieee80211_sub_if_data *sdata,
305 tid = (params & IEEE80211_DELBA_PARAM_TID_MASK) >> 12; 305 tid = (params & IEEE80211_DELBA_PARAM_TID_MASK) >> 12;
306 initiator = (params & IEEE80211_DELBA_PARAM_INITIATOR_MASK) >> 11; 306 initiator = (params & IEEE80211_DELBA_PARAM_INITIATOR_MASK) >> 11;
307 307
308#ifdef CONFIG_MAC80211_HT_DEBUG 308 ht_dbg_ratelimited(sdata, "delba from %pM (%s) tid %d reason code %d\n",
309 net_dbg_ratelimited("delba from %pM (%s) tid %d reason code %d\n", 309 mgmt->sa, initiator ? "initiator" : "recipient",
310 mgmt->sa, initiator ? "initiator" : "recipient", 310 tid,
311 tid, 311 le16_to_cpu(mgmt->u.action.u.delba.reason_code));
312 le16_to_cpu(mgmt->u.action.u.delba.reason_code));
313#endif /* CONFIG_MAC80211_HT_DEBUG */
314 312
315 if (initiator == WLAN_BACK_INITIATOR) 313 if (initiator == WLAN_BACK_INITIATOR)
316 __ieee80211_stop_rx_ba_session(sta, tid, WLAN_BACK_INITIATOR, 0, 314 __ieee80211_stop_rx_ba_session(sta, tid, WLAN_BACK_INITIATOR, 0,
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index 33d9d0c3e3d..5746d62faba 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -82,8 +82,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
82 82
83 local->oper_channel = chan; 83 local->oper_channel = chan;
84 channel_type = ifibss->channel_type; 84 channel_type = ifibss->channel_type;
85 if (channel_type > NL80211_CHAN_HT20 && 85 if (!cfg80211_can_beacon_sec_chan(local->hw.wiphy, chan, channel_type))
86 !cfg80211_can_beacon_sec_chan(local->hw.wiphy, chan, channel_type))
87 channel_type = NL80211_CHAN_HT20; 86 channel_type = NL80211_CHAN_HT20;
88 if (!ieee80211_set_channel_type(local, sdata, channel_type)) { 87 if (!ieee80211_set_channel_type(local, sdata, channel_type)) {
89 /* can only fail due to HT40+/- mismatch */ 88 /* can only fail due to HT40+/- mismatch */
@@ -262,11 +261,7 @@ static struct sta_info *ieee80211_ibss_finish_sta(struct sta_info *sta,
262 261
263 memcpy(addr, sta->sta.addr, ETH_ALEN); 262 memcpy(addr, sta->sta.addr, ETH_ALEN);
264 263
265#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 264 ibss_dbg(sdata, "Adding new IBSS station %pM\n", addr);
266 wiphy_debug(sdata->local->hw.wiphy,
267 "Adding new IBSS station %pM (dev=%s)\n",
268 addr, sdata->name);
269#endif
270 265
271 sta_info_pre_move_state(sta, IEEE80211_STA_AUTH); 266 sta_info_pre_move_state(sta, IEEE80211_STA_AUTH);
272 sta_info_pre_move_state(sta, IEEE80211_STA_ASSOC); 267 sta_info_pre_move_state(sta, IEEE80211_STA_ASSOC);
@@ -280,12 +275,10 @@ static struct sta_info *ieee80211_ibss_finish_sta(struct sta_info *sta,
280 /* If it fails, maybe we raced another insertion? */ 275 /* If it fails, maybe we raced another insertion? */
281 if (sta_info_insert_rcu(sta)) 276 if (sta_info_insert_rcu(sta))
282 return sta_info_get(sdata, addr); 277 return sta_info_get(sdata, addr);
283 if (auth) { 278 if (auth && !sdata->u.ibss.auth_frame_registrations) {
284#ifdef CONFIG_MAC80211_IBSS_DEBUG 279 ibss_dbg(sdata,
285 printk(KERN_DEBUG "TX Auth SA=%pM DA=%pM BSSID=%pM" 280 "TX Auth SA=%pM DA=%pM BSSID=%pM (auth_transaction=1)\n",
286 "(auth_transaction=1)\n", sdata->vif.addr, 281 sdata->vif.addr, sdata->u.ibss.bssid, addr);
287 sdata->u.ibss.bssid, addr);
288#endif
289 ieee80211_send_auth(sdata, 1, WLAN_AUTH_OPEN, NULL, 0, 282 ieee80211_send_auth(sdata, 1, WLAN_AUTH_OPEN, NULL, 0,
290 addr, sdata->u.ibss.bssid, NULL, 0, 0); 283 addr, sdata->u.ibss.bssid, NULL, 0, 0);
291 } 284 }
@@ -308,7 +301,7 @@ ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata,
308 * allow new one to be added. 301 * allow new one to be added.
309 */ 302 */
310 if (local->num_sta >= IEEE80211_IBSS_MAX_STA_ENTRIES) { 303 if (local->num_sta >= IEEE80211_IBSS_MAX_STA_ENTRIES) {
311 net_dbg_ratelimited("%s: No room for a new IBSS STA entry %pM\n", 304 net_info_ratelimited("%s: No room for a new IBSS STA entry %pM\n",
312 sdata->name, addr); 305 sdata->name, addr);
313 rcu_read_lock(); 306 rcu_read_lock();
314 return NULL; 307 return NULL;
@@ -355,11 +348,9 @@ static void ieee80211_rx_mgmt_auth_ibss(struct ieee80211_sub_if_data *sdata,
355 348
356 if (auth_alg != WLAN_AUTH_OPEN || auth_transaction != 1) 349 if (auth_alg != WLAN_AUTH_OPEN || auth_transaction != 1)
357 return; 350 return;
358#ifdef CONFIG_MAC80211_IBSS_DEBUG 351 ibss_dbg(sdata,
359 printk(KERN_DEBUG "%s: RX Auth SA=%pM DA=%pM BSSID=%pM." 352 "RX Auth SA=%pM DA=%pM BSSID=%pM (auth_transaction=%d)\n",
360 "(auth_transaction=%d)\n", 353 mgmt->sa, mgmt->da, mgmt->bssid, auth_transaction);
361 sdata->name, mgmt->sa, mgmt->da, mgmt->bssid, auth_transaction);
362#endif
363 sta_info_destroy_addr(sdata, mgmt->sa); 354 sta_info_destroy_addr(sdata, mgmt->sa);
364 ieee80211_ibss_add_sta(sdata, mgmt->bssid, mgmt->sa, 0, false); 355 ieee80211_ibss_add_sta(sdata, mgmt->bssid, mgmt->sa, 0, false);
365 rcu_read_unlock(); 356 rcu_read_unlock();
@@ -422,15 +413,10 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
422 ieee80211_mandatory_rates(local, band); 413 ieee80211_mandatory_rates(local, band);
423 414
424 if (sta->sta.supp_rates[band] != prev_rates) { 415 if (sta->sta.supp_rates[band] != prev_rates) {
425#ifdef CONFIG_MAC80211_IBSS_DEBUG 416 ibss_dbg(sdata,
426 printk(KERN_DEBUG 417 "updated supp_rates set for %pM based on beacon/probe_resp (0x%x -> 0x%x)\n",
427 "%s: updated supp_rates set " 418 sta->sta.addr, prev_rates,
428 "for %pM based on beacon" 419 sta->sta.supp_rates[band]);
429 "/probe_resp (0x%x -> 0x%x)\n",
430 sdata->name, sta->sta.addr,
431 prev_rates,
432 sta->sta.supp_rates[band]);
433#endif
434 rates_updated = true; 420 rates_updated = true;
435 } 421 }
436 } else { 422 } else {
@@ -545,22 +531,18 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
545 rx_timestamp = drv_get_tsf(local, sdata); 531 rx_timestamp = drv_get_tsf(local, sdata);
546 } 532 }
547 533
548#ifdef CONFIG_MAC80211_IBSS_DEBUG 534 ibss_dbg(sdata,
549 printk(KERN_DEBUG "RX beacon SA=%pM BSSID=" 535 "RX beacon SA=%pM BSSID=%pM TSF=0x%llx BCN=0x%llx diff=%lld @%lu\n",
550 "%pM TSF=0x%llx BCN=0x%llx diff=%lld @%lu\n", 536 mgmt->sa, mgmt->bssid,
551 mgmt->sa, mgmt->bssid, 537 (unsigned long long)rx_timestamp,
552 (unsigned long long)rx_timestamp, 538 (unsigned long long)beacon_timestamp,
553 (unsigned long long)beacon_timestamp, 539 (unsigned long long)(rx_timestamp - beacon_timestamp),
554 (unsigned long long)(rx_timestamp - beacon_timestamp), 540 jiffies);
555 jiffies);
556#endif
557 541
558 if (beacon_timestamp > rx_timestamp) { 542 if (beacon_timestamp > rx_timestamp) {
559#ifdef CONFIG_MAC80211_IBSS_DEBUG 543 ibss_dbg(sdata,
560 printk(KERN_DEBUG "%s: beacon TSF higher than " 544 "beacon TSF higher than local TSF - IBSS merge with BSSID %pM\n",
561 "local TSF - IBSS merge with BSSID %pM\n", 545 mgmt->bssid);
562 sdata->name, mgmt->bssid);
563#endif
564 ieee80211_sta_join_ibss(sdata, bss); 546 ieee80211_sta_join_ibss(sdata, bss);
565 supp_rates = ieee80211_sta_get_rates(local, elems, band, NULL); 547 supp_rates = ieee80211_sta_get_rates(local, elems, band, NULL);
566 ieee80211_ibss_add_sta(sdata, mgmt->bssid, mgmt->sa, 548 ieee80211_ibss_add_sta(sdata, mgmt->bssid, mgmt->sa,
@@ -586,7 +568,7 @@ void ieee80211_ibss_rx_no_sta(struct ieee80211_sub_if_data *sdata,
586 * allow new one to be added. 568 * allow new one to be added.
587 */ 569 */
588 if (local->num_sta >= IEEE80211_IBSS_MAX_STA_ENTRIES) { 570 if (local->num_sta >= IEEE80211_IBSS_MAX_STA_ENTRIES) {
589 net_dbg_ratelimited("%s: No room for a new IBSS STA entry %pM\n", 571 net_info_ratelimited("%s: No room for a new IBSS STA entry %pM\n",
590 sdata->name, addr); 572 sdata->name, addr);
591 return; 573 return;
592 } 574 }
@@ -662,8 +644,8 @@ static void ieee80211_sta_merge_ibss(struct ieee80211_sub_if_data *sdata)
662 if (ifibss->fixed_channel) 644 if (ifibss->fixed_channel)
663 return; 645 return;
664 646
665 printk(KERN_DEBUG "%s: No active IBSS STAs - trying to scan for other " 647 sdata_info(sdata,
666 "IBSS networks with same SSID (merge)\n", sdata->name); 648 "No active IBSS STAs - trying to scan for other IBSS networks with same SSID (merge)\n");
667 649
668 ieee80211_request_internal_scan(sdata, 650 ieee80211_request_internal_scan(sdata,
669 ifibss->ssid, ifibss->ssid_len, NULL); 651 ifibss->ssid, ifibss->ssid_len, NULL);
@@ -691,8 +673,7 @@ static void ieee80211_sta_create_ibss(struct ieee80211_sub_if_data *sdata)
691 bssid[0] |= 0x02; 673 bssid[0] |= 0x02;
692 } 674 }
693 675
694 printk(KERN_DEBUG "%s: Creating new IBSS network, BSSID %pM\n", 676 sdata_info(sdata, "Creating new IBSS network, BSSID %pM\n", bssid);
695 sdata->name, bssid);
696 677
697 capability = WLAN_CAPABILITY_IBSS; 678 capability = WLAN_CAPABILITY_IBSS;
698 679
@@ -723,10 +704,7 @@ static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata)
723 lockdep_assert_held(&ifibss->mtx); 704 lockdep_assert_held(&ifibss->mtx);
724 705
725 active_ibss = ieee80211_sta_active_ibss(sdata); 706 active_ibss = ieee80211_sta_active_ibss(sdata);
726#ifdef CONFIG_MAC80211_IBSS_DEBUG 707 ibss_dbg(sdata, "sta_find_ibss (active_ibss=%d)\n", active_ibss);
727 printk(KERN_DEBUG "%s: sta_find_ibss (active_ibss=%d)\n",
728 sdata->name, active_ibss);
729#endif /* CONFIG_MAC80211_IBSS_DEBUG */
730 708
731 if (active_ibss) 709 if (active_ibss)
732 return; 710 return;
@@ -749,29 +727,24 @@ static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata)
749 struct ieee80211_bss *bss; 727 struct ieee80211_bss *bss;
750 728
751 bss = (void *)cbss->priv; 729 bss = (void *)cbss->priv;
752#ifdef CONFIG_MAC80211_IBSS_DEBUG 730 ibss_dbg(sdata,
753 printk(KERN_DEBUG " sta_find_ibss: selected %pM current " 731 "sta_find_ibss: selected %pM current %pM\n",
754 "%pM\n", cbss->bssid, ifibss->bssid); 732 cbss->bssid, ifibss->bssid);
755#endif /* CONFIG_MAC80211_IBSS_DEBUG */ 733 sdata_info(sdata,
756 734 "Selected IBSS BSSID %pM based on configured SSID\n",
757 printk(KERN_DEBUG "%s: Selected IBSS BSSID %pM" 735 cbss->bssid);
758 " based on configured SSID\n",
759 sdata->name, cbss->bssid);
760 736
761 ieee80211_sta_join_ibss(sdata, bss); 737 ieee80211_sta_join_ibss(sdata, bss);
762 ieee80211_rx_bss_put(local, bss); 738 ieee80211_rx_bss_put(local, bss);
763 return; 739 return;
764 } 740 }
765 741
766#ifdef CONFIG_MAC80211_IBSS_DEBUG 742 ibss_dbg(sdata, "sta_find_ibss: did not try to join ibss\n");
767 printk(KERN_DEBUG " did not try to join ibss\n");
768#endif /* CONFIG_MAC80211_IBSS_DEBUG */
769 743
770 /* Selected IBSS not found in current scan results - try to scan */ 744 /* Selected IBSS not found in current scan results - try to scan */
771 if (time_after(jiffies, ifibss->last_scan_completed + 745 if (time_after(jiffies, ifibss->last_scan_completed +
772 IEEE80211_SCAN_INTERVAL)) { 746 IEEE80211_SCAN_INTERVAL)) {
773 printk(KERN_DEBUG "%s: Trigger new scan to find an IBSS to " 747 sdata_info(sdata, "Trigger new scan to find an IBSS to join\n");
774 "join\n", sdata->name);
775 748
776 ieee80211_request_internal_scan(sdata, 749 ieee80211_request_internal_scan(sdata,
777 ifibss->ssid, ifibss->ssid_len, 750 ifibss->ssid, ifibss->ssid_len,
@@ -785,9 +758,8 @@ static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata)
785 ieee80211_sta_create_ibss(sdata); 758 ieee80211_sta_create_ibss(sdata);
786 return; 759 return;
787 } 760 }
788 printk(KERN_DEBUG "%s: IBSS not allowed on" 761 sdata_info(sdata, "IBSS not allowed on %d MHz\n",
789 " %d MHz\n", sdata->name, 762 local->hw.conf.channel->center_freq);
790 local->hw.conf.channel->center_freq);
791 763
792 /* No IBSS found - decrease scan interval and continue 764 /* No IBSS found - decrease scan interval and continue
793 * scanning. */ 765 * scanning. */
@@ -822,12 +794,9 @@ static void ieee80211_rx_mgmt_probe_req(struct ieee80211_sub_if_data *sdata,
822 794
823 tx_last_beacon = drv_tx_last_beacon(local); 795 tx_last_beacon = drv_tx_last_beacon(local);
824 796
825#ifdef CONFIG_MAC80211_IBSS_DEBUG 797 ibss_dbg(sdata,
826 printk(KERN_DEBUG "%s: RX ProbeReq SA=%pM DA=%pM BSSID=%pM" 798 "RX ProbeReq SA=%pM DA=%pM BSSID=%pM (tx_last_beacon=%d)\n",
827 " (tx_last_beacon=%d)\n", 799 mgmt->sa, mgmt->da, mgmt->bssid, tx_last_beacon);
828 sdata->name, mgmt->sa, mgmt->da,
829 mgmt->bssid, tx_last_beacon);
830#endif /* CONFIG_MAC80211_IBSS_DEBUG */
831 800
832 if (!tx_last_beacon && is_multicast_ether_addr(mgmt->da)) 801 if (!tx_last_beacon && is_multicast_ether_addr(mgmt->da))
833 return; 802 return;
@@ -840,11 +809,8 @@ static void ieee80211_rx_mgmt_probe_req(struct ieee80211_sub_if_data *sdata,
840 pos = mgmt->u.probe_req.variable; 809 pos = mgmt->u.probe_req.variable;
841 if (pos[0] != WLAN_EID_SSID || 810 if (pos[0] != WLAN_EID_SSID ||
842 pos + 2 + pos[1] > end) { 811 pos + 2 + pos[1] > end) {
843#ifdef CONFIG_MAC80211_IBSS_DEBUG 812 ibss_dbg(sdata, "Invalid SSID IE in ProbeReq from %pM\n",
844 printk(KERN_DEBUG "%s: Invalid SSID IE in ProbeReq " 813 mgmt->sa);
845 "from %pM\n",
846 sdata->name, mgmt->sa);
847#endif
848 return; 814 return;
849 } 815 }
850 if (pos[1] != 0 && 816 if (pos[1] != 0 &&
@@ -861,10 +827,7 @@ static void ieee80211_rx_mgmt_probe_req(struct ieee80211_sub_if_data *sdata,
861 827
862 resp = (struct ieee80211_mgmt *) skb->data; 828 resp = (struct ieee80211_mgmt *) skb->data;
863 memcpy(resp->da, mgmt->sa, ETH_ALEN); 829 memcpy(resp->da, mgmt->sa, ETH_ALEN);
864#ifdef CONFIG_MAC80211_IBSS_DEBUG 830 ibss_dbg(sdata, "Sending ProbeResp to %pM\n", resp->da);
865 printk(KERN_DEBUG "%s: Sending ProbeResp to %pM\n",
866 sdata->name, resp->da);
867#endif /* CONFIG_MAC80211_IBSS_DEBUG */
868 IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT; 831 IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
869 ieee80211_tx_skb(sdata, skb); 832 ieee80211_tx_skb(sdata, skb);
870} 833}
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 3f3cd50fff1..f834a005e1c 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -30,6 +30,7 @@
30#include <net/mac80211.h> 30#include <net/mac80211.h>
31#include "key.h" 31#include "key.h"
32#include "sta_info.h" 32#include "sta_info.h"
33#include "debug.h"
33 34
34struct ieee80211_local; 35struct ieee80211_local;
35 36
@@ -55,11 +56,14 @@ struct ieee80211_local;
55#define TU_TO_JIFFIES(x) (usecs_to_jiffies((x) * 1024)) 56#define TU_TO_JIFFIES(x) (usecs_to_jiffies((x) * 1024))
56#define TU_TO_EXP_TIME(x) (jiffies + TU_TO_JIFFIES(x)) 57#define TU_TO_EXP_TIME(x) (jiffies + TU_TO_JIFFIES(x))
57 58
59/*
60 * Some APs experience problems when working with U-APSD. Decrease the
61 * probability of that happening by using legacy mode for all ACs but VO.
62 * The AP that caused us trouble was a Cisco 4410N. It ignores our
63 * setting, and always treats non-VO ACs as legacy.
64 */
58#define IEEE80211_DEFAULT_UAPSD_QUEUES \ 65#define IEEE80211_DEFAULT_UAPSD_QUEUES \
59 (IEEE80211_WMM_IE_STA_QOSINFO_AC_BK | \ 66 IEEE80211_WMM_IE_STA_QOSINFO_AC_VO
60 IEEE80211_WMM_IE_STA_QOSINFO_AC_BE | \
61 IEEE80211_WMM_IE_STA_QOSINFO_AC_VI | \
62 IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
63 67
64#define IEEE80211_DEFAULT_MAX_SP_LEN \ 68#define IEEE80211_DEFAULT_MAX_SP_LEN \
65 IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL 69 IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL
@@ -317,55 +321,30 @@ struct mesh_preq_queue {
317 u8 flags; 321 u8 flags;
318}; 322};
319 323
320enum ieee80211_work_type { 324#if HZ/100 == 0
321 IEEE80211_WORK_ABORT, 325#define IEEE80211_ROC_MIN_LEFT 1
322 IEEE80211_WORK_REMAIN_ON_CHANNEL, 326#else
323 IEEE80211_WORK_OFFCHANNEL_TX, 327#define IEEE80211_ROC_MIN_LEFT (HZ/100)
324}; 328#endif
325
326/**
327 * enum work_done_result - indicates what to do after work was done
328 *
329 * @WORK_DONE_DESTROY: This work item is no longer needed, destroy.
330 * @WORK_DONE_REQUEUE: This work item was reset to be reused, and
331 * should be requeued.
332 */
333enum work_done_result {
334 WORK_DONE_DESTROY,
335 WORK_DONE_REQUEUE,
336};
337 329
338struct ieee80211_work { 330struct ieee80211_roc_work {
339 struct list_head list; 331 struct list_head list;
332 struct list_head dependents;
340 333
341 struct rcu_head rcu_head; 334 struct delayed_work work;
342 335
343 struct ieee80211_sub_if_data *sdata; 336 struct ieee80211_sub_if_data *sdata;
344 337
345 enum work_done_result (*done)(struct ieee80211_work *wk,
346 struct sk_buff *skb);
347
348 struct ieee80211_channel *chan; 338 struct ieee80211_channel *chan;
349 enum nl80211_channel_type chan_type; 339 enum nl80211_channel_type chan_type;
350 340
351 unsigned long timeout; 341 bool started, abort, hw_begun, notified;
352 enum ieee80211_work_type type;
353 342
354 bool started; 343 unsigned long hw_start_time;
355 344
356 union { 345 u32 duration, req_duration;
357 struct { 346 struct sk_buff *frame;
358 u32 duration; 347 u64 mgmt_tx_cookie;
359 } remain;
360 struct {
361 struct sk_buff *frame;
362 u32 wait;
363 bool status;
364 } offchan_tx;
365 };
366
367 size_t data_len;
368 u8 data[];
369}; 348};
370 349
371/* flags used in struct ieee80211_if_managed.flags */ 350/* flags used in struct ieee80211_if_managed.flags */
@@ -399,7 +378,6 @@ struct ieee80211_mgd_auth_data {
399struct ieee80211_mgd_assoc_data { 378struct ieee80211_mgd_assoc_data {
400 struct cfg80211_bss *bss; 379 struct cfg80211_bss *bss;
401 const u8 *supp_rates; 380 const u8 *supp_rates;
402 const u8 *ht_operation_ie;
403 381
404 unsigned long timeout; 382 unsigned long timeout;
405 int tries; 383 int tries;
@@ -414,6 +392,8 @@ struct ieee80211_mgd_assoc_data {
414 bool sent_assoc; 392 bool sent_assoc;
415 bool synced; 393 bool synced;
416 394
395 u8 ap_ht_param;
396
417 size_t ie_len; 397 size_t ie_len;
418 u8 ie[]; 398 u8 ie[];
419}; 399};
@@ -532,6 +512,7 @@ struct ieee80211_if_ibss {
532 bool privacy; 512 bool privacy;
533 513
534 bool control_port; 514 bool control_port;
515 unsigned int auth_frame_registrations;
535 516
536 u8 bssid[ETH_ALEN] __aligned(2); 517 u8 bssid[ETH_ALEN] __aligned(2);
537 u8 ssid[IEEE80211_MAX_SSID_LEN]; 518 u8 ssid[IEEE80211_MAX_SSID_LEN];
@@ -701,6 +682,9 @@ struct ieee80211_sub_if_data {
701 /* TID bitmap for NoAck policy */ 682 /* TID bitmap for NoAck policy */
702 u16 noack_map; 683 u16 noack_map;
703 684
685 /* bit field of ACM bits (BIT(802.1D tag)) */
686 u8 wmm_acm;
687
704 struct ieee80211_key __rcu *keys[NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS]; 688 struct ieee80211_key __rcu *keys[NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS];
705 struct ieee80211_key __rcu *default_unicast_key; 689 struct ieee80211_key __rcu *default_unicast_key;
706 struct ieee80211_key __rcu *default_multicast_key; 690 struct ieee80211_key __rcu *default_multicast_key;
@@ -847,13 +831,6 @@ struct ieee80211_local {
847 const struct ieee80211_ops *ops; 831 const struct ieee80211_ops *ops;
848 832
849 /* 833 /*
850 * work stuff, potentially off-channel (in the future)
851 */
852 struct list_head work_list;
853 struct timer_list work_timer;
854 struct work_struct work_work;
855
856 /*
857 * private workqueue to mac80211. mac80211 makes this accessible 834 * private workqueue to mac80211. mac80211 makes this accessible
858 * via ieee80211_queue_work() 835 * via ieee80211_queue_work()
859 */ 836 */
@@ -912,6 +889,9 @@ struct ieee80211_local {
912 /* device is started */ 889 /* device is started */
913 bool started; 890 bool started;
914 891
892 /* device is during a HW reconfig */
893 bool in_reconfig;
894
915 /* wowlan is enabled -- don't reconfig on resume */ 895 /* wowlan is enabled -- don't reconfig on resume */
916 bool wowlan; 896 bool wowlan;
917 897
@@ -1050,7 +1030,6 @@ struct ieee80211_local {
1050 int total_ps_buffered; /* total number of all buffered unicast and 1030 int total_ps_buffered; /* total number of all buffered unicast and
1051 * multicast packets for power saving stations 1031 * multicast packets for power saving stations
1052 */ 1032 */
1053 unsigned int wmm_acm; /* bit field of ACM bits (BIT(802.1D tag)) */
1054 1033
1055 bool pspolling; 1034 bool pspolling;
1056 bool offchannel_ps_enabled; 1035 bool offchannel_ps_enabled;
@@ -1087,14 +1066,12 @@ struct ieee80211_local {
1087 } debugfs; 1066 } debugfs;
1088#endif 1067#endif
1089 1068
1090 struct ieee80211_channel *hw_roc_channel; 1069 /*
1091 struct net_device *hw_roc_dev; 1070 * Remain-on-channel support
1092 struct sk_buff *hw_roc_skb, *hw_roc_skb_for_status; 1071 */
1072 struct list_head roc_list;
1093 struct work_struct hw_roc_start, hw_roc_done; 1073 struct work_struct hw_roc_start, hw_roc_done;
1094 enum nl80211_channel_type hw_roc_channel_type; 1074 unsigned long hw_roc_start_time;
1095 unsigned int hw_roc_duration;
1096 u32 hw_roc_cookie;
1097 bool hw_roc_for_tx;
1098 1075
1099 struct idr ack_status_frames; 1076 struct idr ack_status_frames;
1100 spinlock_t ack_status_lock; 1077 spinlock_t ack_status_lock;
@@ -1290,7 +1267,12 @@ void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local,
1290 bool offchannel_ps_enable); 1267 bool offchannel_ps_enable);
1291void ieee80211_offchannel_return(struct ieee80211_local *local, 1268void ieee80211_offchannel_return(struct ieee80211_local *local,
1292 bool offchannel_ps_disable); 1269 bool offchannel_ps_disable);
1293void ieee80211_hw_roc_setup(struct ieee80211_local *local); 1270void ieee80211_roc_setup(struct ieee80211_local *local);
1271void ieee80211_start_next_roc(struct ieee80211_local *local);
1272void ieee80211_roc_purge(struct ieee80211_sub_if_data *sdata);
1273void ieee80211_roc_notify_destroy(struct ieee80211_roc_work *roc);
1274void ieee80211_sw_roc_work(struct work_struct *work);
1275void ieee80211_handle_roc_started(struct ieee80211_roc_work *roc);
1294 1276
1295/* interface handling */ 1277/* interface handling */
1296int ieee80211_iface_init(void); 1278int ieee80211_iface_init(void);
@@ -1500,18 +1482,6 @@ u8 *ieee80211_ie_build_ht_oper(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap,
1500 enum nl80211_channel_type channel_type, 1482 enum nl80211_channel_type channel_type,
1501 u16 prot_mode); 1483 u16 prot_mode);
1502 1484
1503/* internal work items */
1504void ieee80211_work_init(struct ieee80211_local *local);
1505void ieee80211_add_work(struct ieee80211_work *wk);
1506void free_work(struct ieee80211_work *wk);
1507void ieee80211_work_purge(struct ieee80211_sub_if_data *sdata);
1508int ieee80211_wk_remain_on_channel(struct ieee80211_sub_if_data *sdata,
1509 struct ieee80211_channel *chan,
1510 enum nl80211_channel_type channel_type,
1511 unsigned int duration, u64 *cookie);
1512int ieee80211_wk_cancel_remain_on_channel(
1513 struct ieee80211_sub_if_data *sdata, u64 cookie);
1514
1515/* channel management */ 1485/* channel management */
1516enum ieee80211_chan_mode { 1486enum ieee80211_chan_mode {
1517 CHAN_MODE_UNDEFINED, 1487 CHAN_MODE_UNDEFINED,
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 8664111d056..58c2ab3d483 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -57,9 +57,6 @@ static int ieee80211_change_mtu(struct net_device *dev, int new_mtu)
57 return -EINVAL; 57 return -EINVAL;
58 } 58 }
59 59
60#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
61 printk(KERN_DEBUG "%s: setting MTU %d\n", dev->name, new_mtu);
62#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
63 dev->mtu = new_mtu; 60 dev->mtu = new_mtu;
64 return 0; 61 return 0;
65} 62}
@@ -100,15 +97,12 @@ static int ieee80211_check_concurrent_iface(struct ieee80211_sub_if_data *sdata,
100{ 97{
101 struct ieee80211_local *local = sdata->local; 98 struct ieee80211_local *local = sdata->local;
102 struct ieee80211_sub_if_data *nsdata; 99 struct ieee80211_sub_if_data *nsdata;
103 struct net_device *dev = sdata->dev;
104 100
105 ASSERT_RTNL(); 101 ASSERT_RTNL();
106 102
107 /* we hold the RTNL here so can safely walk the list */ 103 /* we hold the RTNL here so can safely walk the list */
108 list_for_each_entry(nsdata, &local->interfaces, list) { 104 list_for_each_entry(nsdata, &local->interfaces, list) {
109 struct net_device *ndev = nsdata->dev; 105 if (nsdata != sdata && ieee80211_sdata_running(nsdata)) {
110
111 if (ndev != dev && ieee80211_sdata_running(nsdata)) {
112 /* 106 /*
113 * Allow only a single IBSS interface to be up at any 107 * Allow only a single IBSS interface to be up at any
114 * time. This is restricted because beacon distribution 108 * time. This is restricted because beacon distribution
@@ -127,7 +121,8 @@ static int ieee80211_check_concurrent_iface(struct ieee80211_sub_if_data *sdata,
127 * The remaining checks are only performed for interfaces 121 * The remaining checks are only performed for interfaces
128 * with the same MAC address. 122 * with the same MAC address.
129 */ 123 */
130 if (!ether_addr_equal(dev->dev_addr, ndev->dev_addr)) 124 if (!ether_addr_equal(sdata->vif.addr,
125 nsdata->vif.addr))
131 continue; 126 continue;
132 127
133 /* 128 /*
@@ -528,10 +523,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
528 */ 523 */
529 netif_tx_stop_all_queues(sdata->dev); 524 netif_tx_stop_all_queues(sdata->dev);
530 525
531 /* 526 ieee80211_roc_purge(sdata);
532 * Purge work for this interface.
533 */
534 ieee80211_work_purge(sdata);
535 527
536 /* 528 /*
537 * Remove all stations associated with this interface. 529 * Remove all stations associated with this interface.
@@ -637,18 +629,6 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
637 ieee80211_configure_filter(local); 629 ieee80211_configure_filter(local);
638 break; 630 break;
639 default: 631 default:
640 mutex_lock(&local->mtx);
641 if (local->hw_roc_dev == sdata->dev &&
642 local->hw_roc_channel) {
643 /* ignore return value since this is racy */
644 drv_cancel_remain_on_channel(local);
645 ieee80211_queue_work(&local->hw, &local->hw_roc_done);
646 }
647 mutex_unlock(&local->mtx);
648
649 flush_work(&local->hw_roc_start);
650 flush_work(&local->hw_roc_done);
651
652 flush_work(&sdata->work); 632 flush_work(&sdata->work);
653 /* 633 /*
654 * When we get here, the interface is marked down. 634 * When we get here, the interface is marked down.
@@ -823,7 +803,7 @@ static u16 ieee80211_monitor_select_queue(struct net_device *dev,
823 803
824 hdr = (void *)((u8 *)skb->data + le16_to_cpu(rtap->it_len)); 804 hdr = (void *)((u8 *)skb->data + le16_to_cpu(rtap->it_len));
825 805
826 return ieee80211_select_queue_80211(local, skb, hdr); 806 return ieee80211_select_queue_80211(sdata, skb, hdr);
827} 807}
828 808
829static const struct net_device_ops ieee80211_monitorif_ops = { 809static const struct net_device_ops ieee80211_monitorif_ops = {
@@ -1238,7 +1218,7 @@ static void ieee80211_assign_perm_addr(struct ieee80211_local *local,
1238 1218
1239 if (__ffs64(mask) + hweight64(mask) != fls64(mask)) { 1219 if (__ffs64(mask) + hweight64(mask) != fls64(mask)) {
1240 /* not a contiguous mask ... not handled now! */ 1220 /* not a contiguous mask ... not handled now! */
1241 printk(KERN_DEBUG "not contiguous\n"); 1221 pr_info("not contiguous\n");
1242 break; 1222 break;
1243 } 1223 }
1244 1224
@@ -1364,6 +1344,8 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
1364 sdata->u.mgd.use_4addr = params->use_4addr; 1344 sdata->u.mgd.use_4addr = params->use_4addr;
1365 } 1345 }
1366 1346
1347 ndev->features |= local->hw.netdev_features;
1348
1367 ret = register_netdevice(ndev); 1349 ret = register_netdevice(ndev);
1368 if (ret) 1350 if (ret)
1369 goto fail; 1351 goto fail;
@@ -1427,10 +1409,6 @@ static u32 ieee80211_idle_off(struct ieee80211_local *local,
1427 if (!(local->hw.conf.flags & IEEE80211_CONF_IDLE)) 1409 if (!(local->hw.conf.flags & IEEE80211_CONF_IDLE))
1428 return 0; 1410 return 0;
1429 1411
1430#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
1431 wiphy_debug(local->hw.wiphy, "device no longer idle - %s\n", reason);
1432#endif
1433
1434 local->hw.conf.flags &= ~IEEE80211_CONF_IDLE; 1412 local->hw.conf.flags &= ~IEEE80211_CONF_IDLE;
1435 return IEEE80211_CONF_CHANGE_IDLE; 1413 return IEEE80211_CONF_CHANGE_IDLE;
1436} 1414}
@@ -1440,10 +1418,6 @@ static u32 ieee80211_idle_on(struct ieee80211_local *local)
1440 if (local->hw.conf.flags & IEEE80211_CONF_IDLE) 1418 if (local->hw.conf.flags & IEEE80211_CONF_IDLE)
1441 return 0; 1419 return 0;
1442 1420
1443#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
1444 wiphy_debug(local->hw.wiphy, "device now idle\n");
1445#endif
1446
1447 drv_flush(local, false); 1421 drv_flush(local, false);
1448 1422
1449 local->hw.conf.flags |= IEEE80211_CONF_IDLE; 1423 local->hw.conf.flags |= IEEE80211_CONF_IDLE;
@@ -1454,9 +1428,9 @@ u32 __ieee80211_recalc_idle(struct ieee80211_local *local)
1454{ 1428{
1455 struct ieee80211_sub_if_data *sdata; 1429 struct ieee80211_sub_if_data *sdata;
1456 int count = 0; 1430 int count = 0;
1457 bool working = false, scanning = false, hw_roc = false; 1431 bool working = false, scanning = false;
1458 struct ieee80211_work *wk;
1459 unsigned int led_trig_start = 0, led_trig_stop = 0; 1432 unsigned int led_trig_start = 0, led_trig_stop = 0;
1433 struct ieee80211_roc_work *roc;
1460 1434
1461#ifdef CONFIG_PROVE_LOCKING 1435#ifdef CONFIG_PROVE_LOCKING
1462 WARN_ON(debug_locks && !lockdep_rtnl_is_held() && 1436 WARN_ON(debug_locks && !lockdep_rtnl_is_held() &&
@@ -1491,9 +1465,11 @@ u32 __ieee80211_recalc_idle(struct ieee80211_local *local)
1491 count++; 1465 count++;
1492 } 1466 }
1493 1467
1494 list_for_each_entry(wk, &local->work_list, list) { 1468 if (!local->ops->remain_on_channel) {
1495 working = true; 1469 list_for_each_entry(roc, &local->roc_list, list) {
1496 wk->sdata->vif.bss_conf.idle = false; 1470 working = true;
1471 roc->sdata->vif.bss_conf.idle = false;
1472 }
1497 } 1473 }
1498 1474
1499 if (local->scan_sdata && 1475 if (local->scan_sdata &&
@@ -1502,9 +1478,6 @@ u32 __ieee80211_recalc_idle(struct ieee80211_local *local)
1502 local->scan_sdata->vif.bss_conf.idle = false; 1478 local->scan_sdata->vif.bss_conf.idle = false;
1503 } 1479 }
1504 1480
1505 if (local->hw_roc_channel)
1506 hw_roc = true;
1507
1508 list_for_each_entry(sdata, &local->interfaces, list) { 1481 list_for_each_entry(sdata, &local->interfaces, list) {
1509 if (sdata->vif.type == NL80211_IFTYPE_MONITOR || 1482 if (sdata->vif.type == NL80211_IFTYPE_MONITOR ||
1510 sdata->vif.type == NL80211_IFTYPE_AP_VLAN) 1483 sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
@@ -1516,7 +1489,7 @@ u32 __ieee80211_recalc_idle(struct ieee80211_local *local)
1516 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_IDLE); 1489 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_IDLE);
1517 } 1490 }
1518 1491
1519 if (working || scanning || hw_roc) 1492 if (working || scanning)
1520 led_trig_start |= IEEE80211_TPT_LEDTRIG_FL_WORK; 1493 led_trig_start |= IEEE80211_TPT_LEDTRIG_FL_WORK;
1521 else 1494 else
1522 led_trig_stop |= IEEE80211_TPT_LEDTRIG_FL_WORK; 1495 led_trig_stop |= IEEE80211_TPT_LEDTRIG_FL_WORK;
@@ -1528,8 +1501,6 @@ u32 __ieee80211_recalc_idle(struct ieee80211_local *local)
1528 1501
1529 ieee80211_mod_tpt_led_trig(local, led_trig_start, led_trig_stop); 1502 ieee80211_mod_tpt_led_trig(local, led_trig_start, led_trig_stop);
1530 1503
1531 if (hw_roc)
1532 return ieee80211_idle_off(local, "hw remain-on-channel");
1533 if (working) 1504 if (working)
1534 return ieee80211_idle_off(local, "working"); 1505 return ieee80211_idle_off(local, "working");
1535 if (scanning) 1506 if (scanning)
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index 5bb600d93d7..b3b7e526e24 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -139,7 +139,7 @@ static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
139 } 139 }
140 140
141 if (ret != -ENOSPC && ret != -EOPNOTSUPP) 141 if (ret != -ENOSPC && ret != -EOPNOTSUPP)
142 wiphy_err(key->local->hw.wiphy, 142 sdata_err(sdata,
143 "failed to set key (%d, %pM) to hardware (%d)\n", 143 "failed to set key (%d, %pM) to hardware (%d)\n",
144 key->conf.keyidx, 144 key->conf.keyidx,
145 sta ? sta->sta.addr : bcast_addr, ret); 145 sta ? sta->sta.addr : bcast_addr, ret);
@@ -186,7 +186,7 @@ static void ieee80211_key_disable_hw_accel(struct ieee80211_key *key)
186 sta ? &sta->sta : NULL, &key->conf); 186 sta ? &sta->sta : NULL, &key->conf);
187 187
188 if (ret) 188 if (ret)
189 wiphy_err(key->local->hw.wiphy, 189 sdata_err(sdata,
190 "failed to remove key (%d, %pM) from hardware (%d)\n", 190 "failed to remove key (%d, %pM) from hardware (%d)\n",
191 key->conf.keyidx, 191 key->conf.keyidx,
192 sta ? sta->sta.addr : bcast_addr, ret); 192 sta ? sta->sta.addr : bcast_addr, ret);
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index f5548e95325..0b040fb7367 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -345,6 +345,13 @@ void ieee80211_restart_hw(struct ieee80211_hw *hw)
345 ieee80211_stop_queues_by_reason(hw, 345 ieee80211_stop_queues_by_reason(hw,
346 IEEE80211_QUEUE_STOP_REASON_SUSPEND); 346 IEEE80211_QUEUE_STOP_REASON_SUSPEND);
347 347
348 /*
349 * Stop all Rx during the reconfig. We don't want state changes
350 * or driver callbacks while this is in progress.
351 */
352 local->in_reconfig = true;
353 barrier();
354
348 schedule_work(&local->restart_work); 355 schedule_work(&local->restart_work);
349} 356}
350EXPORT_SYMBOL(ieee80211_restart_hw); 357EXPORT_SYMBOL(ieee80211_restart_hw);
@@ -455,7 +462,9 @@ static const struct ieee80211_txrx_stypes
455ieee80211_default_mgmt_stypes[NUM_NL80211_IFTYPES] = { 462ieee80211_default_mgmt_stypes[NUM_NL80211_IFTYPES] = {
456 [NL80211_IFTYPE_ADHOC] = { 463 [NL80211_IFTYPE_ADHOC] = {
457 .tx = 0xffff, 464 .tx = 0xffff,
458 .rx = BIT(IEEE80211_STYPE_ACTION >> 4), 465 .rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
466 BIT(IEEE80211_STYPE_AUTH >> 4) |
467 BIT(IEEE80211_STYPE_DEAUTH >> 4),
459 }, 468 },
460 [NL80211_IFTYPE_STATION] = { 469 [NL80211_IFTYPE_STATION] = {
461 .tx = 0xffff, 470 .tx = 0xffff,
@@ -625,8 +634,6 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
625 634
626 INIT_DELAYED_WORK(&local->scan_work, ieee80211_scan_work); 635 INIT_DELAYED_WORK(&local->scan_work, ieee80211_scan_work);
627 636
628 ieee80211_work_init(local);
629
630 INIT_WORK(&local->restart_work, ieee80211_restart_work); 637 INIT_WORK(&local->restart_work, ieee80211_restart_work);
631 638
632 INIT_WORK(&local->reconfig_filter, ieee80211_reconfig_filter); 639 INIT_WORK(&local->reconfig_filter, ieee80211_reconfig_filter);
@@ -669,7 +676,7 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
669 676
670 ieee80211_led_names(local); 677 ieee80211_led_names(local);
671 678
672 ieee80211_hw_roc_setup(local); 679 ieee80211_roc_setup(local);
673 680
674 return &local->hw; 681 return &local->hw;
675} 682}
@@ -682,6 +689,7 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
682 enum ieee80211_band band; 689 enum ieee80211_band band;
683 int channels, max_bitrates; 690 int channels, max_bitrates;
684 bool supp_ht; 691 bool supp_ht;
692 netdev_features_t feature_whitelist;
685 static const u32 cipher_suites[] = { 693 static const u32 cipher_suites[] = {
686 /* keep WEP first, it may be removed below */ 694 /* keep WEP first, it may be removed below */
687 WLAN_CIPHER_SUITE_WEP40, 695 WLAN_CIPHER_SUITE_WEP40,
@@ -708,6 +716,12 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
708 if ((hw->flags & IEEE80211_HW_SCAN_WHILE_IDLE) && !local->ops->hw_scan) 716 if ((hw->flags & IEEE80211_HW_SCAN_WHILE_IDLE) && !local->ops->hw_scan)
709 return -EINVAL; 717 return -EINVAL;
710 718
719 /* Only HW csum features are currently compatible with mac80211 */
720 feature_whitelist = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
721 NETIF_F_HW_CSUM;
722 if (WARN_ON(hw->netdev_features & ~feature_whitelist))
723 return -EINVAL;
724
711 if (hw->max_report_rates == 0) 725 if (hw->max_report_rates == 0)
712 hw->max_report_rates = hw->max_rates; 726 hw->max_report_rates = hw->max_rates;
713 727
@@ -1009,12 +1023,6 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw)
1009 1023
1010 rtnl_unlock(); 1024 rtnl_unlock();
1011 1025
1012 /*
1013 * Now all work items will be gone, but the
1014 * timer might still be armed, so delete it
1015 */
1016 del_timer_sync(&local->work_timer);
1017
1018 cancel_work_sync(&local->restart_work); 1026 cancel_work_sync(&local->restart_work);
1019 cancel_work_sync(&local->reconfig_filter); 1027 cancel_work_sync(&local->reconfig_filter);
1020 1028
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index 2913113c583..6fac18c0423 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -133,7 +133,7 @@ bool mesh_peer_accepts_plinks(struct ieee802_11_elems *ie)
133} 133}
134 134
135/** 135/**
136 * mesh_accept_plinks_update: update accepting_plink in local mesh beacons 136 * mesh_accept_plinks_update - update accepting_plink in local mesh beacons
137 * 137 *
138 * @sdata: mesh interface in which mesh beacons are going to be updated 138 * @sdata: mesh interface in which mesh beacons are going to be updated
139 */ 139 */
@@ -443,7 +443,7 @@ static void ieee80211_mesh_path_root_timer(unsigned long data)
443 443
444void ieee80211_mesh_root_setup(struct ieee80211_if_mesh *ifmsh) 444void ieee80211_mesh_root_setup(struct ieee80211_if_mesh *ifmsh)
445{ 445{
446 if (ifmsh->mshcfg.dot11MeshHWMPRootMode) 446 if (ifmsh->mshcfg.dot11MeshHWMPRootMode > IEEE80211_ROOTMODE_ROOT)
447 set_bit(MESH_WORK_ROOT, &ifmsh->wrkq_flags); 447 set_bit(MESH_WORK_ROOT, &ifmsh->wrkq_flags);
448 else { 448 else {
449 clear_bit(MESH_WORK_ROOT, &ifmsh->wrkq_flags); 449 clear_bit(MESH_WORK_ROOT, &ifmsh->wrkq_flags);
@@ -523,11 +523,6 @@ static void ieee80211_mesh_housekeeping(struct ieee80211_sub_if_data *sdata,
523{ 523{
524 bool free_plinks; 524 bool free_plinks;
525 525
526#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
527 printk(KERN_DEBUG "%s: running mesh housekeeping\n",
528 sdata->name);
529#endif
530
531 ieee80211_sta_expire(sdata, IEEE80211_MESH_PEER_INACTIVITY_LIMIT); 526 ieee80211_sta_expire(sdata, IEEE80211_MESH_PEER_INACTIVITY_LIMIT);
532 mesh_path_expire(sdata); 527 mesh_path_expire(sdata);
533 528
@@ -542,11 +537,17 @@ static void ieee80211_mesh_housekeeping(struct ieee80211_sub_if_data *sdata,
542static void ieee80211_mesh_rootpath(struct ieee80211_sub_if_data *sdata) 537static void ieee80211_mesh_rootpath(struct ieee80211_sub_if_data *sdata)
543{ 538{
544 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; 539 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
540 u32 interval;
545 541
546 mesh_path_tx_root_frame(sdata); 542 mesh_path_tx_root_frame(sdata);
543
544 if (ifmsh->mshcfg.dot11MeshHWMPRootMode == IEEE80211_PROACTIVE_RANN)
545 interval = ifmsh->mshcfg.dot11MeshHWMPRannInterval;
546 else
547 interval = ifmsh->mshcfg.dot11MeshHWMProotInterval;
548
547 mod_timer(&ifmsh->mesh_path_root_timer, 549 mod_timer(&ifmsh->mesh_path_root_timer,
548 round_jiffies(TU_TO_EXP_TIME( 550 round_jiffies(TU_TO_EXP_TIME(interval)));
549 ifmsh->mshcfg.dot11MeshHWMPRannInterval)));
550} 551}
551 552
552#ifdef CONFIG_PM 553#ifdef CONFIG_PM
diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
index e3642756f8f..faaa39bcfd1 100644
--- a/net/mac80211/mesh.h
+++ b/net/mac80211/mesh.h
@@ -104,6 +104,7 @@ enum mesh_deferred_task_flags {
104 * an mpath to a hash bucket on a path table. 104 * an mpath to a hash bucket on a path table.
105 * @rann_snd_addr: the RANN sender address 105 * @rann_snd_addr: the RANN sender address
106 * @rann_metric: the aggregated path metric towards the root node 106 * @rann_metric: the aggregated path metric towards the root node
107 * @last_preq_to_root: Timestamp of last PREQ sent to root
107 * @is_root: the destination station of this path is a root node 108 * @is_root: the destination station of this path is a root node
108 * @is_gate: the destination station of this path is a mesh gate 109 * @is_gate: the destination station of this path is a mesh gate
109 * 110 *
@@ -131,6 +132,7 @@ struct mesh_path {
131 spinlock_t state_lock; 132 spinlock_t state_lock;
132 u8 rann_snd_addr[ETH_ALEN]; 133 u8 rann_snd_addr[ETH_ALEN];
133 u32 rann_metric; 134 u32 rann_metric;
135 unsigned long last_preq_to_root;
134 bool is_root; 136 bool is_root;
135 bool is_gate; 137 bool is_gate;
136}; 138};
@@ -245,7 +247,7 @@ void mesh_rmc_free(struct ieee80211_sub_if_data *sdata);
245int mesh_rmc_init(struct ieee80211_sub_if_data *sdata); 247int mesh_rmc_init(struct ieee80211_sub_if_data *sdata);
246void ieee80211s_init(void); 248void ieee80211s_init(void);
247void ieee80211s_update_metric(struct ieee80211_local *local, 249void ieee80211s_update_metric(struct ieee80211_local *local,
248 struct sta_info *stainfo, struct sk_buff *skb); 250 struct sta_info *sta, struct sk_buff *skb);
249void ieee80211s_stop(void); 251void ieee80211s_stop(void);
250void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata); 252void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata);
251void ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata); 253void ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata);
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index 9b59658e865..494bc39f61a 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -13,13 +13,6 @@
13#include "wme.h" 13#include "wme.h"
14#include "mesh.h" 14#include "mesh.h"
15 15
16#ifdef CONFIG_MAC80211_VERBOSE_MHWMP_DEBUG
17#define mhwmp_dbg(fmt, args...) \
18 printk(KERN_DEBUG "Mesh HWMP (%s): " fmt "\n", sdata->name, ##args)
19#else
20#define mhwmp_dbg(fmt, args...) do { (void)(0); } while (0)
21#endif
22
23#define TEST_FRAME_LEN 8192 16#define TEST_FRAME_LEN 8192
24#define MAX_METRIC 0xffffffff 17#define MAX_METRIC 0xffffffff
25#define ARITH_SHIFT 8 18#define ARITH_SHIFT 8
@@ -98,6 +91,8 @@ static inline u32 u16_field_get(u8 *preq_elem, int offset, bool ae)
98#define max_preq_retries(s) (s->u.mesh.mshcfg.dot11MeshHWMPmaxPREQretries) 91#define max_preq_retries(s) (s->u.mesh.mshcfg.dot11MeshHWMPmaxPREQretries)
99#define disc_timeout_jiff(s) \ 92#define disc_timeout_jiff(s) \
100 msecs_to_jiffies(sdata->u.mesh.mshcfg.min_discovery_timeout) 93 msecs_to_jiffies(sdata->u.mesh.mshcfg.min_discovery_timeout)
94#define root_path_confirmation_jiffies(s) \
95 msecs_to_jiffies(sdata->u.mesh.mshcfg.dot11MeshHWMPconfirmationInterval)
101 96
102enum mpath_frame_type { 97enum mpath_frame_type {
103 MPATH_PREQ = 0, 98 MPATH_PREQ = 0,
@@ -142,19 +137,19 @@ static int mesh_path_sel_frame_tx(enum mpath_frame_type action, u8 flags,
142 137
143 switch (action) { 138 switch (action) {
144 case MPATH_PREQ: 139 case MPATH_PREQ:
145 mhwmp_dbg("sending PREQ to %pM", target); 140 mhwmp_dbg(sdata, "sending PREQ to %pM\n", target);
146 ie_len = 37; 141 ie_len = 37;
147 pos = skb_put(skb, 2 + ie_len); 142 pos = skb_put(skb, 2 + ie_len);
148 *pos++ = WLAN_EID_PREQ; 143 *pos++ = WLAN_EID_PREQ;
149 break; 144 break;
150 case MPATH_PREP: 145 case MPATH_PREP:
151 mhwmp_dbg("sending PREP to %pM", target); 146 mhwmp_dbg(sdata, "sending PREP to %pM\n", target);
152 ie_len = 31; 147 ie_len = 31;
153 pos = skb_put(skb, 2 + ie_len); 148 pos = skb_put(skb, 2 + ie_len);
154 *pos++ = WLAN_EID_PREP; 149 *pos++ = WLAN_EID_PREP;
155 break; 150 break;
156 case MPATH_RANN: 151 case MPATH_RANN:
157 mhwmp_dbg("sending RANN from %pM", orig_addr); 152 mhwmp_dbg(sdata, "sending RANN from %pM\n", orig_addr);
158 ie_len = sizeof(struct ieee80211_rann_ie); 153 ie_len = sizeof(struct ieee80211_rann_ie);
159 pos = skb_put(skb, 2 + ie_len); 154 pos = skb_put(skb, 2 + ie_len);
160 *pos++ = WLAN_EID_RANN; 155 *pos++ = WLAN_EID_RANN;
@@ -303,7 +298,7 @@ int mesh_path_error_tx(u8 ttl, u8 *target, __le32 target_sn,
303} 298}
304 299
305void ieee80211s_update_metric(struct ieee80211_local *local, 300void ieee80211s_update_metric(struct ieee80211_local *local,
306 struct sta_info *stainfo, struct sk_buff *skb) 301 struct sta_info *sta, struct sk_buff *skb)
307{ 302{
308 struct ieee80211_tx_info *txinfo = IEEE80211_SKB_CB(skb); 303 struct ieee80211_tx_info *txinfo = IEEE80211_SKB_CB(skb);
309 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 304 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
@@ -315,15 +310,14 @@ void ieee80211s_update_metric(struct ieee80211_local *local,
315 failed = !(txinfo->flags & IEEE80211_TX_STAT_ACK); 310 failed = !(txinfo->flags & IEEE80211_TX_STAT_ACK);
316 311
317 /* moving average, scaled to 100 */ 312 /* moving average, scaled to 100 */
318 stainfo->fail_avg = ((80 * stainfo->fail_avg + 5) / 100 + 20 * failed); 313 sta->fail_avg = ((80 * sta->fail_avg + 5) / 100 + 20 * failed);
319 if (stainfo->fail_avg > 95) 314 if (sta->fail_avg > 95)
320 mesh_plink_broken(stainfo); 315 mesh_plink_broken(sta);
321} 316}
322 317
323static u32 airtime_link_metric_get(struct ieee80211_local *local, 318static u32 airtime_link_metric_get(struct ieee80211_local *local,
324 struct sta_info *sta) 319 struct sta_info *sta)
325{ 320{
326 struct ieee80211_supported_band *sband;
327 struct rate_info rinfo; 321 struct rate_info rinfo;
328 /* This should be adjusted for each device */ 322 /* This should be adjusted for each device */
329 int device_constant = 1 << ARITH_SHIFT; 323 int device_constant = 1 << ARITH_SHIFT;
@@ -333,8 +327,6 @@ static u32 airtime_link_metric_get(struct ieee80211_local *local,
333 u32 tx_time, estimated_retx; 327 u32 tx_time, estimated_retx;
334 u64 result; 328 u64 result;
335 329
336 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
337
338 if (sta->fail_avg >= 100) 330 if (sta->fail_avg >= 100)
339 return MAX_METRIC; 331 return MAX_METRIC;
340 332
@@ -519,10 +511,11 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
519 struct mesh_path *mpath = NULL; 511 struct mesh_path *mpath = NULL;
520 u8 *target_addr, *orig_addr; 512 u8 *target_addr, *orig_addr;
521 const u8 *da; 513 const u8 *da;
522 u8 target_flags, ttl; 514 u8 target_flags, ttl, flags;
523 u32 orig_sn, target_sn, lifetime; 515 u32 orig_sn, target_sn, lifetime, orig_metric;
524 bool reply = false; 516 bool reply = false;
525 bool forward = true; 517 bool forward = true;
518 bool root_is_gate;
526 519
527 /* Update target SN, if present */ 520 /* Update target SN, if present */
528 target_addr = PREQ_IE_TARGET_ADDR(preq_elem); 521 target_addr = PREQ_IE_TARGET_ADDR(preq_elem);
@@ -530,11 +523,15 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
530 target_sn = PREQ_IE_TARGET_SN(preq_elem); 523 target_sn = PREQ_IE_TARGET_SN(preq_elem);
531 orig_sn = PREQ_IE_ORIG_SN(preq_elem); 524 orig_sn = PREQ_IE_ORIG_SN(preq_elem);
532 target_flags = PREQ_IE_TARGET_F(preq_elem); 525 target_flags = PREQ_IE_TARGET_F(preq_elem);
526 orig_metric = metric;
527 /* Proactive PREQ gate announcements */
528 flags = PREQ_IE_FLAGS(preq_elem);
529 root_is_gate = !!(flags & RANN_FLAG_IS_GATE);
533 530
534 mhwmp_dbg("received PREQ from %pM", orig_addr); 531 mhwmp_dbg(sdata, "received PREQ from %pM\n", orig_addr);
535 532
536 if (ether_addr_equal(target_addr, sdata->vif.addr)) { 533 if (ether_addr_equal(target_addr, sdata->vif.addr)) {
537 mhwmp_dbg("PREQ is for us"); 534 mhwmp_dbg(sdata, "PREQ is for us\n");
538 forward = false; 535 forward = false;
539 reply = true; 536 reply = true;
540 metric = 0; 537 metric = 0;
@@ -544,6 +541,22 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
544 target_sn = ++ifmsh->sn; 541 target_sn = ++ifmsh->sn;
545 ifmsh->last_sn_update = jiffies; 542 ifmsh->last_sn_update = jiffies;
546 } 543 }
544 } else if (is_broadcast_ether_addr(target_addr) &&
545 (target_flags & IEEE80211_PREQ_TO_FLAG)) {
546 rcu_read_lock();
547 mpath = mesh_path_lookup(orig_addr, sdata);
548 if (mpath) {
549 if (flags & IEEE80211_PREQ_PROACTIVE_PREP_FLAG) {
550 reply = true;
551 target_addr = sdata->vif.addr;
552 target_sn = ++ifmsh->sn;
553 metric = 0;
554 ifmsh->last_sn_update = jiffies;
555 }
556 if (root_is_gate)
557 mesh_path_add_gate(mpath);
558 }
559 rcu_read_unlock();
547 } else { 560 } else {
548 rcu_read_lock(); 561 rcu_read_lock();
549 mpath = mesh_path_lookup(target_addr, sdata); 562 mpath = mesh_path_lookup(target_addr, sdata);
@@ -570,19 +583,20 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
570 lifetime = PREQ_IE_LIFETIME(preq_elem); 583 lifetime = PREQ_IE_LIFETIME(preq_elem);
571 ttl = ifmsh->mshcfg.element_ttl; 584 ttl = ifmsh->mshcfg.element_ttl;
572 if (ttl != 0) { 585 if (ttl != 0) {
573 mhwmp_dbg("replying to the PREQ"); 586 mhwmp_dbg(sdata, "replying to the PREQ\n");
574 mesh_path_sel_frame_tx(MPATH_PREP, 0, orig_addr, 587 mesh_path_sel_frame_tx(MPATH_PREP, 0, orig_addr,
575 cpu_to_le32(orig_sn), 0, target_addr, 588 cpu_to_le32(orig_sn), 0, target_addr,
576 cpu_to_le32(target_sn), mgmt->sa, 0, ttl, 589 cpu_to_le32(target_sn), mgmt->sa, 0, ttl,
577 cpu_to_le32(lifetime), cpu_to_le32(metric), 590 cpu_to_le32(lifetime), cpu_to_le32(metric),
578 0, sdata); 591 0, sdata);
579 } else 592 } else {
580 ifmsh->mshstats.dropped_frames_ttl++; 593 ifmsh->mshstats.dropped_frames_ttl++;
594 }
581 } 595 }
582 596
583 if (forward && ifmsh->mshcfg.dot11MeshForwarding) { 597 if (forward && ifmsh->mshcfg.dot11MeshForwarding) {
584 u32 preq_id; 598 u32 preq_id;
585 u8 hopcount, flags; 599 u8 hopcount;
586 600
587 ttl = PREQ_IE_TTL(preq_elem); 601 ttl = PREQ_IE_TTL(preq_elem);
588 lifetime = PREQ_IE_LIFETIME(preq_elem); 602 lifetime = PREQ_IE_LIFETIME(preq_elem);
@@ -590,13 +604,19 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
590 ifmsh->mshstats.dropped_frames_ttl++; 604 ifmsh->mshstats.dropped_frames_ttl++;
591 return; 605 return;
592 } 606 }
593 mhwmp_dbg("forwarding the PREQ from %pM", orig_addr); 607 mhwmp_dbg(sdata, "forwarding the PREQ from %pM\n", orig_addr);
594 --ttl; 608 --ttl;
595 flags = PREQ_IE_FLAGS(preq_elem);
596 preq_id = PREQ_IE_PREQ_ID(preq_elem); 609 preq_id = PREQ_IE_PREQ_ID(preq_elem);
597 hopcount = PREQ_IE_HOPCOUNT(preq_elem) + 1; 610 hopcount = PREQ_IE_HOPCOUNT(preq_elem) + 1;
598 da = (mpath && mpath->is_root) ? 611 da = (mpath && mpath->is_root) ?
599 mpath->rann_snd_addr : broadcast_addr; 612 mpath->rann_snd_addr : broadcast_addr;
613
614 if (flags & IEEE80211_PREQ_PROACTIVE_PREP_FLAG) {
615 target_addr = PREQ_IE_TARGET_ADDR(preq_elem);
616 target_sn = PREQ_IE_TARGET_SN(preq_elem);
617 metric = orig_metric;
618 }
619
600 mesh_path_sel_frame_tx(MPATH_PREQ, flags, orig_addr, 620 mesh_path_sel_frame_tx(MPATH_PREQ, flags, orig_addr,
601 cpu_to_le32(orig_sn), target_flags, target_addr, 621 cpu_to_le32(orig_sn), target_flags, target_addr,
602 cpu_to_le32(target_sn), da, 622 cpu_to_le32(target_sn), da,
@@ -631,7 +651,8 @@ static void hwmp_prep_frame_process(struct ieee80211_sub_if_data *sdata,
631 u8 next_hop[ETH_ALEN]; 651 u8 next_hop[ETH_ALEN];
632 u32 target_sn, orig_sn, lifetime; 652 u32 target_sn, orig_sn, lifetime;
633 653
634 mhwmp_dbg("received PREP from %pM", PREP_IE_ORIG_ADDR(prep_elem)); 654 mhwmp_dbg(sdata, "received PREP from %pM\n",
655 PREP_IE_ORIG_ADDR(prep_elem));
635 656
636 orig_addr = PREP_IE_ORIG_ADDR(prep_elem); 657 orig_addr = PREP_IE_ORIG_ADDR(prep_elem);
637 if (ether_addr_equal(orig_addr, sdata->vif.addr)) 658 if (ether_addr_equal(orig_addr, sdata->vif.addr))
@@ -744,11 +765,6 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
744 bool root_is_gate; 765 bool root_is_gate;
745 766
746 ttl = rann->rann_ttl; 767 ttl = rann->rann_ttl;
747 if (ttl <= 1) {
748 ifmsh->mshstats.dropped_frames_ttl++;
749 return;
750 }
751 ttl--;
752 flags = rann->rann_flags; 768 flags = rann->rann_flags;
753 root_is_gate = !!(flags & RANN_FLAG_IS_GATE); 769 root_is_gate = !!(flags & RANN_FLAG_IS_GATE);
754 orig_addr = rann->rann_addr; 770 orig_addr = rann->rann_addr;
@@ -762,8 +778,9 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
762 if (ether_addr_equal(orig_addr, sdata->vif.addr)) 778 if (ether_addr_equal(orig_addr, sdata->vif.addr))
763 return; 779 return;
764 780
765 mhwmp_dbg("received RANN from %pM via neighbour %pM (is_gate=%d)", 781 mhwmp_dbg(sdata,
766 orig_addr, mgmt->sa, root_is_gate); 782 "received RANN from %pM via neighbour %pM (is_gate=%d)\n",
783 orig_addr, mgmt->sa, root_is_gate);
767 784
768 rcu_read_lock(); 785 rcu_read_lock();
769 sta = sta_info_get(sdata, mgmt->sa); 786 sta = sta_info_get(sdata, mgmt->sa);
@@ -785,34 +802,50 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
785 } 802 }
786 } 803 }
787 804
805 if (!(SN_LT(mpath->sn, orig_sn)) &&
806 !(mpath->sn == orig_sn && metric < mpath->rann_metric)) {
807 rcu_read_unlock();
808 return;
809 }
810
788 if ((!(mpath->flags & (MESH_PATH_ACTIVE | MESH_PATH_RESOLVING)) || 811 if ((!(mpath->flags & (MESH_PATH_ACTIVE | MESH_PATH_RESOLVING)) ||
789 time_after(jiffies, mpath->exp_time - 1*HZ)) && 812 (time_after(jiffies, mpath->last_preq_to_root +
790 !(mpath->flags & MESH_PATH_FIXED)) { 813 root_path_confirmation_jiffies(sdata)) ||
791 mhwmp_dbg("%s time to refresh root mpath %pM", sdata->name, 814 time_before(jiffies, mpath->last_preq_to_root))) &&
792 orig_addr); 815 !(mpath->flags & MESH_PATH_FIXED) && (ttl != 0)) {
816 mhwmp_dbg(sdata,
817 "time to refresh root mpath %pM\n",
818 orig_addr);
793 mesh_queue_preq(mpath, PREQ_Q_F_START | PREQ_Q_F_REFRESH); 819 mesh_queue_preq(mpath, PREQ_Q_F_START | PREQ_Q_F_REFRESH);
820 mpath->last_preq_to_root = jiffies;
794 } 821 }
795 822
796 if ((SN_LT(mpath->sn, orig_sn) || (mpath->sn == orig_sn && 823 mpath->sn = orig_sn;
797 metric < mpath->rann_metric)) && ifmsh->mshcfg.dot11MeshForwarding) { 824 mpath->rann_metric = metric + metric_txsta;
825 mpath->is_root = true;
826 /* Recording RANNs sender address to send individually
827 * addressed PREQs destined for root mesh STA */
828 memcpy(mpath->rann_snd_addr, mgmt->sa, ETH_ALEN);
829
830 if (root_is_gate)
831 mesh_path_add_gate(mpath);
832
833 if (ttl <= 1) {
834 ifmsh->mshstats.dropped_frames_ttl++;
835 rcu_read_unlock();
836 return;
837 }
838 ttl--;
839
840 if (ifmsh->mshcfg.dot11MeshForwarding) {
798 mesh_path_sel_frame_tx(MPATH_RANN, flags, orig_addr, 841 mesh_path_sel_frame_tx(MPATH_RANN, flags, orig_addr,
799 cpu_to_le32(orig_sn), 842 cpu_to_le32(orig_sn),
800 0, NULL, 0, broadcast_addr, 843 0, NULL, 0, broadcast_addr,
801 hopcount, ttl, cpu_to_le32(interval), 844 hopcount, ttl, cpu_to_le32(interval),
802 cpu_to_le32(metric + metric_txsta), 845 cpu_to_le32(metric + metric_txsta),
803 0, sdata); 846 0, sdata);
804 mpath->sn = orig_sn;
805 mpath->rann_metric = metric + metric_txsta;
806 /* Recording RANNs sender address to send individually
807 * addressed PREQs destined for root mesh STA */
808 memcpy(mpath->rann_snd_addr, mgmt->sa, ETH_ALEN);
809 } 847 }
810 848
811 mpath->is_root = true;
812
813 if (root_is_gate)
814 mesh_path_add_gate(mpath);
815
816 rcu_read_unlock(); 849 rcu_read_unlock();
817} 850}
818 851
@@ -889,7 +922,7 @@ static void mesh_queue_preq(struct mesh_path *mpath, u8 flags)
889 922
890 preq_node = kmalloc(sizeof(struct mesh_preq_queue), GFP_ATOMIC); 923 preq_node = kmalloc(sizeof(struct mesh_preq_queue), GFP_ATOMIC);
891 if (!preq_node) { 924 if (!preq_node) {
892 mhwmp_dbg("could not allocate PREQ node"); 925 mhwmp_dbg(sdata, "could not allocate PREQ node\n");
893 return; 926 return;
894 } 927 }
895 928
@@ -898,7 +931,7 @@ static void mesh_queue_preq(struct mesh_path *mpath, u8 flags)
898 spin_unlock_bh(&ifmsh->mesh_preq_queue_lock); 931 spin_unlock_bh(&ifmsh->mesh_preq_queue_lock);
899 kfree(preq_node); 932 kfree(preq_node);
900 if (printk_ratelimit()) 933 if (printk_ratelimit())
901 mhwmp_dbg("PREQ node queue full"); 934 mhwmp_dbg(sdata, "PREQ node queue full\n");
902 return; 935 return;
903 } 936 }
904 937
@@ -1021,12 +1054,15 @@ enddiscovery:
1021 kfree(preq_node); 1054 kfree(preq_node);
1022} 1055}
1023 1056
1024/* mesh_nexthop_resolve - lookup next hop for given skb and start path 1057/**
1025 * discovery if no forwarding information is found. 1058 * mesh_nexthop_resolve - lookup next hop; conditionally start path discovery
1026 * 1059 *
1027 * @skb: 802.11 frame to be sent 1060 * @skb: 802.11 frame to be sent
1028 * @sdata: network subif the frame will be sent through 1061 * @sdata: network subif the frame will be sent through
1029 * 1062 *
1063 * Lookup next hop for given skb and start path discovery if no
1064 * forwarding information is found.
1065 *
1030 * Returns: 0 if the next hop was found and -ENOENT if the frame was queued. 1066 * Returns: 0 if the next hop was found and -ENOENT if the frame was queued.
1031 * skb is freeed here if no mpath could be allocated. 1067 * skb is freeed here if no mpath could be allocated.
1032 */ 1068 */
@@ -1146,7 +1182,7 @@ void mesh_path_timer(unsigned long data)
1146 if (!mpath->is_gate && mesh_gate_num(sdata) > 0) { 1182 if (!mpath->is_gate && mesh_gate_num(sdata) > 0) {
1147 ret = mesh_path_send_to_gates(mpath); 1183 ret = mesh_path_send_to_gates(mpath);
1148 if (ret) 1184 if (ret)
1149 mhwmp_dbg("no gate was reachable"); 1185 mhwmp_dbg(sdata, "no gate was reachable\n");
1150 } else 1186 } else
1151 mesh_path_flush_pending(mpath); 1187 mesh_path_flush_pending(mpath);
1152 } 1188 }
@@ -1157,13 +1193,34 @@ mesh_path_tx_root_frame(struct ieee80211_sub_if_data *sdata)
1157{ 1193{
1158 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; 1194 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
1159 u32 interval = ifmsh->mshcfg.dot11MeshHWMPRannInterval; 1195 u32 interval = ifmsh->mshcfg.dot11MeshHWMPRannInterval;
1160 u8 flags; 1196 u8 flags, target_flags = 0;
1161 1197
1162 flags = (ifmsh->mshcfg.dot11MeshGateAnnouncementProtocol) 1198 flags = (ifmsh->mshcfg.dot11MeshGateAnnouncementProtocol)
1163 ? RANN_FLAG_IS_GATE : 0; 1199 ? RANN_FLAG_IS_GATE : 0;
1164 mesh_path_sel_frame_tx(MPATH_RANN, flags, sdata->vif.addr, 1200
1201 switch (ifmsh->mshcfg.dot11MeshHWMPRootMode) {
1202 case IEEE80211_PROACTIVE_RANN:
1203 mesh_path_sel_frame_tx(MPATH_RANN, flags, sdata->vif.addr,
1165 cpu_to_le32(++ifmsh->sn), 1204 cpu_to_le32(++ifmsh->sn),
1166 0, NULL, 0, broadcast_addr, 1205 0, NULL, 0, broadcast_addr,
1167 0, sdata->u.mesh.mshcfg.element_ttl, 1206 0, ifmsh->mshcfg.element_ttl,
1168 cpu_to_le32(interval), 0, 0, sdata); 1207 cpu_to_le32(interval), 0, 0, sdata);
1208 break;
1209 case IEEE80211_PROACTIVE_PREQ_WITH_PREP:
1210 flags |= IEEE80211_PREQ_PROACTIVE_PREP_FLAG;
1211 case IEEE80211_PROACTIVE_PREQ_NO_PREP:
1212 interval = ifmsh->mshcfg.dot11MeshHWMPactivePathToRootTimeout;
1213 target_flags |= IEEE80211_PREQ_TO_FLAG |
1214 IEEE80211_PREQ_USN_FLAG;
1215 mesh_path_sel_frame_tx(MPATH_PREQ, flags, sdata->vif.addr,
1216 cpu_to_le32(++ifmsh->sn), target_flags,
1217 (u8 *) broadcast_addr, 0, broadcast_addr,
1218 0, ifmsh->mshcfg.element_ttl,
1219 cpu_to_le32(interval),
1220 0, cpu_to_le32(ifmsh->preq_id++), sdata);
1221 break;
1222 default:
1223 mhwmp_dbg(sdata, "Proactive mechanism not supported\n");
1224 return;
1225 }
1169} 1226}
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index b39224d8255..075bc535c60 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -18,12 +18,6 @@
18#include "ieee80211_i.h" 18#include "ieee80211_i.h"
19#include "mesh.h" 19#include "mesh.h"
20 20
21#ifdef CONFIG_MAC80211_VERBOSE_MPATH_DEBUG
22#define mpath_dbg(fmt, args...) printk(KERN_DEBUG fmt, ##args)
23#else
24#define mpath_dbg(fmt, args...) do { (void)(0); } while (0)
25#endif
26
27/* There will be initially 2^INIT_PATHS_SIZE_ORDER buckets */ 21/* There will be initially 2^INIT_PATHS_SIZE_ORDER buckets */
28#define INIT_PATHS_SIZE_ORDER 2 22#define INIT_PATHS_SIZE_ORDER 2
29 23
@@ -322,9 +316,8 @@ static void mesh_path_move_to_queue(struct mesh_path *gate_mpath,
322 316
323 spin_lock_irqsave(&gate_mpath->frame_queue.lock, flags); 317 spin_lock_irqsave(&gate_mpath->frame_queue.lock, flags);
324 skb_queue_splice(&gateq, &gate_mpath->frame_queue); 318 skb_queue_splice(&gateq, &gate_mpath->frame_queue);
325 mpath_dbg("Mpath queue for gate %pM has %d frames\n", 319 mpath_dbg(gate_mpath->sdata, "Mpath queue for gate %pM has %d frames\n",
326 gate_mpath->dst, 320 gate_mpath->dst, skb_queue_len(&gate_mpath->frame_queue));
327 skb_queue_len(&gate_mpath->frame_queue));
328 spin_unlock_irqrestore(&gate_mpath->frame_queue.lock, flags); 321 spin_unlock_irqrestore(&gate_mpath->frame_queue.lock, flags);
329 322
330 if (!copy) 323 if (!copy)
@@ -446,9 +439,9 @@ int mesh_path_add_gate(struct mesh_path *mpath)
446 hlist_add_head_rcu(&new_gate->list, tbl->known_gates); 439 hlist_add_head_rcu(&new_gate->list, tbl->known_gates);
447 spin_unlock_bh(&tbl->gates_lock); 440 spin_unlock_bh(&tbl->gates_lock);
448 rcu_read_unlock(); 441 rcu_read_unlock();
449 mpath_dbg("Mesh path (%s): Recorded new gate: %pM. %d known gates\n", 442 mpath_dbg(mpath->sdata,
450 mpath->sdata->name, mpath->dst, 443 "Mesh path: Recorded new gate: %pM. %d known gates\n",
451 mpath->sdata->u.mesh.num_gates); 444 mpath->dst, mpath->sdata->u.mesh.num_gates);
452 return 0; 445 return 0;
453err_rcu: 446err_rcu:
454 rcu_read_unlock(); 447 rcu_read_unlock();
@@ -477,8 +470,8 @@ static int mesh_gate_del(struct mesh_table *tbl, struct mesh_path *mpath)
477 spin_unlock_bh(&tbl->gates_lock); 470 spin_unlock_bh(&tbl->gates_lock);
478 mpath->sdata->u.mesh.num_gates--; 471 mpath->sdata->u.mesh.num_gates--;
479 mpath->is_gate = false; 472 mpath->is_gate = false;
480 mpath_dbg("Mesh path (%s): Deleted gate: %pM. " 473 mpath_dbg(mpath->sdata,
481 "%d known gates\n", mpath->sdata->name, 474 "Mesh path: Deleted gate: %pM. %d known gates\n",
482 mpath->dst, mpath->sdata->u.mesh.num_gates); 475 mpath->dst, mpath->sdata->u.mesh.num_gates);
483 break; 476 break;
484 } 477 }
@@ -785,7 +778,7 @@ static void __mesh_path_del(struct mesh_table *tbl, struct mpath_node *node)
785/** 778/**
786 * mesh_path_flush_by_nexthop - Deletes mesh paths if their next hop matches 779 * mesh_path_flush_by_nexthop - Deletes mesh paths if their next hop matches
787 * 780 *
788 * @sta - mesh peer to match 781 * @sta: mesh peer to match
789 * 782 *
790 * RCU notes: this function is called when a mesh plink transitions from 783 * RCU notes: this function is called when a mesh plink transitions from
791 * PLINK_ESTAB to any other state, since PLINK_ESTAB state is the only one that 784 * PLINK_ESTAB to any other state, since PLINK_ESTAB state is the only one that
@@ -840,7 +833,7 @@ static void table_flush_by_iface(struct mesh_table *tbl,
840 * 833 *
841 * This function deletes both mesh paths as well as mesh portal paths. 834 * This function deletes both mesh paths as well as mesh portal paths.
842 * 835 *
843 * @sdata - interface data to match 836 * @sdata: interface data to match
844 * 837 *
845 */ 838 */
846void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata) 839void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata)
@@ -946,19 +939,20 @@ int mesh_path_send_to_gates(struct mesh_path *mpath)
946 continue; 939 continue;
947 940
948 if (gate->mpath->flags & MESH_PATH_ACTIVE) { 941 if (gate->mpath->flags & MESH_PATH_ACTIVE) {
949 mpath_dbg("Forwarding to %pM\n", gate->mpath->dst); 942 mpath_dbg(sdata, "Forwarding to %pM\n", gate->mpath->dst);
950 mesh_path_move_to_queue(gate->mpath, from_mpath, copy); 943 mesh_path_move_to_queue(gate->mpath, from_mpath, copy);
951 from_mpath = gate->mpath; 944 from_mpath = gate->mpath;
952 copy = true; 945 copy = true;
953 } else { 946 } else {
954 mpath_dbg("Not forwarding %p\n", gate->mpath); 947 mpath_dbg(sdata,
955 mpath_dbg("flags %x\n", gate->mpath->flags); 948 "Not forwarding %p (flags %#x)\n",
949 gate->mpath, gate->mpath->flags);
956 } 950 }
957 } 951 }
958 952
959 hlist_for_each_entry_rcu(gate, n, known_gates, list) 953 hlist_for_each_entry_rcu(gate, n, known_gates, list)
960 if (gate->mpath->sdata == sdata) { 954 if (gate->mpath->sdata == sdata) {
961 mpath_dbg("Sending to %pM\n", gate->mpath->dst); 955 mpath_dbg(sdata, "Sending to %pM\n", gate->mpath->dst);
962 mesh_path_tx_pending(gate->mpath); 956 mesh_path_tx_pending(gate->mpath);
963 } 957 }
964 958
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index 60ef235c9d9..9ad74dd87a7 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -13,12 +13,6 @@
13#include "rate.h" 13#include "rate.h"
14#include "mesh.h" 14#include "mesh.h"
15 15
16#ifdef CONFIG_MAC80211_VERBOSE_MPL_DEBUG
17#define mpl_dbg(fmt, args...) printk(KERN_DEBUG fmt, ##args)
18#else
19#define mpl_dbg(fmt, args...) do { (void)(0); } while (0)
20#endif
21
22#define PLINK_GET_LLID(p) (p + 2) 16#define PLINK_GET_LLID(p) (p + 2)
23#define PLINK_GET_PLID(p) (p + 4) 17#define PLINK_GET_PLID(p) (p + 4)
24 18
@@ -105,7 +99,7 @@ static struct sta_info *mesh_plink_alloc(struct ieee80211_sub_if_data *sdata,
105 return sta; 99 return sta;
106} 100}
107 101
108/* 102/**
109 * mesh_set_ht_prot_mode - set correct HT protection mode 103 * mesh_set_ht_prot_mode - set correct HT protection mode
110 * 104 *
111 * Section 9.23.3.5 of IEEE 80211-2012 describes the protection rules for HT 105 * Section 9.23.3.5 of IEEE 80211-2012 describes the protection rules for HT
@@ -134,12 +128,14 @@ static u32 mesh_set_ht_prot_mode(struct ieee80211_sub_if_data *sdata)
134 128
135 switch (sta->ch_type) { 129 switch (sta->ch_type) {
136 case NL80211_CHAN_NO_HT: 130 case NL80211_CHAN_NO_HT:
137 mpl_dbg("mesh_plink %pM: nonHT sta (%pM) is present", 131 mpl_dbg(sdata,
132 "mesh_plink %pM: nonHT sta (%pM) is present\n",
138 sdata->vif.addr, sta->sta.addr); 133 sdata->vif.addr, sta->sta.addr);
139 non_ht_sta = true; 134 non_ht_sta = true;
140 goto out; 135 goto out;
141 case NL80211_CHAN_HT20: 136 case NL80211_CHAN_HT20:
142 mpl_dbg("mesh_plink %pM: HT20 sta (%pM) is present", 137 mpl_dbg(sdata,
138 "mesh_plink %pM: HT20 sta (%pM) is present\n",
143 sdata->vif.addr, sta->sta.addr); 139 sdata->vif.addr, sta->sta.addr);
144 ht20_sta = true; 140 ht20_sta = true;
145 default: 141 default:
@@ -160,7 +156,8 @@ out:
160 sdata->vif.bss_conf.ht_operation_mode = ht_opmode; 156 sdata->vif.bss_conf.ht_operation_mode = ht_opmode;
161 sdata->u.mesh.mshcfg.ht_opmode = ht_opmode; 157 sdata->u.mesh.mshcfg.ht_opmode = ht_opmode;
162 changed = BSS_CHANGED_HT; 158 changed = BSS_CHANGED_HT;
163 mpl_dbg("mesh_plink %pM: protection mode changed to %d", 159 mpl_dbg(sdata,
160 "mesh_plink %pM: protection mode changed to %d\n",
164 sdata->vif.addr, ht_opmode); 161 sdata->vif.addr, ht_opmode);
165 } 162 }
166 163
@@ -323,7 +320,8 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
323 return 0; 320 return 0;
324} 321}
325 322
326/* mesh_peer_init - initialize new mesh peer and return resulting sta_info 323/**
324 * mesh_peer_init - initialize new mesh peer and return resulting sta_info
327 * 325 *
328 * @sdata: local meshif 326 * @sdata: local meshif
329 * @addr: peer's address 327 * @addr: peer's address
@@ -437,7 +435,8 @@ static void mesh_plink_timer(unsigned long data)
437 spin_unlock_bh(&sta->lock); 435 spin_unlock_bh(&sta->lock);
438 return; 436 return;
439 } 437 }
440 mpl_dbg("Mesh plink timer for %pM fired on state %d\n", 438 mpl_dbg(sta->sdata,
439 "Mesh plink timer for %pM fired on state %d\n",
441 sta->sta.addr, sta->plink_state); 440 sta->sta.addr, sta->plink_state);
442 reason = 0; 441 reason = 0;
443 llid = sta->llid; 442 llid = sta->llid;
@@ -450,7 +449,8 @@ static void mesh_plink_timer(unsigned long data)
450 /* retry timer */ 449 /* retry timer */
451 if (sta->plink_retries < dot11MeshMaxRetries(sdata)) { 450 if (sta->plink_retries < dot11MeshMaxRetries(sdata)) {
452 u32 rand; 451 u32 rand;
453 mpl_dbg("Mesh plink for %pM (retry, timeout): %d %d\n", 452 mpl_dbg(sta->sdata,
453 "Mesh plink for %pM (retry, timeout): %d %d\n",
454 sta->sta.addr, sta->plink_retries, 454 sta->sta.addr, sta->plink_retries,
455 sta->plink_timeout); 455 sta->plink_timeout);
456 get_random_bytes(&rand, sizeof(u32)); 456 get_random_bytes(&rand, sizeof(u32));
@@ -530,7 +530,8 @@ int mesh_plink_open(struct sta_info *sta)
530 sta->plink_state = NL80211_PLINK_OPN_SNT; 530 sta->plink_state = NL80211_PLINK_OPN_SNT;
531 mesh_plink_timer_set(sta, dot11MeshRetryTimeout(sdata)); 531 mesh_plink_timer_set(sta, dot11MeshRetryTimeout(sdata));
532 spin_unlock_bh(&sta->lock); 532 spin_unlock_bh(&sta->lock);
533 mpl_dbg("Mesh plink: starting establishment with %pM\n", 533 mpl_dbg(sdata,
534 "Mesh plink: starting establishment with %pM\n",
534 sta->sta.addr); 535 sta->sta.addr);
535 536
536 return mesh_plink_frame_tx(sdata, WLAN_SP_MESH_PEERING_OPEN, 537 return mesh_plink_frame_tx(sdata, WLAN_SP_MESH_PEERING_OPEN,
@@ -565,7 +566,6 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
565 u8 *baseaddr; 566 u8 *baseaddr;
566 u32 changed = 0; 567 u32 changed = 0;
567 __le16 plid, llid, reason; 568 __le16 plid, llid, reason;
568#ifdef CONFIG_MAC80211_VERBOSE_MPL_DEBUG
569 static const char *mplstates[] = { 569 static const char *mplstates[] = {
570 [NL80211_PLINK_LISTEN] = "LISTEN", 570 [NL80211_PLINK_LISTEN] = "LISTEN",
571 [NL80211_PLINK_OPN_SNT] = "OPN-SNT", 571 [NL80211_PLINK_OPN_SNT] = "OPN-SNT",
@@ -575,14 +575,14 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
575 [NL80211_PLINK_HOLDING] = "HOLDING", 575 [NL80211_PLINK_HOLDING] = "HOLDING",
576 [NL80211_PLINK_BLOCKED] = "BLOCKED" 576 [NL80211_PLINK_BLOCKED] = "BLOCKED"
577 }; 577 };
578#endif
579 578
580 /* need action_code, aux */ 579 /* need action_code, aux */
581 if (len < IEEE80211_MIN_ACTION_SIZE + 3) 580 if (len < IEEE80211_MIN_ACTION_SIZE + 3)
582 return; 581 return;
583 582
584 if (is_multicast_ether_addr(mgmt->da)) { 583 if (is_multicast_ether_addr(mgmt->da)) {
585 mpl_dbg("Mesh plink: ignore frame from multicast address"); 584 mpl_dbg(sdata,
585 "Mesh plink: ignore frame from multicast address\n");
586 return; 586 return;
587 } 587 }
588 588
@@ -595,12 +595,14 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
595 } 595 }
596 ieee802_11_parse_elems(baseaddr, len - baselen, &elems); 596 ieee802_11_parse_elems(baseaddr, len - baselen, &elems);
597 if (!elems.peering) { 597 if (!elems.peering) {
598 mpl_dbg("Mesh plink: missing necessary peer link ie\n"); 598 mpl_dbg(sdata,
599 "Mesh plink: missing necessary peer link ie\n");
599 return; 600 return;
600 } 601 }
601 if (elems.rsn_len && 602 if (elems.rsn_len &&
602 sdata->u.mesh.security == IEEE80211_MESH_SEC_NONE) { 603 sdata->u.mesh.security == IEEE80211_MESH_SEC_NONE) {
603 mpl_dbg("Mesh plink: can't establish link with secure peer\n"); 604 mpl_dbg(sdata,
605 "Mesh plink: can't establish link with secure peer\n");
604 return; 606 return;
605 } 607 }
606 608
@@ -610,14 +612,15 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
610 (ftype == WLAN_SP_MESH_PEERING_CONFIRM && ie_len != 6) || 612 (ftype == WLAN_SP_MESH_PEERING_CONFIRM && ie_len != 6) ||
611 (ftype == WLAN_SP_MESH_PEERING_CLOSE && ie_len != 6 613 (ftype == WLAN_SP_MESH_PEERING_CLOSE && ie_len != 6
612 && ie_len != 8)) { 614 && ie_len != 8)) {
613 mpl_dbg("Mesh plink: incorrect plink ie length %d %d\n", 615 mpl_dbg(sdata,
614 ftype, ie_len); 616 "Mesh plink: incorrect plink ie length %d %d\n",
617 ftype, ie_len);
615 return; 618 return;
616 } 619 }
617 620
618 if (ftype != WLAN_SP_MESH_PEERING_CLOSE && 621 if (ftype != WLAN_SP_MESH_PEERING_CLOSE &&
619 (!elems.mesh_id || !elems.mesh_config)) { 622 (!elems.mesh_id || !elems.mesh_config)) {
620 mpl_dbg("Mesh plink: missing necessary ie\n"); 623 mpl_dbg(sdata, "Mesh plink: missing necessary ie\n");
621 return; 624 return;
622 } 625 }
623 /* Note the lines below are correct, the llid in the frame is the plid 626 /* Note the lines below are correct, the llid in the frame is the plid
@@ -632,21 +635,21 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
632 635
633 sta = sta_info_get(sdata, mgmt->sa); 636 sta = sta_info_get(sdata, mgmt->sa);
634 if (!sta && ftype != WLAN_SP_MESH_PEERING_OPEN) { 637 if (!sta && ftype != WLAN_SP_MESH_PEERING_OPEN) {
635 mpl_dbg("Mesh plink: cls or cnf from unknown peer\n"); 638 mpl_dbg(sdata, "Mesh plink: cls or cnf from unknown peer\n");
636 rcu_read_unlock(); 639 rcu_read_unlock();
637 return; 640 return;
638 } 641 }
639 642
640 if (ftype == WLAN_SP_MESH_PEERING_OPEN && 643 if (ftype == WLAN_SP_MESH_PEERING_OPEN &&
641 !rssi_threshold_check(sta, sdata)) { 644 !rssi_threshold_check(sta, sdata)) {
642 mpl_dbg("Mesh plink: %pM does not meet rssi threshold\n", 645 mpl_dbg(sdata, "Mesh plink: %pM does not meet rssi threshold\n",
643 mgmt->sa); 646 mgmt->sa);
644 rcu_read_unlock(); 647 rcu_read_unlock();
645 return; 648 return;
646 } 649 }
647 650
648 if (sta && !test_sta_flag(sta, WLAN_STA_AUTH)) { 651 if (sta && !test_sta_flag(sta, WLAN_STA_AUTH)) {
649 mpl_dbg("Mesh plink: Action frame from non-authed peer\n"); 652 mpl_dbg(sdata, "Mesh plink: Action frame from non-authed peer\n");
650 rcu_read_unlock(); 653 rcu_read_unlock();
651 return; 654 return;
652 } 655 }
@@ -683,7 +686,7 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
683 } else if (!sta) { 686 } else if (!sta) {
684 /* ftype == WLAN_SP_MESH_PEERING_OPEN */ 687 /* ftype == WLAN_SP_MESH_PEERING_OPEN */
685 if (!mesh_plink_free_count(sdata)) { 688 if (!mesh_plink_free_count(sdata)) {
686 mpl_dbg("Mesh plink error: no more free plinks\n"); 689 mpl_dbg(sdata, "Mesh plink error: no more free plinks\n");
687 rcu_read_unlock(); 690 rcu_read_unlock();
688 return; 691 return;
689 } 692 }
@@ -724,7 +727,7 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
724 event = CLS_ACPT; 727 event = CLS_ACPT;
725 break; 728 break;
726 default: 729 default:
727 mpl_dbg("Mesh plink: unknown frame subtype\n"); 730 mpl_dbg(sdata, "Mesh plink: unknown frame subtype\n");
728 rcu_read_unlock(); 731 rcu_read_unlock();
729 return; 732 return;
730 } 733 }
@@ -734,13 +737,14 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
734 /* allocate sta entry if necessary and update info */ 737 /* allocate sta entry if necessary and update info */
735 sta = mesh_peer_init(sdata, mgmt->sa, &elems); 738 sta = mesh_peer_init(sdata, mgmt->sa, &elems);
736 if (!sta) { 739 if (!sta) {
737 mpl_dbg("Mesh plink: failed to init peer!\n"); 740 mpl_dbg(sdata, "Mesh plink: failed to init peer!\n");
738 rcu_read_unlock(); 741 rcu_read_unlock();
739 return; 742 return;
740 } 743 }
741 } 744 }
742 745
743 mpl_dbg("Mesh plink (peer, state, llid, plid, event): %pM %s %d %d %d\n", 746 mpl_dbg(sdata,
747 "Mesh plink (peer, state, llid, plid, event): %pM %s %d %d %d\n",
744 mgmt->sa, mplstates[sta->plink_state], 748 mgmt->sa, mplstates[sta->plink_state],
745 le16_to_cpu(sta->llid), le16_to_cpu(sta->plid), 749 le16_to_cpu(sta->llid), le16_to_cpu(sta->plid),
746 event); 750 event);
@@ -851,7 +855,7 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
851 mesh_plink_inc_estab_count(sdata); 855 mesh_plink_inc_estab_count(sdata);
852 changed |= mesh_set_ht_prot_mode(sdata); 856 changed |= mesh_set_ht_prot_mode(sdata);
853 changed |= BSS_CHANGED_BEACON; 857 changed |= BSS_CHANGED_BEACON;
854 mpl_dbg("Mesh plink with %pM ESTABLISHED\n", 858 mpl_dbg(sdata, "Mesh plink with %pM ESTABLISHED\n",
855 sta->sta.addr); 859 sta->sta.addr);
856 break; 860 break;
857 default: 861 default:
@@ -887,7 +891,7 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
887 mesh_plink_inc_estab_count(sdata); 891 mesh_plink_inc_estab_count(sdata);
888 changed |= mesh_set_ht_prot_mode(sdata); 892 changed |= mesh_set_ht_prot_mode(sdata);
889 changed |= BSS_CHANGED_BEACON; 893 changed |= BSS_CHANGED_BEACON;
890 mpl_dbg("Mesh plink with %pM ESTABLISHED\n", 894 mpl_dbg(sdata, "Mesh plink with %pM ESTABLISHED\n",
891 sta->sta.addr); 895 sta->sta.addr);
892 mesh_plink_frame_tx(sdata, 896 mesh_plink_frame_tx(sdata,
893 WLAN_SP_MESH_PEERING_CONFIRM, 897 WLAN_SP_MESH_PEERING_CONFIRM,
diff --git a/net/mac80211/mesh_sync.c b/net/mac80211/mesh_sync.c
index 38d30e8ce6d..accfa00ffcd 100644
--- a/net/mac80211/mesh_sync.c
+++ b/net/mac80211/mesh_sync.c
@@ -12,13 +12,6 @@
12#include "mesh.h" 12#include "mesh.h"
13#include "driver-ops.h" 13#include "driver-ops.h"
14 14
15#ifdef CONFIG_MAC80211_VERBOSE_MESH_SYNC_DEBUG
16#define msync_dbg(fmt, args...) \
17 printk(KERN_DEBUG "Mesh sync (%s): " fmt "\n", sdata->name, ##args)
18#else
19#define msync_dbg(fmt, args...) do { (void)(0); } while (0)
20#endif
21
22/* This is not in the standard. It represents a tolerable tbtt drift below 15/* This is not in the standard. It represents a tolerable tbtt drift below
23 * which we do no TSF adjustment. 16 * which we do no TSF adjustment.
24 */ 17 */
@@ -65,14 +58,14 @@ void mesh_sync_adjust_tbtt(struct ieee80211_sub_if_data *sdata)
65 spin_lock_bh(&ifmsh->sync_offset_lock); 58 spin_lock_bh(&ifmsh->sync_offset_lock);
66 59
67 if (ifmsh->sync_offset_clockdrift_max < beacon_int_fraction) { 60 if (ifmsh->sync_offset_clockdrift_max < beacon_int_fraction) {
68 msync_dbg("TBTT : max clockdrift=%lld; adjusting", 61 msync_dbg(sdata, "TBTT : max clockdrift=%lld; adjusting\n",
69 (long long) ifmsh->sync_offset_clockdrift_max); 62 (long long) ifmsh->sync_offset_clockdrift_max);
70 tsfdelta = -ifmsh->sync_offset_clockdrift_max; 63 tsfdelta = -ifmsh->sync_offset_clockdrift_max;
71 ifmsh->sync_offset_clockdrift_max = 0; 64 ifmsh->sync_offset_clockdrift_max = 0;
72 } else { 65 } else {
73 msync_dbg("TBTT : max clockdrift=%lld; adjusting by %llu", 66 msync_dbg(sdata, "TBTT : max clockdrift=%lld; adjusting by %llu\n",
74 (long long) ifmsh->sync_offset_clockdrift_max, 67 (long long) ifmsh->sync_offset_clockdrift_max,
75 (unsigned long long) beacon_int_fraction); 68 (unsigned long long) beacon_int_fraction);
76 tsfdelta = -beacon_int_fraction; 69 tsfdelta = -beacon_int_fraction;
77 ifmsh->sync_offset_clockdrift_max -= beacon_int_fraction; 70 ifmsh->sync_offset_clockdrift_max -= beacon_int_fraction;
78 } 71 }
@@ -120,7 +113,7 @@ static void mesh_sync_offset_rx_bcn_presp(struct ieee80211_sub_if_data *sdata,
120 113
121 if (elems->mesh_config && mesh_peer_tbtt_adjusting(elems)) { 114 if (elems->mesh_config && mesh_peer_tbtt_adjusting(elems)) {
122 clear_sta_flag(sta, WLAN_STA_TOFFSET_KNOWN); 115 clear_sta_flag(sta, WLAN_STA_TOFFSET_KNOWN);
123 msync_dbg("STA %pM : is adjusting TBTT", sta->sta.addr); 116 msync_dbg(sdata, "STA %pM : is adjusting TBTT\n", sta->sta.addr);
124 goto no_sync; 117 goto no_sync;
125 } 118 }
126 119
@@ -169,7 +162,8 @@ static void mesh_sync_offset_rx_bcn_presp(struct ieee80211_sub_if_data *sdata,
169 if (test_sta_flag(sta, WLAN_STA_TOFFSET_KNOWN)) { 162 if (test_sta_flag(sta, WLAN_STA_TOFFSET_KNOWN)) {
170 s64 t_clockdrift = sta->t_offset_setpoint 163 s64 t_clockdrift = sta->t_offset_setpoint
171 - sta->t_offset; 164 - sta->t_offset;
172 msync_dbg("STA %pM : sta->t_offset=%lld, sta->t_offset_setpoint=%lld, t_clockdrift=%lld", 165 msync_dbg(sdata,
166 "STA %pM : sta->t_offset=%lld, sta->t_offset_setpoint=%lld, t_clockdrift=%lld\n",
173 sta->sta.addr, 167 sta->sta.addr,
174 (long long) sta->t_offset, 168 (long long) sta->t_offset,
175 (long long) 169 (long long)
@@ -178,7 +172,8 @@ static void mesh_sync_offset_rx_bcn_presp(struct ieee80211_sub_if_data *sdata,
178 172
179 if (t_clockdrift > TOFFSET_MAXIMUM_ADJUSTMENT || 173 if (t_clockdrift > TOFFSET_MAXIMUM_ADJUSTMENT ||
180 t_clockdrift < -TOFFSET_MAXIMUM_ADJUSTMENT) { 174 t_clockdrift < -TOFFSET_MAXIMUM_ADJUSTMENT) {
181 msync_dbg("STA %pM : t_clockdrift=%lld too large, setpoint reset", 175 msync_dbg(sdata,
176 "STA %pM : t_clockdrift=%lld too large, setpoint reset\n",
182 sta->sta.addr, 177 sta->sta.addr,
183 (long long) t_clockdrift); 178 (long long) t_clockdrift);
184 clear_sta_flag(sta, WLAN_STA_TOFFSET_KNOWN); 179 clear_sta_flag(sta, WLAN_STA_TOFFSET_KNOWN);
@@ -197,8 +192,8 @@ static void mesh_sync_offset_rx_bcn_presp(struct ieee80211_sub_if_data *sdata,
197 } else { 192 } else {
198 sta->t_offset_setpoint = sta->t_offset - TOFFSET_SET_MARGIN; 193 sta->t_offset_setpoint = sta->t_offset - TOFFSET_SET_MARGIN;
199 set_sta_flag(sta, WLAN_STA_TOFFSET_KNOWN); 194 set_sta_flag(sta, WLAN_STA_TOFFSET_KNOWN);
200 msync_dbg("STA %pM : offset was invalid, " 195 msync_dbg(sdata,
201 " sta->t_offset=%lld", 196 "STA %pM : offset was invalid, sta->t_offset=%lld\n",
202 sta->sta.addr, 197 sta->sta.addr,
203 (long long) sta->t_offset); 198 (long long) sta->t_offset);
204 rcu_read_unlock(); 199 rcu_read_unlock();
@@ -226,17 +221,15 @@ static void mesh_sync_offset_adjust_tbtt(struct ieee80211_sub_if_data *sdata)
226 * to the driver tsf setter, we punt 221 * to the driver tsf setter, we punt
227 * the tsf adjustment to the mesh tasklet 222 * the tsf adjustment to the mesh tasklet
228 */ 223 */
229 msync_dbg("TBTT : kicking off TBTT " 224 msync_dbg(sdata,
230 "adjustment with " 225 "TBTT : kicking off TBTT adjustment with clockdrift_max=%lld\n",
231 "clockdrift_max=%lld", 226 ifmsh->sync_offset_clockdrift_max);
232 ifmsh->sync_offset_clockdrift_max);
233 set_bit(MESH_WORK_DRIFT_ADJUST, 227 set_bit(MESH_WORK_DRIFT_ADJUST,
234 &ifmsh->wrkq_flags); 228 &ifmsh->wrkq_flags);
235 } else { 229 } else {
236 msync_dbg("TBTT : max clockdrift=%lld; " 230 msync_dbg(sdata,
237 "too small to adjust", 231 "TBTT : max clockdrift=%lld; too small to adjust\n",
238 (long long) 232 (long long)ifmsh->sync_offset_clockdrift_max);
239 ifmsh->sync_offset_clockdrift_max);
240 ifmsh->sync_offset_clockdrift_max = 0; 233 ifmsh->sync_offset_clockdrift_max = 0;
241 } 234 }
242 spin_unlock_bh(&ifmsh->sync_offset_lock); 235 spin_unlock_bh(&ifmsh->sync_offset_lock);
@@ -268,7 +261,7 @@ static void mesh_sync_vendor_rx_bcn_presp(struct ieee80211_sub_if_data *sdata,
268 const u8 *oui; 261 const u8 *oui;
269 262
270 WARN_ON(sdata->u.mesh.mesh_sp_id != IEEE80211_SYNC_METHOD_VENDOR); 263 WARN_ON(sdata->u.mesh.mesh_sp_id != IEEE80211_SYNC_METHOD_VENDOR);
271 msync_dbg("called mesh_sync_vendor_rx_bcn_presp"); 264 msync_dbg(sdata, "called mesh_sync_vendor_rx_bcn_presp\n");
272 oui = mesh_get_vendor_oui(sdata); 265 oui = mesh_get_vendor_oui(sdata);
273 /* here you would implement the vendor offset tracking for this oui */ 266 /* here you would implement the vendor offset tracking for this oui */
274} 267}
@@ -278,7 +271,7 @@ static void mesh_sync_vendor_adjust_tbtt(struct ieee80211_sub_if_data *sdata)
278 const u8 *oui; 271 const u8 *oui;
279 272
280 WARN_ON(sdata->u.mesh.mesh_sp_id != IEEE80211_SYNC_METHOD_VENDOR); 273 WARN_ON(sdata->u.mesh.mesh_sp_id != IEEE80211_SYNC_METHOD_VENDOR);
281 msync_dbg("called mesh_sync_vendor_adjust_tbtt"); 274 msync_dbg(sdata, "called mesh_sync_vendor_adjust_tbtt\n");
282 oui = mesh_get_vendor_oui(sdata); 275 oui = mesh_get_vendor_oui(sdata);
283 /* here you would implement the vendor tsf adjustment for this oui */ 276 /* here you would implement the vendor tsf adjustment for this oui */
284} 277}
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 0db5d34a06b..aa69a331f37 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -258,12 +258,11 @@ static int ieee80211_compatible_rates(const u8 *supp_rates, int supp_rates_len,
258} 258}
259 259
260static void ieee80211_add_ht_ie(struct ieee80211_sub_if_data *sdata, 260static void ieee80211_add_ht_ie(struct ieee80211_sub_if_data *sdata,
261 struct sk_buff *skb, const u8 *ht_oper_ie, 261 struct sk_buff *skb, u8 ap_ht_param,
262 struct ieee80211_supported_band *sband, 262 struct ieee80211_supported_band *sband,
263 struct ieee80211_channel *channel, 263 struct ieee80211_channel *channel,
264 enum ieee80211_smps_mode smps) 264 enum ieee80211_smps_mode smps)
265{ 265{
266 struct ieee80211_ht_operation *ht_oper;
267 u8 *pos; 266 u8 *pos;
268 u32 flags = channel->flags; 267 u32 flags = channel->flags;
269 u16 cap; 268 u16 cap;
@@ -271,21 +270,13 @@ static void ieee80211_add_ht_ie(struct ieee80211_sub_if_data *sdata,
271 270
272 BUILD_BUG_ON(sizeof(ht_cap) != sizeof(sband->ht_cap)); 271 BUILD_BUG_ON(sizeof(ht_cap) != sizeof(sband->ht_cap));
273 272
274 if (!ht_oper_ie)
275 return;
276
277 if (ht_oper_ie[1] < sizeof(struct ieee80211_ht_operation))
278 return;
279
280 memcpy(&ht_cap, &sband->ht_cap, sizeof(ht_cap)); 273 memcpy(&ht_cap, &sband->ht_cap, sizeof(ht_cap));
281 ieee80211_apply_htcap_overrides(sdata, &ht_cap); 274 ieee80211_apply_htcap_overrides(sdata, &ht_cap);
282 275
283 ht_oper = (struct ieee80211_ht_operation *)(ht_oper_ie + 2);
284
285 /* determine capability flags */ 276 /* determine capability flags */
286 cap = ht_cap.cap; 277 cap = ht_cap.cap;
287 278
288 switch (ht_oper->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) { 279 switch (ap_ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) {
289 case IEEE80211_HT_PARAM_CHA_SEC_ABOVE: 280 case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
290 if (flags & IEEE80211_CHAN_NO_HT40PLUS) { 281 if (flags & IEEE80211_CHAN_NO_HT40PLUS) {
291 cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40; 282 cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
@@ -509,7 +500,7 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
509 } 500 }
510 501
511 if (!(ifmgd->flags & IEEE80211_STA_DISABLE_11N)) 502 if (!(ifmgd->flags & IEEE80211_STA_DISABLE_11N))
512 ieee80211_add_ht_ie(sdata, skb, assoc_data->ht_operation_ie, 503 ieee80211_add_ht_ie(sdata, skb, assoc_data->ap_ht_param,
513 sband, local->oper_channel, ifmgd->ap_smps); 504 sband, local->oper_channel, ifmgd->ap_smps);
514 505
515 /* if present, add any custom non-vendor IEs that go after HT */ 506 /* if present, add any custom non-vendor IEs that go after HT */
@@ -939,11 +930,6 @@ void ieee80211_recalc_ps(struct ieee80211_local *local, s32 latency)
939 return; 930 return;
940 } 931 }
941 932
942 if (!list_empty(&local->work_list)) {
943 local->ps_sdata = NULL;
944 goto change;
945 }
946
947 list_for_each_entry(sdata, &local->interfaces, list) { 933 list_for_each_entry(sdata, &local->interfaces, list) {
948 if (!ieee80211_sdata_running(sdata)) 934 if (!ieee80211_sdata_running(sdata))
949 continue; 935 continue;
@@ -1016,7 +1002,6 @@ void ieee80211_recalc_ps(struct ieee80211_local *local, s32 latency)
1016 local->ps_sdata = NULL; 1002 local->ps_sdata = NULL;
1017 } 1003 }
1018 1004
1019 change:
1020 ieee80211_change_ps(local); 1005 ieee80211_change_ps(local);
1021} 1006}
1022 1007
@@ -1156,7 +1141,7 @@ static void ieee80211_sta_wmm_params(struct ieee80211_local *local,
1156 1141
1157 memset(&params, 0, sizeof(params)); 1142 memset(&params, 0, sizeof(params));
1158 1143
1159 local->wmm_acm = 0; 1144 sdata->wmm_acm = 0;
1160 for (; left >= 4; left -= 4, pos += 4) { 1145 for (; left >= 4; left -= 4, pos += 4) {
1161 int aci = (pos[0] >> 5) & 0x03; 1146 int aci = (pos[0] >> 5) & 0x03;
1162 int acm = (pos[0] >> 4) & 0x01; 1147 int acm = (pos[0] >> 4) & 0x01;
@@ -1167,21 +1152,21 @@ static void ieee80211_sta_wmm_params(struct ieee80211_local *local,
1167 case 1: /* AC_BK */ 1152 case 1: /* AC_BK */
1168 queue = 3; 1153 queue = 3;
1169 if (acm) 1154 if (acm)
1170 local->wmm_acm |= BIT(1) | BIT(2); /* BK/- */ 1155 sdata->wmm_acm |= BIT(1) | BIT(2); /* BK/- */
1171 if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK) 1156 if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
1172 uapsd = true; 1157 uapsd = true;
1173 break; 1158 break;
1174 case 2: /* AC_VI */ 1159 case 2: /* AC_VI */
1175 queue = 1; 1160 queue = 1;
1176 if (acm) 1161 if (acm)
1177 local->wmm_acm |= BIT(4) | BIT(5); /* CL/VI */ 1162 sdata->wmm_acm |= BIT(4) | BIT(5); /* CL/VI */
1178 if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI) 1163 if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
1179 uapsd = true; 1164 uapsd = true;
1180 break; 1165 break;
1181 case 3: /* AC_VO */ 1166 case 3: /* AC_VO */
1182 queue = 0; 1167 queue = 0;
1183 if (acm) 1168 if (acm)
1184 local->wmm_acm |= BIT(6) | BIT(7); /* VO/NC */ 1169 sdata->wmm_acm |= BIT(6) | BIT(7); /* VO/NC */
1185 if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO) 1170 if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
1186 uapsd = true; 1171 uapsd = true;
1187 break; 1172 break;
@@ -1189,7 +1174,7 @@ static void ieee80211_sta_wmm_params(struct ieee80211_local *local,
1189 default: 1174 default:
1190 queue = 2; 1175 queue = 2;
1191 if (acm) 1176 if (acm)
1192 local->wmm_acm |= BIT(0) | BIT(3); /* BE/EE */ 1177 sdata->wmm_acm |= BIT(0) | BIT(3); /* BE/EE */
1193 if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE) 1178 if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
1194 uapsd = true; 1179 uapsd = true;
1195 break; 1180 break;
@@ -1201,19 +1186,16 @@ static void ieee80211_sta_wmm_params(struct ieee80211_local *local,
1201 params.txop = get_unaligned_le16(pos + 2); 1186 params.txop = get_unaligned_le16(pos + 2);
1202 params.uapsd = uapsd; 1187 params.uapsd = uapsd;
1203 1188
1204#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 1189 mlme_dbg(sdata,
1205 wiphy_debug(local->hw.wiphy, 1190 "WMM queue=%d aci=%d acm=%d aifs=%d cWmin=%d cWmax=%d txop=%d uapsd=%d\n",
1206 "WMM queue=%d aci=%d acm=%d aifs=%d " 1191 queue, aci, acm,
1207 "cWmin=%d cWmax=%d txop=%d uapsd=%d\n", 1192 params.aifs, params.cw_min, params.cw_max,
1208 queue, aci, acm, 1193 params.txop, params.uapsd);
1209 params.aifs, params.cw_min, params.cw_max,
1210 params.txop, params.uapsd);
1211#endif
1212 sdata->tx_conf[queue] = params; 1194 sdata->tx_conf[queue] = params;
1213 if (drv_conf_tx(local, sdata, queue, &params)) 1195 if (drv_conf_tx(local, sdata, queue, &params))
1214 wiphy_debug(local->hw.wiphy, 1196 sdata_err(sdata,
1215 "failed to set TX queue parameters for queue %d\n", 1197 "failed to set TX queue parameters for queue %d\n",
1216 queue); 1198 queue);
1217 } 1199 }
1218 1200
1219 /* enable WMM or activate new settings */ 1201 /* enable WMM or activate new settings */
@@ -1290,7 +1272,7 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata,
1290 1272
1291 bss_info_changed |= BSS_CHANGED_BEACON_INT; 1273 bss_info_changed |= BSS_CHANGED_BEACON_INT;
1292 bss_info_changed |= ieee80211_handle_bss_capability(sdata, 1274 bss_info_changed |= ieee80211_handle_bss_capability(sdata,
1293 cbss->capability, bss->has_erp_value, bss->erp_value); 1275 bss_conf->assoc_capability, bss->has_erp_value, bss->erp_value);
1294 1276
1295 sdata->u.mgd.beacon_timeout = usecs_to_jiffies(ieee80211_tu_to_usec( 1277 sdata->u.mgd.beacon_timeout = usecs_to_jiffies(ieee80211_tu_to_usec(
1296 IEEE80211_BEACON_LOSS_COUNT * bss_conf->beacon_int)); 1278 IEEE80211_BEACON_LOSS_COUNT * bss_conf->beacon_int));
@@ -1581,11 +1563,12 @@ static void ieee80211_mgd_probe_ap(struct ieee80211_sub_if_data *sdata,
1581 goto out; 1563 goto out;
1582 } 1564 }
1583 1565
1584#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
1585 if (beacon) 1566 if (beacon)
1586 net_dbg_ratelimited("%s: detected beacon loss from AP - sending probe request\n", 1567 mlme_dbg_ratelimited(sdata,
1587 sdata->name); 1568 "detected beacon loss from AP - sending probe request\n");
1588#endif 1569
1570 ieee80211_cqm_rssi_notify(&sdata->vif,
1571 NL80211_CQM_RSSI_BEACON_LOSS_EVENT, GFP_KERNEL);
1589 1572
1590 /* 1573 /*
1591 * The driver/our work has already reported this event or the 1574 * The driver/our work has already reported this event or the
@@ -1668,8 +1651,7 @@ static void __ieee80211_connection_loss(struct ieee80211_sub_if_data *sdata)
1668 1651
1669 memcpy(bssid, ifmgd->associated->bssid, ETH_ALEN); 1652 memcpy(bssid, ifmgd->associated->bssid, ETH_ALEN);
1670 1653
1671 printk(KERN_DEBUG "%s: Connection to AP %pM lost.\n", 1654 sdata_info(sdata, "Connection to AP %pM lost\n", bssid);
1672 sdata->name, bssid);
1673 1655
1674 ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH, 1656 ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH,
1675 WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY, 1657 WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY,
@@ -1803,9 +1785,10 @@ ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata,
1803 return RX_MGMT_NONE; 1785 return RX_MGMT_NONE;
1804 1786
1805 if (status_code != WLAN_STATUS_SUCCESS) { 1787 if (status_code != WLAN_STATUS_SUCCESS) {
1806 printk(KERN_DEBUG "%s: %pM denied authentication (status %d)\n", 1788 sdata_info(sdata, "%pM denied authentication (status %d)\n",
1807 sdata->name, mgmt->sa, status_code); 1789 mgmt->sa, status_code);
1808 goto out; 1790 ieee80211_destroy_auth_data(sdata, false);
1791 return RX_MGMT_CFG80211_RX_AUTH;
1809 } 1792 }
1810 1793
1811 switch (ifmgd->auth_data->algorithm) { 1794 switch (ifmgd->auth_data->algorithm) {
@@ -1826,8 +1809,7 @@ ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata,
1826 return RX_MGMT_NONE; 1809 return RX_MGMT_NONE;
1827 } 1810 }
1828 1811
1829 printk(KERN_DEBUG "%s: authenticated\n", sdata->name); 1812 sdata_info(sdata, "authenticated\n");
1830 out:
1831 ifmgd->auth_data->done = true; 1813 ifmgd->auth_data->done = true;
1832 ifmgd->auth_data->timeout = jiffies + IEEE80211_AUTH_WAIT_ASSOC; 1814 ifmgd->auth_data->timeout = jiffies + IEEE80211_AUTH_WAIT_ASSOC;
1833 run_again(ifmgd, ifmgd->auth_data->timeout); 1815 run_again(ifmgd, ifmgd->auth_data->timeout);
@@ -1840,8 +1822,7 @@ ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata,
1840 goto out_err; 1822 goto out_err;
1841 } 1823 }
1842 if (sta_info_move_state(sta, IEEE80211_STA_AUTH)) { 1824 if (sta_info_move_state(sta, IEEE80211_STA_AUTH)) {
1843 printk(KERN_DEBUG "%s: failed moving %pM to auth\n", 1825 sdata_info(sdata, "failed moving %pM to auth\n", bssid);
1844 sdata->name, bssid);
1845 goto out_err; 1826 goto out_err;
1846 } 1827 }
1847 mutex_unlock(&sdata->local->sta_mtx); 1828 mutex_unlock(&sdata->local->sta_mtx);
@@ -1875,8 +1856,8 @@ ieee80211_rx_mgmt_deauth(struct ieee80211_sub_if_data *sdata,
1875 1856
1876 reason_code = le16_to_cpu(mgmt->u.deauth.reason_code); 1857 reason_code = le16_to_cpu(mgmt->u.deauth.reason_code);
1877 1858
1878 printk(KERN_DEBUG "%s: deauthenticated from %pM (Reason: %u)\n", 1859 sdata_info(sdata, "deauthenticated from %pM (Reason: %u)\n",
1879 sdata->name, bssid, reason_code); 1860 bssid, reason_code);
1880 1861
1881 ieee80211_set_disassoc(sdata, 0, 0, false, NULL); 1862 ieee80211_set_disassoc(sdata, 0, 0, false, NULL);
1882 1863
@@ -1906,8 +1887,8 @@ ieee80211_rx_mgmt_disassoc(struct ieee80211_sub_if_data *sdata,
1906 1887
1907 reason_code = le16_to_cpu(mgmt->u.disassoc.reason_code); 1888 reason_code = le16_to_cpu(mgmt->u.disassoc.reason_code);
1908 1889
1909 printk(KERN_DEBUG "%s: disassociated from %pM (Reason: %u)\n", 1890 sdata_info(sdata, "disassociated from %pM (Reason: %u)\n",
1910 sdata->name, mgmt->sa, reason_code); 1891 mgmt->sa, reason_code);
1911 1892
1912 ieee80211_set_disassoc(sdata, 0, 0, false, NULL); 1893 ieee80211_set_disassoc(sdata, 0, 0, false, NULL);
1913 1894
@@ -1999,17 +1980,15 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
1999 capab_info = le16_to_cpu(mgmt->u.assoc_resp.capab_info); 1980 capab_info = le16_to_cpu(mgmt->u.assoc_resp.capab_info);
2000 1981
2001 if ((aid & (BIT(15) | BIT(14))) != (BIT(15) | BIT(14))) 1982 if ((aid & (BIT(15) | BIT(14))) != (BIT(15) | BIT(14)))
2002 printk(KERN_DEBUG 1983 sdata_info(sdata, "invalid AID value 0x%x; bits 15:14 not set\n",
2003 "%s: invalid AID value 0x%x; bits 15:14 not set\n", 1984 aid);
2004 sdata->name, aid);
2005 aid &= ~(BIT(15) | BIT(14)); 1985 aid &= ~(BIT(15) | BIT(14));
2006 1986
2007 ifmgd->broken_ap = false; 1987 ifmgd->broken_ap = false;
2008 1988
2009 if (aid == 0 || aid > IEEE80211_MAX_AID) { 1989 if (aid == 0 || aid > IEEE80211_MAX_AID) {
2010 printk(KERN_DEBUG 1990 sdata_info(sdata, "invalid AID value %d (out of range), turn off PS\n",
2011 "%s: invalid AID value %d (out of range), turn off PS\n", 1991 aid);
2012 sdata->name, aid);
2013 aid = 0; 1992 aid = 0;
2014 ifmgd->broken_ap = true; 1993 ifmgd->broken_ap = true;
2015 } 1994 }
@@ -2018,8 +1997,7 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
2018 ieee802_11_parse_elems(pos, len - (pos - (u8 *) mgmt), &elems); 1997 ieee802_11_parse_elems(pos, len - (pos - (u8 *) mgmt), &elems);
2019 1998
2020 if (!elems.supp_rates) { 1999 if (!elems.supp_rates) {
2021 printk(KERN_DEBUG "%s: no SuppRates element in AssocResp\n", 2000 sdata_info(sdata, "no SuppRates element in AssocResp\n");
2022 sdata->name);
2023 return false; 2001 return false;
2024 } 2002 }
2025 2003
@@ -2059,9 +2037,9 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
2059 if (!err && !(ifmgd->flags & IEEE80211_STA_CONTROL_PORT)) 2037 if (!err && !(ifmgd->flags & IEEE80211_STA_CONTROL_PORT))
2060 err = sta_info_move_state(sta, IEEE80211_STA_AUTHORIZED); 2038 err = sta_info_move_state(sta, IEEE80211_STA_AUTHORIZED);
2061 if (err) { 2039 if (err) {
2062 printk(KERN_DEBUG 2040 sdata_info(sdata,
2063 "%s: failed to move station %pM to desired state\n", 2041 "failed to move station %pM to desired state\n",
2064 sdata->name, sta->sta.addr); 2042 sta->sta.addr);
2065 WARN_ON(__sta_info_destroy(sta)); 2043 WARN_ON(__sta_info_destroy(sta));
2066 mutex_unlock(&sdata->local->sta_mtx); 2044 mutex_unlock(&sdata->local->sta_mtx);
2067 return false; 2045 return false;
@@ -2144,10 +2122,10 @@ ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
2144 status_code = le16_to_cpu(mgmt->u.assoc_resp.status_code); 2122 status_code = le16_to_cpu(mgmt->u.assoc_resp.status_code);
2145 aid = le16_to_cpu(mgmt->u.assoc_resp.aid); 2123 aid = le16_to_cpu(mgmt->u.assoc_resp.aid);
2146 2124
2147 printk(KERN_DEBUG "%s: RX %sssocResp from %pM (capab=0x%x " 2125 sdata_info(sdata,
2148 "status=%d aid=%d)\n", 2126 "RX %sssocResp from %pM (capab=0x%x status=%d aid=%d)\n",
2149 sdata->name, reassoc ? "Rea" : "A", mgmt->sa, 2127 reassoc ? "Rea" : "A", mgmt->sa,
2150 capab_info, status_code, (u16)(aid & ~(BIT(15) | BIT(14)))); 2128 capab_info, status_code, (u16)(aid & ~(BIT(15) | BIT(14))));
2151 2129
2152 pos = mgmt->u.assoc_resp.variable; 2130 pos = mgmt->u.assoc_resp.variable;
2153 ieee802_11_parse_elems(pos, len - (pos - (u8 *) mgmt), &elems); 2131 ieee802_11_parse_elems(pos, len - (pos - (u8 *) mgmt), &elems);
@@ -2158,9 +2136,9 @@ ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
2158 u32 tu, ms; 2136 u32 tu, ms;
2159 tu = get_unaligned_le32(elems.timeout_int + 1); 2137 tu = get_unaligned_le32(elems.timeout_int + 1);
2160 ms = tu * 1024 / 1000; 2138 ms = tu * 1024 / 1000;
2161 printk(KERN_DEBUG "%s: %pM rejected association temporarily; " 2139 sdata_info(sdata,
2162 "comeback duration %u TU (%u ms)\n", 2140 "%pM rejected association temporarily; comeback duration %u TU (%u ms)\n",
2163 sdata->name, mgmt->sa, tu, ms); 2141 mgmt->sa, tu, ms);
2164 assoc_data->timeout = jiffies + msecs_to_jiffies(ms); 2142 assoc_data->timeout = jiffies + msecs_to_jiffies(ms);
2165 if (ms > IEEE80211_ASSOC_TIMEOUT) 2143 if (ms > IEEE80211_ASSOC_TIMEOUT)
2166 run_again(ifmgd, assoc_data->timeout); 2144 run_again(ifmgd, assoc_data->timeout);
@@ -2170,8 +2148,8 @@ ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
2170 *bss = assoc_data->bss; 2148 *bss = assoc_data->bss;
2171 2149
2172 if (status_code != WLAN_STATUS_SUCCESS) { 2150 if (status_code != WLAN_STATUS_SUCCESS) {
2173 printk(KERN_DEBUG "%s: %pM denied association (code=%d)\n", 2151 sdata_info(sdata, "%pM denied association (code=%d)\n",
2174 sdata->name, mgmt->sa, status_code); 2152 mgmt->sa, status_code);
2175 ieee80211_destroy_assoc_data(sdata, false); 2153 ieee80211_destroy_assoc_data(sdata, false);
2176 } else { 2154 } else {
2177 if (!ieee80211_assoc_success(sdata, *bss, mgmt, len)) { 2155 if (!ieee80211_assoc_success(sdata, *bss, mgmt, len)) {
@@ -2180,7 +2158,7 @@ ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
2180 cfg80211_put_bss(*bss); 2158 cfg80211_put_bss(*bss);
2181 return RX_MGMT_CFG80211_ASSOC_TIMEOUT; 2159 return RX_MGMT_CFG80211_ASSOC_TIMEOUT;
2182 } 2160 }
2183 printk(KERN_DEBUG "%s: associated\n", sdata->name); 2161 sdata_info(sdata, "associated\n");
2184 2162
2185 /* 2163 /*
2186 * destroy assoc_data afterwards, as otherwise an idle 2164 * destroy assoc_data afterwards, as otherwise an idle
@@ -2280,7 +2258,7 @@ static void ieee80211_rx_mgmt_probe_resp(struct ieee80211_sub_if_data *sdata,
2280 if (ifmgd->auth_data && !ifmgd->auth_data->bss->proberesp_ies && 2258 if (ifmgd->auth_data && !ifmgd->auth_data->bss->proberesp_ies &&
2281 ether_addr_equal(mgmt->bssid, ifmgd->auth_data->bss->bssid)) { 2259 ether_addr_equal(mgmt->bssid, ifmgd->auth_data->bss->bssid)) {
2282 /* got probe response, continue with auth */ 2260 /* got probe response, continue with auth */
2283 printk(KERN_DEBUG "%s: direct probe responded\n", sdata->name); 2261 sdata_info(sdata, "direct probe responded\n");
2284 ifmgd->auth_data->tries = 0; 2262 ifmgd->auth_data->tries = 0;
2285 ifmgd->auth_data->timeout = jiffies; 2263 ifmgd->auth_data->timeout = jiffies;
2286 run_again(ifmgd, ifmgd->auth_data->timeout); 2264 run_again(ifmgd, ifmgd->auth_data->timeout);
@@ -2416,10 +2394,8 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
2416 } 2394 }
2417 2395
2418 if (ifmgd->flags & IEEE80211_STA_BEACON_POLL) { 2396 if (ifmgd->flags & IEEE80211_STA_BEACON_POLL) {
2419#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 2397 mlme_dbg_ratelimited(sdata,
2420 net_dbg_ratelimited("%s: cancelling probereq poll due to a received beacon\n", 2398 "cancelling probereq poll due to a received beacon\n");
2421 sdata->name);
2422#endif
2423 mutex_lock(&local->mtx); 2399 mutex_lock(&local->mtx);
2424 ifmgd->flags &= ~IEEE80211_STA_BEACON_POLL; 2400 ifmgd->flags &= ~IEEE80211_STA_BEACON_POLL;
2425 ieee80211_run_deferred_scan(local); 2401 ieee80211_run_deferred_scan(local);
@@ -2642,8 +2618,8 @@ static int ieee80211_probe_auth(struct ieee80211_sub_if_data *sdata)
2642 auth_data->tries++; 2618 auth_data->tries++;
2643 2619
2644 if (auth_data->tries > IEEE80211_AUTH_MAX_TRIES) { 2620 if (auth_data->tries > IEEE80211_AUTH_MAX_TRIES) {
2645 printk(KERN_DEBUG "%s: authentication with %pM timed out\n", 2621 sdata_info(sdata, "authentication with %pM timed out\n",
2646 sdata->name, auth_data->bss->bssid); 2622 auth_data->bss->bssid);
2647 2623
2648 /* 2624 /*
2649 * Most likely AP is not in the range so remove the 2625 * Most likely AP is not in the range so remove the
@@ -2655,9 +2631,9 @@ static int ieee80211_probe_auth(struct ieee80211_sub_if_data *sdata)
2655 } 2631 }
2656 2632
2657 if (auth_data->bss->proberesp_ies) { 2633 if (auth_data->bss->proberesp_ies) {
2658 printk(KERN_DEBUG "%s: send auth to %pM (try %d/%d)\n", 2634 sdata_info(sdata, "send auth to %pM (try %d/%d)\n",
2659 sdata->name, auth_data->bss->bssid, auth_data->tries, 2635 auth_data->bss->bssid, auth_data->tries,
2660 IEEE80211_AUTH_MAX_TRIES); 2636 IEEE80211_AUTH_MAX_TRIES);
2661 2637
2662 auth_data->expected_transaction = 2; 2638 auth_data->expected_transaction = 2;
2663 ieee80211_send_auth(sdata, 1, auth_data->algorithm, 2639 ieee80211_send_auth(sdata, 1, auth_data->algorithm,
@@ -2667,9 +2643,9 @@ static int ieee80211_probe_auth(struct ieee80211_sub_if_data *sdata)
2667 } else { 2643 } else {
2668 const u8 *ssidie; 2644 const u8 *ssidie;
2669 2645
2670 printk(KERN_DEBUG "%s: direct probe to %pM (try %d/%i)\n", 2646 sdata_info(sdata, "direct probe to %pM (try %d/%i)\n",
2671 sdata->name, auth_data->bss->bssid, auth_data->tries, 2647 auth_data->bss->bssid, auth_data->tries,
2672 IEEE80211_AUTH_MAX_TRIES); 2648 IEEE80211_AUTH_MAX_TRIES);
2673 2649
2674 ssidie = ieee80211_bss_get_ie(auth_data->bss, WLAN_EID_SSID); 2650 ssidie = ieee80211_bss_get_ie(auth_data->bss, WLAN_EID_SSID);
2675 if (!ssidie) 2651 if (!ssidie)
@@ -2697,8 +2673,8 @@ static int ieee80211_do_assoc(struct ieee80211_sub_if_data *sdata)
2697 2673
2698 assoc_data->tries++; 2674 assoc_data->tries++;
2699 if (assoc_data->tries > IEEE80211_ASSOC_MAX_TRIES) { 2675 if (assoc_data->tries > IEEE80211_ASSOC_MAX_TRIES) {
2700 printk(KERN_DEBUG "%s: association with %pM timed out\n", 2676 sdata_info(sdata, "association with %pM timed out\n",
2701 sdata->name, assoc_data->bss->bssid); 2677 assoc_data->bss->bssid);
2702 2678
2703 /* 2679 /*
2704 * Most likely AP is not in the range so remove the 2680 * Most likely AP is not in the range so remove the
@@ -2709,9 +2685,9 @@ static int ieee80211_do_assoc(struct ieee80211_sub_if_data *sdata)
2709 return -ETIMEDOUT; 2685 return -ETIMEDOUT;
2710 } 2686 }
2711 2687
2712 printk(KERN_DEBUG "%s: associate with %pM (try %d/%d)\n", 2688 sdata_info(sdata, "associate with %pM (try %d/%d)\n",
2713 sdata->name, assoc_data->bss->bssid, assoc_data->tries, 2689 assoc_data->bss->bssid, assoc_data->tries,
2714 IEEE80211_ASSOC_MAX_TRIES); 2690 IEEE80211_ASSOC_MAX_TRIES);
2715 ieee80211_send_assoc(sdata); 2691 ieee80211_send_assoc(sdata);
2716 2692
2717 assoc_data->timeout = jiffies + IEEE80211_ASSOC_TIMEOUT; 2693 assoc_data->timeout = jiffies + IEEE80211_ASSOC_TIMEOUT;
@@ -2784,45 +2760,31 @@ void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata)
2784 ieee80211_reset_ap_probe(sdata); 2760 ieee80211_reset_ap_probe(sdata);
2785 else if (ifmgd->nullfunc_failed) { 2761 else if (ifmgd->nullfunc_failed) {
2786 if (ifmgd->probe_send_count < max_tries) { 2762 if (ifmgd->probe_send_count < max_tries) {
2787#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 2763 mlme_dbg(sdata,
2788 wiphy_debug(local->hw.wiphy, 2764 "No ack for nullfunc frame to AP %pM, try %d/%i\n",
2789 "%s: No ack for nullfunc frame to" 2765 bssid, ifmgd->probe_send_count,
2790 " AP %pM, try %d/%i\n", 2766 max_tries);
2791 sdata->name, bssid,
2792 ifmgd->probe_send_count, max_tries);
2793#endif
2794 ieee80211_mgd_probe_ap_send(sdata); 2767 ieee80211_mgd_probe_ap_send(sdata);
2795 } else { 2768 } else {
2796#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 2769 mlme_dbg(sdata,
2797 wiphy_debug(local->hw.wiphy, 2770 "No ack for nullfunc frame to AP %pM, disconnecting.\n",
2798 "%s: No ack for nullfunc frame to" 2771 bssid);
2799 " AP %pM, disconnecting.\n",
2800 sdata->name, bssid);
2801#endif
2802 ieee80211_sta_connection_lost(sdata, bssid, 2772 ieee80211_sta_connection_lost(sdata, bssid,
2803 WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY); 2773 WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY);
2804 } 2774 }
2805 } else if (time_is_after_jiffies(ifmgd->probe_timeout)) 2775 } else if (time_is_after_jiffies(ifmgd->probe_timeout))
2806 run_again(ifmgd, ifmgd->probe_timeout); 2776 run_again(ifmgd, ifmgd->probe_timeout);
2807 else if (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS) { 2777 else if (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS) {
2808#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 2778 mlme_dbg(sdata,
2809 wiphy_debug(local->hw.wiphy, 2779 "Failed to send nullfunc to AP %pM after %dms, disconnecting\n",
2810 "%s: Failed to send nullfunc to AP %pM" 2780 bssid, probe_wait_ms);
2811 " after %dms, disconnecting.\n",
2812 sdata->name,
2813 bssid, probe_wait_ms);
2814#endif
2815 ieee80211_sta_connection_lost(sdata, bssid, 2781 ieee80211_sta_connection_lost(sdata, bssid,
2816 WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY); 2782 WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY);
2817 } else if (ifmgd->probe_send_count < max_tries) { 2783 } else if (ifmgd->probe_send_count < max_tries) {
2818#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 2784 mlme_dbg(sdata,
2819 wiphy_debug(local->hw.wiphy, 2785 "No probe response from AP %pM after %dms, try %d/%i\n",
2820 "%s: No probe response from AP %pM" 2786 bssid, probe_wait_ms,
2821 " after %dms, try %d/%i\n", 2787 ifmgd->probe_send_count, max_tries);
2822 sdata->name,
2823 bssid, probe_wait_ms,
2824 ifmgd->probe_send_count, max_tries);
2825#endif
2826 ieee80211_mgd_probe_ap_send(sdata); 2788 ieee80211_mgd_probe_ap_send(sdata);
2827 } else { 2789 } else {
2828 /* 2790 /*
@@ -2937,11 +2899,8 @@ void ieee80211_sta_restart(struct ieee80211_sub_if_data *sdata)
2937 sdata->flags &= ~IEEE80211_SDATA_DISCONNECT_RESUME; 2899 sdata->flags &= ~IEEE80211_SDATA_DISCONNECT_RESUME;
2938 mutex_lock(&ifmgd->mtx); 2900 mutex_lock(&ifmgd->mtx);
2939 if (ifmgd->associated) { 2901 if (ifmgd->associated) {
2940#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 2902 mlme_dbg(sdata,
2941 wiphy_debug(sdata->local->hw.wiphy, 2903 "driver requested disconnect after resume\n");
2942 "%s: driver requested disconnect after resume.\n",
2943 sdata->name);
2944#endif
2945 ieee80211_sta_connection_lost(sdata, 2904 ieee80211_sta_connection_lost(sdata,
2946 ifmgd->associated->bssid, 2905 ifmgd->associated->bssid,
2947 WLAN_REASON_UNSPECIFIED); 2906 WLAN_REASON_UNSPECIFIED);
@@ -3029,7 +2988,7 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
3029 struct ieee80211_local *local = sdata->local; 2988 struct ieee80211_local *local = sdata->local;
3030 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 2989 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
3031 struct ieee80211_bss *bss = (void *)cbss->priv; 2990 struct ieee80211_bss *bss = (void *)cbss->priv;
3032 struct sta_info *sta; 2991 struct sta_info *sta = NULL;
3033 bool have_sta = false; 2992 bool have_sta = false;
3034 int err; 2993 int err;
3035 int ht_cfreq; 2994 int ht_cfreq;
@@ -3082,13 +3041,11 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
3082 * since we look at probe response/beacon data here 3041 * since we look at probe response/beacon data here
3083 * it should be OK. 3042 * it should be OK.
3084 */ 3043 */
3085 printk(KERN_DEBUG 3044 sdata_info(sdata,
3086 "%s: Wrong control channel: center-freq: %d" 3045 "Wrong control channel: center-freq: %d ht-cfreq: %d ht->primary_chan: %d band: %d - Disabling HT\n",
3087 " ht-cfreq: %d ht->primary_chan: %d" 3046 cbss->channel->center_freq,
3088 " band: %d. Disabling HT.\n", 3047 ht_cfreq, ht_oper->primary_chan,
3089 sdata->name, cbss->channel->center_freq, 3048 cbss->channel->band);
3090 ht_cfreq, ht_oper->primary_chan,
3091 cbss->channel->band);
3092 ht_oper = NULL; 3049 ht_oper = NULL;
3093 } 3050 }
3094 } 3051 }
@@ -3112,9 +3069,8 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
3112 if (!ieee80211_set_channel_type(local, sdata, channel_type)) { 3069 if (!ieee80211_set_channel_type(local, sdata, channel_type)) {
3113 /* can only fail due to HT40+/- mismatch */ 3070 /* can only fail due to HT40+/- mismatch */
3114 channel_type = NL80211_CHAN_HT20; 3071 channel_type = NL80211_CHAN_HT20;
3115 printk(KERN_DEBUG 3072 sdata_info(sdata,
3116 "%s: disabling 40 MHz due to multi-vif mismatch\n", 3073 "disabling 40 MHz due to multi-vif mismatch\n");
3117 sdata->name);
3118 ifmgd->flags |= IEEE80211_STA_DISABLE_40MHZ; 3074 ifmgd->flags |= IEEE80211_STA_DISABLE_40MHZ;
3119 WARN_ON(!ieee80211_set_channel_type(local, sdata, 3075 WARN_ON(!ieee80211_set_channel_type(local, sdata,
3120 channel_type)); 3076 channel_type));
@@ -3123,7 +3079,7 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
3123 local->oper_channel = cbss->channel; 3079 local->oper_channel = cbss->channel;
3124 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL); 3080 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
3125 3081
3126 if (!have_sta) { 3082 if (sta) {
3127 u32 rates = 0, basic_rates = 0; 3083 u32 rates = 0, basic_rates = 0;
3128 bool have_higher_than_11mbit; 3084 bool have_higher_than_11mbit;
3129 int min_rate = INT_MAX, min_rate_index = -1; 3085 int min_rate = INT_MAX, min_rate_index = -1;
@@ -3143,9 +3099,8 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
3143 * we can connect -- with a warning. 3099 * we can connect -- with a warning.
3144 */ 3100 */
3145 if (!basic_rates && min_rate_index >= 0) { 3101 if (!basic_rates && min_rate_index >= 0) {
3146 printk(KERN_DEBUG 3102 sdata_info(sdata,
3147 "%s: No basic rates, using min rate instead.\n", 3103 "No basic rates, using min rate instead\n");
3148 sdata->name);
3149 basic_rates = BIT(min_rate_index); 3104 basic_rates = BIT(min_rate_index);
3150 } 3105 }
3151 3106
@@ -3171,9 +3126,9 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
3171 err = sta_info_insert(sta); 3126 err = sta_info_insert(sta);
3172 sta = NULL; 3127 sta = NULL;
3173 if (err) { 3128 if (err) {
3174 printk(KERN_DEBUG 3129 sdata_info(sdata,
3175 "%s: failed to insert STA entry for the AP (error %d)\n", 3130 "failed to insert STA entry for the AP (error %d)\n",
3176 sdata->name, err); 3131 err);
3177 return err; 3132 return err;
3178 } 3133 }
3179 } else 3134 } else
@@ -3251,8 +3206,7 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
3251 if (ifmgd->associated) 3206 if (ifmgd->associated)
3252 ieee80211_set_disassoc(sdata, 0, 0, false, NULL); 3207 ieee80211_set_disassoc(sdata, 0, 0, false, NULL);
3253 3208
3254 printk(KERN_DEBUG "%s: authenticate with %pM\n", 3209 sdata_info(sdata, "authenticate with %pM\n", req->bss->bssid);
3255 sdata->name, req->bss->bssid);
3256 3210
3257 err = ieee80211_prep_connection(sdata, req->bss, false); 3211 err = ieee80211_prep_connection(sdata, req->bss, false);
3258 if (err) 3212 if (err)
@@ -3287,7 +3241,7 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
3287 struct ieee80211_bss *bss = (void *)req->bss->priv; 3241 struct ieee80211_bss *bss = (void *)req->bss->priv;
3288 struct ieee80211_mgd_assoc_data *assoc_data; 3242 struct ieee80211_mgd_assoc_data *assoc_data;
3289 struct ieee80211_supported_band *sband; 3243 struct ieee80211_supported_band *sband;
3290 const u8 *ssidie; 3244 const u8 *ssidie, *ht_ie;
3291 int i, err; 3245 int i, err;
3292 3246
3293 ssidie = ieee80211_bss_get_ie(req->bss, WLAN_EID_SSID); 3247 ssidie = ieee80211_bss_get_ie(req->bss, WLAN_EID_SSID);
@@ -3335,11 +3289,15 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
3335 * We can set this to true for non-11n hardware, that'll be checked 3289 * We can set this to true for non-11n hardware, that'll be checked
3336 * separately along with the peer capabilities. 3290 * separately along with the peer capabilities.
3337 */ 3291 */
3338 for (i = 0; i < req->crypto.n_ciphers_pairwise; i++) 3292 for (i = 0; i < req->crypto.n_ciphers_pairwise; i++) {
3339 if (req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_WEP40 || 3293 if (req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_WEP40 ||
3340 req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_TKIP || 3294 req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_TKIP ||
3341 req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_WEP104) 3295 req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_WEP104) {
3342 ifmgd->flags |= IEEE80211_STA_DISABLE_11N; 3296 ifmgd->flags |= IEEE80211_STA_DISABLE_11N;
3297 netdev_info(sdata->dev,
3298 "disabling HT due to WEP/TKIP use\n");
3299 }
3300 }
3343 3301
3344 if (req->flags & ASSOC_REQ_DISABLE_HT) 3302 if (req->flags & ASSOC_REQ_DISABLE_HT)
3345 ifmgd->flags |= IEEE80211_STA_DISABLE_11N; 3303 ifmgd->flags |= IEEE80211_STA_DISABLE_11N;
@@ -3347,8 +3305,11 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
3347 /* Also disable HT if we don't support it or the AP doesn't use WMM */ 3305 /* Also disable HT if we don't support it or the AP doesn't use WMM */
3348 sband = local->hw.wiphy->bands[req->bss->channel->band]; 3306 sband = local->hw.wiphy->bands[req->bss->channel->band];
3349 if (!sband->ht_cap.ht_supported || 3307 if (!sband->ht_cap.ht_supported ||
3350 local->hw.queues < IEEE80211_NUM_ACS || !bss->wmm_used) 3308 local->hw.queues < IEEE80211_NUM_ACS || !bss->wmm_used) {
3351 ifmgd->flags |= IEEE80211_STA_DISABLE_11N; 3309 ifmgd->flags |= IEEE80211_STA_DISABLE_11N;
3310 netdev_info(sdata->dev,
3311 "disabling HT as WMM/QoS is not supported\n");
3312 }
3352 3313
3353 memcpy(&ifmgd->ht_capa, &req->ht_capa, sizeof(ifmgd->ht_capa)); 3314 memcpy(&ifmgd->ht_capa, &req->ht_capa, sizeof(ifmgd->ht_capa));
3354 memcpy(&ifmgd->ht_capa_mask, &req->ht_capa_mask, 3315 memcpy(&ifmgd->ht_capa_mask, &req->ht_capa_mask,
@@ -3374,8 +3335,13 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
3374 (local->hw.queues >= IEEE80211_NUM_ACS); 3335 (local->hw.queues >= IEEE80211_NUM_ACS);
3375 assoc_data->supp_rates = bss->supp_rates; 3336 assoc_data->supp_rates = bss->supp_rates;
3376 assoc_data->supp_rates_len = bss->supp_rates_len; 3337 assoc_data->supp_rates_len = bss->supp_rates_len;
3377 assoc_data->ht_operation_ie = 3338
3378 ieee80211_bss_get_ie(req->bss, WLAN_EID_HT_OPERATION); 3339 ht_ie = ieee80211_bss_get_ie(req->bss, WLAN_EID_HT_OPERATION);
3340 if (ht_ie && ht_ie[1] >= sizeof(struct ieee80211_ht_operation))
3341 assoc_data->ap_ht_param =
3342 ((struct ieee80211_ht_operation *)(ht_ie + 2))->ht_param;
3343 else
3344 ifmgd->flags |= IEEE80211_STA_DISABLE_11N;
3379 3345
3380 if (bss->wmm_used && bss->uapsd_supported && 3346 if (bss->wmm_used && bss->uapsd_supported &&
3381 (sdata->local->hw.flags & IEEE80211_HW_SUPPORTS_UAPSD)) { 3347 (sdata->local->hw.flags & IEEE80211_HW_SUPPORTS_UAPSD)) {
@@ -3422,8 +3388,8 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
3422 * Wait up to one beacon interval ... 3388 * Wait up to one beacon interval ...
3423 * should this be more if we miss one? 3389 * should this be more if we miss one?
3424 */ 3390 */
3425 printk(KERN_DEBUG "%s: waiting for beacon from %pM\n", 3391 sdata_info(sdata, "waiting for beacon from %pM\n",
3426 sdata->name, ifmgd->bssid); 3392 ifmgd->bssid);
3427 assoc_data->timeout = TU_TO_EXP_TIME(req->bss->beacon_interval); 3393 assoc_data->timeout = TU_TO_EXP_TIME(req->bss->beacon_interval);
3428 } else { 3394 } else {
3429 assoc_data->have_beacon = true; 3395 assoc_data->have_beacon = true;
@@ -3442,8 +3408,8 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
3442 corrupt_type = "beacon"; 3408 corrupt_type = "beacon";
3443 } else if (bss->corrupt_data & IEEE80211_BSS_CORRUPT_PROBE_RESP) 3409 } else if (bss->corrupt_data & IEEE80211_BSS_CORRUPT_PROBE_RESP)
3444 corrupt_type = "probe response"; 3410 corrupt_type = "probe response";
3445 printk(KERN_DEBUG "%s: associating with AP with corrupt %s\n", 3411 sdata_info(sdata, "associating with AP with corrupt %s\n",
3446 sdata->name, corrupt_type); 3412 corrupt_type);
3447 } 3413 }
3448 3414
3449 err = 0; 3415 err = 0;
@@ -3472,9 +3438,9 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
3472 return 0; 3438 return 0;
3473 } 3439 }
3474 3440
3475 printk(KERN_DEBUG 3441 sdata_info(sdata,
3476 "%s: deauthenticating from %pM by local choice (reason=%d)\n", 3442 "deauthenticating from %pM by local choice (reason=%d)\n",
3477 sdata->name, req->bssid, req->reason_code); 3443 req->bssid, req->reason_code);
3478 3444
3479 if (ifmgd->associated && 3445 if (ifmgd->associated &&
3480 ether_addr_equal(ifmgd->associated->bssid, req->bssid)) 3446 ether_addr_equal(ifmgd->associated->bssid, req->bssid))
@@ -3516,8 +3482,9 @@ int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata,
3516 return -ENOLINK; 3482 return -ENOLINK;
3517 } 3483 }
3518 3484
3519 printk(KERN_DEBUG "%s: disassociating from %pM by local choice (reason=%d)\n", 3485 sdata_info(sdata,
3520 sdata->name, req->bss->bssid, req->reason_code); 3486 "disassociating from %pM by local choice (reason=%d)\n",
3487 req->bss->bssid, req->reason_code);
3521 3488
3522 memcpy(bssid, req->bss->bssid, ETH_ALEN); 3489 memcpy(bssid, req->bss->bssid, ETH_ALEN);
3523 ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DISASSOC, 3490 ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DISASSOC,
@@ -3558,10 +3525,3 @@ void ieee80211_cqm_rssi_notify(struct ieee80211_vif *vif,
3558 cfg80211_cqm_rssi_notify(sdata->dev, rssi_event, gfp); 3525 cfg80211_cqm_rssi_notify(sdata->dev, rssi_event, gfp);
3559} 3526}
3560EXPORT_SYMBOL(ieee80211_cqm_rssi_notify); 3527EXPORT_SYMBOL(ieee80211_cqm_rssi_notify);
3561
3562unsigned char ieee80211_get_operstate(struct ieee80211_vif *vif)
3563{
3564 struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
3565 return sdata->dev->operstate;
3566}
3567EXPORT_SYMBOL(ieee80211_get_operstate);
diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c
index 935aa4b6dee..b0fb6a2b89a 100644
--- a/net/mac80211/offchannel.c
+++ b/net/mac80211/offchannel.c
@@ -15,7 +15,7 @@
15#include <linux/export.h> 15#include <linux/export.h>
16#include <net/mac80211.h> 16#include <net/mac80211.h>
17#include "ieee80211_i.h" 17#include "ieee80211_i.h"
18#include "driver-trace.h" 18#include "driver-ops.h"
19 19
20/* 20/*
21 * Tell our hardware to disable PS. 21 * Tell our hardware to disable PS.
@@ -24,8 +24,7 @@
24 * because we *may* be doing work on-operating channel, and want our 24 * because we *may* be doing work on-operating channel, and want our
25 * hardware unconditionally awake, but still let the AP send us normal frames. 25 * hardware unconditionally awake, but still let the AP send us normal frames.
26 */ 26 */
27static void ieee80211_offchannel_ps_enable(struct ieee80211_sub_if_data *sdata, 27static void ieee80211_offchannel_ps_enable(struct ieee80211_sub_if_data *sdata)
28 bool tell_ap)
29{ 28{
30 struct ieee80211_local *local = sdata->local; 29 struct ieee80211_local *local = sdata->local;
31 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 30 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
@@ -46,8 +45,8 @@ static void ieee80211_offchannel_ps_enable(struct ieee80211_sub_if_data *sdata,
46 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS); 45 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
47 } 46 }
48 47
49 if (tell_ap && (!local->offchannel_ps_enabled || 48 if (!local->offchannel_ps_enabled ||
50 !(local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK))) 49 !(local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK))
51 /* 50 /*
52 * If power save was enabled, no need to send a nullfunc 51 * If power save was enabled, no need to send a nullfunc
53 * frame because AP knows that we are sleeping. But if the 52 * frame because AP knows that we are sleeping. But if the
@@ -132,7 +131,7 @@ void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local,
132 if (offchannel_ps_enable && 131 if (offchannel_ps_enable &&
133 (sdata->vif.type == NL80211_IFTYPE_STATION) && 132 (sdata->vif.type == NL80211_IFTYPE_STATION) &&
134 sdata->u.mgd.associated) 133 sdata->u.mgd.associated)
135 ieee80211_offchannel_ps_enable(sdata, true); 134 ieee80211_offchannel_ps_enable(sdata);
136 } 135 }
137 } 136 }
138 mutex_unlock(&local->iflist_mtx); 137 mutex_unlock(&local->iflist_mtx);
@@ -181,34 +180,58 @@ void ieee80211_offchannel_return(struct ieee80211_local *local,
181 mutex_unlock(&local->iflist_mtx); 180 mutex_unlock(&local->iflist_mtx);
182} 181}
183 182
183void ieee80211_handle_roc_started(struct ieee80211_roc_work *roc)
184{
185 if (roc->notified)
186 return;
187
188 if (roc->mgmt_tx_cookie) {
189 if (!WARN_ON(!roc->frame)) {
190 ieee80211_tx_skb(roc->sdata, roc->frame);
191 roc->frame = NULL;
192 }
193 } else {
194 cfg80211_ready_on_channel(roc->sdata->dev, (unsigned long)roc,
195 roc->chan, roc->chan_type,
196 roc->req_duration, GFP_KERNEL);
197 }
198
199 roc->notified = true;
200}
201
184static void ieee80211_hw_roc_start(struct work_struct *work) 202static void ieee80211_hw_roc_start(struct work_struct *work)
185{ 203{
186 struct ieee80211_local *local = 204 struct ieee80211_local *local =
187 container_of(work, struct ieee80211_local, hw_roc_start); 205 container_of(work, struct ieee80211_local, hw_roc_start);
188 struct ieee80211_sub_if_data *sdata; 206 struct ieee80211_roc_work *roc, *dep, *tmp;
189 207
190 mutex_lock(&local->mtx); 208 mutex_lock(&local->mtx);
191 209
192 if (!local->hw_roc_channel) { 210 if (list_empty(&local->roc_list))
193 mutex_unlock(&local->mtx); 211 goto out_unlock;
194 return;
195 }
196 212
197 if (local->hw_roc_skb) { 213 roc = list_first_entry(&local->roc_list, struct ieee80211_roc_work,
198 sdata = IEEE80211_DEV_TO_SUB_IF(local->hw_roc_dev); 214 list);
199 ieee80211_tx_skb(sdata, local->hw_roc_skb); 215
200 local->hw_roc_skb = NULL; 216 if (!roc->started)
201 } else { 217 goto out_unlock;
202 cfg80211_ready_on_channel(local->hw_roc_dev,
203 local->hw_roc_cookie,
204 local->hw_roc_channel,
205 local->hw_roc_channel_type,
206 local->hw_roc_duration,
207 GFP_KERNEL);
208 }
209 218
210 ieee80211_recalc_idle(local); 219 roc->hw_begun = true;
220 roc->hw_start_time = local->hw_roc_start_time;
211 221
222 ieee80211_handle_roc_started(roc);
223 list_for_each_entry_safe(dep, tmp, &roc->dependents, list) {
224 ieee80211_handle_roc_started(dep);
225
226 if (dep->duration > roc->duration) {
227 u32 dur = dep->duration;
228 dep->duration = dur - roc->duration;
229 roc->duration = dur;
230 list_del(&dep->list);
231 list_add(&dep->list, &roc->list);
232 }
233 }
234 out_unlock:
212 mutex_unlock(&local->mtx); 235 mutex_unlock(&local->mtx);
213} 236}
214 237
@@ -216,52 +239,179 @@ void ieee80211_ready_on_channel(struct ieee80211_hw *hw)
216{ 239{
217 struct ieee80211_local *local = hw_to_local(hw); 240 struct ieee80211_local *local = hw_to_local(hw);
218 241
242 local->hw_roc_start_time = jiffies;
243
219 trace_api_ready_on_channel(local); 244 trace_api_ready_on_channel(local);
220 245
221 ieee80211_queue_work(hw, &local->hw_roc_start); 246 ieee80211_queue_work(hw, &local->hw_roc_start);
222} 247}
223EXPORT_SYMBOL_GPL(ieee80211_ready_on_channel); 248EXPORT_SYMBOL_GPL(ieee80211_ready_on_channel);
224 249
225static void ieee80211_hw_roc_done(struct work_struct *work) 250void ieee80211_start_next_roc(struct ieee80211_local *local)
226{ 251{
227 struct ieee80211_local *local = 252 struct ieee80211_roc_work *roc;
228 container_of(work, struct ieee80211_local, hw_roc_done);
229 253
230 mutex_lock(&local->mtx); 254 lockdep_assert_held(&local->mtx);
231 255
232 if (!local->hw_roc_channel) { 256 if (list_empty(&local->roc_list)) {
233 mutex_unlock(&local->mtx); 257 ieee80211_run_deferred_scan(local);
234 return; 258 return;
235 } 259 }
236 260
237 /* was never transmitted */ 261 roc = list_first_entry(&local->roc_list, struct ieee80211_roc_work,
238 if (local->hw_roc_skb) { 262 list);
239 u64 cookie;
240 263
241 cookie = local->hw_roc_cookie ^ 2; 264 if (WARN_ON_ONCE(roc->started))
265 return;
266
267 if (local->ops->remain_on_channel) {
268 int ret, duration = roc->duration;
269
270 /* XXX: duplicated, see ieee80211_start_roc_work() */
271 if (!duration)
272 duration = 10;
273
274 ret = drv_remain_on_channel(local, roc->chan,
275 roc->chan_type,
276 duration);
277
278 roc->started = true;
279
280 if (ret) {
281 wiphy_warn(local->hw.wiphy,
282 "failed to start next HW ROC (%d)\n", ret);
283 /*
284 * queue the work struct again to avoid recursion
285 * when multiple failures occur
286 */
287 ieee80211_remain_on_channel_expired(&local->hw);
288 }
289 } else {
290 /* delay it a bit */
291 ieee80211_queue_delayed_work(&local->hw, &roc->work,
292 round_jiffies_relative(HZ/2));
293 }
294}
242 295
243 cfg80211_mgmt_tx_status(local->hw_roc_dev, cookie, 296void ieee80211_roc_notify_destroy(struct ieee80211_roc_work *roc)
244 local->hw_roc_skb->data, 297{
245 local->hw_roc_skb->len, false, 298 struct ieee80211_roc_work *dep, *tmp;
246 GFP_KERNEL);
247 299
248 kfree_skb(local->hw_roc_skb); 300 /* was never transmitted */
249 local->hw_roc_skb = NULL; 301 if (roc->frame) {
250 local->hw_roc_skb_for_status = NULL; 302 cfg80211_mgmt_tx_status(roc->sdata->dev,
303 (unsigned long)roc->frame,
304 roc->frame->data, roc->frame->len,
305 false, GFP_KERNEL);
306 kfree_skb(roc->frame);
251 } 307 }
252 308
253 if (!local->hw_roc_for_tx) 309 if (!roc->mgmt_tx_cookie)
254 cfg80211_remain_on_channel_expired(local->hw_roc_dev, 310 cfg80211_remain_on_channel_expired(roc->sdata->dev,
255 local->hw_roc_cookie, 311 (unsigned long)roc,
256 local->hw_roc_channel, 312 roc->chan, roc->chan_type,
257 local->hw_roc_channel_type,
258 GFP_KERNEL); 313 GFP_KERNEL);
259 314
260 local->hw_roc_channel = NULL; 315 list_for_each_entry_safe(dep, tmp, &roc->dependents, list)
261 local->hw_roc_cookie = 0; 316 ieee80211_roc_notify_destroy(dep);
317
318 kfree(roc);
319}
320
321void ieee80211_sw_roc_work(struct work_struct *work)
322{
323 struct ieee80211_roc_work *roc =
324 container_of(work, struct ieee80211_roc_work, work.work);
325 struct ieee80211_sub_if_data *sdata = roc->sdata;
326 struct ieee80211_local *local = sdata->local;
327
328 mutex_lock(&local->mtx);
329
330 if (roc->abort)
331 goto finish;
332
333 if (WARN_ON(list_empty(&local->roc_list)))
334 goto out_unlock;
335
336 if (WARN_ON(roc != list_first_entry(&local->roc_list,
337 struct ieee80211_roc_work,
338 list)))
339 goto out_unlock;
262 340
263 ieee80211_recalc_idle(local); 341 if (!roc->started) {
342 struct ieee80211_roc_work *dep;
264 343
344 /* start this ROC */
345
346 /* switch channel etc */
347 ieee80211_recalc_idle(local);
348
349 local->tmp_channel = roc->chan;
350 local->tmp_channel_type = roc->chan_type;
351 ieee80211_hw_config(local, 0);
352
353 /* tell userspace or send frame */
354 ieee80211_handle_roc_started(roc);
355 list_for_each_entry(dep, &roc->dependents, list)
356 ieee80211_handle_roc_started(dep);
357
358 /* if it was pure TX, just finish right away */
359 if (!roc->duration)
360 goto finish;
361
362 roc->started = true;
363 ieee80211_queue_delayed_work(&local->hw, &roc->work,
364 msecs_to_jiffies(roc->duration));
365 } else {
366 /* finish this ROC */
367 finish:
368 list_del(&roc->list);
369 ieee80211_roc_notify_destroy(roc);
370
371 if (roc->started) {
372 drv_flush(local, false);
373
374 local->tmp_channel = NULL;
375 ieee80211_hw_config(local, 0);
376
377 ieee80211_offchannel_return(local, true);
378 }
379
380 ieee80211_recalc_idle(local);
381
382 if (roc->started)
383 ieee80211_start_next_roc(local);
384 }
385
386 out_unlock:
387 mutex_unlock(&local->mtx);
388}
389
390static void ieee80211_hw_roc_done(struct work_struct *work)
391{
392 struct ieee80211_local *local =
393 container_of(work, struct ieee80211_local, hw_roc_done);
394 struct ieee80211_roc_work *roc;
395
396 mutex_lock(&local->mtx);
397
398 if (list_empty(&local->roc_list))
399 goto out_unlock;
400
401 roc = list_first_entry(&local->roc_list, struct ieee80211_roc_work,
402 list);
403
404 if (!roc->started)
405 goto out_unlock;
406
407 list_del(&roc->list);
408
409 ieee80211_roc_notify_destroy(roc);
410
411 /* if there's another roc, start it now */
412 ieee80211_start_next_roc(local);
413
414 out_unlock:
265 mutex_unlock(&local->mtx); 415 mutex_unlock(&local->mtx);
266} 416}
267 417
@@ -275,8 +425,47 @@ void ieee80211_remain_on_channel_expired(struct ieee80211_hw *hw)
275} 425}
276EXPORT_SYMBOL_GPL(ieee80211_remain_on_channel_expired); 426EXPORT_SYMBOL_GPL(ieee80211_remain_on_channel_expired);
277 427
278void ieee80211_hw_roc_setup(struct ieee80211_local *local) 428void ieee80211_roc_setup(struct ieee80211_local *local)
279{ 429{
280 INIT_WORK(&local->hw_roc_start, ieee80211_hw_roc_start); 430 INIT_WORK(&local->hw_roc_start, ieee80211_hw_roc_start);
281 INIT_WORK(&local->hw_roc_done, ieee80211_hw_roc_done); 431 INIT_WORK(&local->hw_roc_done, ieee80211_hw_roc_done);
432 INIT_LIST_HEAD(&local->roc_list);
433}
434
435void ieee80211_roc_purge(struct ieee80211_sub_if_data *sdata)
436{
437 struct ieee80211_local *local = sdata->local;
438 struct ieee80211_roc_work *roc, *tmp;
439 LIST_HEAD(tmp_list);
440
441 mutex_lock(&local->mtx);
442 list_for_each_entry_safe(roc, tmp, &local->roc_list, list) {
443 if (roc->sdata != sdata)
444 continue;
445
446 if (roc->started && local->ops->remain_on_channel) {
447 /* can race, so ignore return value */
448 drv_cancel_remain_on_channel(local);
449 }
450
451 list_move_tail(&roc->list, &tmp_list);
452 roc->abort = true;
453 }
454
455 ieee80211_start_next_roc(local);
456 mutex_unlock(&local->mtx);
457
458 list_for_each_entry_safe(roc, tmp, &tmp_list, list) {
459 if (local->ops->remain_on_channel) {
460 list_del(&roc->list);
461 ieee80211_roc_notify_destroy(roc);
462 } else {
463 ieee80211_queue_delayed_work(&local->hw, &roc->work, 0);
464
465 /* work will clean up etc */
466 flush_delayed_work(&roc->work);
467 }
468 }
469
470 WARN_ON_ONCE(!list_empty(&tmp_list));
282} 471}
diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
index af1c4e26e96..5c572e7a1a7 100644
--- a/net/mac80211/pm.c
+++ b/net/mac80211/pm.c
@@ -77,6 +77,17 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
77 int err = drv_suspend(local, wowlan); 77 int err = drv_suspend(local, wowlan);
78 if (err < 0) { 78 if (err < 0) {
79 local->quiescing = false; 79 local->quiescing = false;
80 local->wowlan = false;
81 if (hw->flags & IEEE80211_HW_AMPDU_AGGREGATION) {
82 mutex_lock(&local->sta_mtx);
83 list_for_each_entry(sta,
84 &local->sta_list, list) {
85 clear_sta_flag(sta, WLAN_STA_BLOCK_BA);
86 }
87 mutex_unlock(&local->sta_mtx);
88 }
89 ieee80211_wake_queues_by_reason(hw,
90 IEEE80211_QUEUE_STOP_REASON_SUSPEND);
80 return err; 91 return err;
81 } else if (err > 0) { 92 } else if (err > 0) {
82 WARN_ON(err != 1); 93 WARN_ON(err != 1);
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 965e6ec0adb..67edd69e842 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -94,7 +94,7 @@ ieee80211_rx_radiotap_len(struct ieee80211_local *local,
94 return len; 94 return len;
95} 95}
96 96
97/* 97/**
98 * ieee80211_add_rx_radiotap_header - add radiotap header 98 * ieee80211_add_rx_radiotap_header - add radiotap header
99 * 99 *
100 * add a radiotap header containing all the fields which the hardware provided. 100 * add a radiotap header containing all the fields which the hardware provided.
@@ -554,11 +554,11 @@ static inline u16 seq_sub(u16 sq1, u16 sq2)
554} 554}
555 555
556 556
557static void ieee80211_release_reorder_frame(struct ieee80211_hw *hw, 557static void ieee80211_release_reorder_frame(struct ieee80211_sub_if_data *sdata,
558 struct tid_ampdu_rx *tid_agg_rx, 558 struct tid_ampdu_rx *tid_agg_rx,
559 int index) 559 int index)
560{ 560{
561 struct ieee80211_local *local = hw_to_local(hw); 561 struct ieee80211_local *local = sdata->local;
562 struct sk_buff *skb = tid_agg_rx->reorder_buf[index]; 562 struct sk_buff *skb = tid_agg_rx->reorder_buf[index];
563 struct ieee80211_rx_status *status; 563 struct ieee80211_rx_status *status;
564 564
@@ -578,7 +578,7 @@ no_frame:
578 tid_agg_rx->head_seq_num = seq_inc(tid_agg_rx->head_seq_num); 578 tid_agg_rx->head_seq_num = seq_inc(tid_agg_rx->head_seq_num);
579} 579}
580 580
581static void ieee80211_release_reorder_frames(struct ieee80211_hw *hw, 581static void ieee80211_release_reorder_frames(struct ieee80211_sub_if_data *sdata,
582 struct tid_ampdu_rx *tid_agg_rx, 582 struct tid_ampdu_rx *tid_agg_rx,
583 u16 head_seq_num) 583 u16 head_seq_num)
584{ 584{
@@ -589,7 +589,7 @@ static void ieee80211_release_reorder_frames(struct ieee80211_hw *hw,
589 while (seq_less(tid_agg_rx->head_seq_num, head_seq_num)) { 589 while (seq_less(tid_agg_rx->head_seq_num, head_seq_num)) {
590 index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) % 590 index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
591 tid_agg_rx->buf_size; 591 tid_agg_rx->buf_size;
592 ieee80211_release_reorder_frame(hw, tid_agg_rx, index); 592 ieee80211_release_reorder_frame(sdata, tid_agg_rx, index);
593 } 593 }
594} 594}
595 595
@@ -604,7 +604,7 @@ static void ieee80211_release_reorder_frames(struct ieee80211_hw *hw,
604 */ 604 */
605#define HT_RX_REORDER_BUF_TIMEOUT (HZ / 10) 605#define HT_RX_REORDER_BUF_TIMEOUT (HZ / 10)
606 606
607static void ieee80211_sta_reorder_release(struct ieee80211_hw *hw, 607static void ieee80211_sta_reorder_release(struct ieee80211_sub_if_data *sdata,
608 struct tid_ampdu_rx *tid_agg_rx) 608 struct tid_ampdu_rx *tid_agg_rx)
609{ 609{
610 int index, j; 610 int index, j;
@@ -632,12 +632,9 @@ static void ieee80211_sta_reorder_release(struct ieee80211_hw *hw,
632 HT_RX_REORDER_BUF_TIMEOUT)) 632 HT_RX_REORDER_BUF_TIMEOUT))
633 goto set_release_timer; 633 goto set_release_timer;
634 634
635#ifdef CONFIG_MAC80211_HT_DEBUG 635 ht_dbg_ratelimited(sdata,
636 if (net_ratelimit()) 636 "release an RX reorder frame due to timeout on earlier frames\n");
637 wiphy_debug(hw->wiphy, 637 ieee80211_release_reorder_frame(sdata, tid_agg_rx, j);
638 "release an RX reorder frame due to timeout on earlier frames\n");
639#endif
640 ieee80211_release_reorder_frame(hw, tid_agg_rx, j);
641 638
642 /* 639 /*
643 * Increment the head seq# also for the skipped slots. 640 * Increment the head seq# also for the skipped slots.
@@ -647,7 +644,7 @@ static void ieee80211_sta_reorder_release(struct ieee80211_hw *hw,
647 skipped = 0; 644 skipped = 0;
648 } 645 }
649 } else while (tid_agg_rx->reorder_buf[index]) { 646 } else while (tid_agg_rx->reorder_buf[index]) {
650 ieee80211_release_reorder_frame(hw, tid_agg_rx, index); 647 ieee80211_release_reorder_frame(sdata, tid_agg_rx, index);
651 index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) % 648 index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
652 tid_agg_rx->buf_size; 649 tid_agg_rx->buf_size;
653 } 650 }
@@ -677,7 +674,7 @@ static void ieee80211_sta_reorder_release(struct ieee80211_hw *hw,
677 * rcu_read_lock protection. It returns false if the frame 674 * rcu_read_lock protection. It returns false if the frame
678 * can be processed immediately, true if it was consumed. 675 * can be processed immediately, true if it was consumed.
679 */ 676 */
680static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw, 677static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_sub_if_data *sdata,
681 struct tid_ampdu_rx *tid_agg_rx, 678 struct tid_ampdu_rx *tid_agg_rx,
682 struct sk_buff *skb) 679 struct sk_buff *skb)
683{ 680{
@@ -706,7 +703,8 @@ static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
706 if (!seq_less(mpdu_seq_num, head_seq_num + buf_size)) { 703 if (!seq_less(mpdu_seq_num, head_seq_num + buf_size)) {
707 head_seq_num = seq_inc(seq_sub(mpdu_seq_num, buf_size)); 704 head_seq_num = seq_inc(seq_sub(mpdu_seq_num, buf_size));
708 /* release stored frames up to new head to stack */ 705 /* release stored frames up to new head to stack */
709 ieee80211_release_reorder_frames(hw, tid_agg_rx, head_seq_num); 706 ieee80211_release_reorder_frames(sdata, tid_agg_rx,
707 head_seq_num);
710 } 708 }
711 709
712 /* Now the new frame is always in the range of the reordering buffer */ 710 /* Now the new frame is always in the range of the reordering buffer */
@@ -736,7 +734,7 @@ static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
736 tid_agg_rx->reorder_buf[index] = skb; 734 tid_agg_rx->reorder_buf[index] = skb;
737 tid_agg_rx->reorder_time[index] = jiffies; 735 tid_agg_rx->reorder_time[index] = jiffies;
738 tid_agg_rx->stored_mpdu_num++; 736 tid_agg_rx->stored_mpdu_num++;
739 ieee80211_sta_reorder_release(hw, tid_agg_rx); 737 ieee80211_sta_reorder_release(sdata, tid_agg_rx);
740 738
741 out: 739 out:
742 spin_unlock(&tid_agg_rx->reorder_lock); 740 spin_unlock(&tid_agg_rx->reorder_lock);
@@ -751,7 +749,6 @@ static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx)
751{ 749{
752 struct sk_buff *skb = rx->skb; 750 struct sk_buff *skb = rx->skb;
753 struct ieee80211_local *local = rx->local; 751 struct ieee80211_local *local = rx->local;
754 struct ieee80211_hw *hw = &local->hw;
755 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 752 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
756 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 753 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
757 struct sta_info *sta = rx->sta; 754 struct sta_info *sta = rx->sta;
@@ -813,7 +810,7 @@ static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx)
813 * sure that we cannot get to it any more before doing 810 * sure that we cannot get to it any more before doing
814 * anything with it. 811 * anything with it.
815 */ 812 */
816 if (ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, skb)) 813 if (ieee80211_sta_manage_reorder_buf(rx->sdata, tid_agg_rx, skb))
817 return; 814 return;
818 815
819 dont_reorder: 816 dont_reorder:
@@ -1136,24 +1133,18 @@ static void ap_sta_ps_start(struct sta_info *sta)
1136 set_sta_flag(sta, WLAN_STA_PS_STA); 1133 set_sta_flag(sta, WLAN_STA_PS_STA);
1137 if (!(local->hw.flags & IEEE80211_HW_AP_LINK_PS)) 1134 if (!(local->hw.flags & IEEE80211_HW_AP_LINK_PS))
1138 drv_sta_notify(local, sdata, STA_NOTIFY_SLEEP, &sta->sta); 1135 drv_sta_notify(local, sdata, STA_NOTIFY_SLEEP, &sta->sta);
1139#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG 1136 ps_dbg(sdata, "STA %pM aid %d enters power save mode\n",
1140 printk(KERN_DEBUG "%s: STA %pM aid %d enters power save mode\n", 1137 sta->sta.addr, sta->sta.aid);
1141 sdata->name, sta->sta.addr, sta->sta.aid);
1142#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
1143} 1138}
1144 1139
1145static void ap_sta_ps_end(struct sta_info *sta) 1140static void ap_sta_ps_end(struct sta_info *sta)
1146{ 1141{
1147#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG 1142 ps_dbg(sta->sdata, "STA %pM aid %d exits power save mode\n",
1148 printk(KERN_DEBUG "%s: STA %pM aid %d exits power save mode\n", 1143 sta->sta.addr, sta->sta.aid);
1149 sta->sdata->name, sta->sta.addr, sta->sta.aid);
1150#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
1151 1144
1152 if (test_sta_flag(sta, WLAN_STA_PS_DRIVER)) { 1145 if (test_sta_flag(sta, WLAN_STA_PS_DRIVER)) {
1153#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG 1146 ps_dbg(sta->sdata, "STA %pM aid %d driver-ps-blocked\n",
1154 printk(KERN_DEBUG "%s: STA %pM aid %d driver-ps-blocked\n", 1147 sta->sta.addr, sta->sta.aid);
1155 sta->sdata->name, sta->sta.addr, sta->sta.aid);
1156#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
1157 return; 1148 return;
1158 } 1149 }
1159 1150
@@ -1383,19 +1374,8 @@ ieee80211_reassemble_add(struct ieee80211_sub_if_data *sdata,
1383 if (sdata->fragment_next >= IEEE80211_FRAGMENT_MAX) 1374 if (sdata->fragment_next >= IEEE80211_FRAGMENT_MAX)
1384 sdata->fragment_next = 0; 1375 sdata->fragment_next = 0;
1385 1376
1386 if (!skb_queue_empty(&entry->skb_list)) { 1377 if (!skb_queue_empty(&entry->skb_list))
1387#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
1388 struct ieee80211_hdr *hdr =
1389 (struct ieee80211_hdr *) entry->skb_list.next->data;
1390 printk(KERN_DEBUG "%s: RX reassembly removed oldest "
1391 "fragment entry (idx=%d age=%lu seq=%d last_frag=%d "
1392 "addr1=%pM addr2=%pM\n",
1393 sdata->name, idx,
1394 jiffies - entry->first_frag_time, entry->seq,
1395 entry->last_frag, hdr->addr1, hdr->addr2);
1396#endif
1397 __skb_queue_purge(&entry->skb_list); 1378 __skb_queue_purge(&entry->skb_list);
1398 }
1399 1379
1400 __skb_queue_tail(&entry->skb_list, *skb); /* no need for locking */ 1380 __skb_queue_tail(&entry->skb_list, *skb); /* no need for locking */
1401 *skb = NULL; 1381 *skb = NULL;
@@ -1753,7 +1733,7 @@ ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
1753 */ 1733 */
1754 xmit_skb = skb_copy(skb, GFP_ATOMIC); 1734 xmit_skb = skb_copy(skb, GFP_ATOMIC);
1755 if (!xmit_skb) 1735 if (!xmit_skb)
1756 net_dbg_ratelimited("%s: failed to clone multicast frame\n", 1736 net_info_ratelimited("%s: failed to clone multicast frame\n",
1757 dev->name); 1737 dev->name);
1758 } else { 1738 } else {
1759 dsta = sta_info_get(sdata, skb->data); 1739 dsta = sta_info_get(sdata, skb->data);
@@ -1937,7 +1917,7 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
1937 ether_addr_equal(sdata->vif.addr, hdr->addr3)) 1917 ether_addr_equal(sdata->vif.addr, hdr->addr3))
1938 return RX_CONTINUE; 1918 return RX_CONTINUE;
1939 1919
1940 q = ieee80211_select_queue_80211(local, skb, hdr); 1920 q = ieee80211_select_queue_80211(sdata, skb, hdr);
1941 if (ieee80211_queue_stopped(&local->hw, q)) { 1921 if (ieee80211_queue_stopped(&local->hw, q)) {
1942 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_congestion); 1922 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_congestion);
1943 return RX_DROP_MONITOR; 1923 return RX_DROP_MONITOR;
@@ -1957,7 +1937,7 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
1957 1937
1958 fwd_skb = skb_copy(skb, GFP_ATOMIC); 1938 fwd_skb = skb_copy(skb, GFP_ATOMIC);
1959 if (!fwd_skb) { 1939 if (!fwd_skb) {
1960 net_dbg_ratelimited("%s: failed to clone mesh frame\n", 1940 net_info_ratelimited("%s: failed to clone mesh frame\n",
1961 sdata->name); 1941 sdata->name);
1962 goto out; 1942 goto out;
1963 } 1943 }
@@ -2060,8 +2040,6 @@ ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
2060static ieee80211_rx_result debug_noinline 2040static ieee80211_rx_result debug_noinline
2061ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx) 2041ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx)
2062{ 2042{
2063 struct ieee80211_local *local = rx->local;
2064 struct ieee80211_hw *hw = &local->hw;
2065 struct sk_buff *skb = rx->skb; 2043 struct sk_buff *skb = rx->skb;
2066 struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data; 2044 struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data;
2067 struct tid_ampdu_rx *tid_agg_rx; 2045 struct tid_ampdu_rx *tid_agg_rx;
@@ -2098,7 +2076,8 @@ ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx)
2098 2076
2099 spin_lock(&tid_agg_rx->reorder_lock); 2077 spin_lock(&tid_agg_rx->reorder_lock);
2100 /* release stored frames up to start of BAR */ 2078 /* release stored frames up to start of BAR */
2101 ieee80211_release_reorder_frames(hw, tid_agg_rx, start_seq_num); 2079 ieee80211_release_reorder_frames(rx->sdata, tid_agg_rx,
2080 start_seq_num);
2102 spin_unlock(&tid_agg_rx->reorder_lock); 2081 spin_unlock(&tid_agg_rx->reorder_lock);
2103 2082
2104 kfree_skb(skb); 2083 kfree_skb(skb);
@@ -2752,7 +2731,7 @@ void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid)
2752 return; 2731 return;
2753 2732
2754 spin_lock(&tid_agg_rx->reorder_lock); 2733 spin_lock(&tid_agg_rx->reorder_lock);
2755 ieee80211_sta_reorder_release(&sta->local->hw, tid_agg_rx); 2734 ieee80211_sta_reorder_release(sta->sdata, tid_agg_rx);
2756 spin_unlock(&tid_agg_rx->reorder_lock); 2735 spin_unlock(&tid_agg_rx->reorder_lock);
2757 2736
2758 ieee80211_rx_handlers(&rx); 2737 ieee80211_rx_handlers(&rx);
@@ -3032,6 +3011,10 @@ void ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb)
3032 if (unlikely(local->quiescing || local->suspended)) 3011 if (unlikely(local->quiescing || local->suspended))
3033 goto drop; 3012 goto drop;
3034 3013
3014 /* We might be during a HW reconfig, prevent Rx for the same reason */
3015 if (unlikely(local->in_reconfig))
3016 goto drop;
3017
3035 /* 3018 /*
3036 * The same happens when we're not even started, 3019 * The same happens when we're not even started,
3037 * but that's worth a warning. 3020 * but that's worth a warning.
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index 169da0742c8..267b2940fad 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -114,8 +114,7 @@ ieee80211_bss_info_update(struct ieee80211_local *local,
114 114
115 if (elems->tim && (!elems->parse_error || 115 if (elems->tim && (!elems->parse_error ||
116 !(bss->valid_data & IEEE80211_BSS_VALID_DTIM))) { 116 !(bss->valid_data & IEEE80211_BSS_VALID_DTIM))) {
117 struct ieee80211_tim_ie *tim_ie = 117 struct ieee80211_tim_ie *tim_ie = elems->tim;
118 (struct ieee80211_tim_ie *)elems->tim;
119 bss->dtim_period = tim_ie->dtim_period; 118 bss->dtim_period = tim_ie->dtim_period;
120 if (!elems->parse_error) 119 if (!elems->parse_error)
121 bss->valid_data |= IEEE80211_BSS_VALID_DTIM; 120 bss->valid_data |= IEEE80211_BSS_VALID_DTIM;
@@ -323,7 +322,7 @@ static void __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted,
323 ieee80211_mlme_notify_scan_completed(local); 322 ieee80211_mlme_notify_scan_completed(local);
324 ieee80211_ibss_notify_scan_completed(local); 323 ieee80211_ibss_notify_scan_completed(local);
325 ieee80211_mesh_notify_scan_completed(local); 324 ieee80211_mesh_notify_scan_completed(local);
326 ieee80211_queue_work(&local->hw, &local->work_work); 325 ieee80211_start_next_roc(local);
327} 326}
328 327
329void ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted) 328void ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted)
@@ -376,7 +375,7 @@ static int ieee80211_start_sw_scan(struct ieee80211_local *local)
376static bool ieee80211_can_scan(struct ieee80211_local *local, 375static bool ieee80211_can_scan(struct ieee80211_local *local,
377 struct ieee80211_sub_if_data *sdata) 376 struct ieee80211_sub_if_data *sdata)
378{ 377{
379 if (!list_empty(&local->work_list)) 378 if (!list_empty(&local->roc_list))
380 return false; 379 return false;
381 380
382 if (sdata->vif.type == NL80211_IFTYPE_STATION && 381 if (sdata->vif.type == NL80211_IFTYPE_STATION &&
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index de455f8bbb9..06fa75ceb02 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -169,9 +169,7 @@ void sta_info_free(struct ieee80211_local *local, struct sta_info *sta)
169 if (sta->rate_ctrl) 169 if (sta->rate_ctrl)
170 rate_control_free_sta(sta); 170 rate_control_free_sta(sta);
171 171
172#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 172 sta_dbg(sta->sdata, "Destroyed STA %pM\n", sta->sta.addr);
173 wiphy_debug(local->hw.wiphy, "Destroyed STA %pM\n", sta->sta.addr);
174#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
175 173
176 kfree(sta); 174 kfree(sta);
177} 175}
@@ -278,9 +276,7 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
278 for (i = 0; i < NUM_RX_DATA_QUEUES; i++) 276 for (i = 0; i < NUM_RX_DATA_QUEUES; i++)
279 sta->last_seq_ctrl[i] = cpu_to_le16(USHRT_MAX); 277 sta->last_seq_ctrl[i] = cpu_to_le16(USHRT_MAX);
280 278
281#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 279 sta_dbg(sdata, "Allocated STA %pM\n", sta->sta.addr);
282 wiphy_debug(local->hw.wiphy, "Allocated STA %pM\n", sta->sta.addr);
283#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
284 280
285#ifdef CONFIG_MAC80211_MESH 281#ifdef CONFIG_MAC80211_MESH
286 sta->plink_state = NL80211_PLINK_LISTEN; 282 sta->plink_state = NL80211_PLINK_LISTEN;
@@ -333,9 +329,9 @@ static int sta_info_insert_drv_state(struct ieee80211_local *local,
333 } 329 }
334 330
335 if (sdata->vif.type == NL80211_IFTYPE_ADHOC) { 331 if (sdata->vif.type == NL80211_IFTYPE_ADHOC) {
336 printk(KERN_DEBUG 332 sdata_info(sdata,
337 "%s: failed to move IBSS STA %pM to state %d (%d) - keeping it anyway.\n", 333 "failed to move IBSS STA %pM to state %d (%d) - keeping it anyway\n",
338 sdata->name, sta->sta.addr, state + 1, err); 334 sta->sta.addr, state + 1, err);
339 err = 0; 335 err = 0;
340 } 336 }
341 337
@@ -390,9 +386,7 @@ static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU)
390 sinfo.generation = local->sta_generation; 386 sinfo.generation = local->sta_generation;
391 cfg80211_new_sta(sdata->dev, sta->sta.addr, &sinfo, GFP_KERNEL); 387 cfg80211_new_sta(sdata->dev, sta->sta.addr, &sinfo, GFP_KERNEL);
392 388
393#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 389 sta_dbg(sdata, "Inserted STA %pM\n", sta->sta.addr);
394 wiphy_debug(local->hw.wiphy, "Inserted STA %pM\n", sta->sta.addr);
395#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
396 390
397 /* move reference to rcu-protected */ 391 /* move reference to rcu-protected */
398 rcu_read_lock(); 392 rcu_read_lock();
@@ -618,10 +612,8 @@ static bool sta_info_cleanup_expire_buffered_ac(struct ieee80211_local *local,
618 break; 612 break;
619 613
620 local->total_ps_buffered--; 614 local->total_ps_buffered--;
621#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG 615 ps_dbg(sta->sdata, "Buffered frame expired (STA %pM)\n",
622 printk(KERN_DEBUG "Buffered frame expired (STA %pM)\n",
623 sta->sta.addr); 616 sta->sta.addr);
624#endif
625 dev_kfree_skb(skb); 617 dev_kfree_skb(skb);
626 } 618 }
627 619
@@ -747,9 +739,8 @@ int __must_check __sta_info_destroy(struct sta_info *sta)
747 mesh_accept_plinks_update(sdata); 739 mesh_accept_plinks_update(sdata);
748#endif 740#endif
749 741
750#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 742 sta_dbg(sdata, "Removed STA %pM\n", sta->sta.addr);
751 wiphy_debug(local->hw.wiphy, "Removed STA %pM\n", sta->sta.addr); 743
752#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
753 cancel_work_sync(&sta->drv_unblock_wk); 744 cancel_work_sync(&sta->drv_unblock_wk);
754 745
755 cfg80211_del_sta(sdata->dev, sta->sta.addr, GFP_KERNEL); 746 cfg80211_del_sta(sdata->dev, sta->sta.addr, GFP_KERNEL);
@@ -889,10 +880,8 @@ void ieee80211_sta_expire(struct ieee80211_sub_if_data *sdata,
889 continue; 880 continue;
890 881
891 if (time_after(jiffies, sta->last_rx + exp_time)) { 882 if (time_after(jiffies, sta->last_rx + exp_time)) {
892#ifdef CONFIG_MAC80211_IBSS_DEBUG 883 ibss_dbg(sdata, "expiring inactive STA %pM\n",
893 printk(KERN_DEBUG "%s: expiring inactive STA %pM\n", 884 sta->sta.addr);
894 sdata->name, sta->sta.addr);
895#endif
896 WARN_ON(__sta_info_destroy(sta)); 885 WARN_ON(__sta_info_destroy(sta));
897 } 886 }
898 } 887 }
@@ -990,11 +979,9 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
990 979
991 sta_info_recalc_tim(sta); 980 sta_info_recalc_tim(sta);
992 981
993#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG 982 ps_dbg(sdata,
994 printk(KERN_DEBUG "%s: STA %pM aid %d sending %d filtered/%d PS frames " 983 "STA %pM aid %d sending %d filtered/%d PS frames since STA not sleeping anymore\n",
995 "since STA not sleeping anymore\n", sdata->name,
996 sta->sta.addr, sta->sta.aid, filtered, buffered); 984 sta->sta.addr, sta->sta.aid, filtered, buffered);
997#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
998} 985}
999 986
1000static void ieee80211_send_null_response(struct ieee80211_sub_if_data *sdata, 987static void ieee80211_send_null_response(struct ieee80211_sub_if_data *sdata,
@@ -1384,10 +1371,8 @@ int sta_info_move_state(struct sta_info *sta,
1384 return -EINVAL; 1371 return -EINVAL;
1385 } 1372 }
1386 1373
1387#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 1374 sta_dbg(sta->sdata, "moving STA %pM to state %d\n",
1388 printk(KERN_DEBUG "%s: moving STA %pM to state %d\n", 1375 sta->sta.addr, new_state);
1389 sta->sdata->name, sta->sta.addr, new_state);
1390#endif
1391 1376
1392 /* 1377 /*
1393 * notify the driver before the actual changes so it can 1378 * notify the driver before the actual changes so it can
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index 28cfa981cfb..2ed2f27fe8a 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -155,13 +155,10 @@ static void ieee80211_handle_filtered_frame(struct ieee80211_local *local,
155 return; 155 return;
156 } 156 }
157 157
158#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 158 ps_dbg_ratelimited(sta->sdata,
159 if (net_ratelimit()) 159 "dropped TX filtered frame, queue_len=%d PS=%d @%lu\n",
160 wiphy_debug(local->hw.wiphy, 160 skb_queue_len(&sta->tx_filtered[ac]),
161 "dropped TX filtered frame, queue_len=%d PS=%d @%lu\n", 161 !!test_sta_flag(sta, WLAN_STA_PS_STA), jiffies);
162 skb_queue_len(&sta->tx_filtered[ac]),
163 !!test_sta_flag(sta, WLAN_STA_PS_STA), jiffies);
164#endif
165 dev_kfree_skb(skb); 162 dev_kfree_skb(skb);
166} 163}
167 164
@@ -520,36 +517,16 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
520 517
521 if (info->flags & IEEE80211_TX_INTFL_NL80211_FRAME_TX) { 518 if (info->flags & IEEE80211_TX_INTFL_NL80211_FRAME_TX) {
522 u64 cookie = (unsigned long)skb; 519 u64 cookie = (unsigned long)skb;
520 acked = info->flags & IEEE80211_TX_STAT_ACK;
523 521
524 if (ieee80211_is_nullfunc(hdr->frame_control) || 522 if (ieee80211_is_nullfunc(hdr->frame_control) ||
525 ieee80211_is_qos_nullfunc(hdr->frame_control)) { 523 ieee80211_is_qos_nullfunc(hdr->frame_control))
526 acked = info->flags & IEEE80211_TX_STAT_ACK;
527
528 cfg80211_probe_status(skb->dev, hdr->addr1, 524 cfg80211_probe_status(skb->dev, hdr->addr1,
529 cookie, acked, GFP_ATOMIC); 525 cookie, acked, GFP_ATOMIC);
530 } else { 526 else
531 struct ieee80211_work *wk;
532
533 rcu_read_lock();
534 list_for_each_entry_rcu(wk, &local->work_list, list) {
535 if (wk->type != IEEE80211_WORK_OFFCHANNEL_TX)
536 continue;
537 if (wk->offchan_tx.frame != skb)
538 continue;
539 wk->offchan_tx.status = true;
540 break;
541 }
542 rcu_read_unlock();
543 if (local->hw_roc_skb_for_status == skb) {
544 cookie = local->hw_roc_cookie ^ 2;
545 local->hw_roc_skb_for_status = NULL;
546 }
547
548 cfg80211_mgmt_tx_status( 527 cfg80211_mgmt_tx_status(
549 skb->dev, cookie, skb->data, skb->len, 528 skb->dev, cookie, skb->data, skb->len,
550 !!(info->flags & IEEE80211_TX_STAT_ACK), 529 acked, GFP_ATOMIC);
551 GFP_ATOMIC);
552 }
553 } 530 }
554 531
555 if (unlikely(info->ack_frame_id)) { 532 if (unlikely(info->ack_frame_id)) {
@@ -589,7 +566,7 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
589 /* send frame to monitor interfaces now */ 566 /* send frame to monitor interfaces now */
590 rtap_len = ieee80211_tx_radiotap_len(info); 567 rtap_len = ieee80211_tx_radiotap_len(info);
591 if (WARN_ON_ONCE(skb_headroom(skb) < rtap_len)) { 568 if (WARN_ON_ONCE(skb_headroom(skb) < rtap_len)) {
592 printk(KERN_ERR "ieee80211_tx_status: headroom too small\n"); 569 pr_err("ieee80211_tx_status: headroom too small\n");
593 dev_kfree_skb(skb); 570 dev_kfree_skb(skb);
594 return; 571 return;
595 } 572 }
diff --git a/net/mac80211/tkip.c b/net/mac80211/tkip.c
index 51077a956a8..57e14d59e12 100644
--- a/net/mac80211/tkip.c
+++ b/net/mac80211/tkip.c
@@ -260,17 +260,6 @@ int ieee80211_tkip_decrypt_data(struct crypto_cipher *tfm,
260 keyid = pos[3]; 260 keyid = pos[3];
261 iv32 = get_unaligned_le32(pos + 4); 261 iv32 = get_unaligned_le32(pos + 4);
262 pos += 8; 262 pos += 8;
263#ifdef CONFIG_MAC80211_TKIP_DEBUG
264 {
265 int i;
266 printk(KERN_DEBUG "TKIP decrypt: data(len=%zd)", payload_len);
267 for (i = 0; i < payload_len; i++)
268 printk(" %02x", payload[i]);
269 printk("\n");
270 printk(KERN_DEBUG "TKIP decrypt: iv16=%04x iv32=%08x\n",
271 iv16, iv32);
272 }
273#endif
274 263
275 if (!(keyid & (1 << 5))) 264 if (!(keyid & (1 << 5)))
276 return TKIP_DECRYPT_NO_EXT_IV; 265 return TKIP_DECRYPT_NO_EXT_IV;
@@ -281,16 +270,8 @@ int ieee80211_tkip_decrypt_data(struct crypto_cipher *tfm,
281 if (key->u.tkip.rx[queue].state != TKIP_STATE_NOT_INIT && 270 if (key->u.tkip.rx[queue].state != TKIP_STATE_NOT_INIT &&
282 (iv32 < key->u.tkip.rx[queue].iv32 || 271 (iv32 < key->u.tkip.rx[queue].iv32 ||
283 (iv32 == key->u.tkip.rx[queue].iv32 && 272 (iv32 == key->u.tkip.rx[queue].iv32 &&
284 iv16 <= key->u.tkip.rx[queue].iv16))) { 273 iv16 <= key->u.tkip.rx[queue].iv16)))
285#ifdef CONFIG_MAC80211_TKIP_DEBUG
286 printk(KERN_DEBUG "TKIP replay detected for RX frame from "
287 "%pM (RX IV (%04x,%02x) <= prev. IV (%04x,%02x)\n",
288 ta,
289 iv32, iv16, key->u.tkip.rx[queue].iv32,
290 key->u.tkip.rx[queue].iv16);
291#endif
292 return TKIP_DECRYPT_REPLAY; 274 return TKIP_DECRYPT_REPLAY;
293 }
294 275
295 if (only_iv) { 276 if (only_iv) {
296 res = TKIP_DECRYPT_OK; 277 res = TKIP_DECRYPT_OK;
@@ -302,22 +283,6 @@ int ieee80211_tkip_decrypt_data(struct crypto_cipher *tfm,
302 key->u.tkip.rx[queue].iv32 != iv32) { 283 key->u.tkip.rx[queue].iv32 != iv32) {
303 /* IV16 wrapped around - perform TKIP phase 1 */ 284 /* IV16 wrapped around - perform TKIP phase 1 */
304 tkip_mixing_phase1(tk, &key->u.tkip.rx[queue], ta, iv32); 285 tkip_mixing_phase1(tk, &key->u.tkip.rx[queue], ta, iv32);
305#ifdef CONFIG_MAC80211_TKIP_DEBUG
306 {
307 int i;
308 u8 key_offset = NL80211_TKIP_DATA_OFFSET_ENCR_KEY;
309 printk(KERN_DEBUG "TKIP decrypt: Phase1 TA=%pM"
310 " TK=", ta);
311 for (i = 0; i < 16; i++)
312 printk("%02x ",
313 key->conf.key[key_offset + i]);
314 printk("\n");
315 printk(KERN_DEBUG "TKIP decrypt: P1K=");
316 for (i = 0; i < 5; i++)
317 printk("%04x ", key->u.tkip.rx[queue].p1k[i]);
318 printk("\n");
319 }
320#endif
321 } 286 }
322 if (key->local->ops->update_tkip_key && 287 if (key->local->ops->update_tkip_key &&
323 key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE && 288 key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE &&
@@ -333,15 +298,6 @@ int ieee80211_tkip_decrypt_data(struct crypto_cipher *tfm,
333 } 298 }
334 299
335 tkip_mixing_phase2(tk, &key->u.tkip.rx[queue], iv16, rc4key); 300 tkip_mixing_phase2(tk, &key->u.tkip.rx[queue], iv16, rc4key);
336#ifdef CONFIG_MAC80211_TKIP_DEBUG
337 {
338 int i;
339 printk(KERN_DEBUG "TKIP decrypt: Phase2 rc4key=");
340 for (i = 0; i < 16; i++)
341 printk("%02x ", rc4key[i]);
342 printk("\n");
343 }
344#endif
345 301
346 res = ieee80211_wep_decrypt_data(tfm, rc4key, 16, pos, payload_len - 12); 302 res = ieee80211_wep_decrypt_data(tfm, rc4key, 16, pos, payload_len - 12);
347 done: 303 done:
diff --git a/net/mac80211/trace.c b/net/mac80211/trace.c
new file mode 100644
index 00000000000..386e45d8a95
--- /dev/null
+++ b/net/mac80211/trace.c
@@ -0,0 +1,75 @@
1/* bug in tracepoint.h, it should include this */
2#include <linux/module.h>
3
4/* sparse isn't too happy with all macros... */
5#ifndef __CHECKER__
6#include <net/cfg80211.h>
7#include "driver-ops.h"
8#include "debug.h"
9#define CREATE_TRACE_POINTS
10#include "trace.h"
11
12#ifdef CONFIG_MAC80211_MESSAGE_TRACING
13void __sdata_info(const char *fmt, ...)
14{
15 struct va_format vaf = {
16 .fmt = fmt,
17 };
18 va_list args;
19
20 va_start(args, fmt);
21 vaf.va = &args;
22
23 pr_info("%pV", &vaf);
24 trace_mac80211_info(&vaf);
25 va_end(args);
26}
27
28void __sdata_dbg(bool print, const char *fmt, ...)
29{
30 struct va_format vaf = {
31 .fmt = fmt,
32 };
33 va_list args;
34
35 va_start(args, fmt);
36 vaf.va = &args;
37
38 if (print)
39 pr_debug("%pV", &vaf);
40 trace_mac80211_dbg(&vaf);
41 va_end(args);
42}
43
44void __sdata_err(const char *fmt, ...)
45{
46 struct va_format vaf = {
47 .fmt = fmt,
48 };
49 va_list args;
50
51 va_start(args, fmt);
52 vaf.va = &args;
53
54 pr_err("%pV", &vaf);
55 trace_mac80211_err(&vaf);
56 va_end(args);
57}
58
59void __wiphy_dbg(struct wiphy *wiphy, bool print, const char *fmt, ...)
60{
61 struct va_format vaf = {
62 .fmt = fmt,
63 };
64 va_list args;
65
66 va_start(args, fmt);
67 vaf.va = &args;
68
69 if (print)
70 wiphy_dbg(wiphy, "%pV", &vaf);
71 trace_mac80211_dbg(&vaf);
72 va_end(args);
73}
74#endif
75#endif
diff --git a/net/mac80211/driver-trace.h b/net/mac80211/trace.h
index 6de00b2c268..2e60f4acd02 100644
--- a/net/mac80211/driver-trace.h
+++ b/net/mac80211/trace.h
@@ -1218,6 +1218,32 @@ DEFINE_EVENT(release_evt, drv_allow_buffered_frames,
1218 TP_ARGS(local, sta, tids, num_frames, reason, more_data) 1218 TP_ARGS(local, sta, tids, num_frames, reason, more_data)
1219); 1219);
1220 1220
1221TRACE_EVENT(drv_get_rssi,
1222 TP_PROTO(struct ieee80211_local *local, struct ieee80211_sta *sta,
1223 s8 rssi, int ret),
1224
1225 TP_ARGS(local, sta, rssi, ret),
1226
1227 TP_STRUCT__entry(
1228 LOCAL_ENTRY
1229 STA_ENTRY
1230 __field(s8, rssi)
1231 __field(int, ret)
1232 ),
1233
1234 TP_fast_assign(
1235 LOCAL_ASSIGN;
1236 STA_ASSIGN;
1237 __entry->rssi = rssi;
1238 __entry->ret = ret;
1239 ),
1240
1241 TP_printk(
1242 LOCAL_PR_FMT STA_PR_FMT " rssi:%d ret:%d",
1243 LOCAL_PR_ARG, STA_PR_ARG, __entry->rssi, __entry->ret
1244 )
1245);
1246
1221/* 1247/*
1222 * Tracing for API calls that drivers call. 1248 * Tracing for API calls that drivers call.
1223 */ 1249 */
@@ -1606,10 +1632,49 @@ TRACE_EVENT(stop_queue,
1606 LOCAL_PR_ARG, __entry->queue, __entry->reason 1632 LOCAL_PR_ARG, __entry->queue, __entry->reason
1607 ) 1633 )
1608); 1634);
1635
1636#ifdef CONFIG_MAC80211_MESSAGE_TRACING
1637#undef TRACE_SYSTEM
1638#define TRACE_SYSTEM mac80211_msg
1639
1640#define MAX_MSG_LEN 100
1641
1642DECLARE_EVENT_CLASS(mac80211_msg_event,
1643 TP_PROTO(struct va_format *vaf),
1644
1645 TP_ARGS(vaf),
1646
1647 TP_STRUCT__entry(
1648 __dynamic_array(char, msg, MAX_MSG_LEN)
1649 ),
1650
1651 TP_fast_assign(
1652 WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
1653 MAX_MSG_LEN, vaf->fmt,
1654 *vaf->va) >= MAX_MSG_LEN);
1655 ),
1656
1657 TP_printk("%s", __get_str(msg))
1658);
1659
1660DEFINE_EVENT(mac80211_msg_event, mac80211_info,
1661 TP_PROTO(struct va_format *vaf),
1662 TP_ARGS(vaf)
1663);
1664DEFINE_EVENT(mac80211_msg_event, mac80211_dbg,
1665 TP_PROTO(struct va_format *vaf),
1666 TP_ARGS(vaf)
1667);
1668DEFINE_EVENT(mac80211_msg_event, mac80211_err,
1669 TP_PROTO(struct va_format *vaf),
1670 TP_ARGS(vaf)
1671);
1672#endif
1673
1609#endif /* !__MAC80211_DRIVER_TRACE || TRACE_HEADER_MULTI_READ */ 1674#endif /* !__MAC80211_DRIVER_TRACE || TRACE_HEADER_MULTI_READ */
1610 1675
1611#undef TRACE_INCLUDE_PATH 1676#undef TRACE_INCLUDE_PATH
1612#define TRACE_INCLUDE_PATH . 1677#define TRACE_INCLUDE_PATH .
1613#undef TRACE_INCLUDE_FILE 1678#undef TRACE_INCLUDE_FILE
1614#define TRACE_INCLUDE_FILE driver-trace 1679#define TRACE_INCLUDE_FILE trace
1615#include <trace/define_trace.h> 1680#include <trace/define_trace.h>
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index e453212fa17..ec8f5346737 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -175,12 +175,6 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx,
175 return cpu_to_le16(dur); 175 return cpu_to_le16(dur);
176} 176}
177 177
178static inline int is_ieee80211_device(struct ieee80211_local *local,
179 struct net_device *dev)
180{
181 return local == wdev_priv(dev->ieee80211_ptr);
182}
183
184/* tx handlers */ 178/* tx handlers */
185static ieee80211_tx_result debug_noinline 179static ieee80211_tx_result debug_noinline
186ieee80211_tx_h_dynamic_ps(struct ieee80211_tx_data *tx) 180ieee80211_tx_h_dynamic_ps(struct ieee80211_tx_data *tx)
@@ -297,10 +291,10 @@ ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx)
297 if (unlikely(!assoc && 291 if (unlikely(!assoc &&
298 ieee80211_is_data(hdr->frame_control))) { 292 ieee80211_is_data(hdr->frame_control))) {
299#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 293#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
300 printk(KERN_DEBUG "%s: dropped data frame to not " 294 sdata_info(tx->sdata,
301 "associated station %pM\n", 295 "dropped data frame to not associated station %pM\n",
302 tx->sdata->name, hdr->addr1); 296 hdr->addr1);
303#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */ 297#endif
304 I802_DEBUG_INC(tx->local->tx_handlers_drop_not_assoc); 298 I802_DEBUG_INC(tx->local->tx_handlers_drop_not_assoc);
305 return TX_DROP; 299 return TX_DROP;
306 } 300 }
@@ -367,10 +361,7 @@ static void purge_old_ps_buffers(struct ieee80211_local *local)
367 rcu_read_unlock(); 361 rcu_read_unlock();
368 362
369 local->total_ps_buffered = total; 363 local->total_ps_buffered = total;
370#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG 364 ps_dbg_hw(&local->hw, "PS buffers full - purged %d frames\n", purged);
371 wiphy_debug(local->hw.wiphy, "PS buffers full - purged %d frames\n",
372 purged);
373#endif
374} 365}
375 366
376static ieee80211_tx_result 367static ieee80211_tx_result
@@ -412,10 +403,8 @@ ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx)
412 purge_old_ps_buffers(tx->local); 403 purge_old_ps_buffers(tx->local);
413 404
414 if (skb_queue_len(&tx->sdata->bss->ps_bc_buf) >= AP_MAX_BC_BUFFER) { 405 if (skb_queue_len(&tx->sdata->bss->ps_bc_buf) >= AP_MAX_BC_BUFFER) {
415#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG 406 ps_dbg(tx->sdata,
416 net_dbg_ratelimited("%s: BC TX buffer full - dropping the oldest frame\n", 407 "BC TX buffer full - dropping the oldest frame\n");
417 tx->sdata->name);
418#endif
419 dev_kfree_skb(skb_dequeue(&tx->sdata->bss->ps_bc_buf)); 408 dev_kfree_skb(skb_dequeue(&tx->sdata->bss->ps_bc_buf));
420 } else 409 } else
421 tx->local->total_ps_buffered++; 410 tx->local->total_ps_buffered++;
@@ -466,18 +455,15 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
466 return TX_CONTINUE; 455 return TX_CONTINUE;
467 } 456 }
468 457
469#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG 458 ps_dbg(sta->sdata, "STA %pM aid %d: PS buffer for AC %d\n",
470 printk(KERN_DEBUG "STA %pM aid %d: PS buffer for AC %d\n",
471 sta->sta.addr, sta->sta.aid, ac); 459 sta->sta.addr, sta->sta.aid, ac);
472#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
473 if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER) 460 if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER)
474 purge_old_ps_buffers(tx->local); 461 purge_old_ps_buffers(tx->local);
475 if (skb_queue_len(&sta->ps_tx_buf[ac]) >= STA_MAX_TX_BUFFER) { 462 if (skb_queue_len(&sta->ps_tx_buf[ac]) >= STA_MAX_TX_BUFFER) {
476 struct sk_buff *old = skb_dequeue(&sta->ps_tx_buf[ac]); 463 struct sk_buff *old = skb_dequeue(&sta->ps_tx_buf[ac]);
477#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG 464 ps_dbg(tx->sdata,
478 net_dbg_ratelimited("%s: STA %pM TX buffer for AC %d full - dropping oldest frame\n", 465 "STA %pM TX buffer for AC %d full - dropping oldest frame\n",
479 tx->sdata->name, sta->sta.addr, ac); 466 sta->sta.addr, ac);
480#endif
481 dev_kfree_skb(old); 467 dev_kfree_skb(old);
482 } else 468 } else
483 tx->local->total_ps_buffered++; 469 tx->local->total_ps_buffered++;
@@ -499,14 +485,11 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
499 sta_info_recalc_tim(sta); 485 sta_info_recalc_tim(sta);
500 486
501 return TX_QUEUED; 487 return TX_QUEUED;
488 } else if (unlikely(test_sta_flag(sta, WLAN_STA_PS_STA))) {
489 ps_dbg(tx->sdata,
490 "STA %pM in PS mode, but polling/in SP -> send frame\n",
491 sta->sta.addr);
502 } 492 }
503#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
504 else if (unlikely(test_sta_flag(sta, WLAN_STA_PS_STA))) {
505 printk(KERN_DEBUG
506 "%s: STA %pM in PS mode, but polling/in SP -> send frame\n",
507 tx->sdata->name, sta->sta.addr);
508 }
509#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
510 493
511 return TX_CONTINUE; 494 return TX_CONTINUE;
512} 495}
@@ -1965,7 +1948,7 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
1965 (cpu_to_be16(ethertype) != sdata->control_port_protocol || 1948 (cpu_to_be16(ethertype) != sdata->control_port_protocol ||
1966 !ether_addr_equal(sdata->vif.addr, skb->data + ETH_ALEN)))) { 1949 !ether_addr_equal(sdata->vif.addr, skb->data + ETH_ALEN)))) {
1967#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 1950#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
1968 net_dbg_ratelimited("%s: dropped frame to %pM (unauthorized port)\n", 1951 net_info_ratelimited("%s: dropped frame to %pM (unauthorized port)\n",
1969 dev->name, hdr.addr1); 1952 dev->name, hdr.addr1);
1970#endif 1953#endif
1971 1954
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 8dd4712620f..242ecde381f 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -804,7 +804,7 @@ void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata,
804 struct ieee80211_local *local = sdata->local; 804 struct ieee80211_local *local = sdata->local;
805 struct ieee80211_tx_queue_params qparam; 805 struct ieee80211_tx_queue_params qparam;
806 int ac; 806 int ac;
807 bool use_11b; 807 bool use_11b, enable_qos;
808 int aCWmin, aCWmax; 808 int aCWmin, aCWmax;
809 809
810 if (!local->ops->conf_tx) 810 if (!local->ops->conf_tx)
@@ -818,6 +818,13 @@ void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata,
818 use_11b = (local->hw.conf.channel->band == IEEE80211_BAND_2GHZ) && 818 use_11b = (local->hw.conf.channel->band == IEEE80211_BAND_2GHZ) &&
819 !(sdata->flags & IEEE80211_SDATA_OPERATING_GMODE); 819 !(sdata->flags & IEEE80211_SDATA_OPERATING_GMODE);
820 820
821 /*
822 * By default disable QoS in STA mode for old access points, which do
823 * not support 802.11e. New APs will provide proper queue parameters,
824 * that we will configure later.
825 */
826 enable_qos = (sdata->vif.type != NL80211_IFTYPE_STATION);
827
821 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { 828 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
822 /* Set defaults according to 802.11-2007 Table 7-37 */ 829 /* Set defaults according to 802.11-2007 Table 7-37 */
823 aCWmax = 1023; 830 aCWmax = 1023;
@@ -826,38 +833,47 @@ void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata,
826 else 833 else
827 aCWmin = 15; 834 aCWmin = 15;
828 835
829 switch (ac) { 836 if (enable_qos) {
830 case IEEE80211_AC_BK: 837 switch (ac) {
831 qparam.cw_max = aCWmax; 838 case IEEE80211_AC_BK:
832 qparam.cw_min = aCWmin; 839 qparam.cw_max = aCWmax;
833 qparam.txop = 0; 840 qparam.cw_min = aCWmin;
834 qparam.aifs = 7; 841 qparam.txop = 0;
835 break; 842 qparam.aifs = 7;
836 default: /* never happens but let's not leave undefined */ 843 break;
837 case IEEE80211_AC_BE: 844 /* never happens but let's not leave undefined */
845 default:
846 case IEEE80211_AC_BE:
847 qparam.cw_max = aCWmax;
848 qparam.cw_min = aCWmin;
849 qparam.txop = 0;
850 qparam.aifs = 3;
851 break;
852 case IEEE80211_AC_VI:
853 qparam.cw_max = aCWmin;
854 qparam.cw_min = (aCWmin + 1) / 2 - 1;
855 if (use_11b)
856 qparam.txop = 6016/32;
857 else
858 qparam.txop = 3008/32;
859 qparam.aifs = 2;
860 break;
861 case IEEE80211_AC_VO:
862 qparam.cw_max = (aCWmin + 1) / 2 - 1;
863 qparam.cw_min = (aCWmin + 1) / 4 - 1;
864 if (use_11b)
865 qparam.txop = 3264/32;
866 else
867 qparam.txop = 1504/32;
868 qparam.aifs = 2;
869 break;
870 }
871 } else {
872 /* Confiure old 802.11b/g medium access rules. */
838 qparam.cw_max = aCWmax; 873 qparam.cw_max = aCWmax;
839 qparam.cw_min = aCWmin; 874 qparam.cw_min = aCWmin;
840 qparam.txop = 0; 875 qparam.txop = 0;
841 qparam.aifs = 3;
842 break;
843 case IEEE80211_AC_VI:
844 qparam.cw_max = aCWmin;
845 qparam.cw_min = (aCWmin + 1) / 2 - 1;
846 if (use_11b)
847 qparam.txop = 6016/32;
848 else
849 qparam.txop = 3008/32;
850 qparam.aifs = 2;
851 break;
852 case IEEE80211_AC_VO:
853 qparam.cw_max = (aCWmin + 1) / 2 - 1;
854 qparam.cw_min = (aCWmin + 1) / 4 - 1;
855 if (use_11b)
856 qparam.txop = 3264/32;
857 else
858 qparam.txop = 1504/32;
859 qparam.aifs = 2; 876 qparam.aifs = 2;
860 break;
861 } 877 }
862 878
863 qparam.uapsd = false; 879 qparam.uapsd = false;
@@ -866,12 +882,8 @@ void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata,
866 drv_conf_tx(local, sdata, ac, &qparam); 882 drv_conf_tx(local, sdata, ac, &qparam);
867 } 883 }
868 884
869 /* after reinitialize QoS TX queues setting to default,
870 * disable QoS at all */
871
872 if (sdata->vif.type != NL80211_IFTYPE_MONITOR) { 885 if (sdata->vif.type != NL80211_IFTYPE_MONITOR) {
873 sdata->vif.bss_conf.qos = 886 sdata->vif.bss_conf.qos = enable_qos;
874 sdata->vif.type != NL80211_IFTYPE_STATION;
875 if (bss_notify) 887 if (bss_notify)
876 ieee80211_bss_info_change_notify(sdata, 888 ieee80211_bss_info_change_notify(sdata,
877 BSS_CHANGED_QOS); 889 BSS_CHANGED_QOS);
@@ -1267,14 +1279,19 @@ int ieee80211_reconfig(struct ieee80211_local *local)
1267 /* add STAs back */ 1279 /* add STAs back */
1268 mutex_lock(&local->sta_mtx); 1280 mutex_lock(&local->sta_mtx);
1269 list_for_each_entry(sta, &local->sta_list, list) { 1281 list_for_each_entry(sta, &local->sta_list, list) {
1270 if (sta->uploaded) { 1282 enum ieee80211_sta_state state;
1271 enum ieee80211_sta_state state;
1272 1283
1273 for (state = IEEE80211_STA_NOTEXIST; 1284 if (!sta->uploaded)
1274 state < sta->sta_state; state++) 1285 continue;
1275 WARN_ON(drv_sta_state(local, sta->sdata, sta, 1286
1276 state, state + 1)); 1287 /* AP-mode stations will be added later */
1277 } 1288 if (sta->sdata->vif.type == NL80211_IFTYPE_AP)
1289 continue;
1290
1291 for (state = IEEE80211_STA_NOTEXIST;
1292 state < sta->sta_state; state++)
1293 WARN_ON(drv_sta_state(local, sta->sdata, sta, state,
1294 state + 1));
1278 } 1295 }
1279 mutex_unlock(&local->sta_mtx); 1296 mutex_unlock(&local->sta_mtx);
1280 1297
@@ -1371,11 +1388,32 @@ int ieee80211_reconfig(struct ieee80211_local *local)
1371 } 1388 }
1372 } 1389 }
1373 1390
1391 /* APs are now beaconing, add back stations */
1392 mutex_lock(&local->sta_mtx);
1393 list_for_each_entry(sta, &local->sta_list, list) {
1394 enum ieee80211_sta_state state;
1395
1396 if (!sta->uploaded)
1397 continue;
1398
1399 if (sta->sdata->vif.type != NL80211_IFTYPE_AP)
1400 continue;
1401
1402 for (state = IEEE80211_STA_NOTEXIST;
1403 state < sta->sta_state; state++)
1404 WARN_ON(drv_sta_state(local, sta->sdata, sta, state,
1405 state + 1));
1406 }
1407 mutex_unlock(&local->sta_mtx);
1408
1374 /* add back keys */ 1409 /* add back keys */
1375 list_for_each_entry(sdata, &local->interfaces, list) 1410 list_for_each_entry(sdata, &local->interfaces, list)
1376 if (ieee80211_sdata_running(sdata)) 1411 if (ieee80211_sdata_running(sdata))
1377 ieee80211_enable_keys(sdata); 1412 ieee80211_enable_keys(sdata);
1378 1413
1414 local->in_reconfig = false;
1415 barrier();
1416
1379 wake_up: 1417 wake_up:
1380 /* 1418 /*
1381 * Clear the WLAN_STA_BLOCK_BA flag so new aggregation 1419 * Clear the WLAN_STA_BLOCK_BA flag so new aggregation
diff --git a/net/mac80211/wme.c b/net/mac80211/wme.c
index c3d643a6536..cea06e9f26f 100644
--- a/net/mac80211/wme.c
+++ b/net/mac80211/wme.c
@@ -52,11 +52,11 @@ static int wme_downgrade_ac(struct sk_buff *skb)
52 } 52 }
53} 53}
54 54
55static u16 ieee80211_downgrade_queue(struct ieee80211_local *local, 55static u16 ieee80211_downgrade_queue(struct ieee80211_sub_if_data *sdata,
56 struct sk_buff *skb) 56 struct sk_buff *skb)
57{ 57{
58 /* in case we are a client verify acm is not set for this ac */ 58 /* in case we are a client verify acm is not set for this ac */
59 while (unlikely(local->wmm_acm & BIT(skb->priority))) { 59 while (unlikely(sdata->wmm_acm & BIT(skb->priority))) {
60 if (wme_downgrade_ac(skb)) { 60 if (wme_downgrade_ac(skb)) {
61 /* 61 /*
62 * This should not really happen. The AP has marked all 62 * This should not really happen. The AP has marked all
@@ -73,10 +73,11 @@ static u16 ieee80211_downgrade_queue(struct ieee80211_local *local,
73} 73}
74 74
75/* Indicate which queue to use for this fully formed 802.11 frame */ 75/* Indicate which queue to use for this fully formed 802.11 frame */
76u16 ieee80211_select_queue_80211(struct ieee80211_local *local, 76u16 ieee80211_select_queue_80211(struct ieee80211_sub_if_data *sdata,
77 struct sk_buff *skb, 77 struct sk_buff *skb,
78 struct ieee80211_hdr *hdr) 78 struct ieee80211_hdr *hdr)
79{ 79{
80 struct ieee80211_local *local = sdata->local;
80 u8 *p; 81 u8 *p;
81 82
82 if (local->hw.queues < IEEE80211_NUM_ACS) 83 if (local->hw.queues < IEEE80211_NUM_ACS)
@@ -94,7 +95,7 @@ u16 ieee80211_select_queue_80211(struct ieee80211_local *local,
94 p = ieee80211_get_qos_ctl(hdr); 95 p = ieee80211_get_qos_ctl(hdr);
95 skb->priority = *p & IEEE80211_QOS_CTL_TAG1D_MASK; 96 skb->priority = *p & IEEE80211_QOS_CTL_TAG1D_MASK;
96 97
97 return ieee80211_downgrade_queue(local, skb); 98 return ieee80211_downgrade_queue(sdata, skb);
98} 99}
99 100
100/* Indicate which queue to use. */ 101/* Indicate which queue to use. */
@@ -156,7 +157,7 @@ u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata,
156 * data frame has */ 157 * data frame has */
157 skb->priority = cfg80211_classify8021d(skb); 158 skb->priority = cfg80211_classify8021d(skb);
158 159
159 return ieee80211_downgrade_queue(local, skb); 160 return ieee80211_downgrade_queue(sdata, skb);
160} 161}
161 162
162void ieee80211_set_qos_hdr(struct ieee80211_sub_if_data *sdata, 163void ieee80211_set_qos_hdr(struct ieee80211_sub_if_data *sdata,
diff --git a/net/mac80211/wme.h b/net/mac80211/wme.h
index ca80818b7b6..7fea4bb8acb 100644
--- a/net/mac80211/wme.h
+++ b/net/mac80211/wme.h
@@ -15,7 +15,7 @@
15 15
16extern const int ieee802_1d_to_ac[8]; 16extern const int ieee802_1d_to_ac[8];
17 17
18u16 ieee80211_select_queue_80211(struct ieee80211_local *local, 18u16 ieee80211_select_queue_80211(struct ieee80211_sub_if_data *sdata,
19 struct sk_buff *skb, 19 struct sk_buff *skb,
20 struct ieee80211_hdr *hdr); 20 struct ieee80211_hdr *hdr);
21u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata, 21u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata,
diff --git a/net/mac80211/work.c b/net/mac80211/work.c
deleted file mode 100644
index b2650a9d45f..00000000000
--- a/net/mac80211/work.c
+++ /dev/null
@@ -1,370 +0,0 @@
1/*
2 * mac80211 work implementation
3 *
4 * Copyright 2003-2008, Jouni Malinen <j@w1.fi>
5 * Copyright 2004, Instant802 Networks, Inc.
6 * Copyright 2005, Devicescape Software, Inc.
7 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
8 * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
9 * Copyright 2009, Johannes Berg <johannes@sipsolutions.net>
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License version 2 as
13 * published by the Free Software Foundation.
14 */
15
16#include <linux/delay.h>
17#include <linux/if_ether.h>
18#include <linux/skbuff.h>
19#include <linux/if_arp.h>
20#include <linux/etherdevice.h>
21#include <linux/crc32.h>
22#include <linux/slab.h>
23#include <net/mac80211.h>
24#include <asm/unaligned.h>
25
26#include "ieee80211_i.h"
27#include "rate.h"
28#include "driver-ops.h"
29
30enum work_action {
31 WORK_ACT_NONE,
32 WORK_ACT_TIMEOUT,
33};
34
35
36/* utils */
37static inline void ASSERT_WORK_MTX(struct ieee80211_local *local)
38{
39 lockdep_assert_held(&local->mtx);
40}
41
42/*
43 * We can have multiple work items (and connection probing)
44 * scheduling this timer, but we need to take care to only
45 * reschedule it when it should fire _earlier_ than it was
46 * asked for before, or if it's not pending right now. This
47 * function ensures that. Note that it then is required to
48 * run this function for all timeouts after the first one
49 * has happened -- the work that runs from this timer will
50 * do that.
51 */
52static void run_again(struct ieee80211_local *local,
53 unsigned long timeout)
54{
55 ASSERT_WORK_MTX(local);
56
57 if (!timer_pending(&local->work_timer) ||
58 time_before(timeout, local->work_timer.expires))
59 mod_timer(&local->work_timer, timeout);
60}
61
62void free_work(struct ieee80211_work *wk)
63{
64 kfree_rcu(wk, rcu_head);
65}
66
67static enum work_action __must_check
68ieee80211_remain_on_channel_timeout(struct ieee80211_work *wk)
69{
70 /*
71 * First time we run, do nothing -- the generic code will
72 * have switched to the right channel etc.
73 */
74 if (!wk->started) {
75 wk->timeout = jiffies + msecs_to_jiffies(wk->remain.duration);
76
77 cfg80211_ready_on_channel(wk->sdata->dev, (unsigned long) wk,
78 wk->chan, wk->chan_type,
79 wk->remain.duration, GFP_KERNEL);
80
81 return WORK_ACT_NONE;
82 }
83
84 return WORK_ACT_TIMEOUT;
85}
86
87static enum work_action __must_check
88ieee80211_offchannel_tx(struct ieee80211_work *wk)
89{
90 if (!wk->started) {
91 wk->timeout = jiffies + msecs_to_jiffies(wk->offchan_tx.wait);
92
93 /*
94 * After this, offchan_tx.frame remains but now is no
95 * longer a valid pointer -- we still need it as the
96 * cookie for canceling this work/status matching.
97 */
98 ieee80211_tx_skb(wk->sdata, wk->offchan_tx.frame);
99
100 return WORK_ACT_NONE;
101 }
102
103 return WORK_ACT_TIMEOUT;
104}
105
106static void ieee80211_work_timer(unsigned long data)
107{
108 struct ieee80211_local *local = (void *) data;
109
110 if (local->quiescing)
111 return;
112
113 ieee80211_queue_work(&local->hw, &local->work_work);
114}
115
116static void ieee80211_work_work(struct work_struct *work)
117{
118 struct ieee80211_local *local =
119 container_of(work, struct ieee80211_local, work_work);
120 struct ieee80211_work *wk, *tmp;
121 LIST_HEAD(free_work);
122 enum work_action rma;
123 bool remain_off_channel = false;
124
125 /*
126 * ieee80211_queue_work() should have picked up most cases,
127 * here we'll pick the rest.
128 */
129 if (WARN(local->suspended, "work scheduled while going to suspend\n"))
130 return;
131
132 mutex_lock(&local->mtx);
133
134 if (local->scanning) {
135 mutex_unlock(&local->mtx);
136 return;
137 }
138
139 ieee80211_recalc_idle(local);
140
141 list_for_each_entry_safe(wk, tmp, &local->work_list, list) {
142 bool started = wk->started;
143
144 /* mark work as started if it's on the current off-channel */
145 if (!started && local->tmp_channel &&
146 wk->chan == local->tmp_channel &&
147 wk->chan_type == local->tmp_channel_type) {
148 started = true;
149 wk->timeout = jiffies;
150 }
151
152 if (!started && !local->tmp_channel) {
153 ieee80211_offchannel_stop_vifs(local, true);
154
155 local->tmp_channel = wk->chan;
156 local->tmp_channel_type = wk->chan_type;
157
158 ieee80211_hw_config(local, 0);
159
160 started = true;
161 wk->timeout = jiffies;
162 }
163
164 /* don't try to work with items that aren't started */
165 if (!started)
166 continue;
167
168 if (time_is_after_jiffies(wk->timeout)) {
169 /*
170 * This work item isn't supposed to be worked on
171 * right now, but take care to adjust the timer
172 * properly.
173 */
174 run_again(local, wk->timeout);
175 continue;
176 }
177
178 switch (wk->type) {
179 default:
180 WARN_ON(1);
181 /* nothing */
182 rma = WORK_ACT_NONE;
183 break;
184 case IEEE80211_WORK_ABORT:
185 rma = WORK_ACT_TIMEOUT;
186 break;
187 case IEEE80211_WORK_REMAIN_ON_CHANNEL:
188 rma = ieee80211_remain_on_channel_timeout(wk);
189 break;
190 case IEEE80211_WORK_OFFCHANNEL_TX:
191 rma = ieee80211_offchannel_tx(wk);
192 break;
193 }
194
195 wk->started = started;
196
197 switch (rma) {
198 case WORK_ACT_NONE:
199 /* might have changed the timeout */
200 run_again(local, wk->timeout);
201 break;
202 case WORK_ACT_TIMEOUT:
203 list_del_rcu(&wk->list);
204 synchronize_rcu();
205 list_add(&wk->list, &free_work);
206 break;
207 default:
208 WARN(1, "unexpected: %d", rma);
209 }
210 }
211
212 list_for_each_entry(wk, &local->work_list, list) {
213 if (!wk->started)
214 continue;
215 if (wk->chan != local->tmp_channel ||
216 wk->chan_type != local->tmp_channel_type)
217 continue;
218 remain_off_channel = true;
219 }
220
221 if (!remain_off_channel && local->tmp_channel) {
222 local->tmp_channel = NULL;
223 ieee80211_hw_config(local, 0);
224
225 ieee80211_offchannel_return(local, true);
226
227 /* give connection some time to breathe */
228 run_again(local, jiffies + HZ/2);
229 }
230
231 ieee80211_recalc_idle(local);
232 ieee80211_run_deferred_scan(local);
233
234 mutex_unlock(&local->mtx);
235
236 list_for_each_entry_safe(wk, tmp, &free_work, list) {
237 wk->done(wk, NULL);
238 list_del(&wk->list);
239 kfree(wk);
240 }
241}
242
243void ieee80211_add_work(struct ieee80211_work *wk)
244{
245 struct ieee80211_local *local;
246
247 if (WARN_ON(!wk->chan))
248 return;
249
250 if (WARN_ON(!wk->sdata))
251 return;
252
253 if (WARN_ON(!wk->done))
254 return;
255
256 if (WARN_ON(!ieee80211_sdata_running(wk->sdata)))
257 return;
258
259 wk->started = false;
260
261 local = wk->sdata->local;
262 mutex_lock(&local->mtx);
263 list_add_tail(&wk->list, &local->work_list);
264 mutex_unlock(&local->mtx);
265
266 ieee80211_queue_work(&local->hw, &local->work_work);
267}
268
269void ieee80211_work_init(struct ieee80211_local *local)
270{
271 INIT_LIST_HEAD(&local->work_list);
272 setup_timer(&local->work_timer, ieee80211_work_timer,
273 (unsigned long)local);
274 INIT_WORK(&local->work_work, ieee80211_work_work);
275}
276
277void ieee80211_work_purge(struct ieee80211_sub_if_data *sdata)
278{
279 struct ieee80211_local *local = sdata->local;
280 struct ieee80211_work *wk;
281 bool cleanup = false;
282
283 mutex_lock(&local->mtx);
284 list_for_each_entry(wk, &local->work_list, list) {
285 if (wk->sdata != sdata)
286 continue;
287 cleanup = true;
288 wk->type = IEEE80211_WORK_ABORT;
289 wk->started = true;
290 wk->timeout = jiffies;
291 }
292 mutex_unlock(&local->mtx);
293
294 /* run cleanups etc. */
295 if (cleanup)
296 ieee80211_work_work(&local->work_work);
297
298 mutex_lock(&local->mtx);
299 list_for_each_entry(wk, &local->work_list, list) {
300 if (wk->sdata != sdata)
301 continue;
302 WARN_ON(1);
303 break;
304 }
305 mutex_unlock(&local->mtx);
306}
307
308static enum work_done_result ieee80211_remain_done(struct ieee80211_work *wk,
309 struct sk_buff *skb)
310{
311 /*
312 * We are done serving the remain-on-channel command.
313 */
314 cfg80211_remain_on_channel_expired(wk->sdata->dev, (unsigned long) wk,
315 wk->chan, wk->chan_type,
316 GFP_KERNEL);
317
318 return WORK_DONE_DESTROY;
319}
320
321int ieee80211_wk_remain_on_channel(struct ieee80211_sub_if_data *sdata,
322 struct ieee80211_channel *chan,
323 enum nl80211_channel_type channel_type,
324 unsigned int duration, u64 *cookie)
325{
326 struct ieee80211_work *wk;
327
328 wk = kzalloc(sizeof(*wk), GFP_KERNEL);
329 if (!wk)
330 return -ENOMEM;
331
332 wk->type = IEEE80211_WORK_REMAIN_ON_CHANNEL;
333 wk->chan = chan;
334 wk->chan_type = channel_type;
335 wk->sdata = sdata;
336 wk->done = ieee80211_remain_done;
337
338 wk->remain.duration = duration;
339
340 *cookie = (unsigned long) wk;
341
342 ieee80211_add_work(wk);
343
344 return 0;
345}
346
347int ieee80211_wk_cancel_remain_on_channel(struct ieee80211_sub_if_data *sdata,
348 u64 cookie)
349{
350 struct ieee80211_local *local = sdata->local;
351 struct ieee80211_work *wk, *tmp;
352 bool found = false;
353
354 mutex_lock(&local->mtx);
355 list_for_each_entry_safe(wk, tmp, &local->work_list, list) {
356 if ((unsigned long) wk == cookie) {
357 wk->timeout = jiffies;
358 found = true;
359 break;
360 }
361 }
362 mutex_unlock(&local->mtx);
363
364 if (!found)
365 return -ENOENT;
366
367 ieee80211_queue_work(&local->hw, &local->work_work);
368
369 return 0;
370}
diff --git a/net/mac802154/Makefile b/net/mac802154/Makefile
index ec1bd3fc127..57cf5d1a2e4 100644
--- a/net/mac802154/Makefile
+++ b/net/mac802154/Makefile
@@ -1,2 +1,2 @@
1obj-$(CONFIG_MAC802154) += mac802154.o 1obj-$(CONFIG_MAC802154) += mac802154.o
2mac802154-objs := ieee802154_dev.o rx.o tx.o mac_cmd.o mib.o monitor.o 2mac802154-objs := ieee802154_dev.o rx.o tx.o mac_cmd.o mib.o monitor.o wpan.o
diff --git a/net/mac802154/ieee802154_dev.c b/net/mac802154/ieee802154_dev.c
index e3edfb0661b..e748aed290a 100644
--- a/net/mac802154/ieee802154_dev.c
+++ b/net/mac802154/ieee802154_dev.c
@@ -140,6 +140,10 @@ mac802154_add_iface(struct wpan_phy *phy, const char *name, int type)
140 dev = alloc_netdev(sizeof(struct mac802154_sub_if_data), 140 dev = alloc_netdev(sizeof(struct mac802154_sub_if_data),
141 name, mac802154_monitor_setup); 141 name, mac802154_monitor_setup);
142 break; 142 break;
143 case IEEE802154_DEV_WPAN:
144 dev = alloc_netdev(sizeof(struct mac802154_sub_if_data),
145 name, mac802154_wpan_setup);
146 break;
143 default: 147 default:
144 dev = NULL; 148 dev = NULL;
145 err = -EINVAL; 149 err = -EINVAL;
diff --git a/net/mac802154/mac802154.h b/net/mac802154/mac802154.h
index 789d9c948ae..69678644a5c 100644
--- a/net/mac802154/mac802154.h
+++ b/net/mac802154/mac802154.h
@@ -93,6 +93,7 @@ struct mac802154_sub_if_data {
93#define MAC802154_CHAN_NONE (~(u8)0) /* No channel is assigned */ 93#define MAC802154_CHAN_NONE (~(u8)0) /* No channel is assigned */
94 94
95extern struct ieee802154_reduced_mlme_ops mac802154_mlme_reduced; 95extern struct ieee802154_reduced_mlme_ops mac802154_mlme_reduced;
96extern struct ieee802154_mlme_ops mac802154_mlme_wpan;
96 97
97int mac802154_slave_open(struct net_device *dev); 98int mac802154_slave_open(struct net_device *dev);
98int mac802154_slave_close(struct net_device *dev); 99int mac802154_slave_close(struct net_device *dev);
@@ -100,10 +101,17 @@ int mac802154_slave_close(struct net_device *dev);
100void mac802154_monitors_rx(struct mac802154_priv *priv, struct sk_buff *skb); 101void mac802154_monitors_rx(struct mac802154_priv *priv, struct sk_buff *skb);
101void mac802154_monitor_setup(struct net_device *dev); 102void mac802154_monitor_setup(struct net_device *dev);
102 103
104void mac802154_wpans_rx(struct mac802154_priv *priv, struct sk_buff *skb);
105void mac802154_wpan_setup(struct net_device *dev);
106
103netdev_tx_t mac802154_tx(struct mac802154_priv *priv, struct sk_buff *skb, 107netdev_tx_t mac802154_tx(struct mac802154_priv *priv, struct sk_buff *skb,
104 u8 page, u8 chan); 108 u8 page, u8 chan);
105 109
106/* MIB callbacks */ 110/* MIB callbacks */
111void mac802154_dev_set_short_addr(struct net_device *dev, u16 val);
107void mac802154_dev_set_ieee_addr(struct net_device *dev); 112void mac802154_dev_set_ieee_addr(struct net_device *dev);
113u16 mac802154_dev_get_pan_id(const struct net_device *dev);
114void mac802154_dev_set_pan_id(struct net_device *dev, u16 val);
115void mac802154_dev_set_page_channel(struct net_device *dev, u8 page, u8 chan);
108 116
109#endif /* MAC802154_H */ 117#endif /* MAC802154_H */
diff --git a/net/mac802154/mac_cmd.c b/net/mac802154/mac_cmd.c
index 7a5d0e052cd..7f5403e5ea9 100644
--- a/net/mac802154/mac_cmd.c
+++ b/net/mac802154/mac_cmd.c
@@ -25,12 +25,36 @@
25#include <linux/skbuff.h> 25#include <linux/skbuff.h>
26#include <linux/if_arp.h> 26#include <linux/if_arp.h>
27 27
28#include <net/ieee802154.h>
28#include <net/ieee802154_netdev.h> 29#include <net/ieee802154_netdev.h>
29#include <net/wpan-phy.h> 30#include <net/wpan-phy.h>
30#include <net/mac802154.h> 31#include <net/mac802154.h>
32#include <net/nl802154.h>
31 33
32#include "mac802154.h" 34#include "mac802154.h"
33 35
36static int mac802154_mlme_start_req(struct net_device *dev,
37 struct ieee802154_addr *addr,
38 u8 channel, u8 page,
39 u8 bcn_ord, u8 sf_ord,
40 u8 pan_coord, u8 blx,
41 u8 coord_realign)
42{
43 BUG_ON(addr->addr_type != IEEE802154_ADDR_SHORT);
44
45 mac802154_dev_set_pan_id(dev, addr->pan_id);
46 mac802154_dev_set_short_addr(dev, addr->short_addr);
47 mac802154_dev_set_ieee_addr(dev);
48 mac802154_dev_set_page_channel(dev, page, channel);
49
50 /* FIXME: add validation for unused parameters to be sane
51 * for SoftMAC
52 */
53 ieee802154_nl_start_confirm(dev, IEEE802154_SUCCESS);
54
55 return 0;
56}
57
34struct wpan_phy *mac802154_get_phy(const struct net_device *dev) 58struct wpan_phy *mac802154_get_phy(const struct net_device *dev)
35{ 59{
36 struct mac802154_sub_if_data *priv = netdev_priv(dev); 60 struct mac802154_sub_if_data *priv = netdev_priv(dev);
@@ -43,3 +67,8 @@ struct wpan_phy *mac802154_get_phy(const struct net_device *dev)
43struct ieee802154_reduced_mlme_ops mac802154_mlme_reduced = { 67struct ieee802154_reduced_mlme_ops mac802154_mlme_reduced = {
44 .get_phy = mac802154_get_phy, 68 .get_phy = mac802154_get_phy,
45}; 69};
70
71struct ieee802154_mlme_ops mac802154_mlme_wpan = {
72 .get_phy = mac802154_get_phy,
73 .start_req = mac802154_mlme_start_req,
74};
diff --git a/net/mac802154/mib.c b/net/mac802154/mib.c
index ab59821ec72..380829d8460 100644
--- a/net/mac802154/mib.c
+++ b/net/mac802154/mib.c
@@ -28,6 +28,11 @@
28 28
29#include "mac802154.h" 29#include "mac802154.h"
30 30
31struct phy_chan_notify_work {
32 struct work_struct work;
33 struct net_device *dev;
34};
35
31struct hw_addr_filt_notify_work { 36struct hw_addr_filt_notify_work {
32 struct work_struct work; 37 struct work_struct work;
33 struct net_device *dev; 38 struct net_device *dev;
@@ -78,6 +83,23 @@ static void set_hw_addr_filt(struct net_device *dev, unsigned long changed)
78 return; 83 return;
79} 84}
80 85
86void mac802154_dev_set_short_addr(struct net_device *dev, u16 val)
87{
88 struct mac802154_sub_if_data *priv = netdev_priv(dev);
89
90 BUG_ON(dev->type != ARPHRD_IEEE802154);
91
92 spin_lock_bh(&priv->mib_lock);
93 priv->short_addr = val;
94 spin_unlock_bh(&priv->mib_lock);
95
96 if ((priv->hw->ops->set_hw_addr_filt) &&
97 (priv->hw->hw.hw_filt.short_addr != priv->short_addr)) {
98 priv->hw->hw.hw_filt.short_addr = priv->short_addr;
99 set_hw_addr_filt(dev, IEEE802515_AFILT_SADDR_CHANGED);
100 }
101}
102
81void mac802154_dev_set_ieee_addr(struct net_device *dev) 103void mac802154_dev_set_ieee_addr(struct net_device *dev)
82{ 104{
83 struct mac802154_sub_if_data *priv = netdev_priv(dev); 105 struct mac802154_sub_if_data *priv = netdev_priv(dev);
@@ -91,3 +113,73 @@ void mac802154_dev_set_ieee_addr(struct net_device *dev)
91 set_hw_addr_filt(dev, IEEE802515_AFILT_IEEEADDR_CHANGED); 113 set_hw_addr_filt(dev, IEEE802515_AFILT_IEEEADDR_CHANGED);
92 } 114 }
93} 115}
116
117u16 mac802154_dev_get_pan_id(const struct net_device *dev)
118{
119 struct mac802154_sub_if_data *priv = netdev_priv(dev);
120 u16 ret;
121
122 BUG_ON(dev->type != ARPHRD_IEEE802154);
123
124 spin_lock_bh(&priv->mib_lock);
125 ret = priv->pan_id;
126 spin_unlock_bh(&priv->mib_lock);
127
128 return ret;
129}
130
131void mac802154_dev_set_pan_id(struct net_device *dev, u16 val)
132{
133 struct mac802154_sub_if_data *priv = netdev_priv(dev);
134
135 BUG_ON(dev->type != ARPHRD_IEEE802154);
136
137 spin_lock_bh(&priv->mib_lock);
138 priv->pan_id = val;
139 spin_unlock_bh(&priv->mib_lock);
140
141 if ((priv->hw->ops->set_hw_addr_filt) &&
142 (priv->hw->hw.hw_filt.pan_id != priv->pan_id)) {
143 priv->hw->hw.hw_filt.pan_id = priv->pan_id;
144 set_hw_addr_filt(dev, IEEE802515_AFILT_PANID_CHANGED);
145 }
146}
147
148static void phy_chan_notify(struct work_struct *work)
149{
150 struct phy_chan_notify_work *nw = container_of(work,
151 struct phy_chan_notify_work, work);
152 struct mac802154_priv *hw = mac802154_slave_get_priv(nw->dev);
153 struct mac802154_sub_if_data *priv = netdev_priv(nw->dev);
154 int res;
155
156 res = hw->ops->set_channel(&hw->hw, priv->page, priv->chan);
157 if (res)
158 pr_debug("set_channel failed\n");
159
160 kfree(nw);
161}
162
163void mac802154_dev_set_page_channel(struct net_device *dev, u8 page, u8 chan)
164{
165 struct mac802154_sub_if_data *priv = netdev_priv(dev);
166 struct phy_chan_notify_work *work;
167
168 BUG_ON(dev->type != ARPHRD_IEEE802154);
169
170 spin_lock_bh(&priv->mib_lock);
171 priv->page = page;
172 priv->chan = chan;
173 spin_unlock_bh(&priv->mib_lock);
174
175 if (priv->hw->phy->current_channel != priv->chan ||
176 priv->hw->phy->current_page != priv->page) {
177 work = kzalloc(sizeof(*work), GFP_ATOMIC);
178 if (!work)
179 return;
180
181 INIT_WORK(&work->work, phy_chan_notify);
182 work->dev = dev;
183 queue_work(priv->hw->dev_workqueue, &work->work);
184 }
185}
diff --git a/net/mac802154/rx.c b/net/mac802154/rx.c
index 4a7d76d4f8b..38548ec2098 100644
--- a/net/mac802154/rx.c
+++ b/net/mac802154/rx.c
@@ -77,6 +77,7 @@ mac802154_subif_rx(struct ieee802154_dev *hw, struct sk_buff *skb, u8 lqi)
77 } 77 }
78 78
79 mac802154_monitors_rx(priv, skb); 79 mac802154_monitors_rx(priv, skb);
80 mac802154_wpans_rx(priv, skb);
80out: 81out:
81 dev_kfree_skb(skb); 82 dev_kfree_skb(skb);
82 return; 83 return;
diff --git a/net/mac802154/tx.c b/net/mac802154/tx.c
index 434b6873b35..1a4df39c722 100644
--- a/net/mac802154/tx.c
+++ b/net/mac802154/tx.c
@@ -88,6 +88,8 @@ netdev_tx_t mac802154_tx(struct mac802154_priv *priv, struct sk_buff *skb,
88 return NETDEV_TX_OK; 88 return NETDEV_TX_OK;
89 } 89 }
90 90
91 mac802154_monitors_rx(mac802154_to_priv(&priv->hw), skb);
92
91 if (!(priv->hw.flags & IEEE802154_HW_OMIT_CKSUM)) { 93 if (!(priv->hw.flags & IEEE802154_HW_OMIT_CKSUM)) {
92 u16 crc = crc_ccitt(0, skb->data, skb->len); 94 u16 crc = crc_ccitt(0, skb->data, skb->len);
93 u8 *data = skb_put(skb, 2); 95 u8 *data = skb_put(skb, 2);
diff --git a/net/mac802154/wpan.c b/net/mac802154/wpan.c
new file mode 100644
index 00000000000..f30f6d4beea
--- /dev/null
+++ b/net/mac802154/wpan.c
@@ -0,0 +1,559 @@
1/*
2 * Copyright 2007-2012 Siemens AG
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2
6 * as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License along
14 * with this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
16 *
17 * Written by:
18 * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
19 * Sergey Lapin <slapin@ossfans.org>
20 * Maxim Gorbachyov <maxim.gorbachev@siemens.com>
21 * Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
22 */
23
24#include <linux/netdevice.h>
25#include <linux/module.h>
26#include <linux/if_arp.h>
27
28#include <net/rtnetlink.h>
29#include <linux/nl802154.h>
30#include <net/af_ieee802154.h>
31#include <net/mac802154.h>
32#include <net/ieee802154_netdev.h>
33#include <net/ieee802154.h>
34#include <net/wpan-phy.h>
35
36#include "mac802154.h"
37
38static inline int mac802154_fetch_skb_u8(struct sk_buff *skb, u8 *val)
39{
40 if (unlikely(!pskb_may_pull(skb, 1)))
41 return -EINVAL;
42
43 *val = skb->data[0];
44 skb_pull(skb, 1);
45
46 return 0;
47}
48
49static inline int mac802154_fetch_skb_u16(struct sk_buff *skb, u16 *val)
50{
51 if (unlikely(!pskb_may_pull(skb, 2)))
52 return -EINVAL;
53
54 *val = skb->data[0] | (skb->data[1] << 8);
55 skb_pull(skb, 2);
56
57 return 0;
58}
59
60static inline void mac802154_haddr_copy_swap(u8 *dest, const u8 *src)
61{
62 int i;
63 for (i = 0; i < IEEE802154_ADDR_LEN; i++)
64 dest[IEEE802154_ADDR_LEN - i - 1] = src[i];
65}
66
67static int
68mac802154_wpan_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
69{
70 struct mac802154_sub_if_data *priv = netdev_priv(dev);
71 struct sockaddr_ieee802154 *sa =
72 (struct sockaddr_ieee802154 *)&ifr->ifr_addr;
73 int err = -ENOIOCTLCMD;
74
75 spin_lock_bh(&priv->mib_lock);
76
77 switch (cmd) {
78 case SIOCGIFADDR:
79 if (priv->pan_id == IEEE802154_PANID_BROADCAST ||
80 priv->short_addr == IEEE802154_ADDR_BROADCAST) {
81 err = -EADDRNOTAVAIL;
82 break;
83 }
84
85 sa->family = AF_IEEE802154;
86 sa->addr.addr_type = IEEE802154_ADDR_SHORT;
87 sa->addr.pan_id = priv->pan_id;
88 sa->addr.short_addr = priv->short_addr;
89
90 err = 0;
91 break;
92 case SIOCSIFADDR:
93 dev_warn(&dev->dev,
94 "Using DEBUGing ioctl SIOCSIFADDR isn't recommened!\n");
95 if (sa->family != AF_IEEE802154 ||
96 sa->addr.addr_type != IEEE802154_ADDR_SHORT ||
97 sa->addr.pan_id == IEEE802154_PANID_BROADCAST ||
98 sa->addr.short_addr == IEEE802154_ADDR_BROADCAST ||
99 sa->addr.short_addr == IEEE802154_ADDR_UNDEF) {
100 err = -EINVAL;
101 break;
102 }
103
104 priv->pan_id = sa->addr.pan_id;
105 priv->short_addr = sa->addr.short_addr;
106
107 err = 0;
108 break;
109 }
110
111 spin_unlock_bh(&priv->mib_lock);
112 return err;
113}
114
115static int mac802154_wpan_mac_addr(struct net_device *dev, void *p)
116{
117 struct sockaddr *addr = p;
118
119 if (netif_running(dev))
120 return -EBUSY;
121
122 /* FIXME: validate addr */
123 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
124 mac802154_dev_set_ieee_addr(dev);
125 return 0;
126}
127
128static int mac802154_header_create(struct sk_buff *skb,
129 struct net_device *dev,
130 unsigned short type,
131 const void *_daddr,
132 const void *_saddr,
133 unsigned len)
134{
135 const struct ieee802154_addr *saddr = _saddr;
136 const struct ieee802154_addr *daddr = _daddr;
137 struct ieee802154_addr dev_addr;
138 struct mac802154_sub_if_data *priv = netdev_priv(dev);
139 int pos = 2;
140 u8 *head;
141 u16 fc;
142
143 if (!daddr)
144 return -EINVAL;
145
146 head = kzalloc(MAC802154_FRAME_HARD_HEADER_LEN, GFP_KERNEL);
147 if (head == NULL)
148 return -ENOMEM;
149
150 head[pos++] = mac_cb(skb)->seq; /* DSN/BSN */
151 fc = mac_cb_type(skb);
152
153 if (!saddr) {
154 spin_lock_bh(&priv->mib_lock);
155
156 if (priv->short_addr == IEEE802154_ADDR_BROADCAST ||
157 priv->short_addr == IEEE802154_ADDR_UNDEF ||
158 priv->pan_id == IEEE802154_PANID_BROADCAST) {
159 dev_addr.addr_type = IEEE802154_ADDR_LONG;
160 memcpy(dev_addr.hwaddr, dev->dev_addr,
161 IEEE802154_ADDR_LEN);
162 } else {
163 dev_addr.addr_type = IEEE802154_ADDR_SHORT;
164 dev_addr.short_addr = priv->short_addr;
165 }
166
167 dev_addr.pan_id = priv->pan_id;
168 saddr = &dev_addr;
169
170 spin_unlock_bh(&priv->mib_lock);
171 }
172
173 if (daddr->addr_type != IEEE802154_ADDR_NONE) {
174 fc |= (daddr->addr_type << IEEE802154_FC_DAMODE_SHIFT);
175
176 head[pos++] = daddr->pan_id & 0xff;
177 head[pos++] = daddr->pan_id >> 8;
178
179 if (daddr->addr_type == IEEE802154_ADDR_SHORT) {
180 head[pos++] = daddr->short_addr & 0xff;
181 head[pos++] = daddr->short_addr >> 8;
182 } else {
183 mac802154_haddr_copy_swap(head + pos, daddr->hwaddr);
184 pos += IEEE802154_ADDR_LEN;
185 }
186 }
187
188 if (saddr->addr_type != IEEE802154_ADDR_NONE) {
189 fc |= (saddr->addr_type << IEEE802154_FC_SAMODE_SHIFT);
190
191 if ((saddr->pan_id == daddr->pan_id) &&
192 (saddr->pan_id != IEEE802154_PANID_BROADCAST)) {
193 /* PANID compression/intra PAN */
194 fc |= IEEE802154_FC_INTRA_PAN;
195 } else {
196 head[pos++] = saddr->pan_id & 0xff;
197 head[pos++] = saddr->pan_id >> 8;
198 }
199
200 if (saddr->addr_type == IEEE802154_ADDR_SHORT) {
201 head[pos++] = saddr->short_addr & 0xff;
202 head[pos++] = saddr->short_addr >> 8;
203 } else {
204 mac802154_haddr_copy_swap(head + pos, saddr->hwaddr);
205 pos += IEEE802154_ADDR_LEN;
206 }
207 }
208
209 head[0] = fc;
210 head[1] = fc >> 8;
211
212 memcpy(skb_push(skb, pos), head, pos);
213 kfree(head);
214
215 return pos;
216}
217
/* Parse the addressing fields of a received frame's MAC header and copy
 * the *source* address into @haddr (viewed as struct ieee802154_addr).
 * The destination fields are walked only to find where the source
 * address starts. Returns sizeof(struct ieee802154_addr) on success or
 * 0 on a malformed header (header_ops->parse convention).
 */
static int
mac802154_header_parse(const struct sk_buff *skb, unsigned char *haddr)
{
	const u8 *hdr = skb_mac_header(skb);
	const u8 *tail = skb_tail_pointer(skb);
	struct ieee802154_addr *addr = (struct ieee802154_addr *)haddr;
	u16 fc;
	int da_type;

	/* Minimum header: 2-byte frame control + 1-byte sequence number. */
	if (hdr + 3 > tail)
		goto malformed;

	fc = hdr[0] | (hdr[1] << 8);	/* frame control, little-endian */

	hdr += 3;

	da_type = IEEE802154_FC_DAMODE(fc);
	addr->addr_type = IEEE802154_FC_SAMODE(fc);

	/* First pass: skip over the destination fields, bounds-checking
	 * each one. With intra-PAN (PAN id compression) the destination
	 * PAN id doubles as the source PAN id, so it is recorded here. */
	switch (da_type) {
	case IEEE802154_ADDR_NONE:
		/* Compression needs a destination PAN id to compress to. */
		if (fc & IEEE802154_FC_INTRA_PAN)
			goto malformed;
		break;
	case IEEE802154_ADDR_LONG:
		if (fc & IEEE802154_FC_INTRA_PAN) {
			if (hdr + 2 > tail)
				goto malformed;
			addr->pan_id = hdr[0] | (hdr[1] << 8);
			hdr += 2;
		}

		if (hdr + IEEE802154_ADDR_LEN > tail)
			goto malformed;

		hdr += IEEE802154_ADDR_LEN;
		break;
	case IEEE802154_ADDR_SHORT:
		if (fc & IEEE802154_FC_INTRA_PAN) {
			if (hdr + 2 > tail)
				goto malformed;
			addr->pan_id = hdr[0] | (hdr[1] << 8);
			hdr += 2;
		}

		if (hdr + 2 > tail)
			goto malformed;

		hdr += 2;
		break;
	default:
		goto malformed;

	}

	/* Second pass: extract the source address proper. Without
	 * compression its PAN id precedes it on the wire. */
	switch (addr->addr_type) {
	case IEEE802154_ADDR_NONE:
		break;
	case IEEE802154_ADDR_LONG:
		if (!(fc & IEEE802154_FC_INTRA_PAN)) {
			if (hdr + 2 > tail)
				goto malformed;
			addr->pan_id = hdr[0] | (hdr[1] << 8);
			hdr += 2;
		}

		if (hdr + IEEE802154_ADDR_LEN > tail)
			goto malformed;

		/* On-air byte order is reversed relative to addr->hwaddr. */
		mac802154_haddr_copy_swap(addr->hwaddr, hdr);
		hdr += IEEE802154_ADDR_LEN;
		break;
	case IEEE802154_ADDR_SHORT:
		if (!(fc & IEEE802154_FC_INTRA_PAN)) {
			if (hdr + 2 > tail)
				goto malformed;
			addr->pan_id = hdr[0] | (hdr[1] << 8);
			hdr += 2;
		}

		if (hdr + 2 > tail)
			goto malformed;

		addr->short_addr = hdr[0] | (hdr[1] << 8);
		hdr += 2;
		break;
	default:
		goto malformed;
	}

	return sizeof(struct ieee802154_addr);

malformed:
	pr_debug("malformed packet\n");
	return 0;
}
314
315static netdev_tx_t
316mac802154_wpan_xmit(struct sk_buff *skb, struct net_device *dev)
317{
318 struct mac802154_sub_if_data *priv;
319 u8 chan, page;
320
321 priv = netdev_priv(dev);
322
323 spin_lock_bh(&priv->mib_lock);
324 chan = priv->chan;
325 page = priv->page;
326 spin_unlock_bh(&priv->mib_lock);
327
328 if (chan == MAC802154_CHAN_NONE ||
329 page >= WPAN_NUM_PAGES ||
330 chan >= WPAN_NUM_CHANNELS)
331 return NETDEV_TX_OK;
332
333 skb->skb_iif = dev->ifindex;
334 dev->stats.tx_packets++;
335 dev->stats.tx_bytes += skb->len;
336
337 return mac802154_tx(priv->hw, skb, page, chan);
338}
339
340static struct header_ops mac802154_header_ops = {
341 .create = mac802154_header_create,
342 .parse = mac802154_header_parse,
343};
344
/* net_device operations for WPAN interfaces; open/close are the shared
 * mac802154 slave handlers, the rest are WPAN-specific (above). */
static const struct net_device_ops mac802154_wpan_ops = {
	.ndo_open		= mac802154_slave_open,
	.ndo_stop		= mac802154_slave_close,
	.ndo_start_xmit		= mac802154_wpan_xmit,
	.ndo_do_ioctl		= mac802154_wpan_ioctl,
	.ndo_set_mac_address	= mac802154_wpan_mac_addr,
};
352
353void mac802154_wpan_setup(struct net_device *dev)
354{
355 struct mac802154_sub_if_data *priv;
356
357 dev->addr_len = IEEE802154_ADDR_LEN;
358 memset(dev->broadcast, 0xff, IEEE802154_ADDR_LEN);
359
360 dev->hard_header_len = MAC802154_FRAME_HARD_HEADER_LEN;
361 dev->header_ops = &mac802154_header_ops;
362 dev->needed_tailroom = 2; /* FCS */
363 dev->mtu = IEEE802154_MTU;
364 dev->tx_queue_len = 10;
365 dev->type = ARPHRD_IEEE802154;
366 dev->flags = IFF_NOARP | IFF_BROADCAST;
367 dev->watchdog_timeo = 0;
368
369 dev->destructor = free_netdev;
370 dev->netdev_ops = &mac802154_wpan_ops;
371 dev->ml_priv = &mac802154_mlme_wpan;
372
373 priv = netdev_priv(dev);
374 priv->type = IEEE802154_DEV_WPAN;
375
376 priv->chan = MAC802154_CHAN_NONE;
377 priv->page = 0;
378
379 spin_lock_init(&priv->mib_lock);
380
381 get_random_bytes(&priv->bsn, 1);
382 get_random_bytes(&priv->dsn, 1);
383
384 priv->pan_id = IEEE802154_PANID_BROADCAST;
385 priv->short_addr = IEEE802154_ADDR_BROADCAST;
386}
387
/* Hand a received, already-classified data frame up to the network
 * stack; returns the netif_rx() NET_RX_* verdict. */
static int mac802154_process_data(struct net_device *dev, struct sk_buff *skb)
{
	return netif_rx(skb);
}
392
/* Deliver one received frame to a single WPAN sub-interface: classify
 * it (PACKET_HOST / PACKET_BROADCAST / PACKET_OTHERHOST) against the
 * interface's addresses, account it, and pass data frames up the
 * stack. Consumes @skb (either delivered or freed). Returns a
 * NET_RX_* verdict.
 */
static int
mac802154_subif_frame(struct mac802154_sub_if_data *sdata, struct sk_buff *skb)
{
	pr_debug("getting packet via slave interface %s\n", sdata->dev->name);

	/* mib_lock guards pan_id/short_addr against concurrent ioctls. */
	spin_lock_bh(&sdata->mib_lock);

	switch (mac_cb(skb)->da.addr_type) {
	case IEEE802154_ADDR_NONE:
		if (mac_cb(skb)->sa.addr_type != IEEE802154_ADDR_NONE)
			/* FIXME: check if we are PAN coordinator */
			skb->pkt_type = PACKET_OTHERHOST;
		else
			/* ACK comes with both addresses empty */
			skb->pkt_type = PACKET_HOST;
		break;
	case IEEE802154_ADDR_LONG:
		if (mac_cb(skb)->da.pan_id != sdata->pan_id &&
		    mac_cb(skb)->da.pan_id != IEEE802154_PANID_BROADCAST)
			/* wrong PAN entirely */
			skb->pkt_type = PACKET_OTHERHOST;
		else if (!memcmp(mac_cb(skb)->da.hwaddr, sdata->dev->dev_addr,
				 IEEE802154_ADDR_LEN))
			skb->pkt_type = PACKET_HOST;
		else
			skb->pkt_type = PACKET_OTHERHOST;
		break;
	case IEEE802154_ADDR_SHORT:
		if (mac_cb(skb)->da.pan_id != sdata->pan_id &&
		    mac_cb(skb)->da.pan_id != IEEE802154_PANID_BROADCAST)
			skb->pkt_type = PACKET_OTHERHOST;
		else if (mac_cb(skb)->da.short_addr == sdata->short_addr)
			skb->pkt_type = PACKET_HOST;
		else if (mac_cb(skb)->da.short_addr ==
					IEEE802154_ADDR_BROADCAST)
			skb->pkt_type = PACKET_BROADCAST;
		else
			skb->pkt_type = PACKET_OTHERHOST;
		break;
	default:
		break;
	}

	spin_unlock_bh(&sdata->mib_lock);

	skb->dev = sdata->dev;

	sdata->dev->stats.rx_packets++;
	sdata->dev->stats.rx_bytes += skb->len;

	/* Only data frames are delivered; everything else is dropped. */
	switch (mac_cb_type(skb)) {
	case IEEE802154_FC_TYPE_DATA:
		return mac802154_process_data(sdata->dev, skb);
	default:
		pr_warning("ieee802154: bad frame received (type = %d)\n",
			   mac_cb_type(skb));
		kfree_skb(skb);
		return NET_RX_DROP;
	}
}
452
453static int mac802154_parse_frame_start(struct sk_buff *skb)
454{
455 u8 *head = skb->data;
456 u16 fc;
457
458 if (mac802154_fetch_skb_u16(skb, &fc) ||
459 mac802154_fetch_skb_u8(skb, &(mac_cb(skb)->seq)))
460 goto err;
461
462 pr_debug("fc: %04x dsn: %02x\n", fc, head[2]);
463
464 mac_cb(skb)->flags = IEEE802154_FC_TYPE(fc);
465 mac_cb(skb)->sa.addr_type = IEEE802154_FC_SAMODE(fc);
466 mac_cb(skb)->da.addr_type = IEEE802154_FC_DAMODE(fc);
467
468 if (fc & IEEE802154_FC_INTRA_PAN)
469 mac_cb(skb)->flags |= MAC_CB_FLAG_INTRAPAN;
470
471 if (mac_cb(skb)->da.addr_type != IEEE802154_ADDR_NONE) {
472 if (mac802154_fetch_skb_u16(skb, &(mac_cb(skb)->da.pan_id)))
473 goto err;
474
475 /* source PAN id compression */
476 if (mac_cb_is_intrapan(skb))
477 mac_cb(skb)->sa.pan_id = mac_cb(skb)->da.pan_id;
478
479 pr_debug("dest PAN addr: %04x\n", mac_cb(skb)->da.pan_id);
480
481 if (mac_cb(skb)->da.addr_type == IEEE802154_ADDR_SHORT) {
482 u16 *da = &(mac_cb(skb)->da.short_addr);
483
484 if (mac802154_fetch_skb_u16(skb, da))
485 goto err;
486
487 pr_debug("destination address is short: %04x\n",
488 mac_cb(skb)->da.short_addr);
489 } else {
490 if (!pskb_may_pull(skb, IEEE802154_ADDR_LEN))
491 goto err;
492
493 mac802154_haddr_copy_swap(mac_cb(skb)->da.hwaddr,
494 skb->data);
495 skb_pull(skb, IEEE802154_ADDR_LEN);
496
497 pr_debug("destination address is hardware\n");
498 }
499 }
500
501 if (mac_cb(skb)->sa.addr_type != IEEE802154_ADDR_NONE) {
502 /* non PAN-compression, fetch source address id */
503 if (!(mac_cb_is_intrapan(skb))) {
504 u16 *sa_pan = &(mac_cb(skb)->sa.pan_id);
505
506 if (mac802154_fetch_skb_u16(skb, sa_pan))
507 goto err;
508 }
509
510 pr_debug("source PAN addr: %04x\n", mac_cb(skb)->da.pan_id);
511
512 if (mac_cb(skb)->sa.addr_type == IEEE802154_ADDR_SHORT) {
513 u16 *sa = &(mac_cb(skb)->sa.short_addr);
514
515 if (mac802154_fetch_skb_u16(skb, sa))
516 goto err;
517
518 pr_debug("source address is short: %04x\n",
519 mac_cb(skb)->sa.short_addr);
520 } else {
521 if (!pskb_may_pull(skb, IEEE802154_ADDR_LEN))
522 goto err;
523
524 mac802154_haddr_copy_swap(mac_cb(skb)->sa.hwaddr,
525 skb->data);
526 skb_pull(skb, IEEE802154_ADDR_LEN);
527
528 pr_debug("source address is hardware\n");
529 }
530 }
531
532 return 0;
533err:
534 return -EINVAL;
535}
536
537void mac802154_wpans_rx(struct mac802154_priv *priv, struct sk_buff *skb)
538{
539 int ret;
540 struct sk_buff *sskb;
541 struct mac802154_sub_if_data *sdata;
542
543 ret = mac802154_parse_frame_start(skb);
544 if (ret) {
545 pr_debug("got invalid frame\n");
546 return;
547 }
548
549 rcu_read_lock();
550 list_for_each_entry_rcu(sdata, &priv->slaves, list) {
551 if (sdata->type != IEEE802154_DEV_WPAN)
552 continue;
553
554 sskb = skb_clone(skb, GFP_ATOMIC);
555 if (sskb)
556 mac802154_subif_frame(sdata, sskb);
557 }
558 rcu_read_unlock();
559}
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 209c1ed4336..c19b214ffd5 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -335,6 +335,27 @@ config NF_CT_NETLINK_TIMEOUT
335 335
336 If unsure, say `N'. 336 If unsure, say `N'.
337 337
338config NF_CT_NETLINK_HELPER
339 tristate 'Connection tracking helpers in user-space via Netlink'
340 select NETFILTER_NETLINK
341 depends on NF_CT_NETLINK
342 depends on NETFILTER_NETLINK_QUEUE
343 depends on NETFILTER_NETLINK_QUEUE_CT
344 depends on NETFILTER_ADVANCED
345 help
346 This option enables the user-space connection tracking helpers
347 infrastructure.
348
349 If unsure, say `N'.
350
351config NETFILTER_NETLINK_QUEUE_CT
352 bool "NFQUEUE integration with Connection Tracking"
353 default n
354 depends on NETFILTER_NETLINK_QUEUE
355 help
356 If this option is enabled, NFQUEUE can include Connection Tracking
357	  information together with the packet if it is enqueued via NFNETLINK.
358
338endif # NF_CONNTRACK 359endif # NF_CONNTRACK
339 360
340# transparent proxy support 361# transparent proxy support
diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
index 4e7960cc7b9..1c5160f2278 100644
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -9,6 +9,8 @@ obj-$(CONFIG_NETFILTER) = netfilter.o
9 9
10obj-$(CONFIG_NETFILTER_NETLINK) += nfnetlink.o 10obj-$(CONFIG_NETFILTER_NETLINK) += nfnetlink.o
11obj-$(CONFIG_NETFILTER_NETLINK_ACCT) += nfnetlink_acct.o 11obj-$(CONFIG_NETFILTER_NETLINK_ACCT) += nfnetlink_acct.o
12nfnetlink_queue-y := nfnetlink_queue_core.o
13nfnetlink_queue-$(CONFIG_NETFILTER_NETLINK_QUEUE_CT) += nfnetlink_queue_ct.o
12obj-$(CONFIG_NETFILTER_NETLINK_QUEUE) += nfnetlink_queue.o 14obj-$(CONFIG_NETFILTER_NETLINK_QUEUE) += nfnetlink_queue.o
13obj-$(CONFIG_NETFILTER_NETLINK_LOG) += nfnetlink_log.o 15obj-$(CONFIG_NETFILTER_NETLINK_LOG) += nfnetlink_log.o
14 16
@@ -24,6 +26,7 @@ obj-$(CONFIG_NF_CT_PROTO_UDPLITE) += nf_conntrack_proto_udplite.o
24# netlink interface for nf_conntrack 26# netlink interface for nf_conntrack
25obj-$(CONFIG_NF_CT_NETLINK) += nf_conntrack_netlink.o 27obj-$(CONFIG_NF_CT_NETLINK) += nf_conntrack_netlink.o
26obj-$(CONFIG_NF_CT_NETLINK_TIMEOUT) += nfnetlink_cttimeout.o 28obj-$(CONFIG_NF_CT_NETLINK_TIMEOUT) += nfnetlink_cttimeout.o
29obj-$(CONFIG_NF_CT_NETLINK_HELPER) += nfnetlink_cthelper.o
27 30
28# connection tracking helpers 31# connection tracking helpers
29nf_conntrack_h323-objs := nf_conntrack_h323_main.o nf_conntrack_h323_asn1.o 32nf_conntrack_h323-objs := nf_conntrack_h323_main.o nf_conntrack_h323_asn1.o
diff --git a/net/netfilter/core.c b/net/netfilter/core.c
index e19f3653db2..0bc6b60db4d 100644
--- a/net/netfilter/core.c
+++ b/net/netfilter/core.c
@@ -264,6 +264,13 @@ void nf_conntrack_destroy(struct nf_conntrack *nfct)
264 rcu_read_unlock(); 264 rcu_read_unlock();
265} 265}
266EXPORT_SYMBOL(nf_conntrack_destroy); 266EXPORT_SYMBOL(nf_conntrack_destroy);
267
268struct nfq_ct_hook __rcu *nfq_ct_hook __read_mostly;
269EXPORT_SYMBOL_GPL(nfq_ct_hook);
270
271struct nfq_ct_nat_hook __rcu *nfq_ct_nat_hook __read_mostly;
272EXPORT_SYMBOL_GPL(nfq_ct_nat_hook);
273
267#endif /* CONFIG_NF_CONNTRACK */ 274#endif /* CONFIG_NF_CONNTRACK */
268 275
269#ifdef CONFIG_PROC_FS 276#ifdef CONFIG_PROC_FS
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index a54b018c6ee..b54eccef40b 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -1742,7 +1742,7 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
1742 { 1742 {
1743 .hook = ip_vs_reply4, 1743 .hook = ip_vs_reply4,
1744 .owner = THIS_MODULE, 1744 .owner = THIS_MODULE,
1745 .pf = PF_INET, 1745 .pf = NFPROTO_IPV4,
1746 .hooknum = NF_INET_LOCAL_IN, 1746 .hooknum = NF_INET_LOCAL_IN,
1747 .priority = NF_IP_PRI_NAT_SRC - 2, 1747 .priority = NF_IP_PRI_NAT_SRC - 2,
1748 }, 1748 },
@@ -1752,7 +1752,7 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
1752 { 1752 {
1753 .hook = ip_vs_remote_request4, 1753 .hook = ip_vs_remote_request4,
1754 .owner = THIS_MODULE, 1754 .owner = THIS_MODULE,
1755 .pf = PF_INET, 1755 .pf = NFPROTO_IPV4,
1756 .hooknum = NF_INET_LOCAL_IN, 1756 .hooknum = NF_INET_LOCAL_IN,
1757 .priority = NF_IP_PRI_NAT_SRC - 1, 1757 .priority = NF_IP_PRI_NAT_SRC - 1,
1758 }, 1758 },
@@ -1760,7 +1760,7 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
1760 { 1760 {
1761 .hook = ip_vs_local_reply4, 1761 .hook = ip_vs_local_reply4,
1762 .owner = THIS_MODULE, 1762 .owner = THIS_MODULE,
1763 .pf = PF_INET, 1763 .pf = NFPROTO_IPV4,
1764 .hooknum = NF_INET_LOCAL_OUT, 1764 .hooknum = NF_INET_LOCAL_OUT,
1765 .priority = NF_IP_PRI_NAT_DST + 1, 1765 .priority = NF_IP_PRI_NAT_DST + 1,
1766 }, 1766 },
@@ -1768,7 +1768,7 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
1768 { 1768 {
1769 .hook = ip_vs_local_request4, 1769 .hook = ip_vs_local_request4,
1770 .owner = THIS_MODULE, 1770 .owner = THIS_MODULE,
1771 .pf = PF_INET, 1771 .pf = NFPROTO_IPV4,
1772 .hooknum = NF_INET_LOCAL_OUT, 1772 .hooknum = NF_INET_LOCAL_OUT,
1773 .priority = NF_IP_PRI_NAT_DST + 2, 1773 .priority = NF_IP_PRI_NAT_DST + 2,
1774 }, 1774 },
@@ -1777,7 +1777,7 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
1777 { 1777 {
1778 .hook = ip_vs_forward_icmp, 1778 .hook = ip_vs_forward_icmp,
1779 .owner = THIS_MODULE, 1779 .owner = THIS_MODULE,
1780 .pf = PF_INET, 1780 .pf = NFPROTO_IPV4,
1781 .hooknum = NF_INET_FORWARD, 1781 .hooknum = NF_INET_FORWARD,
1782 .priority = 99, 1782 .priority = 99,
1783 }, 1783 },
@@ -1785,7 +1785,7 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
1785 { 1785 {
1786 .hook = ip_vs_reply4, 1786 .hook = ip_vs_reply4,
1787 .owner = THIS_MODULE, 1787 .owner = THIS_MODULE,
1788 .pf = PF_INET, 1788 .pf = NFPROTO_IPV4,
1789 .hooknum = NF_INET_FORWARD, 1789 .hooknum = NF_INET_FORWARD,
1790 .priority = 100, 1790 .priority = 100,
1791 }, 1791 },
@@ -1794,7 +1794,7 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
1794 { 1794 {
1795 .hook = ip_vs_reply6, 1795 .hook = ip_vs_reply6,
1796 .owner = THIS_MODULE, 1796 .owner = THIS_MODULE,
1797 .pf = PF_INET6, 1797 .pf = NFPROTO_IPV6,
1798 .hooknum = NF_INET_LOCAL_IN, 1798 .hooknum = NF_INET_LOCAL_IN,
1799 .priority = NF_IP6_PRI_NAT_SRC - 2, 1799 .priority = NF_IP6_PRI_NAT_SRC - 2,
1800 }, 1800 },
@@ -1804,7 +1804,7 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
1804 { 1804 {
1805 .hook = ip_vs_remote_request6, 1805 .hook = ip_vs_remote_request6,
1806 .owner = THIS_MODULE, 1806 .owner = THIS_MODULE,
1807 .pf = PF_INET6, 1807 .pf = NFPROTO_IPV6,
1808 .hooknum = NF_INET_LOCAL_IN, 1808 .hooknum = NF_INET_LOCAL_IN,
1809 .priority = NF_IP6_PRI_NAT_SRC - 1, 1809 .priority = NF_IP6_PRI_NAT_SRC - 1,
1810 }, 1810 },
@@ -1812,7 +1812,7 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
1812 { 1812 {
1813 .hook = ip_vs_local_reply6, 1813 .hook = ip_vs_local_reply6,
1814 .owner = THIS_MODULE, 1814 .owner = THIS_MODULE,
1815 .pf = PF_INET, 1815 .pf = NFPROTO_IPV4,
1816 .hooknum = NF_INET_LOCAL_OUT, 1816 .hooknum = NF_INET_LOCAL_OUT,
1817 .priority = NF_IP6_PRI_NAT_DST + 1, 1817 .priority = NF_IP6_PRI_NAT_DST + 1,
1818 }, 1818 },
@@ -1820,7 +1820,7 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
1820 { 1820 {
1821 .hook = ip_vs_local_request6, 1821 .hook = ip_vs_local_request6,
1822 .owner = THIS_MODULE, 1822 .owner = THIS_MODULE,
1823 .pf = PF_INET6, 1823 .pf = NFPROTO_IPV6,
1824 .hooknum = NF_INET_LOCAL_OUT, 1824 .hooknum = NF_INET_LOCAL_OUT,
1825 .priority = NF_IP6_PRI_NAT_DST + 2, 1825 .priority = NF_IP6_PRI_NAT_DST + 2,
1826 }, 1826 },
@@ -1829,7 +1829,7 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
1829 { 1829 {
1830 .hook = ip_vs_forward_icmp_v6, 1830 .hook = ip_vs_forward_icmp_v6,
1831 .owner = THIS_MODULE, 1831 .owner = THIS_MODULE,
1832 .pf = PF_INET6, 1832 .pf = NFPROTO_IPV6,
1833 .hooknum = NF_INET_FORWARD, 1833 .hooknum = NF_INET_FORWARD,
1834 .priority = 99, 1834 .priority = 99,
1835 }, 1835 },
@@ -1837,7 +1837,7 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
1837 { 1837 {
1838 .hook = ip_vs_reply6, 1838 .hook = ip_vs_reply6,
1839 .owner = THIS_MODULE, 1839 .owner = THIS_MODULE,
1840 .pf = PF_INET6, 1840 .pf = NFPROTO_IPV6,
1841 .hooknum = NF_INET_FORWARD, 1841 .hooknum = NF_INET_FORWARD,
1842 .priority = 100, 1842 .priority = 100,
1843 }, 1843 },
diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
index 7fd66dec859..71d6ecb6592 100644
--- a/net/netfilter/ipvs/ip_vs_xmit.c
+++ b/net/netfilter/ipvs/ip_vs_xmit.c
@@ -823,7 +823,7 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
823 IP_VS_ERR_RL("%s(): no memory\n", __func__); 823 IP_VS_ERR_RL("%s(): no memory\n", __func__);
824 return NF_STOLEN; 824 return NF_STOLEN;
825 } 825 }
826 kfree_skb(skb); 826 consume_skb(skb);
827 skb = new_skb; 827 skb = new_skb;
828 old_iph = ip_hdr(skb); 828 old_iph = ip_hdr(skb);
829 } 829 }
@@ -942,7 +942,7 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
942 IP_VS_ERR_RL("%s(): no memory\n", __func__); 942 IP_VS_ERR_RL("%s(): no memory\n", __func__);
943 return NF_STOLEN; 943 return NF_STOLEN;
944 } 944 }
945 kfree_skb(skb); 945 consume_skb(skb);
946 skb = new_skb; 946 skb = new_skb;
947 old_iph = ipv6_hdr(skb); 947 old_iph = ipv6_hdr(skb);
948 } 948 }
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index ac3af97cc46..cf4875565d6 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -531,7 +531,7 @@ __nf_conntrack_confirm(struct sk_buff *skb)
531 tstamp = nf_conn_tstamp_find(ct); 531 tstamp = nf_conn_tstamp_find(ct);
532 if (tstamp) { 532 if (tstamp) {
533 if (skb->tstamp.tv64 == 0) 533 if (skb->tstamp.tv64 == 0)
534 __net_timestamp((struct sk_buff *)skb); 534 __net_timestamp(skb);
535 535
536 tstamp->start = ktime_to_ns(skb->tstamp); 536 tstamp->start = ktime_to_ns(skb->tstamp);
537 } 537 }
@@ -819,7 +819,8 @@ init_conntrack(struct net *net, struct nf_conn *tmpl,
819 __set_bit(IPS_EXPECTED_BIT, &ct->status); 819 __set_bit(IPS_EXPECTED_BIT, &ct->status);
820 ct->master = exp->master; 820 ct->master = exp->master;
821 if (exp->helper) { 821 if (exp->helper) {
822 help = nf_ct_helper_ext_add(ct, GFP_ATOMIC); 822 help = nf_ct_helper_ext_add(ct, exp->helper,
823 GFP_ATOMIC);
823 if (help) 824 if (help)
824 rcu_assign_pointer(help->helper, exp->helper); 825 rcu_assign_pointer(help->helper, exp->helper);
825 } 826 }
@@ -1333,7 +1334,6 @@ static void nf_conntrack_cleanup_init_net(void)
1333 while (untrack_refs() > 0) 1334 while (untrack_refs() > 0)
1334 schedule(); 1335 schedule();
1335 1336
1336 nf_conntrack_proto_fini();
1337#ifdef CONFIG_NF_CONNTRACK_ZONES 1337#ifdef CONFIG_NF_CONNTRACK_ZONES
1338 nf_ct_extend_unregister(&nf_ct_zone_extend); 1338 nf_ct_extend_unregister(&nf_ct_zone_extend);
1339#endif 1339#endif
@@ -1372,7 +1372,7 @@ void nf_conntrack_cleanup(struct net *net)
1372 netfilter framework. Roll on, two-stage module 1372 netfilter framework. Roll on, two-stage module
1373 delete... */ 1373 delete... */
1374 synchronize_net(); 1374 synchronize_net();
1375 1375 nf_conntrack_proto_fini(net);
1376 nf_conntrack_cleanup_net(net); 1376 nf_conntrack_cleanup_net(net);
1377 1377
1378 if (net_eq(net, &init_net)) { 1378 if (net_eq(net, &init_net)) {
@@ -1496,11 +1496,6 @@ static int nf_conntrack_init_init_net(void)
1496 printk(KERN_INFO "nf_conntrack version %s (%u buckets, %d max)\n", 1496 printk(KERN_INFO "nf_conntrack version %s (%u buckets, %d max)\n",
1497 NF_CONNTRACK_VERSION, nf_conntrack_htable_size, 1497 NF_CONNTRACK_VERSION, nf_conntrack_htable_size,
1498 nf_conntrack_max); 1498 nf_conntrack_max);
1499
1500 ret = nf_conntrack_proto_init();
1501 if (ret < 0)
1502 goto err_proto;
1503
1504#ifdef CONFIG_NF_CONNTRACK_ZONES 1499#ifdef CONFIG_NF_CONNTRACK_ZONES
1505 ret = nf_ct_extend_register(&nf_ct_zone_extend); 1500 ret = nf_ct_extend_register(&nf_ct_zone_extend);
1506 if (ret < 0) 1501 if (ret < 0)
@@ -1518,9 +1513,7 @@ static int nf_conntrack_init_init_net(void)
1518 1513
1519#ifdef CONFIG_NF_CONNTRACK_ZONES 1514#ifdef CONFIG_NF_CONNTRACK_ZONES
1520err_extend: 1515err_extend:
1521 nf_conntrack_proto_fini();
1522#endif 1516#endif
1523err_proto:
1524 return ret; 1517 return ret;
1525} 1518}
1526 1519
@@ -1583,9 +1576,7 @@ static int nf_conntrack_init_net(struct net *net)
1583 ret = nf_conntrack_helper_init(net); 1576 ret = nf_conntrack_helper_init(net);
1584 if (ret < 0) 1577 if (ret < 0)
1585 goto err_helper; 1578 goto err_helper;
1586
1587 return 0; 1579 return 0;
1588
1589err_helper: 1580err_helper:
1590 nf_conntrack_timeout_fini(net); 1581 nf_conntrack_timeout_fini(net);
1591err_timeout: 1582err_timeout:
@@ -1622,6 +1613,9 @@ int nf_conntrack_init(struct net *net)
1622 if (ret < 0) 1613 if (ret < 0)
1623 goto out_init_net; 1614 goto out_init_net;
1624 } 1615 }
1616 ret = nf_conntrack_proto_init(net);
1617 if (ret < 0)
1618 goto out_proto;
1625 ret = nf_conntrack_init_net(net); 1619 ret = nf_conntrack_init_net(net);
1626 if (ret < 0) 1620 if (ret < 0)
1627 goto out_net; 1621 goto out_net;
@@ -1637,6 +1631,8 @@ int nf_conntrack_init(struct net *net)
1637 return 0; 1631 return 0;
1638 1632
1639out_net: 1633out_net:
1634 nf_conntrack_proto_fini(net);
1635out_proto:
1640 if (net_eq(net, &init_net)) 1636 if (net_eq(net, &init_net))
1641 nf_conntrack_cleanup_init_net(); 1637 nf_conntrack_cleanup_init_net();
1642out_init_net: 1638out_init_net:
diff --git a/net/netfilter/nf_conntrack_extend.c b/net/netfilter/nf_conntrack_extend.c
index 641ff5f9671..1a9545965c0 100644
--- a/net/netfilter/nf_conntrack_extend.c
+++ b/net/netfilter/nf_conntrack_extend.c
@@ -44,7 +44,8 @@ void __nf_ct_ext_destroy(struct nf_conn *ct)
44EXPORT_SYMBOL(__nf_ct_ext_destroy); 44EXPORT_SYMBOL(__nf_ct_ext_destroy);
45 45
46static void * 46static void *
47nf_ct_ext_create(struct nf_ct_ext **ext, enum nf_ct_ext_id id, gfp_t gfp) 47nf_ct_ext_create(struct nf_ct_ext **ext, enum nf_ct_ext_id id,
48 size_t var_alloc_len, gfp_t gfp)
48{ 49{
49 unsigned int off, len; 50 unsigned int off, len;
50 struct nf_ct_ext_type *t; 51 struct nf_ct_ext_type *t;
@@ -54,8 +55,8 @@ nf_ct_ext_create(struct nf_ct_ext **ext, enum nf_ct_ext_id id, gfp_t gfp)
54 t = rcu_dereference(nf_ct_ext_types[id]); 55 t = rcu_dereference(nf_ct_ext_types[id]);
55 BUG_ON(t == NULL); 56 BUG_ON(t == NULL);
56 off = ALIGN(sizeof(struct nf_ct_ext), t->align); 57 off = ALIGN(sizeof(struct nf_ct_ext), t->align);
57 len = off + t->len; 58 len = off + t->len + var_alloc_len;
58 alloc_size = t->alloc_size; 59 alloc_size = t->alloc_size + var_alloc_len;
59 rcu_read_unlock(); 60 rcu_read_unlock();
60 61
61 *ext = kzalloc(alloc_size, gfp); 62 *ext = kzalloc(alloc_size, gfp);
@@ -68,7 +69,8 @@ nf_ct_ext_create(struct nf_ct_ext **ext, enum nf_ct_ext_id id, gfp_t gfp)
68 return (void *)(*ext) + off; 69 return (void *)(*ext) + off;
69} 70}
70 71
71void *__nf_ct_ext_add(struct nf_conn *ct, enum nf_ct_ext_id id, gfp_t gfp) 72void *__nf_ct_ext_add_length(struct nf_conn *ct, enum nf_ct_ext_id id,
73 size_t var_alloc_len, gfp_t gfp)
72{ 74{
73 struct nf_ct_ext *old, *new; 75 struct nf_ct_ext *old, *new;
74 int i, newlen, newoff; 76 int i, newlen, newoff;
@@ -79,7 +81,7 @@ void *__nf_ct_ext_add(struct nf_conn *ct, enum nf_ct_ext_id id, gfp_t gfp)
79 81
80 old = ct->ext; 82 old = ct->ext;
81 if (!old) 83 if (!old)
82 return nf_ct_ext_create(&ct->ext, id, gfp); 84 return nf_ct_ext_create(&ct->ext, id, var_alloc_len, gfp);
83 85
84 if (__nf_ct_ext_exist(old, id)) 86 if (__nf_ct_ext_exist(old, id))
85 return NULL; 87 return NULL;
@@ -89,7 +91,7 @@ void *__nf_ct_ext_add(struct nf_conn *ct, enum nf_ct_ext_id id, gfp_t gfp)
89 BUG_ON(t == NULL); 91 BUG_ON(t == NULL);
90 92
91 newoff = ALIGN(old->len, t->align); 93 newoff = ALIGN(old->len, t->align);
92 newlen = newoff + t->len; 94 newlen = newoff + t->len + var_alloc_len;
93 rcu_read_unlock(); 95 rcu_read_unlock();
94 96
95 new = __krealloc(old, newlen, gfp); 97 new = __krealloc(old, newlen, gfp);
@@ -117,7 +119,7 @@ void *__nf_ct_ext_add(struct nf_conn *ct, enum nf_ct_ext_id id, gfp_t gfp)
117 memset((void *)new + newoff, 0, newlen - newoff); 119 memset((void *)new + newoff, 0, newlen - newoff);
118 return (void *)new + newoff; 120 return (void *)new + newoff;
119} 121}
120EXPORT_SYMBOL(__nf_ct_ext_add); 122EXPORT_SYMBOL(__nf_ct_ext_add_length);
121 123
122static void update_alloc_size(struct nf_ct_ext_type *type) 124static void update_alloc_size(struct nf_ct_ext_type *type)
123{ 125{
diff --git a/net/netfilter/nf_conntrack_ftp.c b/net/netfilter/nf_conntrack_ftp.c
index 8c5c95c6d34..4bb771d1f57 100644
--- a/net/netfilter/nf_conntrack_ftp.c
+++ b/net/netfilter/nf_conntrack_ftp.c
@@ -358,7 +358,7 @@ static int help(struct sk_buff *skb,
358 u32 seq; 358 u32 seq;
359 int dir = CTINFO2DIR(ctinfo); 359 int dir = CTINFO2DIR(ctinfo);
360 unsigned int uninitialized_var(matchlen), uninitialized_var(matchoff); 360 unsigned int uninitialized_var(matchlen), uninitialized_var(matchoff);
361 struct nf_ct_ftp_master *ct_ftp_info = &nfct_help(ct)->help.ct_ftp_info; 361 struct nf_ct_ftp_master *ct_ftp_info = nfct_help_data(ct);
362 struct nf_conntrack_expect *exp; 362 struct nf_conntrack_expect *exp;
363 union nf_inet_addr *daddr; 363 union nf_inet_addr *daddr;
364 struct nf_conntrack_man cmd = {}; 364 struct nf_conntrack_man cmd = {};
@@ -512,7 +512,6 @@ out_update_nl:
512} 512}
513 513
514static struct nf_conntrack_helper ftp[MAX_PORTS][2] __read_mostly; 514static struct nf_conntrack_helper ftp[MAX_PORTS][2] __read_mostly;
515static char ftp_names[MAX_PORTS][2][sizeof("ftp-65535")] __read_mostly;
516 515
517static const struct nf_conntrack_expect_policy ftp_exp_policy = { 516static const struct nf_conntrack_expect_policy ftp_exp_policy = {
518 .max_expected = 1, 517 .max_expected = 1,
@@ -541,7 +540,6 @@ static void nf_conntrack_ftp_fini(void)
541static int __init nf_conntrack_ftp_init(void) 540static int __init nf_conntrack_ftp_init(void)
542{ 541{
543 int i, j = -1, ret = 0; 542 int i, j = -1, ret = 0;
544 char *tmpname;
545 543
546 ftp_buffer = kmalloc(65536, GFP_KERNEL); 544 ftp_buffer = kmalloc(65536, GFP_KERNEL);
547 if (!ftp_buffer) 545 if (!ftp_buffer)
@@ -556,17 +554,16 @@ static int __init nf_conntrack_ftp_init(void)
556 ftp[i][0].tuple.src.l3num = PF_INET; 554 ftp[i][0].tuple.src.l3num = PF_INET;
557 ftp[i][1].tuple.src.l3num = PF_INET6; 555 ftp[i][1].tuple.src.l3num = PF_INET6;
558 for (j = 0; j < 2; j++) { 556 for (j = 0; j < 2; j++) {
557 ftp[i][j].data_len = sizeof(struct nf_ct_ftp_master);
559 ftp[i][j].tuple.src.u.tcp.port = htons(ports[i]); 558 ftp[i][j].tuple.src.u.tcp.port = htons(ports[i]);
560 ftp[i][j].tuple.dst.protonum = IPPROTO_TCP; 559 ftp[i][j].tuple.dst.protonum = IPPROTO_TCP;
561 ftp[i][j].expect_policy = &ftp_exp_policy; 560 ftp[i][j].expect_policy = &ftp_exp_policy;
562 ftp[i][j].me = THIS_MODULE; 561 ftp[i][j].me = THIS_MODULE;
563 ftp[i][j].help = help; 562 ftp[i][j].help = help;
564 tmpname = &ftp_names[i][j][0];
565 if (ports[i] == FTP_PORT) 563 if (ports[i] == FTP_PORT)
566 sprintf(tmpname, "ftp"); 564 sprintf(ftp[i][j].name, "ftp");
567 else 565 else
568 sprintf(tmpname, "ftp-%d", ports[i]); 566 sprintf(ftp[i][j].name, "ftp-%d", ports[i]);
569 ftp[i][j].name = tmpname;
570 567
571 pr_debug("nf_ct_ftp: registering helper for pf: %d " 568 pr_debug("nf_ct_ftp: registering helper for pf: %d "
572 "port: %d\n", 569 "port: %d\n",
diff --git a/net/netfilter/nf_conntrack_h323_main.c b/net/netfilter/nf_conntrack_h323_main.c
index 31f50bc3a31..4283b207e63 100644
--- a/net/netfilter/nf_conntrack_h323_main.c
+++ b/net/netfilter/nf_conntrack_h323_main.c
@@ -114,7 +114,7 @@ static int get_tpkt_data(struct sk_buff *skb, unsigned int protoff,
114 struct nf_conn *ct, enum ip_conntrack_info ctinfo, 114 struct nf_conn *ct, enum ip_conntrack_info ctinfo,
115 unsigned char **data, int *datalen, int *dataoff) 115 unsigned char **data, int *datalen, int *dataoff)
116{ 116{
117 struct nf_ct_h323_master *info = &nfct_help(ct)->help.ct_h323_info; 117 struct nf_ct_h323_master *info = nfct_help_data(ct);
118 int dir = CTINFO2DIR(ctinfo); 118 int dir = CTINFO2DIR(ctinfo);
119 const struct tcphdr *th; 119 const struct tcphdr *th;
120 struct tcphdr _tcph; 120 struct tcphdr _tcph;
@@ -617,6 +617,7 @@ static const struct nf_conntrack_expect_policy h245_exp_policy = {
617static struct nf_conntrack_helper nf_conntrack_helper_h245 __read_mostly = { 617static struct nf_conntrack_helper nf_conntrack_helper_h245 __read_mostly = {
618 .name = "H.245", 618 .name = "H.245",
619 .me = THIS_MODULE, 619 .me = THIS_MODULE,
620 .data_len = sizeof(struct nf_ct_h323_master),
620 .tuple.src.l3num = AF_UNSPEC, 621 .tuple.src.l3num = AF_UNSPEC,
621 .tuple.dst.protonum = IPPROTO_UDP, 622 .tuple.dst.protonum = IPPROTO_UDP,
622 .help = h245_help, 623 .help = h245_help,
@@ -1169,6 +1170,7 @@ static struct nf_conntrack_helper nf_conntrack_helper_q931[] __read_mostly = {
1169 { 1170 {
1170 .name = "Q.931", 1171 .name = "Q.931",
1171 .me = THIS_MODULE, 1172 .me = THIS_MODULE,
1173 .data_len = sizeof(struct nf_ct_h323_master),
1172 .tuple.src.l3num = AF_INET, 1174 .tuple.src.l3num = AF_INET,
1173 .tuple.src.u.tcp.port = cpu_to_be16(Q931_PORT), 1175 .tuple.src.u.tcp.port = cpu_to_be16(Q931_PORT),
1174 .tuple.dst.protonum = IPPROTO_TCP, 1176 .tuple.dst.protonum = IPPROTO_TCP,
@@ -1244,7 +1246,7 @@ static int expect_q931(struct sk_buff *skb, struct nf_conn *ct,
1244 unsigned char **data, 1246 unsigned char **data,
1245 TransportAddress *taddr, int count) 1247 TransportAddress *taddr, int count)
1246{ 1248{
1247 struct nf_ct_h323_master *info = &nfct_help(ct)->help.ct_h323_info; 1249 struct nf_ct_h323_master *info = nfct_help_data(ct);
1248 int dir = CTINFO2DIR(ctinfo); 1250 int dir = CTINFO2DIR(ctinfo);
1249 int ret = 0; 1251 int ret = 0;
1250 int i; 1252 int i;
@@ -1359,7 +1361,7 @@ static int process_rrq(struct sk_buff *skb, struct nf_conn *ct,
1359 enum ip_conntrack_info ctinfo, 1361 enum ip_conntrack_info ctinfo,
1360 unsigned char **data, RegistrationRequest *rrq) 1362 unsigned char **data, RegistrationRequest *rrq)
1361{ 1363{
1362 struct nf_ct_h323_master *info = &nfct_help(ct)->help.ct_h323_info; 1364 struct nf_ct_h323_master *info = nfct_help_data(ct);
1363 int ret; 1365 int ret;
1364 typeof(set_ras_addr_hook) set_ras_addr; 1366 typeof(set_ras_addr_hook) set_ras_addr;
1365 1367
@@ -1394,7 +1396,7 @@ static int process_rcf(struct sk_buff *skb, struct nf_conn *ct,
1394 enum ip_conntrack_info ctinfo, 1396 enum ip_conntrack_info ctinfo,
1395 unsigned char **data, RegistrationConfirm *rcf) 1397 unsigned char **data, RegistrationConfirm *rcf)
1396{ 1398{
1397 struct nf_ct_h323_master *info = &nfct_help(ct)->help.ct_h323_info; 1399 struct nf_ct_h323_master *info = nfct_help_data(ct);
1398 int dir = CTINFO2DIR(ctinfo); 1400 int dir = CTINFO2DIR(ctinfo);
1399 int ret; 1401 int ret;
1400 struct nf_conntrack_expect *exp; 1402 struct nf_conntrack_expect *exp;
@@ -1443,7 +1445,7 @@ static int process_urq(struct sk_buff *skb, struct nf_conn *ct,
1443 enum ip_conntrack_info ctinfo, 1445 enum ip_conntrack_info ctinfo,
1444 unsigned char **data, UnregistrationRequest *urq) 1446 unsigned char **data, UnregistrationRequest *urq)
1445{ 1447{
1446 struct nf_ct_h323_master *info = &nfct_help(ct)->help.ct_h323_info; 1448 struct nf_ct_h323_master *info = nfct_help_data(ct);
1447 int dir = CTINFO2DIR(ctinfo); 1449 int dir = CTINFO2DIR(ctinfo);
1448 int ret; 1450 int ret;
1449 typeof(set_sig_addr_hook) set_sig_addr; 1451 typeof(set_sig_addr_hook) set_sig_addr;
@@ -1475,7 +1477,7 @@ static int process_arq(struct sk_buff *skb, struct nf_conn *ct,
1475 enum ip_conntrack_info ctinfo, 1477 enum ip_conntrack_info ctinfo,
1476 unsigned char **data, AdmissionRequest *arq) 1478 unsigned char **data, AdmissionRequest *arq)
1477{ 1479{
1478 const struct nf_ct_h323_master *info = &nfct_help(ct)->help.ct_h323_info; 1480 const struct nf_ct_h323_master *info = nfct_help_data(ct);
1479 int dir = CTINFO2DIR(ctinfo); 1481 int dir = CTINFO2DIR(ctinfo);
1480 __be16 port; 1482 __be16 port;
1481 union nf_inet_addr addr; 1483 union nf_inet_addr addr;
@@ -1742,6 +1744,7 @@ static struct nf_conntrack_helper nf_conntrack_helper_ras[] __read_mostly = {
1742 { 1744 {
1743 .name = "RAS", 1745 .name = "RAS",
1744 .me = THIS_MODULE, 1746 .me = THIS_MODULE,
1747 .data_len = sizeof(struct nf_ct_h323_master),
1745 .tuple.src.l3num = AF_INET, 1748 .tuple.src.l3num = AF_INET,
1746 .tuple.src.u.udp.port = cpu_to_be16(RAS_PORT), 1749 .tuple.src.u.udp.port = cpu_to_be16(RAS_PORT),
1747 .tuple.dst.protonum = IPPROTO_UDP, 1750 .tuple.dst.protonum = IPPROTO_UDP,
@@ -1751,6 +1754,7 @@ static struct nf_conntrack_helper nf_conntrack_helper_ras[] __read_mostly = {
1751 { 1754 {
1752 .name = "RAS", 1755 .name = "RAS",
1753 .me = THIS_MODULE, 1756 .me = THIS_MODULE,
1757 .data_len = sizeof(struct nf_ct_h323_master),
1754 .tuple.src.l3num = AF_INET6, 1758 .tuple.src.l3num = AF_INET6,
1755 .tuple.src.u.udp.port = cpu_to_be16(RAS_PORT), 1759 .tuple.src.u.udp.port = cpu_to_be16(RAS_PORT),
1756 .tuple.dst.protonum = IPPROTO_UDP, 1760 .tuple.dst.protonum = IPPROTO_UDP,
diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
index 4fa2ff961f5..c4bc637feb7 100644
--- a/net/netfilter/nf_conntrack_helper.c
+++ b/net/netfilter/nf_conntrack_helper.c
@@ -30,8 +30,10 @@
30#include <net/netfilter/nf_conntrack_extend.h> 30#include <net/netfilter/nf_conntrack_extend.h>
31 31
32static DEFINE_MUTEX(nf_ct_helper_mutex); 32static DEFINE_MUTEX(nf_ct_helper_mutex);
33static struct hlist_head *nf_ct_helper_hash __read_mostly; 33struct hlist_head *nf_ct_helper_hash __read_mostly;
34static unsigned int nf_ct_helper_hsize __read_mostly; 34EXPORT_SYMBOL_GPL(nf_ct_helper_hash);
35unsigned int nf_ct_helper_hsize __read_mostly;
36EXPORT_SYMBOL_GPL(nf_ct_helper_hsize);
35static unsigned int nf_ct_helper_count __read_mostly; 37static unsigned int nf_ct_helper_count __read_mostly;
36 38
37static bool nf_ct_auto_assign_helper __read_mostly = true; 39static bool nf_ct_auto_assign_helper __read_mostly = true;
@@ -161,11 +163,14 @@ nf_conntrack_helper_try_module_get(const char *name, u16 l3num, u8 protonum)
161} 163}
162EXPORT_SYMBOL_GPL(nf_conntrack_helper_try_module_get); 164EXPORT_SYMBOL_GPL(nf_conntrack_helper_try_module_get);
163 165
164struct nf_conn_help *nf_ct_helper_ext_add(struct nf_conn *ct, gfp_t gfp) 166struct nf_conn_help *
167nf_ct_helper_ext_add(struct nf_conn *ct,
168 struct nf_conntrack_helper *helper, gfp_t gfp)
165{ 169{
166 struct nf_conn_help *help; 170 struct nf_conn_help *help;
167 171
168 help = nf_ct_ext_add(ct, NF_CT_EXT_HELPER, gfp); 172 help = nf_ct_ext_add_length(ct, NF_CT_EXT_HELPER,
173 helper->data_len, gfp);
169 if (help) 174 if (help)
170 INIT_HLIST_HEAD(&help->expectations); 175 INIT_HLIST_HEAD(&help->expectations);
171 else 176 else
@@ -218,13 +223,19 @@ int __nf_ct_try_assign_helper(struct nf_conn *ct, struct nf_conn *tmpl,
218 } 223 }
219 224
220 if (help == NULL) { 225 if (help == NULL) {
221 help = nf_ct_helper_ext_add(ct, flags); 226 help = nf_ct_helper_ext_add(ct, helper, flags);
222 if (help == NULL) { 227 if (help == NULL) {
223 ret = -ENOMEM; 228 ret = -ENOMEM;
224 goto out; 229 goto out;
225 } 230 }
226 } else { 231 } else {
227 memset(&help->help, 0, sizeof(help->help)); 232 /* We only allow helper re-assignment of the same sort since
233 * we cannot reallocate the helper extension area.
234 */
235 if (help->helper != helper) {
236 RCU_INIT_POINTER(help->helper, NULL);
237 goto out;
238 }
228 } 239 }
229 240
230 rcu_assign_pointer(help->helper, helper); 241 rcu_assign_pointer(help->helper, helper);
@@ -319,6 +330,9 @@ EXPORT_SYMBOL_GPL(nf_ct_helper_expectfn_find_by_symbol);
319 330
320int nf_conntrack_helper_register(struct nf_conntrack_helper *me) 331int nf_conntrack_helper_register(struct nf_conntrack_helper *me)
321{ 332{
333 int ret = 0;
334 struct nf_conntrack_helper *cur;
335 struct hlist_node *n;
322 unsigned int h = helper_hash(&me->tuple); 336 unsigned int h = helper_hash(&me->tuple);
323 337
324 BUG_ON(me->expect_policy == NULL); 338 BUG_ON(me->expect_policy == NULL);
@@ -326,11 +340,19 @@ int nf_conntrack_helper_register(struct nf_conntrack_helper *me)
326 BUG_ON(strlen(me->name) > NF_CT_HELPER_NAME_LEN - 1); 340 BUG_ON(strlen(me->name) > NF_CT_HELPER_NAME_LEN - 1);
327 341
328 mutex_lock(&nf_ct_helper_mutex); 342 mutex_lock(&nf_ct_helper_mutex);
343 hlist_for_each_entry(cur, n, &nf_ct_helper_hash[h], hnode) {
344 if (strncmp(cur->name, me->name, NF_CT_HELPER_NAME_LEN) == 0 &&
345 cur->tuple.src.l3num == me->tuple.src.l3num &&
346 cur->tuple.dst.protonum == me->tuple.dst.protonum) {
347 ret = -EEXIST;
348 goto out;
349 }
350 }
329 hlist_add_head_rcu(&me->hnode, &nf_ct_helper_hash[h]); 351 hlist_add_head_rcu(&me->hnode, &nf_ct_helper_hash[h]);
330 nf_ct_helper_count++; 352 nf_ct_helper_count++;
353out:
331 mutex_unlock(&nf_ct_helper_mutex); 354 mutex_unlock(&nf_ct_helper_mutex);
332 355 return ret;
333 return 0;
334} 356}
335EXPORT_SYMBOL_GPL(nf_conntrack_helper_register); 357EXPORT_SYMBOL_GPL(nf_conntrack_helper_register);
336 358
diff --git a/net/netfilter/nf_conntrack_irc.c b/net/netfilter/nf_conntrack_irc.c
index 81366c11827..009c52cfd1e 100644
--- a/net/netfilter/nf_conntrack_irc.c
+++ b/net/netfilter/nf_conntrack_irc.c
@@ -221,7 +221,6 @@ static int help(struct sk_buff *skb, unsigned int protoff,
221} 221}
222 222
223static struct nf_conntrack_helper irc[MAX_PORTS] __read_mostly; 223static struct nf_conntrack_helper irc[MAX_PORTS] __read_mostly;
224static char irc_names[MAX_PORTS][sizeof("irc-65535")] __read_mostly;
225static struct nf_conntrack_expect_policy irc_exp_policy; 224static struct nf_conntrack_expect_policy irc_exp_policy;
226 225
227static void nf_conntrack_irc_fini(void); 226static void nf_conntrack_irc_fini(void);
@@ -229,7 +228,6 @@ static void nf_conntrack_irc_fini(void);
229static int __init nf_conntrack_irc_init(void) 228static int __init nf_conntrack_irc_init(void)
230{ 229{
231 int i, ret; 230 int i, ret;
232 char *tmpname;
233 231
234 if (max_dcc_channels < 1) { 232 if (max_dcc_channels < 1) {
235 printk(KERN_ERR "nf_ct_irc: max_dcc_channels must not be zero\n"); 233 printk(KERN_ERR "nf_ct_irc: max_dcc_channels must not be zero\n");
@@ -255,12 +253,10 @@ static int __init nf_conntrack_irc_init(void)
255 irc[i].me = THIS_MODULE; 253 irc[i].me = THIS_MODULE;
256 irc[i].help = help; 254 irc[i].help = help;
257 255
258 tmpname = &irc_names[i][0];
259 if (ports[i] == IRC_PORT) 256 if (ports[i] == IRC_PORT)
260 sprintf(tmpname, "irc"); 257 sprintf(irc[i].name, "irc");
261 else 258 else
262 sprintf(tmpname, "irc-%u", i); 259 sprintf(irc[i].name, "irc-%u", i);
263 irc[i].name = tmpname;
264 260
265 ret = nf_conntrack_helper_register(&irc[i]); 261 ret = nf_conntrack_helper_register(&irc[i]);
266 if (ret) { 262 if (ret) {
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 6f4b00a8fc7..14f67a2cbcb 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -4,7 +4,7 @@
4 * (C) 2001 by Jay Schulist <jschlst@samba.org> 4 * (C) 2001 by Jay Schulist <jschlst@samba.org>
5 * (C) 2002-2006 by Harald Welte <laforge@gnumonks.org> 5 * (C) 2002-2006 by Harald Welte <laforge@gnumonks.org>
6 * (C) 2003 by Patrick Mchardy <kaber@trash.net> 6 * (C) 2003 by Patrick Mchardy <kaber@trash.net>
7 * (C) 2005-2011 by Pablo Neira Ayuso <pablo@netfilter.org> 7 * (C) 2005-2012 by Pablo Neira Ayuso <pablo@netfilter.org>
8 * 8 *
9 * Initial connection tracking via netlink development funded and 9 * Initial connection tracking via netlink development funded and
10 * generally made possible by Network Robots, Inc. (www.networkrobots.com) 10 * generally made possible by Network Robots, Inc. (www.networkrobots.com)
@@ -46,6 +46,7 @@
46#ifdef CONFIG_NF_NAT_NEEDED 46#ifdef CONFIG_NF_NAT_NEEDED
47#include <net/netfilter/nf_nat_core.h> 47#include <net/netfilter/nf_nat_core.h>
48#include <net/netfilter/nf_nat_protocol.h> 48#include <net/netfilter/nf_nat_protocol.h>
49#include <net/netfilter/nf_nat_helper.h>
49#endif 50#endif
50 51
51#include <linux/netfilter/nfnetlink.h> 52#include <linux/netfilter/nfnetlink.h>
@@ -477,7 +478,6 @@ nla_put_failure:
477 return -1; 478 return -1;
478} 479}
479 480
480#ifdef CONFIG_NF_CONNTRACK_EVENTS
481static inline size_t 481static inline size_t
482ctnetlink_proto_size(const struct nf_conn *ct) 482ctnetlink_proto_size(const struct nf_conn *ct)
483{ 483{
@@ -564,6 +564,7 @@ ctnetlink_nlmsg_size(const struct nf_conn *ct)
564 ; 564 ;
565} 565}
566 566
567#ifdef CONFIG_NF_CONNTRACK_EVENTS
567static int 568static int
568ctnetlink_conntrack_event(unsigned int events, struct nf_ct_event *item) 569ctnetlink_conntrack_event(unsigned int events, struct nf_ct_event *item)
569{ 570{
@@ -901,7 +902,8 @@ static const struct nla_policy help_nla_policy[CTA_HELP_MAX+1] = {
901}; 902};
902 903
903static inline int 904static inline int
904ctnetlink_parse_help(const struct nlattr *attr, char **helper_name) 905ctnetlink_parse_help(const struct nlattr *attr, char **helper_name,
906 struct nlattr **helpinfo)
905{ 907{
906 struct nlattr *tb[CTA_HELP_MAX+1]; 908 struct nlattr *tb[CTA_HELP_MAX+1];
907 909
@@ -912,6 +914,9 @@ ctnetlink_parse_help(const struct nlattr *attr, char **helper_name)
912 914
913 *helper_name = nla_data(tb[CTA_HELP_NAME]); 915 *helper_name = nla_data(tb[CTA_HELP_NAME]);
914 916
917 if (tb[CTA_HELP_INFO])
918 *helpinfo = tb[CTA_HELP_INFO];
919
915 return 0; 920 return 0;
916} 921}
917 922
@@ -1172,13 +1177,14 @@ ctnetlink_change_helper(struct nf_conn *ct, const struct nlattr * const cda[])
1172 struct nf_conntrack_helper *helper; 1177 struct nf_conntrack_helper *helper;
1173 struct nf_conn_help *help = nfct_help(ct); 1178 struct nf_conn_help *help = nfct_help(ct);
1174 char *helpname = NULL; 1179 char *helpname = NULL;
1180 struct nlattr *helpinfo = NULL;
1175 int err; 1181 int err;
1176 1182
1177 /* don't change helper of sibling connections */ 1183 /* don't change helper of sibling connections */
1178 if (ct->master) 1184 if (ct->master)
1179 return -EBUSY; 1185 return -EBUSY;
1180 1186
1181 err = ctnetlink_parse_help(cda[CTA_HELP], &helpname); 1187 err = ctnetlink_parse_help(cda[CTA_HELP], &helpname, &helpinfo);
1182 if (err < 0) 1188 if (err < 0)
1183 return err; 1189 return err;
1184 1190
@@ -1213,20 +1219,17 @@ ctnetlink_change_helper(struct nf_conn *ct, const struct nlattr * const cda[])
1213 } 1219 }
1214 1220
1215 if (help) { 1221 if (help) {
1216 if (help->helper == helper) 1222 if (help->helper == helper) {
1223 /* update private helper data if allowed. */
1224 if (helper->from_nlattr && helpinfo)
1225 helper->from_nlattr(helpinfo, ct);
1217 return 0; 1226 return 0;
1218 if (help->helper) 1227 } else
1219 return -EBUSY; 1228 return -EBUSY;
1220 /* need to zero data of old helper */
1221 memset(&help->help, 0, sizeof(help->help));
1222 } else {
1223 /* we cannot set a helper for an existing conntrack */
1224 return -EOPNOTSUPP;
1225 } 1229 }
1226 1230
1227 rcu_assign_pointer(help->helper, helper); 1231 /* we cannot set a helper for an existing conntrack */
1228 1232 return -EOPNOTSUPP;
1229 return 0;
1230} 1233}
1231 1234
1232static inline int 1235static inline int
@@ -1410,8 +1413,9 @@ ctnetlink_create_conntrack(struct net *net, u16 zone,
1410 rcu_read_lock(); 1413 rcu_read_lock();
1411 if (cda[CTA_HELP]) { 1414 if (cda[CTA_HELP]) {
1412 char *helpname = NULL; 1415 char *helpname = NULL;
1413 1416 struct nlattr *helpinfo = NULL;
1414 err = ctnetlink_parse_help(cda[CTA_HELP], &helpname); 1417
1418 err = ctnetlink_parse_help(cda[CTA_HELP], &helpname, &helpinfo);
1415 if (err < 0) 1419 if (err < 0)
1416 goto err2; 1420 goto err2;
1417 1421
@@ -1440,11 +1444,14 @@ ctnetlink_create_conntrack(struct net *net, u16 zone,
1440 } else { 1444 } else {
1441 struct nf_conn_help *help; 1445 struct nf_conn_help *help;
1442 1446
1443 help = nf_ct_helper_ext_add(ct, GFP_ATOMIC); 1447 help = nf_ct_helper_ext_add(ct, helper, GFP_ATOMIC);
1444 if (help == NULL) { 1448 if (help == NULL) {
1445 err = -ENOMEM; 1449 err = -ENOMEM;
1446 goto err2; 1450 goto err2;
1447 } 1451 }
1452 /* set private helper data if allowed. */
1453 if (helper->from_nlattr && helpinfo)
1454 helper->from_nlattr(helpinfo, ct);
1448 1455
1449 /* not in hash table yet so not strictly necessary */ 1456 /* not in hash table yet so not strictly necessary */
1450 RCU_INIT_POINTER(help->helper, helper); 1457 RCU_INIT_POINTER(help->helper, helper);
@@ -1620,6 +1627,288 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
1620 return err; 1627 return err;
1621} 1628}
1622 1629
1630static int
1631ctnetlink_ct_stat_cpu_fill_info(struct sk_buff *skb, u32 pid, u32 seq,
1632 __u16 cpu, const struct ip_conntrack_stat *st)
1633{
1634 struct nlmsghdr *nlh;
1635 struct nfgenmsg *nfmsg;
1636 unsigned int flags = pid ? NLM_F_MULTI : 0, event;
1637
1638 event = (NFNL_SUBSYS_CTNETLINK << 8 | IPCTNL_MSG_CT_GET_STATS_CPU);
1639 nlh = nlmsg_put(skb, pid, seq, event, sizeof(*nfmsg), flags);
1640 if (nlh == NULL)
1641 goto nlmsg_failure;
1642
1643 nfmsg = nlmsg_data(nlh);
1644 nfmsg->nfgen_family = AF_UNSPEC;
1645 nfmsg->version = NFNETLINK_V0;
1646 nfmsg->res_id = htons(cpu);
1647
1648 if (nla_put_be32(skb, CTA_STATS_SEARCHED, htonl(st->searched)) ||
1649 nla_put_be32(skb, CTA_STATS_FOUND, htonl(st->found)) ||
1650 nla_put_be32(skb, CTA_STATS_NEW, htonl(st->new)) ||
1651 nla_put_be32(skb, CTA_STATS_INVALID, htonl(st->invalid)) ||
1652 nla_put_be32(skb, CTA_STATS_IGNORE, htonl(st->ignore)) ||
1653 nla_put_be32(skb, CTA_STATS_DELETE, htonl(st->delete)) ||
1654 nla_put_be32(skb, CTA_STATS_DELETE_LIST, htonl(st->delete_list)) ||
1655 nla_put_be32(skb, CTA_STATS_INSERT, htonl(st->insert)) ||
1656 nla_put_be32(skb, CTA_STATS_INSERT_FAILED,
1657 htonl(st->insert_failed)) ||
1658 nla_put_be32(skb, CTA_STATS_DROP, htonl(st->drop)) ||
1659 nla_put_be32(skb, CTA_STATS_EARLY_DROP, htonl(st->early_drop)) ||
1660 nla_put_be32(skb, CTA_STATS_ERROR, htonl(st->error)) ||
1661 nla_put_be32(skb, CTA_STATS_SEARCH_RESTART,
1662 htonl(st->search_restart)))
1663 goto nla_put_failure;
1664
1665 nlmsg_end(skb, nlh);
1666 return skb->len;
1667
1668nla_put_failure:
1669nlmsg_failure:
1670 nlmsg_cancel(skb, nlh);
1671 return -1;
1672}
1673
1674static int
1675ctnetlink_ct_stat_cpu_dump(struct sk_buff *skb, struct netlink_callback *cb)
1676{
1677 int cpu;
1678 struct net *net = sock_net(skb->sk);
1679
1680 if (cb->args[0] == nr_cpu_ids)
1681 return 0;
1682
1683 for (cpu = cb->args[0]; cpu < nr_cpu_ids; cpu++) {
1684 const struct ip_conntrack_stat *st;
1685
1686 if (!cpu_possible(cpu))
1687 continue;
1688
1689 st = per_cpu_ptr(net->ct.stat, cpu);
1690 if (ctnetlink_ct_stat_cpu_fill_info(skb,
1691 NETLINK_CB(cb->skb).pid,
1692 cb->nlh->nlmsg_seq,
1693 cpu, st) < 0)
1694 break;
1695 }
1696 cb->args[0] = cpu;
1697
1698 return skb->len;
1699}
1700
1701static int
1702ctnetlink_stat_ct_cpu(struct sock *ctnl, struct sk_buff *skb,
1703 const struct nlmsghdr *nlh,
1704 const struct nlattr * const cda[])
1705{
1706 if (nlh->nlmsg_flags & NLM_F_DUMP) {
1707 struct netlink_dump_control c = {
1708 .dump = ctnetlink_ct_stat_cpu_dump,
1709 };
1710 return netlink_dump_start(ctnl, skb, nlh, &c);
1711 }
1712
1713 return 0;
1714}
1715
1716static int
1717ctnetlink_stat_ct_fill_info(struct sk_buff *skb, u32 pid, u32 seq, u32 type,
1718 struct net *net)
1719{
1720 struct nlmsghdr *nlh;
1721 struct nfgenmsg *nfmsg;
1722 unsigned int flags = pid ? NLM_F_MULTI : 0, event;
1723 unsigned int nr_conntracks = atomic_read(&net->ct.count);
1724
1725 event = (NFNL_SUBSYS_CTNETLINK << 8 | IPCTNL_MSG_CT_GET_STATS);
1726 nlh = nlmsg_put(skb, pid, seq, event, sizeof(*nfmsg), flags);
1727 if (nlh == NULL)
1728 goto nlmsg_failure;
1729
1730 nfmsg = nlmsg_data(nlh);
1731 nfmsg->nfgen_family = AF_UNSPEC;
1732 nfmsg->version = NFNETLINK_V0;
1733 nfmsg->res_id = 0;
1734
1735 if (nla_put_be32(skb, CTA_STATS_GLOBAL_ENTRIES, htonl(nr_conntracks)))
1736 goto nla_put_failure;
1737
1738 nlmsg_end(skb, nlh);
1739 return skb->len;
1740
1741nla_put_failure:
1742nlmsg_failure:
1743 nlmsg_cancel(skb, nlh);
1744 return -1;
1745}
1746
1747static int
1748ctnetlink_stat_ct(struct sock *ctnl, struct sk_buff *skb,
1749 const struct nlmsghdr *nlh,
1750 const struct nlattr * const cda[])
1751{
1752 struct sk_buff *skb2;
1753 int err;
1754
1755 skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1756 if (skb2 == NULL)
1757 return -ENOMEM;
1758
1759 err = ctnetlink_stat_ct_fill_info(skb2, NETLINK_CB(skb).pid,
1760 nlh->nlmsg_seq,
1761 NFNL_MSG_TYPE(nlh->nlmsg_type),
1762 sock_net(skb->sk));
1763 if (err <= 0)
1764 goto free;
1765
1766 err = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid, MSG_DONTWAIT);
1767 if (err < 0)
1768 goto out;
1769
1770 return 0;
1771
1772free:
1773 kfree_skb(skb2);
1774out:
1775 /* this avoids a loop in nfnetlink. */
1776 return err == -EAGAIN ? -ENOBUFS : err;
1777}
1778
1779#ifdef CONFIG_NETFILTER_NETLINK_QUEUE_CT
1780static size_t
1781ctnetlink_nfqueue_build_size(const struct nf_conn *ct)
1782{
1783 return 3 * nla_total_size(0) /* CTA_TUPLE_ORIG|REPL|MASTER */
1784 + 3 * nla_total_size(0) /* CTA_TUPLE_IP */
1785 + 3 * nla_total_size(0) /* CTA_TUPLE_PROTO */
1786 + 3 * nla_total_size(sizeof(u_int8_t)) /* CTA_PROTO_NUM */
1787 + nla_total_size(sizeof(u_int32_t)) /* CTA_ID */
1788 + nla_total_size(sizeof(u_int32_t)) /* CTA_STATUS */
1789 + nla_total_size(sizeof(u_int32_t)) /* CTA_TIMEOUT */
1790 + nla_total_size(0) /* CTA_PROTOINFO */
1791 + nla_total_size(0) /* CTA_HELP */
1792 + nla_total_size(NF_CT_HELPER_NAME_LEN) /* CTA_HELP_NAME */
1793 + ctnetlink_secctx_size(ct)
1794#ifdef CONFIG_NF_NAT_NEEDED
1795 + 2 * nla_total_size(0) /* CTA_NAT_SEQ_ADJ_ORIG|REPL */
1796 + 6 * nla_total_size(sizeof(u_int32_t)) /* CTA_NAT_SEQ_OFFSET */
1797#endif
1798#ifdef CONFIG_NF_CONNTRACK_MARK
1799 + nla_total_size(sizeof(u_int32_t)) /* CTA_MARK */
1800#endif
1801 + ctnetlink_proto_size(ct)
1802 ;
1803}
1804
1805static int
1806ctnetlink_nfqueue_build(struct sk_buff *skb, struct nf_conn *ct)
1807{
1808 struct nlattr *nest_parms;
1809
1810 rcu_read_lock();
1811 nest_parms = nla_nest_start(skb, CTA_TUPLE_ORIG | NLA_F_NESTED);
1812 if (!nest_parms)
1813 goto nla_put_failure;
1814 if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_ORIGINAL)) < 0)
1815 goto nla_put_failure;
1816 nla_nest_end(skb, nest_parms);
1817
1818 nest_parms = nla_nest_start(skb, CTA_TUPLE_REPLY | NLA_F_NESTED);
1819 if (!nest_parms)
1820 goto nla_put_failure;
1821 if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_REPLY)) < 0)
1822 goto nla_put_failure;
1823 nla_nest_end(skb, nest_parms);
1824
1825 if (nf_ct_zone(ct)) {
1826 if (nla_put_be16(skb, CTA_ZONE, htons(nf_ct_zone(ct))))
1827 goto nla_put_failure;
1828 }
1829
1830 if (ctnetlink_dump_id(skb, ct) < 0)
1831 goto nla_put_failure;
1832
1833 if (ctnetlink_dump_status(skb, ct) < 0)
1834 goto nla_put_failure;
1835
1836 if (ctnetlink_dump_timeout(skb, ct) < 0)
1837 goto nla_put_failure;
1838
1839 if (ctnetlink_dump_protoinfo(skb, ct) < 0)
1840 goto nla_put_failure;
1841
1842 if (ctnetlink_dump_helpinfo(skb, ct) < 0)
1843 goto nla_put_failure;
1844
1845#ifdef CONFIG_NF_CONNTRACK_SECMARK
1846 if (ct->secmark && ctnetlink_dump_secctx(skb, ct) < 0)
1847 goto nla_put_failure;
1848#endif
1849 if (ct->master && ctnetlink_dump_master(skb, ct) < 0)
1850 goto nla_put_failure;
1851
1852 if ((ct->status & IPS_SEQ_ADJUST) &&
1853 ctnetlink_dump_nat_seq_adj(skb, ct) < 0)
1854 goto nla_put_failure;
1855
1856#ifdef CONFIG_NF_CONNTRACK_MARK
1857 if (ct->mark && ctnetlink_dump_mark(skb, ct) < 0)
1858 goto nla_put_failure;
1859#endif
1860 rcu_read_unlock();
1861 return 0;
1862
1863nla_put_failure:
1864 rcu_read_unlock();
1865 return -ENOSPC;
1866}
1867
1868static int
1869ctnetlink_nfqueue_parse_ct(const struct nlattr *cda[], struct nf_conn *ct)
1870{
1871 int err;
1872
1873 if (cda[CTA_TIMEOUT]) {
1874 err = ctnetlink_change_timeout(ct, cda);
1875 if (err < 0)
1876 return err;
1877 }
1878 if (cda[CTA_STATUS]) {
1879 err = ctnetlink_change_status(ct, cda);
1880 if (err < 0)
1881 return err;
1882 }
1883 if (cda[CTA_HELP]) {
1884 err = ctnetlink_change_helper(ct, cda);
1885 if (err < 0)
1886 return err;
1887 }
1888#if defined(CONFIG_NF_CONNTRACK_MARK)
1889 if (cda[CTA_MARK])
1890 ct->mark = ntohl(nla_get_be32(cda[CTA_MARK]));
1891#endif
1892 return 0;
1893}
1894
1895static int
1896ctnetlink_nfqueue_parse(const struct nlattr *attr, struct nf_conn *ct)
1897{
1898 struct nlattr *cda[CTA_MAX+1];
1899
1900 nla_parse_nested(cda, CTA_MAX, attr, ct_nla_policy);
1901
1902 return ctnetlink_nfqueue_parse_ct((const struct nlattr **)cda, ct);
1903}
1904
1905static struct nfq_ct_hook ctnetlink_nfqueue_hook = {
1906 .build_size = ctnetlink_nfqueue_build_size,
1907 .build = ctnetlink_nfqueue_build,
1908 .parse = ctnetlink_nfqueue_parse,
1909};
1910#endif /* CONFIG_NETFILTER_NETLINK_QUEUE_CT */
1911
1623/*********************************************************************** 1912/***********************************************************************
1624 * EXPECT 1913 * EXPECT
1625 ***********************************************************************/ 1914 ***********************************************************************/
@@ -2300,6 +2589,79 @@ ctnetlink_new_expect(struct sock *ctnl, struct sk_buff *skb,
2300 return err; 2589 return err;
2301} 2590}
2302 2591
2592static int
2593ctnetlink_exp_stat_fill_info(struct sk_buff *skb, u32 pid, u32 seq, int cpu,
2594 const struct ip_conntrack_stat *st)
2595{
2596 struct nlmsghdr *nlh;
2597 struct nfgenmsg *nfmsg;
2598 unsigned int flags = pid ? NLM_F_MULTI : 0, event;
2599
2600 event = (NFNL_SUBSYS_CTNETLINK << 8 | IPCTNL_MSG_EXP_GET_STATS_CPU);
2601 nlh = nlmsg_put(skb, pid, seq, event, sizeof(*nfmsg), flags);
2602 if (nlh == NULL)
2603 goto nlmsg_failure;
2604
2605 nfmsg = nlmsg_data(nlh);
2606 nfmsg->nfgen_family = AF_UNSPEC;
2607 nfmsg->version = NFNETLINK_V0;
2608 nfmsg->res_id = htons(cpu);
2609
2610 if (nla_put_be32(skb, CTA_STATS_EXP_NEW, htonl(st->expect_new)) ||
2611 nla_put_be32(skb, CTA_STATS_EXP_CREATE, htonl(st->expect_create)) ||
2612 nla_put_be32(skb, CTA_STATS_EXP_DELETE, htonl(st->expect_delete)))
2613 goto nla_put_failure;
2614
2615 nlmsg_end(skb, nlh);
2616 return skb->len;
2617
2618nla_put_failure:
2619nlmsg_failure:
2620 nlmsg_cancel(skb, nlh);
2621 return -1;
2622}
2623
2624static int
2625ctnetlink_exp_stat_cpu_dump(struct sk_buff *skb, struct netlink_callback *cb)
2626{
2627 int cpu;
2628 struct net *net = sock_net(skb->sk);
2629
2630 if (cb->args[0] == nr_cpu_ids)
2631 return 0;
2632
2633 for (cpu = cb->args[0]; cpu < nr_cpu_ids; cpu++) {
2634 const struct ip_conntrack_stat *st;
2635
2636 if (!cpu_possible(cpu))
2637 continue;
2638
2639 st = per_cpu_ptr(net->ct.stat, cpu);
2640 if (ctnetlink_exp_stat_fill_info(skb, NETLINK_CB(cb->skb).pid,
2641 cb->nlh->nlmsg_seq,
2642 cpu, st) < 0)
2643 break;
2644 }
2645 cb->args[0] = cpu;
2646
2647 return skb->len;
2648}
2649
2650static int
2651ctnetlink_stat_exp_cpu(struct sock *ctnl, struct sk_buff *skb,
2652 const struct nlmsghdr *nlh,
2653 const struct nlattr * const cda[])
2654{
2655 if (nlh->nlmsg_flags & NLM_F_DUMP) {
2656 struct netlink_dump_control c = {
2657 .dump = ctnetlink_exp_stat_cpu_dump,
2658 };
2659 return netlink_dump_start(ctnl, skb, nlh, &c);
2660 }
2661
2662 return 0;
2663}
2664
2303#ifdef CONFIG_NF_CONNTRACK_EVENTS 2665#ifdef CONFIG_NF_CONNTRACK_EVENTS
2304static struct nf_ct_event_notifier ctnl_notifier = { 2666static struct nf_ct_event_notifier ctnl_notifier = {
2305 .fcn = ctnetlink_conntrack_event, 2667 .fcn = ctnetlink_conntrack_event,
@@ -2323,6 +2685,8 @@ static const struct nfnl_callback ctnl_cb[IPCTNL_MSG_MAX] = {
2323 [IPCTNL_MSG_CT_GET_CTRZERO] = { .call = ctnetlink_get_conntrack, 2685 [IPCTNL_MSG_CT_GET_CTRZERO] = { .call = ctnetlink_get_conntrack,
2324 .attr_count = CTA_MAX, 2686 .attr_count = CTA_MAX,
2325 .policy = ct_nla_policy }, 2687 .policy = ct_nla_policy },
2688 [IPCTNL_MSG_CT_GET_STATS_CPU] = { .call = ctnetlink_stat_ct_cpu },
2689 [IPCTNL_MSG_CT_GET_STATS] = { .call = ctnetlink_stat_ct },
2326}; 2690};
2327 2691
2328static const struct nfnl_callback ctnl_exp_cb[IPCTNL_MSG_EXP_MAX] = { 2692static const struct nfnl_callback ctnl_exp_cb[IPCTNL_MSG_EXP_MAX] = {
@@ -2335,6 +2699,7 @@ static const struct nfnl_callback ctnl_exp_cb[IPCTNL_MSG_EXP_MAX] = {
2335 [IPCTNL_MSG_EXP_DELETE] = { .call = ctnetlink_del_expect, 2699 [IPCTNL_MSG_EXP_DELETE] = { .call = ctnetlink_del_expect,
2336 .attr_count = CTA_EXPECT_MAX, 2700 .attr_count = CTA_EXPECT_MAX,
2337 .policy = exp_nla_policy }, 2701 .policy = exp_nla_policy },
2702 [IPCTNL_MSG_EXP_GET_STATS_CPU] = { .call = ctnetlink_stat_exp_cpu },
2338}; 2703};
2339 2704
2340static const struct nfnetlink_subsystem ctnl_subsys = { 2705static const struct nfnetlink_subsystem ctnl_subsys = {
@@ -2424,7 +2789,10 @@ static int __init ctnetlink_init(void)
2424 pr_err("ctnetlink_init: cannot register pernet operations\n"); 2789 pr_err("ctnetlink_init: cannot register pernet operations\n");
2425 goto err_unreg_exp_subsys; 2790 goto err_unreg_exp_subsys;
2426 } 2791 }
2427 2792#ifdef CONFIG_NETFILTER_NETLINK_QUEUE_CT
2793 /* setup interaction between nf_queue and nf_conntrack_netlink. */
2794 RCU_INIT_POINTER(nfq_ct_hook, &ctnetlink_nfqueue_hook);
2795#endif
2428 return 0; 2796 return 0;
2429 2797
2430err_unreg_exp_subsys: 2798err_unreg_exp_subsys:
@@ -2442,6 +2810,9 @@ static void __exit ctnetlink_exit(void)
2442 unregister_pernet_subsys(&ctnetlink_net_ops); 2810 unregister_pernet_subsys(&ctnetlink_net_ops);
2443 nfnetlink_subsys_unregister(&ctnl_exp_subsys); 2811 nfnetlink_subsys_unregister(&ctnl_exp_subsys);
2444 nfnetlink_subsys_unregister(&ctnl_subsys); 2812 nfnetlink_subsys_unregister(&ctnl_subsys);
2813#ifdef CONFIG_NETFILTER_NETLINK_QUEUE_CT
2814 RCU_INIT_POINTER(nfq_ct_hook, NULL);
2815#endif
2445} 2816}
2446 2817
2447module_init(ctnetlink_init); 2818module_init(ctnetlink_init);
diff --git a/net/netfilter/nf_conntrack_pptp.c b/net/netfilter/nf_conntrack_pptp.c
index 31d56b23b9e..6fed9ec3524 100644
--- a/net/netfilter/nf_conntrack_pptp.c
+++ b/net/netfilter/nf_conntrack_pptp.c
@@ -174,7 +174,7 @@ static int destroy_sibling_or_exp(struct net *net, struct nf_conn *ct,
174static void pptp_destroy_siblings(struct nf_conn *ct) 174static void pptp_destroy_siblings(struct nf_conn *ct)
175{ 175{
176 struct net *net = nf_ct_net(ct); 176 struct net *net = nf_ct_net(ct);
177 const struct nf_conn_help *help = nfct_help(ct); 177 const struct nf_ct_pptp_master *ct_pptp_info = nfct_help_data(ct);
178 struct nf_conntrack_tuple t; 178 struct nf_conntrack_tuple t;
179 179
180 nf_ct_gre_keymap_destroy(ct); 180 nf_ct_gre_keymap_destroy(ct);
@@ -182,16 +182,16 @@ static void pptp_destroy_siblings(struct nf_conn *ct)
182 /* try original (pns->pac) tuple */ 182 /* try original (pns->pac) tuple */
183 memcpy(&t, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple, sizeof(t)); 183 memcpy(&t, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple, sizeof(t));
184 t.dst.protonum = IPPROTO_GRE; 184 t.dst.protonum = IPPROTO_GRE;
185 t.src.u.gre.key = help->help.ct_pptp_info.pns_call_id; 185 t.src.u.gre.key = ct_pptp_info->pns_call_id;
186 t.dst.u.gre.key = help->help.ct_pptp_info.pac_call_id; 186 t.dst.u.gre.key = ct_pptp_info->pac_call_id;
187 if (!destroy_sibling_or_exp(net, ct, &t)) 187 if (!destroy_sibling_or_exp(net, ct, &t))
188 pr_debug("failed to timeout original pns->pac ct/exp\n"); 188 pr_debug("failed to timeout original pns->pac ct/exp\n");
189 189
190 /* try reply (pac->pns) tuple */ 190 /* try reply (pac->pns) tuple */
191 memcpy(&t, &ct->tuplehash[IP_CT_DIR_REPLY].tuple, sizeof(t)); 191 memcpy(&t, &ct->tuplehash[IP_CT_DIR_REPLY].tuple, sizeof(t));
192 t.dst.protonum = IPPROTO_GRE; 192 t.dst.protonum = IPPROTO_GRE;
193 t.src.u.gre.key = help->help.ct_pptp_info.pac_call_id; 193 t.src.u.gre.key = ct_pptp_info->pac_call_id;
194 t.dst.u.gre.key = help->help.ct_pptp_info.pns_call_id; 194 t.dst.u.gre.key = ct_pptp_info->pns_call_id;
195 if (!destroy_sibling_or_exp(net, ct, &t)) 195 if (!destroy_sibling_or_exp(net, ct, &t))
196 pr_debug("failed to timeout reply pac->pns ct/exp\n"); 196 pr_debug("failed to timeout reply pac->pns ct/exp\n");
197} 197}
@@ -269,7 +269,7 @@ pptp_inbound_pkt(struct sk_buff *skb,
269 struct nf_conn *ct, 269 struct nf_conn *ct,
270 enum ip_conntrack_info ctinfo) 270 enum ip_conntrack_info ctinfo)
271{ 271{
272 struct nf_ct_pptp_master *info = &nfct_help(ct)->help.ct_pptp_info; 272 struct nf_ct_pptp_master *info = nfct_help_data(ct);
273 u_int16_t msg; 273 u_int16_t msg;
274 __be16 cid = 0, pcid = 0; 274 __be16 cid = 0, pcid = 0;
275 typeof(nf_nat_pptp_hook_inbound) nf_nat_pptp_inbound; 275 typeof(nf_nat_pptp_hook_inbound) nf_nat_pptp_inbound;
@@ -396,7 +396,7 @@ pptp_outbound_pkt(struct sk_buff *skb,
396 struct nf_conn *ct, 396 struct nf_conn *ct,
397 enum ip_conntrack_info ctinfo) 397 enum ip_conntrack_info ctinfo)
398{ 398{
399 struct nf_ct_pptp_master *info = &nfct_help(ct)->help.ct_pptp_info; 399 struct nf_ct_pptp_master *info = nfct_help_data(ct);
400 u_int16_t msg; 400 u_int16_t msg;
401 __be16 cid = 0, pcid = 0; 401 __be16 cid = 0, pcid = 0;
402 typeof(nf_nat_pptp_hook_outbound) nf_nat_pptp_outbound; 402 typeof(nf_nat_pptp_hook_outbound) nf_nat_pptp_outbound;
@@ -506,7 +506,7 @@ conntrack_pptp_help(struct sk_buff *skb, unsigned int protoff,
506 506
507{ 507{
508 int dir = CTINFO2DIR(ctinfo); 508 int dir = CTINFO2DIR(ctinfo);
509 const struct nf_ct_pptp_master *info = &nfct_help(ct)->help.ct_pptp_info; 509 const struct nf_ct_pptp_master *info = nfct_help_data(ct);
510 const struct tcphdr *tcph; 510 const struct tcphdr *tcph;
511 struct tcphdr _tcph; 511 struct tcphdr _tcph;
512 const struct pptp_pkt_hdr *pptph; 512 const struct pptp_pkt_hdr *pptph;
@@ -592,6 +592,7 @@ static const struct nf_conntrack_expect_policy pptp_exp_policy = {
592static struct nf_conntrack_helper pptp __read_mostly = { 592static struct nf_conntrack_helper pptp __read_mostly = {
593 .name = "pptp", 593 .name = "pptp",
594 .me = THIS_MODULE, 594 .me = THIS_MODULE,
595 .data_len = sizeof(struct nf_ct_pptp_master),
595 .tuple.src.l3num = AF_INET, 596 .tuple.src.l3num = AF_INET,
596 .tuple.src.u.tcp.port = cpu_to_be16(PPTP_CONTROL_PORT), 597 .tuple.src.u.tcp.port = cpu_to_be16(PPTP_CONTROL_PORT),
597 .tuple.dst.protonum = IPPROTO_TCP, 598 .tuple.dst.protonum = IPPROTO_TCP,
diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c
index 8b631b07a64..0dc63854390 100644
--- a/net/netfilter/nf_conntrack_proto.c
+++ b/net/netfilter/nf_conntrack_proto.c
@@ -36,28 +36,32 @@ static DEFINE_MUTEX(nf_ct_proto_mutex);
36 36
37#ifdef CONFIG_SYSCTL 37#ifdef CONFIG_SYSCTL
38static int 38static int
39nf_ct_register_sysctl(struct ctl_table_header **header, const char *path, 39nf_ct_register_sysctl(struct net *net,
40 struct ctl_table *table, unsigned int *users) 40 struct ctl_table_header **header,
41 const char *path,
42 struct ctl_table *table)
41{ 43{
42 if (*header == NULL) { 44 if (*header == NULL) {
43 *header = register_net_sysctl(&init_net, path, table); 45 *header = register_net_sysctl(net, path, table);
44 if (*header == NULL) 46 if (*header == NULL)
45 return -ENOMEM; 47 return -ENOMEM;
46 } 48 }
47 if (users != NULL) 49
48 (*users)++;
49 return 0; 50 return 0;
50} 51}
51 52
52static void 53static void
53nf_ct_unregister_sysctl(struct ctl_table_header **header, 54nf_ct_unregister_sysctl(struct ctl_table_header **header,
54 struct ctl_table *table, unsigned int *users) 55 struct ctl_table **table,
56 unsigned int users)
55{ 57{
56 if (users != NULL && --*users > 0) 58 if (users > 0)
57 return; 59 return;
58 60
59 unregister_net_sysctl_table(*header); 61 unregister_net_sysctl_table(*header);
62 kfree(*table);
60 *header = NULL; 63 *header = NULL;
64 *table = NULL;
61} 65}
62#endif 66#endif
63 67
@@ -161,30 +165,56 @@ static int kill_l4proto(struct nf_conn *i, void *data)
161 nf_ct_l3num(i) == l4proto->l3proto; 165 nf_ct_l3num(i) == l4proto->l3proto;
162} 166}
163 167
164static int nf_ct_l3proto_register_sysctl(struct nf_conntrack_l3proto *l3proto) 168static struct nf_ip_net *nf_ct_l3proto_net(struct net *net,
169 struct nf_conntrack_l3proto *l3proto)
165{ 170{
166 int err = 0; 171 if (l3proto->l3proto == PF_INET)
172 return &net->ct.nf_ct_proto;
173 else
174 return NULL;
175}
167 176
168#ifdef CONFIG_SYSCTL 177static int nf_ct_l3proto_register_sysctl(struct net *net,
169 if (l3proto->ctl_table != NULL) { 178 struct nf_conntrack_l3proto *l3proto)
170 err = nf_ct_register_sysctl(&l3proto->ctl_table_header, 179{
180 int err = 0;
181 struct nf_ip_net *in = nf_ct_l3proto_net(net, l3proto);
182 /* nf_conntrack_l3proto_ipv6 doesn't support sysctl */
183 if (in == NULL)
184 return 0;
185
186#if defined(CONFIG_SYSCTL) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT)
187 if (in->ctl_table != NULL) {
188 err = nf_ct_register_sysctl(net,
189 &in->ctl_table_header,
171 l3proto->ctl_table_path, 190 l3proto->ctl_table_path,
172 l3proto->ctl_table, NULL); 191 in->ctl_table);
192 if (err < 0) {
193 kfree(in->ctl_table);
194 in->ctl_table = NULL;
195 }
173 } 196 }
174#endif 197#endif
175 return err; 198 return err;
176} 199}
177 200
178static void nf_ct_l3proto_unregister_sysctl(struct nf_conntrack_l3proto *l3proto) 201static void nf_ct_l3proto_unregister_sysctl(struct net *net,
202 struct nf_conntrack_l3proto *l3proto)
179{ 203{
180#ifdef CONFIG_SYSCTL 204 struct nf_ip_net *in = nf_ct_l3proto_net(net, l3proto);
181 if (l3proto->ctl_table_header != NULL) 205
182 nf_ct_unregister_sysctl(&l3proto->ctl_table_header, 206 if (in == NULL)
183 l3proto->ctl_table, NULL); 207 return;
208#if defined(CONFIG_SYSCTL) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT)
209 if (in->ctl_table_header != NULL)
210 nf_ct_unregister_sysctl(&in->ctl_table_header,
211 &in->ctl_table,
212 0);
184#endif 213#endif
185} 214}
186 215
187int nf_conntrack_l3proto_register(struct nf_conntrack_l3proto *proto) 216static int
217nf_conntrack_l3proto_register_net(struct nf_conntrack_l3proto *proto)
188{ 218{
189 int ret = 0; 219 int ret = 0;
190 struct nf_conntrack_l3proto *old; 220 struct nf_conntrack_l3proto *old;
@@ -203,10 +233,6 @@ int nf_conntrack_l3proto_register(struct nf_conntrack_l3proto *proto)
203 goto out_unlock; 233 goto out_unlock;
204 } 234 }
205 235
206 ret = nf_ct_l3proto_register_sysctl(proto);
207 if (ret < 0)
208 goto out_unlock;
209
210 if (proto->nlattr_tuple_size) 236 if (proto->nlattr_tuple_size)
211 proto->nla_size = 3 * proto->nlattr_tuple_size(); 237 proto->nla_size = 3 * proto->nlattr_tuple_size();
212 238
@@ -215,13 +241,37 @@ int nf_conntrack_l3proto_register(struct nf_conntrack_l3proto *proto)
215out_unlock: 241out_unlock:
216 mutex_unlock(&nf_ct_proto_mutex); 242 mutex_unlock(&nf_ct_proto_mutex);
217 return ret; 243 return ret;
244
218} 245}
219EXPORT_SYMBOL_GPL(nf_conntrack_l3proto_register);
220 246
221void nf_conntrack_l3proto_unregister(struct nf_conntrack_l3proto *proto) 247int nf_conntrack_l3proto_register(struct net *net,
248 struct nf_conntrack_l3proto *proto)
222{ 249{
223 struct net *net; 250 int ret = 0;
251
252 if (proto->init_net) {
253 ret = proto->init_net(net);
254 if (ret < 0)
255 return ret;
256 }
257
258 ret = nf_ct_l3proto_register_sysctl(net, proto);
259 if (ret < 0)
260 return ret;
224 261
262 if (net == &init_net) {
263 ret = nf_conntrack_l3proto_register_net(proto);
264 if (ret < 0)
265 nf_ct_l3proto_unregister_sysctl(net, proto);
266 }
267
268 return ret;
269}
270EXPORT_SYMBOL_GPL(nf_conntrack_l3proto_register);
271
272static void
273nf_conntrack_l3proto_unregister_net(struct nf_conntrack_l3proto *proto)
274{
225 BUG_ON(proto->l3proto >= AF_MAX); 275 BUG_ON(proto->l3proto >= AF_MAX);
226 276
227 mutex_lock(&nf_ct_proto_mutex); 277 mutex_lock(&nf_ct_proto_mutex);
@@ -230,68 +280,107 @@ void nf_conntrack_l3proto_unregister(struct nf_conntrack_l3proto *proto)
230 ) != proto); 280 ) != proto);
231 rcu_assign_pointer(nf_ct_l3protos[proto->l3proto], 281 rcu_assign_pointer(nf_ct_l3protos[proto->l3proto],
232 &nf_conntrack_l3proto_generic); 282 &nf_conntrack_l3proto_generic);
233 nf_ct_l3proto_unregister_sysctl(proto);
234 mutex_unlock(&nf_ct_proto_mutex); 283 mutex_unlock(&nf_ct_proto_mutex);
235 284
236 synchronize_rcu(); 285 synchronize_rcu();
286}
287
288void nf_conntrack_l3proto_unregister(struct net *net,
289 struct nf_conntrack_l3proto *proto)
290{
291 if (net == &init_net)
292 nf_conntrack_l3proto_unregister_net(proto);
293
294 nf_ct_l3proto_unregister_sysctl(net, proto);
237 295
238 /* Remove all contrack entries for this protocol */ 296 /* Remove all contrack entries for this protocol */
239 rtnl_lock(); 297 rtnl_lock();
240 for_each_net(net) 298 nf_ct_iterate_cleanup(net, kill_l3proto, proto);
241 nf_ct_iterate_cleanup(net, kill_l3proto, proto);
242 rtnl_unlock(); 299 rtnl_unlock();
243} 300}
244EXPORT_SYMBOL_GPL(nf_conntrack_l3proto_unregister); 301EXPORT_SYMBOL_GPL(nf_conntrack_l3proto_unregister);
245 302
246static int nf_ct_l4proto_register_sysctl(struct nf_conntrack_l4proto *l4proto) 303static struct nf_proto_net *nf_ct_l4proto_net(struct net *net,
304 struct nf_conntrack_l4proto *l4proto)
305{
306 if (l4proto->get_net_proto) {
307 /* statically built-in protocols use static per-net */
308 return l4proto->get_net_proto(net);
309 } else if (l4proto->net_id) {
310 /* ... and loadable protocols use dynamic per-net */
311 return net_generic(net, *l4proto->net_id);
312 }
313 return NULL;
314}
315
316static
317int nf_ct_l4proto_register_sysctl(struct net *net,
318 struct nf_proto_net *pn,
319 struct nf_conntrack_l4proto *l4proto)
247{ 320{
248 int err = 0; 321 int err = 0;
249 322
250#ifdef CONFIG_SYSCTL 323#ifdef CONFIG_SYSCTL
251 if (l4proto->ctl_table != NULL) { 324 if (pn->ctl_table != NULL) {
252 err = nf_ct_register_sysctl(l4proto->ctl_table_header, 325 err = nf_ct_register_sysctl(net,
326 &pn->ctl_table_header,
253 "net/netfilter", 327 "net/netfilter",
254 l4proto->ctl_table, 328 pn->ctl_table);
255 l4proto->ctl_table_users); 329 if (err < 0) {
256 if (err < 0) 330 if (!pn->users) {
257 goto out; 331 kfree(pn->ctl_table);
332 pn->ctl_table = NULL;
333 }
334 }
258 } 335 }
259#ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT 336#ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
260 if (l4proto->ctl_compat_table != NULL) { 337 if (l4proto->l3proto != AF_INET6 && pn->ctl_compat_table != NULL) {
261 err = nf_ct_register_sysctl(&l4proto->ctl_compat_table_header, 338 if (err < 0) {
339 nf_ct_kfree_compat_sysctl_table(pn);
340 goto out;
341 }
342 err = nf_ct_register_sysctl(net,
343 &pn->ctl_compat_header,
262 "net/ipv4/netfilter", 344 "net/ipv4/netfilter",
263 l4proto->ctl_compat_table, NULL); 345 pn->ctl_compat_table);
264 if (err == 0) 346 if (err == 0)
265 goto out; 347 goto out;
266 nf_ct_unregister_sysctl(l4proto->ctl_table_header, 348
267 l4proto->ctl_table, 349 nf_ct_kfree_compat_sysctl_table(pn);
268 l4proto->ctl_table_users); 350 nf_ct_unregister_sysctl(&pn->ctl_table_header,
351 &pn->ctl_table,
352 pn->users);
269 } 353 }
270#endif /* CONFIG_NF_CONNTRACK_PROC_COMPAT */
271out: 354out:
355#endif /* CONFIG_NF_CONNTRACK_PROC_COMPAT */
272#endif /* CONFIG_SYSCTL */ 356#endif /* CONFIG_SYSCTL */
273 return err; 357 return err;
274} 358}
275 359
276static void nf_ct_l4proto_unregister_sysctl(struct nf_conntrack_l4proto *l4proto) 360static
361void nf_ct_l4proto_unregister_sysctl(struct net *net,
362 struct nf_proto_net *pn,
363 struct nf_conntrack_l4proto *l4proto)
277{ 364{
278#ifdef CONFIG_SYSCTL 365#ifdef CONFIG_SYSCTL
279 if (l4proto->ctl_table_header != NULL && 366 if (pn->ctl_table_header != NULL)
280 *l4proto->ctl_table_header != NULL) 367 nf_ct_unregister_sysctl(&pn->ctl_table_header,
281 nf_ct_unregister_sysctl(l4proto->ctl_table_header, 368 &pn->ctl_table,
282 l4proto->ctl_table, 369 pn->users);
283 l4proto->ctl_table_users); 370
284#ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT 371#ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
285 if (l4proto->ctl_compat_table_header != NULL) 372 if (l4proto->l3proto != AF_INET6 && pn->ctl_compat_header != NULL)
286 nf_ct_unregister_sysctl(&l4proto->ctl_compat_table_header, 373 nf_ct_unregister_sysctl(&pn->ctl_compat_header,
287 l4proto->ctl_compat_table, NULL); 374 &pn->ctl_compat_table,
375 0);
288#endif /* CONFIG_NF_CONNTRACK_PROC_COMPAT */ 376#endif /* CONFIG_NF_CONNTRACK_PROC_COMPAT */
289#endif /* CONFIG_SYSCTL */ 377#endif /* CONFIG_SYSCTL */
290} 378}
291 379
292/* FIXME: Allow NULL functions and sub in pointers to generic for 380/* FIXME: Allow NULL functions and sub in pointers to generic for
293 them. --RR */ 381 them. --RR */
294int nf_conntrack_l4proto_register(struct nf_conntrack_l4proto *l4proto) 382static int
383nf_conntrack_l4proto_register_net(struct nf_conntrack_l4proto *l4proto)
295{ 384{
296 int ret = 0; 385 int ret = 0;
297 386
@@ -333,10 +422,6 @@ int nf_conntrack_l4proto_register(struct nf_conntrack_l4proto *l4proto)
333 goto out_unlock; 422 goto out_unlock;
334 } 423 }
335 424
336 ret = nf_ct_l4proto_register_sysctl(l4proto);
337 if (ret < 0)
338 goto out_unlock;
339
340 l4proto->nla_size = 0; 425 l4proto->nla_size = 0;
341 if (l4proto->nlattr_size) 426 if (l4proto->nlattr_size)
342 l4proto->nla_size += l4proto->nlattr_size(); 427 l4proto->nla_size += l4proto->nlattr_size();
@@ -345,17 +430,48 @@ int nf_conntrack_l4proto_register(struct nf_conntrack_l4proto *l4proto)
345 430
346 rcu_assign_pointer(nf_ct_protos[l4proto->l3proto][l4proto->l4proto], 431 rcu_assign_pointer(nf_ct_protos[l4proto->l3proto][l4proto->l4proto],
347 l4proto); 432 l4proto);
348
349out_unlock: 433out_unlock:
350 mutex_unlock(&nf_ct_proto_mutex); 434 mutex_unlock(&nf_ct_proto_mutex);
351 return ret; 435 return ret;
352} 436}
353EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_register);
354 437
355void nf_conntrack_l4proto_unregister(struct nf_conntrack_l4proto *l4proto) 438int nf_conntrack_l4proto_register(struct net *net,
439 struct nf_conntrack_l4proto *l4proto)
356{ 440{
357 struct net *net; 441 int ret = 0;
442 struct nf_proto_net *pn = NULL;
358 443
444 if (l4proto->init_net) {
445 ret = l4proto->init_net(net, l4proto->l3proto);
446 if (ret < 0)
447 goto out;
448 }
449
450 pn = nf_ct_l4proto_net(net, l4proto);
451 if (pn == NULL)
452 goto out;
453
454 ret = nf_ct_l4proto_register_sysctl(net, pn, l4proto);
455 if (ret < 0)
456 goto out;
457
458 if (net == &init_net) {
459 ret = nf_conntrack_l4proto_register_net(l4proto);
460 if (ret < 0) {
461 nf_ct_l4proto_unregister_sysctl(net, pn, l4proto);
462 goto out;
463 }
464 }
465
466 pn->users++;
467out:
468 return ret;
469}
470EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_register);
471
472static void
473nf_conntrack_l4proto_unregister_net(struct nf_conntrack_l4proto *l4proto)
474{
359 BUG_ON(l4proto->l3proto >= PF_MAX); 475 BUG_ON(l4proto->l3proto >= PF_MAX);
360 476
361 mutex_lock(&nf_ct_proto_mutex); 477 mutex_lock(&nf_ct_proto_mutex);
@@ -365,41 +481,73 @@ void nf_conntrack_l4proto_unregister(struct nf_conntrack_l4proto *l4proto)
365 ) != l4proto); 481 ) != l4proto);
366 rcu_assign_pointer(nf_ct_protos[l4proto->l3proto][l4proto->l4proto], 482 rcu_assign_pointer(nf_ct_protos[l4proto->l3proto][l4proto->l4proto],
367 &nf_conntrack_l4proto_generic); 483 &nf_conntrack_l4proto_generic);
368 nf_ct_l4proto_unregister_sysctl(l4proto);
369 mutex_unlock(&nf_ct_proto_mutex); 484 mutex_unlock(&nf_ct_proto_mutex);
370 485
371 synchronize_rcu(); 486 synchronize_rcu();
487}
488
489void nf_conntrack_l4proto_unregister(struct net *net,
490 struct nf_conntrack_l4proto *l4proto)
491{
492 struct nf_proto_net *pn = NULL;
493
494 if (net == &init_net)
495 nf_conntrack_l4proto_unregister_net(l4proto);
496
497 pn = nf_ct_l4proto_net(net, l4proto);
498 if (pn == NULL)
499 return;
500
501 pn->users--;
502 nf_ct_l4proto_unregister_sysctl(net, pn, l4proto);
372 503
373 /* Remove all contrack entries for this protocol */ 504 /* Remove all contrack entries for this protocol */
374 rtnl_lock(); 505 rtnl_lock();
375 for_each_net(net) 506 nf_ct_iterate_cleanup(net, kill_l4proto, l4proto);
376 nf_ct_iterate_cleanup(net, kill_l4proto, l4proto);
377 rtnl_unlock(); 507 rtnl_unlock();
378} 508}
379EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_unregister); 509EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_unregister);
380 510
381int nf_conntrack_proto_init(void) 511int nf_conntrack_proto_init(struct net *net)
382{ 512{
383 unsigned int i; 513 unsigned int i;
384 int err; 514 int err;
515 struct nf_proto_net *pn = nf_ct_l4proto_net(net,
516 &nf_conntrack_l4proto_generic);
385 517
386 err = nf_ct_l4proto_register_sysctl(&nf_conntrack_l4proto_generic); 518 err = nf_conntrack_l4proto_generic.init_net(net,
519 nf_conntrack_l4proto_generic.l3proto);
520 if (err < 0)
521 return err;
522 err = nf_ct_l4proto_register_sysctl(net,
523 pn,
524 &nf_conntrack_l4proto_generic);
387 if (err < 0) 525 if (err < 0)
388 return err; 526 return err;
389 527
390 for (i = 0; i < AF_MAX; i++) 528 if (net == &init_net) {
391 rcu_assign_pointer(nf_ct_l3protos[i], 529 for (i = 0; i < AF_MAX; i++)
392 &nf_conntrack_l3proto_generic); 530 rcu_assign_pointer(nf_ct_l3protos[i],
531 &nf_conntrack_l3proto_generic);
532 }
533
534 pn->users++;
393 return 0; 535 return 0;
394} 536}
395 537
396void nf_conntrack_proto_fini(void) 538void nf_conntrack_proto_fini(struct net *net)
397{ 539{
398 unsigned int i; 540 unsigned int i;
399 541 struct nf_proto_net *pn = nf_ct_l4proto_net(net,
400 nf_ct_l4proto_unregister_sysctl(&nf_conntrack_l4proto_generic); 542 &nf_conntrack_l4proto_generic);
401 543
402 /* free l3proto protocol tables */ 544 pn->users--;
403 for (i = 0; i < PF_MAX; i++) 545 nf_ct_l4proto_unregister_sysctl(net,
404 kfree(nf_ct_protos[i]); 546 pn,
547 &nf_conntrack_l4proto_generic);
548 if (net == &init_net) {
549 /* free l3proto protocol tables */
550 for (i = 0; i < PF_MAX; i++)
551 kfree(nf_ct_protos[i]);
552 }
405} 553}
diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c
index ef706a485be..6535326cf07 100644
--- a/net/netfilter/nf_conntrack_proto_dccp.c
+++ b/net/netfilter/nf_conntrack_proto_dccp.c
@@ -387,12 +387,9 @@ dccp_state_table[CT_DCCP_ROLE_MAX + 1][DCCP_PKT_SYNCACK + 1][CT_DCCP_MAX + 1] =
387/* this module per-net specifics */ 387/* this module per-net specifics */
388static int dccp_net_id __read_mostly; 388static int dccp_net_id __read_mostly;
389struct dccp_net { 389struct dccp_net {
390 struct nf_proto_net pn;
390 int dccp_loose; 391 int dccp_loose;
391 unsigned int dccp_timeout[CT_DCCP_MAX + 1]; 392 unsigned int dccp_timeout[CT_DCCP_MAX + 1];
392#ifdef CONFIG_SYSCTL
393 struct ctl_table_header *sysctl_header;
394 struct ctl_table *sysctl_table;
395#endif
396}; 393};
397 394
398static inline struct dccp_net *dccp_pernet(struct net *net) 395static inline struct dccp_net *dccp_pernet(struct net *net)
@@ -715,9 +712,10 @@ static int dccp_nlattr_size(void)
715#include <linux/netfilter/nfnetlink.h> 712#include <linux/netfilter/nfnetlink.h>
716#include <linux/netfilter/nfnetlink_cttimeout.h> 713#include <linux/netfilter/nfnetlink_cttimeout.h>
717 714
718static int dccp_timeout_nlattr_to_obj(struct nlattr *tb[], void *data) 715static int dccp_timeout_nlattr_to_obj(struct nlattr *tb[],
716 struct net *net, void *data)
719{ 717{
720 struct dccp_net *dn = dccp_pernet(&init_net); 718 struct dccp_net *dn = dccp_pernet(net);
721 unsigned int *timeouts = data; 719 unsigned int *timeouts = data;
722 int i; 720 int i;
723 721
@@ -817,6 +815,51 @@ static struct ctl_table dccp_sysctl_table[] = {
817}; 815};
818#endif /* CONFIG_SYSCTL */ 816#endif /* CONFIG_SYSCTL */
819 817
818static int dccp_kmemdup_sysctl_table(struct nf_proto_net *pn,
819 struct dccp_net *dn)
820{
821#ifdef CONFIG_SYSCTL
822 if (pn->ctl_table)
823 return 0;
824
825 pn->ctl_table = kmemdup(dccp_sysctl_table,
826 sizeof(dccp_sysctl_table),
827 GFP_KERNEL);
828 if (!pn->ctl_table)
829 return -ENOMEM;
830
831 pn->ctl_table[0].data = &dn->dccp_timeout[CT_DCCP_REQUEST];
832 pn->ctl_table[1].data = &dn->dccp_timeout[CT_DCCP_RESPOND];
833 pn->ctl_table[2].data = &dn->dccp_timeout[CT_DCCP_PARTOPEN];
834 pn->ctl_table[3].data = &dn->dccp_timeout[CT_DCCP_OPEN];
835 pn->ctl_table[4].data = &dn->dccp_timeout[CT_DCCP_CLOSEREQ];
836 pn->ctl_table[5].data = &dn->dccp_timeout[CT_DCCP_CLOSING];
837 pn->ctl_table[6].data = &dn->dccp_timeout[CT_DCCP_TIMEWAIT];
838 pn->ctl_table[7].data = &dn->dccp_loose;
839#endif
840 return 0;
841}
842
843static int dccp_init_net(struct net *net, u_int16_t proto)
844{
845 struct dccp_net *dn = dccp_pernet(net);
846 struct nf_proto_net *pn = &dn->pn;
847
848 if (!pn->users) {
849 /* default values */
850 dn->dccp_loose = 1;
851 dn->dccp_timeout[CT_DCCP_REQUEST] = 2 * DCCP_MSL;
852 dn->dccp_timeout[CT_DCCP_RESPOND] = 4 * DCCP_MSL;
853 dn->dccp_timeout[CT_DCCP_PARTOPEN] = 4 * DCCP_MSL;
854 dn->dccp_timeout[CT_DCCP_OPEN] = 12 * 3600 * HZ;
855 dn->dccp_timeout[CT_DCCP_CLOSEREQ] = 64 * HZ;
856 dn->dccp_timeout[CT_DCCP_CLOSING] = 64 * HZ;
857 dn->dccp_timeout[CT_DCCP_TIMEWAIT] = 2 * DCCP_MSL;
858 }
859
860 return dccp_kmemdup_sysctl_table(pn, dn);
861}
862
820static struct nf_conntrack_l4proto dccp_proto4 __read_mostly = { 863static struct nf_conntrack_l4proto dccp_proto4 __read_mostly = {
821 .l3proto = AF_INET, 864 .l3proto = AF_INET,
822 .l4proto = IPPROTO_DCCP, 865 .l4proto = IPPROTO_DCCP,
@@ -847,6 +890,8 @@ static struct nf_conntrack_l4proto dccp_proto4 __read_mostly = {
847 .nla_policy = dccp_timeout_nla_policy, 890 .nla_policy = dccp_timeout_nla_policy,
848 }, 891 },
849#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ 892#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
893 .net_id = &dccp_net_id,
894 .init_net = dccp_init_net,
850}; 895};
851 896
852static struct nf_conntrack_l4proto dccp_proto6 __read_mostly = { 897static struct nf_conntrack_l4proto dccp_proto6 __read_mostly = {
@@ -879,55 +924,39 @@ static struct nf_conntrack_l4proto dccp_proto6 __read_mostly = {
879 .nla_policy = dccp_timeout_nla_policy, 924 .nla_policy = dccp_timeout_nla_policy,
880 }, 925 },
881#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ 926#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
927 .net_id = &dccp_net_id,
928 .init_net = dccp_init_net,
882}; 929};
883 930
884static __net_init int dccp_net_init(struct net *net) 931static __net_init int dccp_net_init(struct net *net)
885{ 932{
886 struct dccp_net *dn = dccp_pernet(net); 933 int ret = 0;
887 934 ret = nf_conntrack_l4proto_register(net,
888 /* default values */ 935 &dccp_proto4);
889 dn->dccp_loose = 1; 936 if (ret < 0) {
890 dn->dccp_timeout[CT_DCCP_REQUEST] = 2 * DCCP_MSL; 937 pr_err("nf_conntrack_l4proto_dccp4 :protocol register failed.\n");
891 dn->dccp_timeout[CT_DCCP_RESPOND] = 4 * DCCP_MSL; 938 goto out;
892 dn->dccp_timeout[CT_DCCP_PARTOPEN] = 4 * DCCP_MSL; 939 }
893 dn->dccp_timeout[CT_DCCP_OPEN] = 12 * 3600 * HZ; 940 ret = nf_conntrack_l4proto_register(net,
894 dn->dccp_timeout[CT_DCCP_CLOSEREQ] = 64 * HZ; 941 &dccp_proto6);
895 dn->dccp_timeout[CT_DCCP_CLOSING] = 64 * HZ; 942 if (ret < 0) {
896 dn->dccp_timeout[CT_DCCP_TIMEWAIT] = 2 * DCCP_MSL; 943 pr_err("nf_conntrack_l4proto_dccp6 :protocol register failed.\n");
897 944 goto cleanup_dccp4;
898#ifdef CONFIG_SYSCTL
899 dn->sysctl_table = kmemdup(dccp_sysctl_table,
900 sizeof(dccp_sysctl_table), GFP_KERNEL);
901 if (!dn->sysctl_table)
902 return -ENOMEM;
903
904 dn->sysctl_table[0].data = &dn->dccp_timeout[CT_DCCP_REQUEST];
905 dn->sysctl_table[1].data = &dn->dccp_timeout[CT_DCCP_RESPOND];
906 dn->sysctl_table[2].data = &dn->dccp_timeout[CT_DCCP_PARTOPEN];
907 dn->sysctl_table[3].data = &dn->dccp_timeout[CT_DCCP_OPEN];
908 dn->sysctl_table[4].data = &dn->dccp_timeout[CT_DCCP_CLOSEREQ];
909 dn->sysctl_table[5].data = &dn->dccp_timeout[CT_DCCP_CLOSING];
910 dn->sysctl_table[6].data = &dn->dccp_timeout[CT_DCCP_TIMEWAIT];
911 dn->sysctl_table[7].data = &dn->dccp_loose;
912
913 dn->sysctl_header = register_net_sysctl(net, "net/netfilter",
914 dn->sysctl_table);
915 if (!dn->sysctl_header) {
916 kfree(dn->sysctl_table);
917 return -ENOMEM;
918 } 945 }
919#endif
920
921 return 0; 946 return 0;
947cleanup_dccp4:
948 nf_conntrack_l4proto_unregister(net,
949 &dccp_proto4);
950out:
951 return ret;
922} 952}
923 953
924static __net_exit void dccp_net_exit(struct net *net) 954static __net_exit void dccp_net_exit(struct net *net)
925{ 955{
926 struct dccp_net *dn = dccp_pernet(net); 956 nf_conntrack_l4proto_unregister(net,
927#ifdef CONFIG_SYSCTL 957 &dccp_proto6);
928 unregister_net_sysctl_table(dn->sysctl_header); 958 nf_conntrack_l4proto_unregister(net,
929 kfree(dn->sysctl_table); 959 &dccp_proto4);
930#endif
931} 960}
932 961
933static struct pernet_operations dccp_net_ops = { 962static struct pernet_operations dccp_net_ops = {
@@ -939,34 +968,12 @@ static struct pernet_operations dccp_net_ops = {
939 968
940static int __init nf_conntrack_proto_dccp_init(void) 969static int __init nf_conntrack_proto_dccp_init(void)
941{ 970{
942 int err; 971 return register_pernet_subsys(&dccp_net_ops);
943
944 err = register_pernet_subsys(&dccp_net_ops);
945 if (err < 0)
946 goto err1;
947
948 err = nf_conntrack_l4proto_register(&dccp_proto4);
949 if (err < 0)
950 goto err2;
951
952 err = nf_conntrack_l4proto_register(&dccp_proto6);
953 if (err < 0)
954 goto err3;
955 return 0;
956
957err3:
958 nf_conntrack_l4proto_unregister(&dccp_proto4);
959err2:
960 unregister_pernet_subsys(&dccp_net_ops);
961err1:
962 return err;
963} 972}
964 973
965static void __exit nf_conntrack_proto_dccp_fini(void) 974static void __exit nf_conntrack_proto_dccp_fini(void)
966{ 975{
967 unregister_pernet_subsys(&dccp_net_ops); 976 unregister_pernet_subsys(&dccp_net_ops);
968 nf_conntrack_l4proto_unregister(&dccp_proto6);
969 nf_conntrack_l4proto_unregister(&dccp_proto4);
970} 977}
971 978
972module_init(nf_conntrack_proto_dccp_init); 979module_init(nf_conntrack_proto_dccp_init);
diff --git a/net/netfilter/nf_conntrack_proto_generic.c b/net/netfilter/nf_conntrack_proto_generic.c
index d8923d54b35..d25f2937764 100644
--- a/net/netfilter/nf_conntrack_proto_generic.c
+++ b/net/netfilter/nf_conntrack_proto_generic.c
@@ -14,6 +14,11 @@
14 14
15static unsigned int nf_ct_generic_timeout __read_mostly = 600*HZ; 15static unsigned int nf_ct_generic_timeout __read_mostly = 600*HZ;
16 16
17static inline struct nf_generic_net *generic_pernet(struct net *net)
18{
19 return &net->ct.nf_ct_proto.generic;
20}
21
17static bool generic_pkt_to_tuple(const struct sk_buff *skb, 22static bool generic_pkt_to_tuple(const struct sk_buff *skb,
18 unsigned int dataoff, 23 unsigned int dataoff,
19 struct nf_conntrack_tuple *tuple) 24 struct nf_conntrack_tuple *tuple)
@@ -42,7 +47,7 @@ static int generic_print_tuple(struct seq_file *s,
42 47
43static unsigned int *generic_get_timeouts(struct net *net) 48static unsigned int *generic_get_timeouts(struct net *net)
44{ 49{
45 return &nf_ct_generic_timeout; 50 return &(generic_pernet(net)->timeout);
46} 51}
47 52
48/* Returns verdict for packet, or -1 for invalid. */ 53/* Returns verdict for packet, or -1 for invalid. */
@@ -70,16 +75,18 @@ static bool generic_new(struct nf_conn *ct, const struct sk_buff *skb,
70#include <linux/netfilter/nfnetlink.h> 75#include <linux/netfilter/nfnetlink.h>
71#include <linux/netfilter/nfnetlink_cttimeout.h> 76#include <linux/netfilter/nfnetlink_cttimeout.h>
72 77
73static int generic_timeout_nlattr_to_obj(struct nlattr *tb[], void *data) 78static int generic_timeout_nlattr_to_obj(struct nlattr *tb[],
79 struct net *net, void *data)
74{ 80{
75 unsigned int *timeout = data; 81 unsigned int *timeout = data;
82 struct nf_generic_net *gn = generic_pernet(net);
76 83
77 if (tb[CTA_TIMEOUT_GENERIC_TIMEOUT]) 84 if (tb[CTA_TIMEOUT_GENERIC_TIMEOUT])
78 *timeout = 85 *timeout =
79 ntohl(nla_get_be32(tb[CTA_TIMEOUT_GENERIC_TIMEOUT])) * HZ; 86 ntohl(nla_get_be32(tb[CTA_TIMEOUT_GENERIC_TIMEOUT])) * HZ;
80 else { 87 else {
81 /* Set default generic timeout. */ 88 /* Set default generic timeout. */
82 *timeout = nf_ct_generic_timeout; 89 *timeout = gn->timeout;
83 } 90 }
84 91
85 return 0; 92 return 0;
@@ -106,11 +113,9 @@ generic_timeout_nla_policy[CTA_TIMEOUT_GENERIC_MAX+1] = {
106#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ 113#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
107 114
108#ifdef CONFIG_SYSCTL 115#ifdef CONFIG_SYSCTL
109static struct ctl_table_header *generic_sysctl_header;
110static struct ctl_table generic_sysctl_table[] = { 116static struct ctl_table generic_sysctl_table[] = {
111 { 117 {
112 .procname = "nf_conntrack_generic_timeout", 118 .procname = "nf_conntrack_generic_timeout",
113 .data = &nf_ct_generic_timeout,
114 .maxlen = sizeof(unsigned int), 119 .maxlen = sizeof(unsigned int),
115 .mode = 0644, 120 .mode = 0644,
116 .proc_handler = proc_dointvec_jiffies, 121 .proc_handler = proc_dointvec_jiffies,
@@ -121,7 +126,6 @@ static struct ctl_table generic_sysctl_table[] = {
121static struct ctl_table generic_compat_sysctl_table[] = { 126static struct ctl_table generic_compat_sysctl_table[] = {
122 { 127 {
123 .procname = "ip_conntrack_generic_timeout", 128 .procname = "ip_conntrack_generic_timeout",
124 .data = &nf_ct_generic_timeout,
125 .maxlen = sizeof(unsigned int), 129 .maxlen = sizeof(unsigned int),
126 .mode = 0644, 130 .mode = 0644,
127 .proc_handler = proc_dointvec_jiffies, 131 .proc_handler = proc_dointvec_jiffies,
@@ -131,6 +135,62 @@ static struct ctl_table generic_compat_sysctl_table[] = {
131#endif /* CONFIG_NF_CONNTRACK_PROC_COMPAT */ 135#endif /* CONFIG_NF_CONNTRACK_PROC_COMPAT */
132#endif /* CONFIG_SYSCTL */ 136#endif /* CONFIG_SYSCTL */
133 137
138static int generic_kmemdup_sysctl_table(struct nf_proto_net *pn,
139 struct nf_generic_net *gn)
140{
141#ifdef CONFIG_SYSCTL
142 pn->ctl_table = kmemdup(generic_sysctl_table,
143 sizeof(generic_sysctl_table),
144 GFP_KERNEL);
145 if (!pn->ctl_table)
146 return -ENOMEM;
147
148 pn->ctl_table[0].data = &gn->timeout;
149#endif
150 return 0;
151}
152
153static int generic_kmemdup_compat_sysctl_table(struct nf_proto_net *pn,
154 struct nf_generic_net *gn)
155{
156#ifdef CONFIG_SYSCTL
157#ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
158 pn->ctl_compat_table = kmemdup(generic_compat_sysctl_table,
159 sizeof(generic_compat_sysctl_table),
160 GFP_KERNEL);
161 if (!pn->ctl_compat_table)
162 return -ENOMEM;
163
164 pn->ctl_compat_table[0].data = &gn->timeout;
165#endif
166#endif
167 return 0;
168}
169
170static int generic_init_net(struct net *net, u_int16_t proto)
171{
172 int ret;
173 struct nf_generic_net *gn = generic_pernet(net);
174 struct nf_proto_net *pn = &gn->pn;
175
176 gn->timeout = nf_ct_generic_timeout;
177
178 ret = generic_kmemdup_compat_sysctl_table(pn, gn);
179 if (ret < 0)
180 return ret;
181
182 ret = generic_kmemdup_sysctl_table(pn, gn);
183 if (ret < 0)
184 nf_ct_kfree_compat_sysctl_table(pn);
185
186 return ret;
187}
188
189static struct nf_proto_net *generic_get_net_proto(struct net *net)
190{
191 return &net->ct.nf_ct_proto.generic.pn;
192}
193
134struct nf_conntrack_l4proto nf_conntrack_l4proto_generic __read_mostly = 194struct nf_conntrack_l4proto nf_conntrack_l4proto_generic __read_mostly =
135{ 195{
136 .l3proto = PF_UNSPEC, 196 .l3proto = PF_UNSPEC,
@@ -151,11 +211,6 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_generic __read_mostly =
151 .nla_policy = generic_timeout_nla_policy, 211 .nla_policy = generic_timeout_nla_policy,
152 }, 212 },
153#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ 213#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
154#ifdef CONFIG_SYSCTL 214 .init_net = generic_init_net,
155 .ctl_table_header = &generic_sysctl_header, 215 .get_net_proto = generic_get_net_proto,
156 .ctl_table = generic_sysctl_table,
157#ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
158 .ctl_compat_table = generic_compat_sysctl_table,
159#endif
160#endif
161}; 216};
diff --git a/net/netfilter/nf_conntrack_proto_gre.c b/net/netfilter/nf_conntrack_proto_gre.c
index 4bf6b4e4b77..b09b7af7f6f 100644
--- a/net/netfilter/nf_conntrack_proto_gre.c
+++ b/net/netfilter/nf_conntrack_proto_gre.c
@@ -54,13 +54,20 @@ static unsigned int gre_timeouts[GRE_CT_MAX] = {
54 54
55static int proto_gre_net_id __read_mostly; 55static int proto_gre_net_id __read_mostly;
56struct netns_proto_gre { 56struct netns_proto_gre {
57 struct nf_proto_net nf;
57 rwlock_t keymap_lock; 58 rwlock_t keymap_lock;
58 struct list_head keymap_list; 59 struct list_head keymap_list;
60 unsigned int gre_timeouts[GRE_CT_MAX];
59}; 61};
60 62
63static inline struct netns_proto_gre *gre_pernet(struct net *net)
64{
65 return net_generic(net, proto_gre_net_id);
66}
67
61void nf_ct_gre_keymap_flush(struct net *net) 68void nf_ct_gre_keymap_flush(struct net *net)
62{ 69{
63 struct netns_proto_gre *net_gre = net_generic(net, proto_gre_net_id); 70 struct netns_proto_gre *net_gre = gre_pernet(net);
64 struct nf_ct_gre_keymap *km, *tmp; 71 struct nf_ct_gre_keymap *km, *tmp;
65 72
66 write_lock_bh(&net_gre->keymap_lock); 73 write_lock_bh(&net_gre->keymap_lock);
@@ -85,7 +92,7 @@ static inline int gre_key_cmpfn(const struct nf_ct_gre_keymap *km,
85/* look up the source key for a given tuple */ 92/* look up the source key for a given tuple */
86static __be16 gre_keymap_lookup(struct net *net, struct nf_conntrack_tuple *t) 93static __be16 gre_keymap_lookup(struct net *net, struct nf_conntrack_tuple *t)
87{ 94{
88 struct netns_proto_gre *net_gre = net_generic(net, proto_gre_net_id); 95 struct netns_proto_gre *net_gre = gre_pernet(net);
89 struct nf_ct_gre_keymap *km; 96 struct nf_ct_gre_keymap *km;
90 __be16 key = 0; 97 __be16 key = 0;
91 98
@@ -109,11 +116,11 @@ int nf_ct_gre_keymap_add(struct nf_conn *ct, enum ip_conntrack_dir dir,
109 struct nf_conntrack_tuple *t) 116 struct nf_conntrack_tuple *t)
110{ 117{
111 struct net *net = nf_ct_net(ct); 118 struct net *net = nf_ct_net(ct);
112 struct netns_proto_gre *net_gre = net_generic(net, proto_gre_net_id); 119 struct netns_proto_gre *net_gre = gre_pernet(net);
113 struct nf_conn_help *help = nfct_help(ct); 120 struct nf_ct_pptp_master *ct_pptp_info = nfct_help_data(ct);
114 struct nf_ct_gre_keymap **kmp, *km; 121 struct nf_ct_gre_keymap **kmp, *km;
115 122
116 kmp = &help->help.ct_pptp_info.keymap[dir]; 123 kmp = &ct_pptp_info->keymap[dir];
117 if (*kmp) { 124 if (*kmp) {
118 /* check whether it's a retransmission */ 125 /* check whether it's a retransmission */
119 read_lock_bh(&net_gre->keymap_lock); 126 read_lock_bh(&net_gre->keymap_lock);
@@ -150,20 +157,20 @@ EXPORT_SYMBOL_GPL(nf_ct_gre_keymap_add);
150void nf_ct_gre_keymap_destroy(struct nf_conn *ct) 157void nf_ct_gre_keymap_destroy(struct nf_conn *ct)
151{ 158{
152 struct net *net = nf_ct_net(ct); 159 struct net *net = nf_ct_net(ct);
153 struct netns_proto_gre *net_gre = net_generic(net, proto_gre_net_id); 160 struct netns_proto_gre *net_gre = gre_pernet(net);
154 struct nf_conn_help *help = nfct_help(ct); 161 struct nf_ct_pptp_master *ct_pptp_info = nfct_help_data(ct);
155 enum ip_conntrack_dir dir; 162 enum ip_conntrack_dir dir;
156 163
157 pr_debug("entering for ct %p\n", ct); 164 pr_debug("entering for ct %p\n", ct);
158 165
159 write_lock_bh(&net_gre->keymap_lock); 166 write_lock_bh(&net_gre->keymap_lock);
160 for (dir = IP_CT_DIR_ORIGINAL; dir < IP_CT_DIR_MAX; dir++) { 167 for (dir = IP_CT_DIR_ORIGINAL; dir < IP_CT_DIR_MAX; dir++) {
161 if (help->help.ct_pptp_info.keymap[dir]) { 168 if (ct_pptp_info->keymap[dir]) {
162 pr_debug("removing %p from list\n", 169 pr_debug("removing %p from list\n",
163 help->help.ct_pptp_info.keymap[dir]); 170 ct_pptp_info->keymap[dir]);
164 list_del(&help->help.ct_pptp_info.keymap[dir]->list); 171 list_del(&ct_pptp_info->keymap[dir]->list);
165 kfree(help->help.ct_pptp_info.keymap[dir]); 172 kfree(ct_pptp_info->keymap[dir]);
166 help->help.ct_pptp_info.keymap[dir] = NULL; 173 ct_pptp_info->keymap[dir] = NULL;
167 } 174 }
168 } 175 }
169 write_unlock_bh(&net_gre->keymap_lock); 176 write_unlock_bh(&net_gre->keymap_lock);
@@ -237,7 +244,7 @@ static int gre_print_conntrack(struct seq_file *s, struct nf_conn *ct)
237 244
238static unsigned int *gre_get_timeouts(struct net *net) 245static unsigned int *gre_get_timeouts(struct net *net)
239{ 246{
240 return gre_timeouts; 247 return gre_pernet(net)->gre_timeouts;
241} 248}
242 249
243/* Returns verdict for packet, and may modify conntrack */ 250/* Returns verdict for packet, and may modify conntrack */
@@ -297,13 +304,15 @@ static void gre_destroy(struct nf_conn *ct)
297#include <linux/netfilter/nfnetlink.h> 304#include <linux/netfilter/nfnetlink.h>
298#include <linux/netfilter/nfnetlink_cttimeout.h> 305#include <linux/netfilter/nfnetlink_cttimeout.h>
299 306
300static int gre_timeout_nlattr_to_obj(struct nlattr *tb[], void *data) 307static int gre_timeout_nlattr_to_obj(struct nlattr *tb[],
308 struct net *net, void *data)
301{ 309{
302 unsigned int *timeouts = data; 310 unsigned int *timeouts = data;
311 struct netns_proto_gre *net_gre = gre_pernet(net);
303 312
304 /* set default timeouts for GRE. */ 313 /* set default timeouts for GRE. */
305 timeouts[GRE_CT_UNREPLIED] = gre_timeouts[GRE_CT_UNREPLIED]; 314 timeouts[GRE_CT_UNREPLIED] = net_gre->gre_timeouts[GRE_CT_UNREPLIED];
306 timeouts[GRE_CT_REPLIED] = gre_timeouts[GRE_CT_REPLIED]; 315 timeouts[GRE_CT_REPLIED] = net_gre->gre_timeouts[GRE_CT_REPLIED];
307 316
308 if (tb[CTA_TIMEOUT_GRE_UNREPLIED]) { 317 if (tb[CTA_TIMEOUT_GRE_UNREPLIED]) {
309 timeouts[GRE_CT_UNREPLIED] = 318 timeouts[GRE_CT_UNREPLIED] =
@@ -339,6 +348,19 @@ gre_timeout_nla_policy[CTA_TIMEOUT_GRE_MAX+1] = {
339}; 348};
340#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ 349#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
341 350
351static int gre_init_net(struct net *net, u_int16_t proto)
352{
353 struct netns_proto_gre *net_gre = gre_pernet(net);
354 int i;
355
356 rwlock_init(&net_gre->keymap_lock);
357 INIT_LIST_HEAD(&net_gre->keymap_list);
358 for (i = 0; i < GRE_CT_MAX; i++)
359 net_gre->gre_timeouts[i] = gre_timeouts[i];
360
361 return 0;
362}
363
342/* protocol helper struct */ 364/* protocol helper struct */
343static struct nf_conntrack_l4proto nf_conntrack_l4proto_gre4 __read_mostly = { 365static struct nf_conntrack_l4proto nf_conntrack_l4proto_gre4 __read_mostly = {
344 .l3proto = AF_INET, 366 .l3proto = AF_INET,
@@ -368,20 +390,22 @@ static struct nf_conntrack_l4proto nf_conntrack_l4proto_gre4 __read_mostly = {
368 .nla_policy = gre_timeout_nla_policy, 390 .nla_policy = gre_timeout_nla_policy,
369 }, 391 },
370#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ 392#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
393 .net_id = &proto_gre_net_id,
394 .init_net = gre_init_net,
371}; 395};
372 396
373static int proto_gre_net_init(struct net *net) 397static int proto_gre_net_init(struct net *net)
374{ 398{
375 struct netns_proto_gre *net_gre = net_generic(net, proto_gre_net_id); 399 int ret = 0;
376 400 ret = nf_conntrack_l4proto_register(net, &nf_conntrack_l4proto_gre4);
377 rwlock_init(&net_gre->keymap_lock); 401 if (ret < 0)
378 INIT_LIST_HEAD(&net_gre->keymap_list); 402 pr_err("nf_conntrack_l4proto_gre4 :protocol register failed.\n");
379 403 return ret;
380 return 0;
381} 404}
382 405
383static void proto_gre_net_exit(struct net *net) 406static void proto_gre_net_exit(struct net *net)
384{ 407{
408 nf_conntrack_l4proto_unregister(net, &nf_conntrack_l4proto_gre4);
385 nf_ct_gre_keymap_flush(net); 409 nf_ct_gre_keymap_flush(net);
386} 410}
387 411
@@ -394,20 +418,11 @@ static struct pernet_operations proto_gre_net_ops = {
394 418
395static int __init nf_ct_proto_gre_init(void) 419static int __init nf_ct_proto_gre_init(void)
396{ 420{
397 int rv; 421 return register_pernet_subsys(&proto_gre_net_ops);
398
399 rv = nf_conntrack_l4proto_register(&nf_conntrack_l4proto_gre4);
400 if (rv < 0)
401 return rv;
402 rv = register_pernet_subsys(&proto_gre_net_ops);
403 if (rv < 0)
404 nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_gre4);
405 return rv;
406} 422}
407 423
408static void __exit nf_ct_proto_gre_fini(void) 424static void __exit nf_ct_proto_gre_fini(void)
409{ 425{
410 nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_gre4);
411 unregister_pernet_subsys(&proto_gre_net_ops); 426 unregister_pernet_subsys(&proto_gre_net_ops);
412} 427}
413 428
diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c
index 996db2fa21f..c746d61f83e 100644
--- a/net/netfilter/nf_conntrack_proto_sctp.c
+++ b/net/netfilter/nf_conntrack_proto_sctp.c
@@ -127,6 +127,17 @@ static const u8 sctp_conntracks[2][9][SCTP_CONNTRACK_MAX] = {
127 } 127 }
128}; 128};
129 129
130static int sctp_net_id __read_mostly;
131struct sctp_net {
132 struct nf_proto_net pn;
133 unsigned int timeouts[SCTP_CONNTRACK_MAX];
134};
135
136static inline struct sctp_net *sctp_pernet(struct net *net)
137{
138 return net_generic(net, sctp_net_id);
139}
140
130static bool sctp_pkt_to_tuple(const struct sk_buff *skb, unsigned int dataoff, 141static bool sctp_pkt_to_tuple(const struct sk_buff *skb, unsigned int dataoff,
131 struct nf_conntrack_tuple *tuple) 142 struct nf_conntrack_tuple *tuple)
132{ 143{
@@ -281,7 +292,7 @@ static int sctp_new_state(enum ip_conntrack_dir dir,
281 292
282static unsigned int *sctp_get_timeouts(struct net *net) 293static unsigned int *sctp_get_timeouts(struct net *net)
283{ 294{
284 return sctp_timeouts; 295 return sctp_pernet(net)->timeouts;
285} 296}
286 297
287/* Returns verdict for packet, or -NF_ACCEPT for invalid. */ 298/* Returns verdict for packet, or -NF_ACCEPT for invalid. */
@@ -551,14 +562,16 @@ static int sctp_nlattr_size(void)
551#include <linux/netfilter/nfnetlink.h> 562#include <linux/netfilter/nfnetlink.h>
552#include <linux/netfilter/nfnetlink_cttimeout.h> 563#include <linux/netfilter/nfnetlink_cttimeout.h>
553 564
554static int sctp_timeout_nlattr_to_obj(struct nlattr *tb[], void *data) 565static int sctp_timeout_nlattr_to_obj(struct nlattr *tb[],
566 struct net *net, void *data)
555{ 567{
556 unsigned int *timeouts = data; 568 unsigned int *timeouts = data;
569 struct sctp_net *sn = sctp_pernet(net);
557 int i; 570 int i;
558 571
559 /* set default SCTP timeouts. */ 572 /* set default SCTP timeouts. */
560 for (i=0; i<SCTP_CONNTRACK_MAX; i++) 573 for (i=0; i<SCTP_CONNTRACK_MAX; i++)
561 timeouts[i] = sctp_timeouts[i]; 574 timeouts[i] = sn->timeouts[i];
562 575
563 /* there's a 1:1 mapping between attributes and protocol states. */ 576 /* there's a 1:1 mapping between attributes and protocol states. */
564 for (i=CTA_TIMEOUT_SCTP_UNSPEC+1; i<CTA_TIMEOUT_SCTP_MAX+1; i++) { 577 for (i=CTA_TIMEOUT_SCTP_UNSPEC+1; i<CTA_TIMEOUT_SCTP_MAX+1; i++) {
@@ -599,54 +612,45 @@ sctp_timeout_nla_policy[CTA_TIMEOUT_SCTP_MAX+1] = {
599 612
600 613
601#ifdef CONFIG_SYSCTL 614#ifdef CONFIG_SYSCTL
602static unsigned int sctp_sysctl_table_users;
603static struct ctl_table_header *sctp_sysctl_header;
604static struct ctl_table sctp_sysctl_table[] = { 615static struct ctl_table sctp_sysctl_table[] = {
605 { 616 {
606 .procname = "nf_conntrack_sctp_timeout_closed", 617 .procname = "nf_conntrack_sctp_timeout_closed",
607 .data = &sctp_timeouts[SCTP_CONNTRACK_CLOSED],
608 .maxlen = sizeof(unsigned int), 618 .maxlen = sizeof(unsigned int),
609 .mode = 0644, 619 .mode = 0644,
610 .proc_handler = proc_dointvec_jiffies, 620 .proc_handler = proc_dointvec_jiffies,
611 }, 621 },
612 { 622 {
613 .procname = "nf_conntrack_sctp_timeout_cookie_wait", 623 .procname = "nf_conntrack_sctp_timeout_cookie_wait",
614 .data = &sctp_timeouts[SCTP_CONNTRACK_COOKIE_WAIT],
615 .maxlen = sizeof(unsigned int), 624 .maxlen = sizeof(unsigned int),
616 .mode = 0644, 625 .mode = 0644,
617 .proc_handler = proc_dointvec_jiffies, 626 .proc_handler = proc_dointvec_jiffies,
618 }, 627 },
619 { 628 {
620 .procname = "nf_conntrack_sctp_timeout_cookie_echoed", 629 .procname = "nf_conntrack_sctp_timeout_cookie_echoed",
621 .data = &sctp_timeouts[SCTP_CONNTRACK_COOKIE_ECHOED],
622 .maxlen = sizeof(unsigned int), 630 .maxlen = sizeof(unsigned int),
623 .mode = 0644, 631 .mode = 0644,
624 .proc_handler = proc_dointvec_jiffies, 632 .proc_handler = proc_dointvec_jiffies,
625 }, 633 },
626 { 634 {
627 .procname = "nf_conntrack_sctp_timeout_established", 635 .procname = "nf_conntrack_sctp_timeout_established",
628 .data = &sctp_timeouts[SCTP_CONNTRACK_ESTABLISHED],
629 .maxlen = sizeof(unsigned int), 636 .maxlen = sizeof(unsigned int),
630 .mode = 0644, 637 .mode = 0644,
631 .proc_handler = proc_dointvec_jiffies, 638 .proc_handler = proc_dointvec_jiffies,
632 }, 639 },
633 { 640 {
634 .procname = "nf_conntrack_sctp_timeout_shutdown_sent", 641 .procname = "nf_conntrack_sctp_timeout_shutdown_sent",
635 .data = &sctp_timeouts[SCTP_CONNTRACK_SHUTDOWN_SENT],
636 .maxlen = sizeof(unsigned int), 642 .maxlen = sizeof(unsigned int),
637 .mode = 0644, 643 .mode = 0644,
638 .proc_handler = proc_dointvec_jiffies, 644 .proc_handler = proc_dointvec_jiffies,
639 }, 645 },
640 { 646 {
641 .procname = "nf_conntrack_sctp_timeout_shutdown_recd", 647 .procname = "nf_conntrack_sctp_timeout_shutdown_recd",
642 .data = &sctp_timeouts[SCTP_CONNTRACK_SHUTDOWN_RECD],
643 .maxlen = sizeof(unsigned int), 648 .maxlen = sizeof(unsigned int),
644 .mode = 0644, 649 .mode = 0644,
645 .proc_handler = proc_dointvec_jiffies, 650 .proc_handler = proc_dointvec_jiffies,
646 }, 651 },
647 { 652 {
648 .procname = "nf_conntrack_sctp_timeout_shutdown_ack_sent", 653 .procname = "nf_conntrack_sctp_timeout_shutdown_ack_sent",
649 .data = &sctp_timeouts[SCTP_CONNTRACK_SHUTDOWN_ACK_SENT],
650 .maxlen = sizeof(unsigned int), 654 .maxlen = sizeof(unsigned int),
651 .mode = 0644, 655 .mode = 0644,
652 .proc_handler = proc_dointvec_jiffies, 656 .proc_handler = proc_dointvec_jiffies,
@@ -658,49 +662,42 @@ static struct ctl_table sctp_sysctl_table[] = {
658static struct ctl_table sctp_compat_sysctl_table[] = { 662static struct ctl_table sctp_compat_sysctl_table[] = {
659 { 663 {
660 .procname = "ip_conntrack_sctp_timeout_closed", 664 .procname = "ip_conntrack_sctp_timeout_closed",
661 .data = &sctp_timeouts[SCTP_CONNTRACK_CLOSED],
662 .maxlen = sizeof(unsigned int), 665 .maxlen = sizeof(unsigned int),
663 .mode = 0644, 666 .mode = 0644,
664 .proc_handler = proc_dointvec_jiffies, 667 .proc_handler = proc_dointvec_jiffies,
665 }, 668 },
666 { 669 {
667 .procname = "ip_conntrack_sctp_timeout_cookie_wait", 670 .procname = "ip_conntrack_sctp_timeout_cookie_wait",
668 .data = &sctp_timeouts[SCTP_CONNTRACK_COOKIE_WAIT],
669 .maxlen = sizeof(unsigned int), 671 .maxlen = sizeof(unsigned int),
670 .mode = 0644, 672 .mode = 0644,
671 .proc_handler = proc_dointvec_jiffies, 673 .proc_handler = proc_dointvec_jiffies,
672 }, 674 },
673 { 675 {
674 .procname = "ip_conntrack_sctp_timeout_cookie_echoed", 676 .procname = "ip_conntrack_sctp_timeout_cookie_echoed",
675 .data = &sctp_timeouts[SCTP_CONNTRACK_COOKIE_ECHOED],
676 .maxlen = sizeof(unsigned int), 677 .maxlen = sizeof(unsigned int),
677 .mode = 0644, 678 .mode = 0644,
678 .proc_handler = proc_dointvec_jiffies, 679 .proc_handler = proc_dointvec_jiffies,
679 }, 680 },
680 { 681 {
681 .procname = "ip_conntrack_sctp_timeout_established", 682 .procname = "ip_conntrack_sctp_timeout_established",
682 .data = &sctp_timeouts[SCTP_CONNTRACK_ESTABLISHED],
683 .maxlen = sizeof(unsigned int), 683 .maxlen = sizeof(unsigned int),
684 .mode = 0644, 684 .mode = 0644,
685 .proc_handler = proc_dointvec_jiffies, 685 .proc_handler = proc_dointvec_jiffies,
686 }, 686 },
687 { 687 {
688 .procname = "ip_conntrack_sctp_timeout_shutdown_sent", 688 .procname = "ip_conntrack_sctp_timeout_shutdown_sent",
689 .data = &sctp_timeouts[SCTP_CONNTRACK_SHUTDOWN_SENT],
690 .maxlen = sizeof(unsigned int), 689 .maxlen = sizeof(unsigned int),
691 .mode = 0644, 690 .mode = 0644,
692 .proc_handler = proc_dointvec_jiffies, 691 .proc_handler = proc_dointvec_jiffies,
693 }, 692 },
694 { 693 {
695 .procname = "ip_conntrack_sctp_timeout_shutdown_recd", 694 .procname = "ip_conntrack_sctp_timeout_shutdown_recd",
696 .data = &sctp_timeouts[SCTP_CONNTRACK_SHUTDOWN_RECD],
697 .maxlen = sizeof(unsigned int), 695 .maxlen = sizeof(unsigned int),
698 .mode = 0644, 696 .mode = 0644,
699 .proc_handler = proc_dointvec_jiffies, 697 .proc_handler = proc_dointvec_jiffies,
700 }, 698 },
701 { 699 {
702 .procname = "ip_conntrack_sctp_timeout_shutdown_ack_sent", 700 .procname = "ip_conntrack_sctp_timeout_shutdown_ack_sent",
703 .data = &sctp_timeouts[SCTP_CONNTRACK_SHUTDOWN_ACK_SENT],
704 .maxlen = sizeof(unsigned int), 701 .maxlen = sizeof(unsigned int),
705 .mode = 0644, 702 .mode = 0644,
706 .proc_handler = proc_dointvec_jiffies, 703 .proc_handler = proc_dointvec_jiffies,
@@ -710,6 +707,80 @@ static struct ctl_table sctp_compat_sysctl_table[] = {
710#endif /* CONFIG_NF_CONNTRACK_PROC_COMPAT */ 707#endif /* CONFIG_NF_CONNTRACK_PROC_COMPAT */
711#endif 708#endif
712 709
710static int sctp_kmemdup_sysctl_table(struct nf_proto_net *pn,
711 struct sctp_net *sn)
712{
713#ifdef CONFIG_SYSCTL
714 if (pn->ctl_table)
715 return 0;
716
717 pn->ctl_table = kmemdup(sctp_sysctl_table,
718 sizeof(sctp_sysctl_table),
719 GFP_KERNEL);
720 if (!pn->ctl_table)
721 return -ENOMEM;
722
723 pn->ctl_table[0].data = &sn->timeouts[SCTP_CONNTRACK_CLOSED];
724 pn->ctl_table[1].data = &sn->timeouts[SCTP_CONNTRACK_COOKIE_WAIT];
725 pn->ctl_table[2].data = &sn->timeouts[SCTP_CONNTRACK_COOKIE_ECHOED];
726 pn->ctl_table[3].data = &sn->timeouts[SCTP_CONNTRACK_ESTABLISHED];
727 pn->ctl_table[4].data = &sn->timeouts[SCTP_CONNTRACK_SHUTDOWN_SENT];
728 pn->ctl_table[5].data = &sn->timeouts[SCTP_CONNTRACK_SHUTDOWN_RECD];
729 pn->ctl_table[6].data = &sn->timeouts[SCTP_CONNTRACK_SHUTDOWN_ACK_SENT];
730#endif
731 return 0;
732}
733
734static int sctp_kmemdup_compat_sysctl_table(struct nf_proto_net *pn,
735 struct sctp_net *sn)
736{
737#ifdef CONFIG_SYSCTL
738#ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
739 pn->ctl_compat_table = kmemdup(sctp_compat_sysctl_table,
740 sizeof(sctp_compat_sysctl_table),
741 GFP_KERNEL);
742 if (!pn->ctl_compat_table)
743 return -ENOMEM;
744
745 pn->ctl_compat_table[0].data = &sn->timeouts[SCTP_CONNTRACK_CLOSED];
746 pn->ctl_compat_table[1].data = &sn->timeouts[SCTP_CONNTRACK_COOKIE_WAIT];
747 pn->ctl_compat_table[2].data = &sn->timeouts[SCTP_CONNTRACK_COOKIE_ECHOED];
748 pn->ctl_compat_table[3].data = &sn->timeouts[SCTP_CONNTRACK_ESTABLISHED];
749 pn->ctl_compat_table[4].data = &sn->timeouts[SCTP_CONNTRACK_SHUTDOWN_SENT];
750 pn->ctl_compat_table[5].data = &sn->timeouts[SCTP_CONNTRACK_SHUTDOWN_RECD];
751 pn->ctl_compat_table[6].data = &sn->timeouts[SCTP_CONNTRACK_SHUTDOWN_ACK_SENT];
752#endif
753#endif
754 return 0;
755}
756
757static int sctp_init_net(struct net *net, u_int16_t proto)
758{
759 int ret;
760 struct sctp_net *sn = sctp_pernet(net);
761 struct nf_proto_net *pn = &sn->pn;
762
763 if (!pn->users) {
764 int i;
765
766 for (i = 0; i < SCTP_CONNTRACK_MAX; i++)
767 sn->timeouts[i] = sctp_timeouts[i];
768 }
769
770 if (proto == AF_INET) {
771 ret = sctp_kmemdup_compat_sysctl_table(pn, sn);
772 if (ret < 0)
773 return ret;
774
775 ret = sctp_kmemdup_sysctl_table(pn, sn);
776 if (ret < 0)
777 nf_ct_kfree_compat_sysctl_table(pn);
778 } else
779 ret = sctp_kmemdup_sysctl_table(pn, sn);
780
781 return ret;
782}
783
713static struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp4 __read_mostly = { 784static struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp4 __read_mostly = {
714 .l3proto = PF_INET, 785 .l3proto = PF_INET,
715 .l4proto = IPPROTO_SCTP, 786 .l4proto = IPPROTO_SCTP,
@@ -740,14 +811,8 @@ static struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp4 __read_mostly = {
740 .nla_policy = sctp_timeout_nla_policy, 811 .nla_policy = sctp_timeout_nla_policy,
741 }, 812 },
742#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ 813#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
743#ifdef CONFIG_SYSCTL 814 .net_id = &sctp_net_id,
744 .ctl_table_users = &sctp_sysctl_table_users, 815 .init_net = sctp_init_net,
745 .ctl_table_header = &sctp_sysctl_header,
746 .ctl_table = sctp_sysctl_table,
747#ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
748 .ctl_compat_table = sctp_compat_sysctl_table,
749#endif
750#endif
751}; 816};
752 817
753static struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp6 __read_mostly = { 818static struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp6 __read_mostly = {
@@ -780,40 +845,58 @@ static struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp6 __read_mostly = {
780 }, 845 },
781#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ 846#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
782#endif 847#endif
783#ifdef CONFIG_SYSCTL 848 .net_id = &sctp_net_id,
784 .ctl_table_users = &sctp_sysctl_table_users, 849 .init_net = sctp_init_net,
785 .ctl_table_header = &sctp_sysctl_header,
786 .ctl_table = sctp_sysctl_table,
787#endif
788}; 850};
789 851
790static int __init nf_conntrack_proto_sctp_init(void) 852static int sctp_net_init(struct net *net)
791{ 853{
792 int ret; 854 int ret = 0;
793 855
794 ret = nf_conntrack_l4proto_register(&nf_conntrack_l4proto_sctp4); 856 ret = nf_conntrack_l4proto_register(net,
795 if (ret) { 857 &nf_conntrack_l4proto_sctp4);
796 pr_err("nf_conntrack_l4proto_sctp4: protocol register failed\n"); 858 if (ret < 0) {
859 pr_err("nf_conntrack_l4proto_sctp4 :protocol register failed.\n");
797 goto out; 860 goto out;
798 } 861 }
799 ret = nf_conntrack_l4proto_register(&nf_conntrack_l4proto_sctp6); 862 ret = nf_conntrack_l4proto_register(net,
800 if (ret) { 863 &nf_conntrack_l4proto_sctp6);
801 pr_err("nf_conntrack_l4proto_sctp6: protocol register failed\n"); 864 if (ret < 0) {
865 pr_err("nf_conntrack_l4proto_sctp6 :protocol register failed.\n");
802 goto cleanup_sctp4; 866 goto cleanup_sctp4;
803 } 867 }
868 return 0;
804 869
870cleanup_sctp4:
871 nf_conntrack_l4proto_unregister(net,
872 &nf_conntrack_l4proto_sctp4);
873out:
805 return ret; 874 return ret;
875}
806 876
807 cleanup_sctp4: 877static void sctp_net_exit(struct net *net)
808 nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_sctp4); 878{
809 out: 879 nf_conntrack_l4proto_unregister(net,
810 return ret; 880 &nf_conntrack_l4proto_sctp6);
881 nf_conntrack_l4proto_unregister(net,
882 &nf_conntrack_l4proto_sctp4);
883}
884
885static struct pernet_operations sctp_net_ops = {
886 .init = sctp_net_init,
887 .exit = sctp_net_exit,
888 .id = &sctp_net_id,
889 .size = sizeof(struct sctp_net),
890};
891
892static int __init nf_conntrack_proto_sctp_init(void)
893{
894 return register_pernet_subsys(&sctp_net_ops);
811} 895}
812 896
813static void __exit nf_conntrack_proto_sctp_fini(void) 897static void __exit nf_conntrack_proto_sctp_fini(void)
814{ 898{
815 nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_sctp6); 899 unregister_pernet_subsys(&sctp_net_ops);
816 nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_sctp4);
817} 900}
818 901
819module_init(nf_conntrack_proto_sctp_init); 902module_init(nf_conntrack_proto_sctp_init);
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index 21ff1a99f53..a5ac11ebef3 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -270,6 +270,11 @@ static const u8 tcp_conntracks[2][6][TCP_CONNTRACK_MAX] = {
270 } 270 }
271}; 271};
272 272
273static inline struct nf_tcp_net *tcp_pernet(struct net *net)
274{
275 return &net->ct.nf_ct_proto.tcp;
276}
277
273static bool tcp_pkt_to_tuple(const struct sk_buff *skb, unsigned int dataoff, 278static bool tcp_pkt_to_tuple(const struct sk_buff *skb, unsigned int dataoff,
274 struct nf_conntrack_tuple *tuple) 279 struct nf_conntrack_tuple *tuple)
275{ 280{
@@ -516,6 +521,7 @@ static bool tcp_in_window(const struct nf_conn *ct,
516 u_int8_t pf) 521 u_int8_t pf)
517{ 522{
518 struct net *net = nf_ct_net(ct); 523 struct net *net = nf_ct_net(ct);
524 struct nf_tcp_net *tn = tcp_pernet(net);
519 struct ip_ct_tcp_state *sender = &state->seen[dir]; 525 struct ip_ct_tcp_state *sender = &state->seen[dir];
520 struct ip_ct_tcp_state *receiver = &state->seen[!dir]; 526 struct ip_ct_tcp_state *receiver = &state->seen[!dir];
521 const struct nf_conntrack_tuple *tuple = &ct->tuplehash[dir].tuple; 527 const struct nf_conntrack_tuple *tuple = &ct->tuplehash[dir].tuple;
@@ -720,7 +726,7 @@ static bool tcp_in_window(const struct nf_conn *ct,
720 } else { 726 } else {
721 res = false; 727 res = false;
722 if (sender->flags & IP_CT_TCP_FLAG_BE_LIBERAL || 728 if (sender->flags & IP_CT_TCP_FLAG_BE_LIBERAL ||
723 nf_ct_tcp_be_liberal) 729 tn->tcp_be_liberal)
724 res = true; 730 res = true;
725 if (!res && LOG_INVALID(net, IPPROTO_TCP)) 731 if (!res && LOG_INVALID(net, IPPROTO_TCP))
726 nf_log_packet(pf, 0, skb, NULL, NULL, NULL, 732 nf_log_packet(pf, 0, skb, NULL, NULL, NULL,
@@ -815,7 +821,7 @@ static int tcp_error(struct net *net, struct nf_conn *tmpl,
815 821
816static unsigned int *tcp_get_timeouts(struct net *net) 822static unsigned int *tcp_get_timeouts(struct net *net)
817{ 823{
818 return tcp_timeouts; 824 return tcp_pernet(net)->timeouts;
819} 825}
820 826
821/* Returns verdict for packet, or -1 for invalid. */ 827/* Returns verdict for packet, or -1 for invalid. */
@@ -828,6 +834,7 @@ static int tcp_packet(struct nf_conn *ct,
828 unsigned int *timeouts) 834 unsigned int *timeouts)
829{ 835{
830 struct net *net = nf_ct_net(ct); 836 struct net *net = nf_ct_net(ct);
837 struct nf_tcp_net *tn = tcp_pernet(net);
831 struct nf_conntrack_tuple *tuple; 838 struct nf_conntrack_tuple *tuple;
832 enum tcp_conntrack new_state, old_state; 839 enum tcp_conntrack new_state, old_state;
833 enum ip_conntrack_dir dir; 840 enum ip_conntrack_dir dir;
@@ -1020,7 +1027,7 @@ static int tcp_packet(struct nf_conn *ct,
1020 && new_state == TCP_CONNTRACK_FIN_WAIT) 1027 && new_state == TCP_CONNTRACK_FIN_WAIT)
1021 ct->proto.tcp.seen[dir].flags |= IP_CT_TCP_FLAG_CLOSE_INIT; 1028 ct->proto.tcp.seen[dir].flags |= IP_CT_TCP_FLAG_CLOSE_INIT;
1022 1029
1023 if (ct->proto.tcp.retrans >= nf_ct_tcp_max_retrans && 1030 if (ct->proto.tcp.retrans >= tn->tcp_max_retrans &&
1024 timeouts[new_state] > timeouts[TCP_CONNTRACK_RETRANS]) 1031 timeouts[new_state] > timeouts[TCP_CONNTRACK_RETRANS])
1025 timeout = timeouts[TCP_CONNTRACK_RETRANS]; 1032 timeout = timeouts[TCP_CONNTRACK_RETRANS];
1026 else if ((ct->proto.tcp.seen[0].flags | ct->proto.tcp.seen[1].flags) & 1033 else if ((ct->proto.tcp.seen[0].flags | ct->proto.tcp.seen[1].flags) &
@@ -1065,6 +1072,8 @@ static bool tcp_new(struct nf_conn *ct, const struct sk_buff *skb,
1065 enum tcp_conntrack new_state; 1072 enum tcp_conntrack new_state;
1066 const struct tcphdr *th; 1073 const struct tcphdr *th;
1067 struct tcphdr _tcph; 1074 struct tcphdr _tcph;
1075 struct net *net = nf_ct_net(ct);
1076 struct nf_tcp_net *tn = tcp_pernet(net);
1068 const struct ip_ct_tcp_state *sender = &ct->proto.tcp.seen[0]; 1077 const struct ip_ct_tcp_state *sender = &ct->proto.tcp.seen[0];
1069 const struct ip_ct_tcp_state *receiver = &ct->proto.tcp.seen[1]; 1078 const struct ip_ct_tcp_state *receiver = &ct->proto.tcp.seen[1];
1070 1079
@@ -1093,7 +1102,7 @@ static bool tcp_new(struct nf_conn *ct, const struct sk_buff *skb,
1093 ct->proto.tcp.seen[0].td_end; 1102 ct->proto.tcp.seen[0].td_end;
1094 1103
1095 tcp_options(skb, dataoff, th, &ct->proto.tcp.seen[0]); 1104 tcp_options(skb, dataoff, th, &ct->proto.tcp.seen[0]);
1096 } else if (nf_ct_tcp_loose == 0) { 1105 } else if (tn->tcp_loose == 0) {
1097 /* Don't try to pick up connections. */ 1106 /* Don't try to pick up connections. */
1098 return false; 1107 return false;
1099 } else { 1108 } else {
@@ -1251,14 +1260,16 @@ static int tcp_nlattr_tuple_size(void)
1251#include <linux/netfilter/nfnetlink.h> 1260#include <linux/netfilter/nfnetlink.h>
1252#include <linux/netfilter/nfnetlink_cttimeout.h> 1261#include <linux/netfilter/nfnetlink_cttimeout.h>
1253 1262
1254static int tcp_timeout_nlattr_to_obj(struct nlattr *tb[], void *data) 1263static int tcp_timeout_nlattr_to_obj(struct nlattr *tb[],
1264 struct net *net, void *data)
1255{ 1265{
1256 unsigned int *timeouts = data; 1266 unsigned int *timeouts = data;
1267 struct nf_tcp_net *tn = tcp_pernet(net);
1257 int i; 1268 int i;
1258 1269
1259 /* set default TCP timeouts. */ 1270 /* set default TCP timeouts. */
1260 for (i=0; i<TCP_CONNTRACK_TIMEOUT_MAX; i++) 1271 for (i=0; i<TCP_CONNTRACK_TIMEOUT_MAX; i++)
1261 timeouts[i] = tcp_timeouts[i]; 1272 timeouts[i] = tn->timeouts[i];
1262 1273
1263 if (tb[CTA_TIMEOUT_TCP_SYN_SENT]) { 1274 if (tb[CTA_TIMEOUT_TCP_SYN_SENT]) {
1264 timeouts[TCP_CONNTRACK_SYN_SENT] = 1275 timeouts[TCP_CONNTRACK_SYN_SENT] =
@@ -1355,96 +1366,81 @@ static const struct nla_policy tcp_timeout_nla_policy[CTA_TIMEOUT_TCP_MAX+1] = {
1355#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ 1366#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
1356 1367
1357#ifdef CONFIG_SYSCTL 1368#ifdef CONFIG_SYSCTL
1358static unsigned int tcp_sysctl_table_users;
1359static struct ctl_table_header *tcp_sysctl_header;
1360static struct ctl_table tcp_sysctl_table[] = { 1369static struct ctl_table tcp_sysctl_table[] = {
1361 { 1370 {
1362 .procname = "nf_conntrack_tcp_timeout_syn_sent", 1371 .procname = "nf_conntrack_tcp_timeout_syn_sent",
1363 .data = &tcp_timeouts[TCP_CONNTRACK_SYN_SENT],
1364 .maxlen = sizeof(unsigned int), 1372 .maxlen = sizeof(unsigned int),
1365 .mode = 0644, 1373 .mode = 0644,
1366 .proc_handler = proc_dointvec_jiffies, 1374 .proc_handler = proc_dointvec_jiffies,
1367 }, 1375 },
1368 { 1376 {
1369 .procname = "nf_conntrack_tcp_timeout_syn_recv", 1377 .procname = "nf_conntrack_tcp_timeout_syn_recv",
1370 .data = &tcp_timeouts[TCP_CONNTRACK_SYN_RECV],
1371 .maxlen = sizeof(unsigned int), 1378 .maxlen = sizeof(unsigned int),
1372 .mode = 0644, 1379 .mode = 0644,
1373 .proc_handler = proc_dointvec_jiffies, 1380 .proc_handler = proc_dointvec_jiffies,
1374 }, 1381 },
1375 { 1382 {
1376 .procname = "nf_conntrack_tcp_timeout_established", 1383 .procname = "nf_conntrack_tcp_timeout_established",
1377 .data = &tcp_timeouts[TCP_CONNTRACK_ESTABLISHED],
1378 .maxlen = sizeof(unsigned int), 1384 .maxlen = sizeof(unsigned int),
1379 .mode = 0644, 1385 .mode = 0644,
1380 .proc_handler = proc_dointvec_jiffies, 1386 .proc_handler = proc_dointvec_jiffies,
1381 }, 1387 },
1382 { 1388 {
1383 .procname = "nf_conntrack_tcp_timeout_fin_wait", 1389 .procname = "nf_conntrack_tcp_timeout_fin_wait",
1384 .data = &tcp_timeouts[TCP_CONNTRACK_FIN_WAIT],
1385 .maxlen = sizeof(unsigned int), 1390 .maxlen = sizeof(unsigned int),
1386 .mode = 0644, 1391 .mode = 0644,
1387 .proc_handler = proc_dointvec_jiffies, 1392 .proc_handler = proc_dointvec_jiffies,
1388 }, 1393 },
1389 { 1394 {
1390 .procname = "nf_conntrack_tcp_timeout_close_wait", 1395 .procname = "nf_conntrack_tcp_timeout_close_wait",
1391 .data = &tcp_timeouts[TCP_CONNTRACK_CLOSE_WAIT],
1392 .maxlen = sizeof(unsigned int), 1396 .maxlen = sizeof(unsigned int),
1393 .mode = 0644, 1397 .mode = 0644,
1394 .proc_handler = proc_dointvec_jiffies, 1398 .proc_handler = proc_dointvec_jiffies,
1395 }, 1399 },
1396 { 1400 {
1397 .procname = "nf_conntrack_tcp_timeout_last_ack", 1401 .procname = "nf_conntrack_tcp_timeout_last_ack",
1398 .data = &tcp_timeouts[TCP_CONNTRACK_LAST_ACK],
1399 .maxlen = sizeof(unsigned int), 1402 .maxlen = sizeof(unsigned int),
1400 .mode = 0644, 1403 .mode = 0644,
1401 .proc_handler = proc_dointvec_jiffies, 1404 .proc_handler = proc_dointvec_jiffies,
1402 }, 1405 },
1403 { 1406 {
1404 .procname = "nf_conntrack_tcp_timeout_time_wait", 1407 .procname = "nf_conntrack_tcp_timeout_time_wait",
1405 .data = &tcp_timeouts[TCP_CONNTRACK_TIME_WAIT],
1406 .maxlen = sizeof(unsigned int), 1408 .maxlen = sizeof(unsigned int),
1407 .mode = 0644, 1409 .mode = 0644,
1408 .proc_handler = proc_dointvec_jiffies, 1410 .proc_handler = proc_dointvec_jiffies,
1409 }, 1411 },
1410 { 1412 {
1411 .procname = "nf_conntrack_tcp_timeout_close", 1413 .procname = "nf_conntrack_tcp_timeout_close",
1412 .data = &tcp_timeouts[TCP_CONNTRACK_CLOSE],
1413 .maxlen = sizeof(unsigned int), 1414 .maxlen = sizeof(unsigned int),
1414 .mode = 0644, 1415 .mode = 0644,
1415 .proc_handler = proc_dointvec_jiffies, 1416 .proc_handler = proc_dointvec_jiffies,
1416 }, 1417 },
1417 { 1418 {
1418 .procname = "nf_conntrack_tcp_timeout_max_retrans", 1419 .procname = "nf_conntrack_tcp_timeout_max_retrans",
1419 .data = &tcp_timeouts[TCP_CONNTRACK_RETRANS],
1420 .maxlen = sizeof(unsigned int), 1420 .maxlen = sizeof(unsigned int),
1421 .mode = 0644, 1421 .mode = 0644,
1422 .proc_handler = proc_dointvec_jiffies, 1422 .proc_handler = proc_dointvec_jiffies,
1423 }, 1423 },
1424 { 1424 {
1425 .procname = "nf_conntrack_tcp_timeout_unacknowledged", 1425 .procname = "nf_conntrack_tcp_timeout_unacknowledged",
1426 .data = &tcp_timeouts[TCP_CONNTRACK_UNACK],
1427 .maxlen = sizeof(unsigned int), 1426 .maxlen = sizeof(unsigned int),
1428 .mode = 0644, 1427 .mode = 0644,
1429 .proc_handler = proc_dointvec_jiffies, 1428 .proc_handler = proc_dointvec_jiffies,
1430 }, 1429 },
1431 { 1430 {
1432 .procname = "nf_conntrack_tcp_loose", 1431 .procname = "nf_conntrack_tcp_loose",
1433 .data = &nf_ct_tcp_loose,
1434 .maxlen = sizeof(unsigned int), 1432 .maxlen = sizeof(unsigned int),
1435 .mode = 0644, 1433 .mode = 0644,
1436 .proc_handler = proc_dointvec, 1434 .proc_handler = proc_dointvec,
1437 }, 1435 },
1438 { 1436 {
1439 .procname = "nf_conntrack_tcp_be_liberal", 1437 .procname = "nf_conntrack_tcp_be_liberal",
1440 .data = &nf_ct_tcp_be_liberal,
1441 .maxlen = sizeof(unsigned int), 1438 .maxlen = sizeof(unsigned int),
1442 .mode = 0644, 1439 .mode = 0644,
1443 .proc_handler = proc_dointvec, 1440 .proc_handler = proc_dointvec,
1444 }, 1441 },
1445 { 1442 {
1446 .procname = "nf_conntrack_tcp_max_retrans", 1443 .procname = "nf_conntrack_tcp_max_retrans",
1447 .data = &nf_ct_tcp_max_retrans,
1448 .maxlen = sizeof(unsigned int), 1444 .maxlen = sizeof(unsigned int),
1449 .mode = 0644, 1445 .mode = 0644,
1450 .proc_handler = proc_dointvec, 1446 .proc_handler = proc_dointvec,
@@ -1456,91 +1452,78 @@ static struct ctl_table tcp_sysctl_table[] = {
1456static struct ctl_table tcp_compat_sysctl_table[] = { 1452static struct ctl_table tcp_compat_sysctl_table[] = {
1457 { 1453 {
1458 .procname = "ip_conntrack_tcp_timeout_syn_sent", 1454 .procname = "ip_conntrack_tcp_timeout_syn_sent",
1459 .data = &tcp_timeouts[TCP_CONNTRACK_SYN_SENT],
1460 .maxlen = sizeof(unsigned int), 1455 .maxlen = sizeof(unsigned int),
1461 .mode = 0644, 1456 .mode = 0644,
1462 .proc_handler = proc_dointvec_jiffies, 1457 .proc_handler = proc_dointvec_jiffies,
1463 }, 1458 },
1464 { 1459 {
1465 .procname = "ip_conntrack_tcp_timeout_syn_sent2", 1460 .procname = "ip_conntrack_tcp_timeout_syn_sent2",
1466 .data = &tcp_timeouts[TCP_CONNTRACK_SYN_SENT2],
1467 .maxlen = sizeof(unsigned int), 1461 .maxlen = sizeof(unsigned int),
1468 .mode = 0644, 1462 .mode = 0644,
1469 .proc_handler = proc_dointvec_jiffies, 1463 .proc_handler = proc_dointvec_jiffies,
1470 }, 1464 },
1471 { 1465 {
1472 .procname = "ip_conntrack_tcp_timeout_syn_recv", 1466 .procname = "ip_conntrack_tcp_timeout_syn_recv",
1473 .data = &tcp_timeouts[TCP_CONNTRACK_SYN_RECV],
1474 .maxlen = sizeof(unsigned int), 1467 .maxlen = sizeof(unsigned int),
1475 .mode = 0644, 1468 .mode = 0644,
1476 .proc_handler = proc_dointvec_jiffies, 1469 .proc_handler = proc_dointvec_jiffies,
1477 }, 1470 },
1478 { 1471 {
1479 .procname = "ip_conntrack_tcp_timeout_established", 1472 .procname = "ip_conntrack_tcp_timeout_established",
1480 .data = &tcp_timeouts[TCP_CONNTRACK_ESTABLISHED],
1481 .maxlen = sizeof(unsigned int), 1473 .maxlen = sizeof(unsigned int),
1482 .mode = 0644, 1474 .mode = 0644,
1483 .proc_handler = proc_dointvec_jiffies, 1475 .proc_handler = proc_dointvec_jiffies,
1484 }, 1476 },
1485 { 1477 {
1486 .procname = "ip_conntrack_tcp_timeout_fin_wait", 1478 .procname = "ip_conntrack_tcp_timeout_fin_wait",
1487 .data = &tcp_timeouts[TCP_CONNTRACK_FIN_WAIT],
1488 .maxlen = sizeof(unsigned int), 1479 .maxlen = sizeof(unsigned int),
1489 .mode = 0644, 1480 .mode = 0644,
1490 .proc_handler = proc_dointvec_jiffies, 1481 .proc_handler = proc_dointvec_jiffies,
1491 }, 1482 },
1492 { 1483 {
1493 .procname = "ip_conntrack_tcp_timeout_close_wait", 1484 .procname = "ip_conntrack_tcp_timeout_close_wait",
1494 .data = &tcp_timeouts[TCP_CONNTRACK_CLOSE_WAIT],
1495 .maxlen = sizeof(unsigned int), 1485 .maxlen = sizeof(unsigned int),
1496 .mode = 0644, 1486 .mode = 0644,
1497 .proc_handler = proc_dointvec_jiffies, 1487 .proc_handler = proc_dointvec_jiffies,
1498 }, 1488 },
1499 { 1489 {
1500 .procname = "ip_conntrack_tcp_timeout_last_ack", 1490 .procname = "ip_conntrack_tcp_timeout_last_ack",
1501 .data = &tcp_timeouts[TCP_CONNTRACK_LAST_ACK],
1502 .maxlen = sizeof(unsigned int), 1491 .maxlen = sizeof(unsigned int),
1503 .mode = 0644, 1492 .mode = 0644,
1504 .proc_handler = proc_dointvec_jiffies, 1493 .proc_handler = proc_dointvec_jiffies,
1505 }, 1494 },
1506 { 1495 {
1507 .procname = "ip_conntrack_tcp_timeout_time_wait", 1496 .procname = "ip_conntrack_tcp_timeout_time_wait",
1508 .data = &tcp_timeouts[TCP_CONNTRACK_TIME_WAIT],
1509 .maxlen = sizeof(unsigned int), 1497 .maxlen = sizeof(unsigned int),
1510 .mode = 0644, 1498 .mode = 0644,
1511 .proc_handler = proc_dointvec_jiffies, 1499 .proc_handler = proc_dointvec_jiffies,
1512 }, 1500 },
1513 { 1501 {
1514 .procname = "ip_conntrack_tcp_timeout_close", 1502 .procname = "ip_conntrack_tcp_timeout_close",
1515 .data = &tcp_timeouts[TCP_CONNTRACK_CLOSE],
1516 .maxlen = sizeof(unsigned int), 1503 .maxlen = sizeof(unsigned int),
1517 .mode = 0644, 1504 .mode = 0644,
1518 .proc_handler = proc_dointvec_jiffies, 1505 .proc_handler = proc_dointvec_jiffies,
1519 }, 1506 },
1520 { 1507 {
1521 .procname = "ip_conntrack_tcp_timeout_max_retrans", 1508 .procname = "ip_conntrack_tcp_timeout_max_retrans",
1522 .data = &tcp_timeouts[TCP_CONNTRACK_RETRANS],
1523 .maxlen = sizeof(unsigned int), 1509 .maxlen = sizeof(unsigned int),
1524 .mode = 0644, 1510 .mode = 0644,
1525 .proc_handler = proc_dointvec_jiffies, 1511 .proc_handler = proc_dointvec_jiffies,
1526 }, 1512 },
1527 { 1513 {
1528 .procname = "ip_conntrack_tcp_loose", 1514 .procname = "ip_conntrack_tcp_loose",
1529 .data = &nf_ct_tcp_loose,
1530 .maxlen = sizeof(unsigned int), 1515 .maxlen = sizeof(unsigned int),
1531 .mode = 0644, 1516 .mode = 0644,
1532 .proc_handler = proc_dointvec, 1517 .proc_handler = proc_dointvec,
1533 }, 1518 },
1534 { 1519 {
1535 .procname = "ip_conntrack_tcp_be_liberal", 1520 .procname = "ip_conntrack_tcp_be_liberal",
1536 .data = &nf_ct_tcp_be_liberal,
1537 .maxlen = sizeof(unsigned int), 1521 .maxlen = sizeof(unsigned int),
1538 .mode = 0644, 1522 .mode = 0644,
1539 .proc_handler = proc_dointvec, 1523 .proc_handler = proc_dointvec,
1540 }, 1524 },
1541 { 1525 {
1542 .procname = "ip_conntrack_tcp_max_retrans", 1526 .procname = "ip_conntrack_tcp_max_retrans",
1543 .data = &nf_ct_tcp_max_retrans,
1544 .maxlen = sizeof(unsigned int), 1527 .maxlen = sizeof(unsigned int),
1545 .mode = 0644, 1528 .mode = 0644,
1546 .proc_handler = proc_dointvec, 1529 .proc_handler = proc_dointvec,
@@ -1550,6 +1533,101 @@ static struct ctl_table tcp_compat_sysctl_table[] = {
1550#endif /* CONFIG_NF_CONNTRACK_PROC_COMPAT */ 1533#endif /* CONFIG_NF_CONNTRACK_PROC_COMPAT */
1551#endif /* CONFIG_SYSCTL */ 1534#endif /* CONFIG_SYSCTL */
1552 1535
1536static int tcp_kmemdup_sysctl_table(struct nf_proto_net *pn,
1537 struct nf_tcp_net *tn)
1538{
1539#ifdef CONFIG_SYSCTL
1540 if (pn->ctl_table)
1541 return 0;
1542
1543 pn->ctl_table = kmemdup(tcp_sysctl_table,
1544 sizeof(tcp_sysctl_table),
1545 GFP_KERNEL);
1546 if (!pn->ctl_table)
1547 return -ENOMEM;
1548
1549 pn->ctl_table[0].data = &tn->timeouts[TCP_CONNTRACK_SYN_SENT];
1550 pn->ctl_table[1].data = &tn->timeouts[TCP_CONNTRACK_SYN_RECV];
1551 pn->ctl_table[2].data = &tn->timeouts[TCP_CONNTRACK_ESTABLISHED];
1552 pn->ctl_table[3].data = &tn->timeouts[TCP_CONNTRACK_FIN_WAIT];
1553 pn->ctl_table[4].data = &tn->timeouts[TCP_CONNTRACK_CLOSE_WAIT];
1554 pn->ctl_table[5].data = &tn->timeouts[TCP_CONNTRACK_LAST_ACK];
1555 pn->ctl_table[6].data = &tn->timeouts[TCP_CONNTRACK_TIME_WAIT];
1556 pn->ctl_table[7].data = &tn->timeouts[TCP_CONNTRACK_CLOSE];
1557 pn->ctl_table[8].data = &tn->timeouts[TCP_CONNTRACK_RETRANS];
1558 pn->ctl_table[9].data = &tn->timeouts[TCP_CONNTRACK_UNACK];
1559 pn->ctl_table[10].data = &tn->tcp_loose;
1560 pn->ctl_table[11].data = &tn->tcp_be_liberal;
1561 pn->ctl_table[12].data = &tn->tcp_max_retrans;
1562#endif
1563 return 0;
1564}
1565
1566static int tcp_kmemdup_compat_sysctl_table(struct nf_proto_net *pn,
1567 struct nf_tcp_net *tn)
1568{
1569#ifdef CONFIG_SYSCTL
1570#ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
1571 pn->ctl_compat_table = kmemdup(tcp_compat_sysctl_table,
1572 sizeof(tcp_compat_sysctl_table),
1573 GFP_KERNEL);
1574 if (!pn->ctl_compat_table)
1575 return -ENOMEM;
1576
1577 pn->ctl_compat_table[0].data = &tn->timeouts[TCP_CONNTRACK_SYN_SENT];
1578 pn->ctl_compat_table[1].data = &tn->timeouts[TCP_CONNTRACK_SYN_SENT2];
1579 pn->ctl_compat_table[2].data = &tn->timeouts[TCP_CONNTRACK_SYN_RECV];
1580 pn->ctl_compat_table[3].data = &tn->timeouts[TCP_CONNTRACK_ESTABLISHED];
1581 pn->ctl_compat_table[4].data = &tn->timeouts[TCP_CONNTRACK_FIN_WAIT];
1582 pn->ctl_compat_table[5].data = &tn->timeouts[TCP_CONNTRACK_CLOSE_WAIT];
1583 pn->ctl_compat_table[6].data = &tn->timeouts[TCP_CONNTRACK_LAST_ACK];
1584 pn->ctl_compat_table[7].data = &tn->timeouts[TCP_CONNTRACK_TIME_WAIT];
1585 pn->ctl_compat_table[8].data = &tn->timeouts[TCP_CONNTRACK_CLOSE];
1586 pn->ctl_compat_table[9].data = &tn->timeouts[TCP_CONNTRACK_RETRANS];
1587 pn->ctl_compat_table[10].data = &tn->tcp_loose;
1588 pn->ctl_compat_table[11].data = &tn->tcp_be_liberal;
1589 pn->ctl_compat_table[12].data = &tn->tcp_max_retrans;
1590#endif
1591#endif
1592 return 0;
1593}
1594
1595static int tcp_init_net(struct net *net, u_int16_t proto)
1596{
1597 int ret;
1598 struct nf_tcp_net *tn = tcp_pernet(net);
1599 struct nf_proto_net *pn = &tn->pn;
1600
1601 if (!pn->users) {
1602 int i;
1603
1604 for (i = 0; i < TCP_CONNTRACK_TIMEOUT_MAX; i++)
1605 tn->timeouts[i] = tcp_timeouts[i];
1606
1607 tn->tcp_loose = nf_ct_tcp_loose;
1608 tn->tcp_be_liberal = nf_ct_tcp_be_liberal;
1609 tn->tcp_max_retrans = nf_ct_tcp_max_retrans;
1610 }
1611
1612 if (proto == AF_INET) {
1613 ret = tcp_kmemdup_compat_sysctl_table(pn, tn);
1614 if (ret < 0)
1615 return ret;
1616
1617 ret = tcp_kmemdup_sysctl_table(pn, tn);
1618 if (ret < 0)
1619 nf_ct_kfree_compat_sysctl_table(pn);
1620 } else
1621 ret = tcp_kmemdup_sysctl_table(pn, tn);
1622
1623 return ret;
1624}
1625
1626static struct nf_proto_net *tcp_get_net_proto(struct net *net)
1627{
1628 return &net->ct.nf_ct_proto.tcp.pn;
1629}
1630
1553struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp4 __read_mostly = 1631struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp4 __read_mostly =
1554{ 1632{
1555 .l3proto = PF_INET, 1633 .l3proto = PF_INET,
@@ -1582,14 +1660,8 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp4 __read_mostly =
1582 .nla_policy = tcp_timeout_nla_policy, 1660 .nla_policy = tcp_timeout_nla_policy,
1583 }, 1661 },
1584#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ 1662#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
1585#ifdef CONFIG_SYSCTL 1663 .init_net = tcp_init_net,
1586 .ctl_table_users = &tcp_sysctl_table_users, 1664 .get_net_proto = tcp_get_net_proto,
1587 .ctl_table_header = &tcp_sysctl_header,
1588 .ctl_table = tcp_sysctl_table,
1589#ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
1590 .ctl_compat_table = tcp_compat_sysctl_table,
1591#endif
1592#endif
1593}; 1665};
1594EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_tcp4); 1666EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_tcp4);
1595 1667
@@ -1625,10 +1697,7 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp6 __read_mostly =
1625 .nla_policy = tcp_timeout_nla_policy, 1697 .nla_policy = tcp_timeout_nla_policy,
1626 }, 1698 },
1627#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ 1699#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
1628#ifdef CONFIG_SYSCTL 1700 .init_net = tcp_init_net,
1629 .ctl_table_users = &tcp_sysctl_table_users, 1701 .get_net_proto = tcp_get_net_proto,
1630 .ctl_table_header = &tcp_sysctl_header,
1631 .ctl_table = tcp_sysctl_table,
1632#endif
1633}; 1702};
1634EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_tcp6); 1703EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_tcp6);
diff --git a/net/netfilter/nf_conntrack_proto_udp.c b/net/netfilter/nf_conntrack_proto_udp.c
index 7259a6bdeb4..59623cc56e8 100644
--- a/net/netfilter/nf_conntrack_proto_udp.c
+++ b/net/netfilter/nf_conntrack_proto_udp.c
@@ -25,17 +25,16 @@
25#include <net/netfilter/ipv4/nf_conntrack_ipv4.h> 25#include <net/netfilter/ipv4/nf_conntrack_ipv4.h>
26#include <net/netfilter/ipv6/nf_conntrack_ipv6.h> 26#include <net/netfilter/ipv6/nf_conntrack_ipv6.h>
27 27
28enum udp_conntrack {
29 UDP_CT_UNREPLIED,
30 UDP_CT_REPLIED,
31 UDP_CT_MAX
32};
33
34static unsigned int udp_timeouts[UDP_CT_MAX] = { 28static unsigned int udp_timeouts[UDP_CT_MAX] = {
35 [UDP_CT_UNREPLIED] = 30*HZ, 29 [UDP_CT_UNREPLIED] = 30*HZ,
36 [UDP_CT_REPLIED] = 180*HZ, 30 [UDP_CT_REPLIED] = 180*HZ,
37}; 31};
38 32
33static inline struct nf_udp_net *udp_pernet(struct net *net)
34{
35 return &net->ct.nf_ct_proto.udp;
36}
37
39static bool udp_pkt_to_tuple(const struct sk_buff *skb, 38static bool udp_pkt_to_tuple(const struct sk_buff *skb,
40 unsigned int dataoff, 39 unsigned int dataoff,
41 struct nf_conntrack_tuple *tuple) 40 struct nf_conntrack_tuple *tuple)
@@ -73,7 +72,7 @@ static int udp_print_tuple(struct seq_file *s,
73 72
74static unsigned int *udp_get_timeouts(struct net *net) 73static unsigned int *udp_get_timeouts(struct net *net)
75{ 74{
76 return udp_timeouts; 75 return udp_pernet(net)->timeouts;
77} 76}
78 77
79/* Returns verdict for packet, and may modify conntracktype */ 78/* Returns verdict for packet, and may modify conntracktype */
@@ -157,13 +156,15 @@ static int udp_error(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb,
157#include <linux/netfilter/nfnetlink.h> 156#include <linux/netfilter/nfnetlink.h>
158#include <linux/netfilter/nfnetlink_cttimeout.h> 157#include <linux/netfilter/nfnetlink_cttimeout.h>
159 158
160static int udp_timeout_nlattr_to_obj(struct nlattr *tb[], void *data) 159static int udp_timeout_nlattr_to_obj(struct nlattr *tb[],
160 struct net *net, void *data)
161{ 161{
162 unsigned int *timeouts = data; 162 unsigned int *timeouts = data;
163 struct nf_udp_net *un = udp_pernet(net);
163 164
164 /* set default timeouts for UDP. */ 165 /* set default timeouts for UDP. */
165 timeouts[UDP_CT_UNREPLIED] = udp_timeouts[UDP_CT_UNREPLIED]; 166 timeouts[UDP_CT_UNREPLIED] = un->timeouts[UDP_CT_UNREPLIED];
166 timeouts[UDP_CT_REPLIED] = udp_timeouts[UDP_CT_REPLIED]; 167 timeouts[UDP_CT_REPLIED] = un->timeouts[UDP_CT_REPLIED];
167 168
168 if (tb[CTA_TIMEOUT_UDP_UNREPLIED]) { 169 if (tb[CTA_TIMEOUT_UDP_UNREPLIED]) {
169 timeouts[UDP_CT_UNREPLIED] = 170 timeouts[UDP_CT_UNREPLIED] =
@@ -200,19 +201,15 @@ udp_timeout_nla_policy[CTA_TIMEOUT_UDP_MAX+1] = {
200#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ 201#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
201 202
202#ifdef CONFIG_SYSCTL 203#ifdef CONFIG_SYSCTL
203static unsigned int udp_sysctl_table_users;
204static struct ctl_table_header *udp_sysctl_header;
205static struct ctl_table udp_sysctl_table[] = { 204static struct ctl_table udp_sysctl_table[] = {
206 { 205 {
207 .procname = "nf_conntrack_udp_timeout", 206 .procname = "nf_conntrack_udp_timeout",
208 .data = &udp_timeouts[UDP_CT_UNREPLIED],
209 .maxlen = sizeof(unsigned int), 207 .maxlen = sizeof(unsigned int),
210 .mode = 0644, 208 .mode = 0644,
211 .proc_handler = proc_dointvec_jiffies, 209 .proc_handler = proc_dointvec_jiffies,
212 }, 210 },
213 { 211 {
214 .procname = "nf_conntrack_udp_timeout_stream", 212 .procname = "nf_conntrack_udp_timeout_stream",
215 .data = &udp_timeouts[UDP_CT_REPLIED],
216 .maxlen = sizeof(unsigned int), 213 .maxlen = sizeof(unsigned int),
217 .mode = 0644, 214 .mode = 0644,
218 .proc_handler = proc_dointvec_jiffies, 215 .proc_handler = proc_dointvec_jiffies,
@@ -223,14 +220,12 @@ static struct ctl_table udp_sysctl_table[] = {
223static struct ctl_table udp_compat_sysctl_table[] = { 220static struct ctl_table udp_compat_sysctl_table[] = {
224 { 221 {
225 .procname = "ip_conntrack_udp_timeout", 222 .procname = "ip_conntrack_udp_timeout",
226 .data = &udp_timeouts[UDP_CT_UNREPLIED],
227 .maxlen = sizeof(unsigned int), 223 .maxlen = sizeof(unsigned int),
228 .mode = 0644, 224 .mode = 0644,
229 .proc_handler = proc_dointvec_jiffies, 225 .proc_handler = proc_dointvec_jiffies,
230 }, 226 },
231 { 227 {
232 .procname = "ip_conntrack_udp_timeout_stream", 228 .procname = "ip_conntrack_udp_timeout_stream",
233 .data = &udp_timeouts[UDP_CT_REPLIED],
234 .maxlen = sizeof(unsigned int), 229 .maxlen = sizeof(unsigned int),
235 .mode = 0644, 230 .mode = 0644,
236 .proc_handler = proc_dointvec_jiffies, 231 .proc_handler = proc_dointvec_jiffies,
@@ -240,6 +235,73 @@ static struct ctl_table udp_compat_sysctl_table[] = {
240#endif /* CONFIG_NF_CONNTRACK_PROC_COMPAT */ 235#endif /* CONFIG_NF_CONNTRACK_PROC_COMPAT */
241#endif /* CONFIG_SYSCTL */ 236#endif /* CONFIG_SYSCTL */
242 237
238static int udp_kmemdup_sysctl_table(struct nf_proto_net *pn,
239 struct nf_udp_net *un)
240{
241#ifdef CONFIG_SYSCTL
242 if (pn->ctl_table)
243 return 0;
244 pn->ctl_table = kmemdup(udp_sysctl_table,
245 sizeof(udp_sysctl_table),
246 GFP_KERNEL);
247 if (!pn->ctl_table)
248 return -ENOMEM;
249 pn->ctl_table[0].data = &un->timeouts[UDP_CT_UNREPLIED];
250 pn->ctl_table[1].data = &un->timeouts[UDP_CT_REPLIED];
251#endif
252 return 0;
253}
254
255static int udp_kmemdup_compat_sysctl_table(struct nf_proto_net *pn,
256 struct nf_udp_net *un)
257{
258#ifdef CONFIG_SYSCTL
259#ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
260 pn->ctl_compat_table = kmemdup(udp_compat_sysctl_table,
261 sizeof(udp_compat_sysctl_table),
262 GFP_KERNEL);
263 if (!pn->ctl_compat_table)
264 return -ENOMEM;
265
266 pn->ctl_compat_table[0].data = &un->timeouts[UDP_CT_UNREPLIED];
267 pn->ctl_compat_table[1].data = &un->timeouts[UDP_CT_REPLIED];
268#endif
269#endif
270 return 0;
271}
272
273static int udp_init_net(struct net *net, u_int16_t proto)
274{
275 int ret;
276 struct nf_udp_net *un = udp_pernet(net);
277 struct nf_proto_net *pn = &un->pn;
278
279 if (!pn->users) {
280 int i;
281
282 for (i = 0; i < UDP_CT_MAX; i++)
283 un->timeouts[i] = udp_timeouts[i];
284 }
285
286 if (proto == AF_INET) {
287 ret = udp_kmemdup_compat_sysctl_table(pn, un);
288 if (ret < 0)
289 return ret;
290
291 ret = udp_kmemdup_sysctl_table(pn, un);
292 if (ret < 0)
293 nf_ct_kfree_compat_sysctl_table(pn);
294 } else
295 ret = udp_kmemdup_sysctl_table(pn, un);
296
297 return ret;
298}
299
300static struct nf_proto_net *udp_get_net_proto(struct net *net)
301{
302 return &net->ct.nf_ct_proto.udp.pn;
303}
304
243struct nf_conntrack_l4proto nf_conntrack_l4proto_udp4 __read_mostly = 305struct nf_conntrack_l4proto nf_conntrack_l4proto_udp4 __read_mostly =
244{ 306{
245 .l3proto = PF_INET, 307 .l3proto = PF_INET,
@@ -267,14 +329,8 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_udp4 __read_mostly =
267 .nla_policy = udp_timeout_nla_policy, 329 .nla_policy = udp_timeout_nla_policy,
268 }, 330 },
269#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ 331#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
270#ifdef CONFIG_SYSCTL 332 .init_net = udp_init_net,
271 .ctl_table_users = &udp_sysctl_table_users, 333 .get_net_proto = udp_get_net_proto,
272 .ctl_table_header = &udp_sysctl_header,
273 .ctl_table = udp_sysctl_table,
274#ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
275 .ctl_compat_table = udp_compat_sysctl_table,
276#endif
277#endif
278}; 334};
279EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_udp4); 335EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_udp4);
280 336
@@ -305,10 +361,7 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_udp6 __read_mostly =
305 .nla_policy = udp_timeout_nla_policy, 361 .nla_policy = udp_timeout_nla_policy,
306 }, 362 },
307#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ 363#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
308#ifdef CONFIG_SYSCTL 364 .init_net = udp_init_net,
309 .ctl_table_users = &udp_sysctl_table_users, 365 .get_net_proto = udp_get_net_proto,
310 .ctl_table_header = &udp_sysctl_header,
311 .ctl_table = udp_sysctl_table,
312#endif
313}; 366};
314EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_udp6); 367EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_udp6);
diff --git a/net/netfilter/nf_conntrack_proto_udplite.c b/net/netfilter/nf_conntrack_proto_udplite.c
index 4d60a5376aa..4b66df20928 100644
--- a/net/netfilter/nf_conntrack_proto_udplite.c
+++ b/net/netfilter/nf_conntrack_proto_udplite.c
@@ -35,6 +35,17 @@ static unsigned int udplite_timeouts[UDPLITE_CT_MAX] = {
35 [UDPLITE_CT_REPLIED] = 180*HZ, 35 [UDPLITE_CT_REPLIED] = 180*HZ,
36}; 36};
37 37
38static int udplite_net_id __read_mostly;
39struct udplite_net {
40 struct nf_proto_net pn;
41 unsigned int timeouts[UDPLITE_CT_MAX];
42};
43
44static inline struct udplite_net *udplite_pernet(struct net *net)
45{
46 return net_generic(net, udplite_net_id);
47}
48
38static bool udplite_pkt_to_tuple(const struct sk_buff *skb, 49static bool udplite_pkt_to_tuple(const struct sk_buff *skb,
39 unsigned int dataoff, 50 unsigned int dataoff,
40 struct nf_conntrack_tuple *tuple) 51 struct nf_conntrack_tuple *tuple)
@@ -70,7 +81,7 @@ static int udplite_print_tuple(struct seq_file *s,
70 81
71static unsigned int *udplite_get_timeouts(struct net *net) 82static unsigned int *udplite_get_timeouts(struct net *net)
72{ 83{
73 return udplite_timeouts; 84 return udplite_pernet(net)->timeouts;
74} 85}
75 86
76/* Returns verdict for packet, and may modify conntracktype */ 87/* Returns verdict for packet, and may modify conntracktype */
@@ -161,13 +172,15 @@ static int udplite_error(struct net *net, struct nf_conn *tmpl,
161#include <linux/netfilter/nfnetlink.h> 172#include <linux/netfilter/nfnetlink.h>
162#include <linux/netfilter/nfnetlink_cttimeout.h> 173#include <linux/netfilter/nfnetlink_cttimeout.h>
163 174
164static int udplite_timeout_nlattr_to_obj(struct nlattr *tb[], void *data) 175static int udplite_timeout_nlattr_to_obj(struct nlattr *tb[],
176 struct net *net, void *data)
165{ 177{
166 unsigned int *timeouts = data; 178 unsigned int *timeouts = data;
179 struct udplite_net *un = udplite_pernet(net);
167 180
168 /* set default timeouts for UDPlite. */ 181 /* set default timeouts for UDPlite. */
169 timeouts[UDPLITE_CT_UNREPLIED] = udplite_timeouts[UDPLITE_CT_UNREPLIED]; 182 timeouts[UDPLITE_CT_UNREPLIED] = un->timeouts[UDPLITE_CT_UNREPLIED];
170 timeouts[UDPLITE_CT_REPLIED] = udplite_timeouts[UDPLITE_CT_REPLIED]; 183 timeouts[UDPLITE_CT_REPLIED] = un->timeouts[UDPLITE_CT_REPLIED];
171 184
172 if (tb[CTA_TIMEOUT_UDPLITE_UNREPLIED]) { 185 if (tb[CTA_TIMEOUT_UDPLITE_UNREPLIED]) {
173 timeouts[UDPLITE_CT_UNREPLIED] = 186 timeouts[UDPLITE_CT_UNREPLIED] =
@@ -204,19 +217,15 @@ udplite_timeout_nla_policy[CTA_TIMEOUT_UDPLITE_MAX+1] = {
204#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ 217#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
205 218
206#ifdef CONFIG_SYSCTL 219#ifdef CONFIG_SYSCTL
207static unsigned int udplite_sysctl_table_users;
208static struct ctl_table_header *udplite_sysctl_header;
209static struct ctl_table udplite_sysctl_table[] = { 220static struct ctl_table udplite_sysctl_table[] = {
210 { 221 {
211 .procname = "nf_conntrack_udplite_timeout", 222 .procname = "nf_conntrack_udplite_timeout",
212 .data = &udplite_timeouts[UDPLITE_CT_UNREPLIED],
213 .maxlen = sizeof(unsigned int), 223 .maxlen = sizeof(unsigned int),
214 .mode = 0644, 224 .mode = 0644,
215 .proc_handler = proc_dointvec_jiffies, 225 .proc_handler = proc_dointvec_jiffies,
216 }, 226 },
217 { 227 {
218 .procname = "nf_conntrack_udplite_timeout_stream", 228 .procname = "nf_conntrack_udplite_timeout_stream",
219 .data = &udplite_timeouts[UDPLITE_CT_REPLIED],
220 .maxlen = sizeof(unsigned int), 229 .maxlen = sizeof(unsigned int),
221 .mode = 0644, 230 .mode = 0644,
222 .proc_handler = proc_dointvec_jiffies, 231 .proc_handler = proc_dointvec_jiffies,
@@ -225,6 +234,40 @@ static struct ctl_table udplite_sysctl_table[] = {
225}; 234};
226#endif /* CONFIG_SYSCTL */ 235#endif /* CONFIG_SYSCTL */
227 236
237static int udplite_kmemdup_sysctl_table(struct nf_proto_net *pn,
238 struct udplite_net *un)
239{
240#ifdef CONFIG_SYSCTL
241 if (pn->ctl_table)
242 return 0;
243
244 pn->ctl_table = kmemdup(udplite_sysctl_table,
245 sizeof(udplite_sysctl_table),
246 GFP_KERNEL);
247 if (!pn->ctl_table)
248 return -ENOMEM;
249
250 pn->ctl_table[0].data = &un->timeouts[UDPLITE_CT_UNREPLIED];
251 pn->ctl_table[1].data = &un->timeouts[UDPLITE_CT_REPLIED];
252#endif
253 return 0;
254}
255
256static int udplite_init_net(struct net *net, u_int16_t proto)
257{
258 struct udplite_net *un = udplite_pernet(net);
259 struct nf_proto_net *pn = &un->pn;
260
261 if (!pn->users) {
262 int i;
263
264 for (i = 0 ; i < UDPLITE_CT_MAX; i++)
265 un->timeouts[i] = udplite_timeouts[i];
266 }
267
268 return udplite_kmemdup_sysctl_table(pn, un);
269}
270
228static struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite4 __read_mostly = 271static struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite4 __read_mostly =
229{ 272{
230 .l3proto = PF_INET, 273 .l3proto = PF_INET,
@@ -253,11 +296,8 @@ static struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite4 __read_mostly =
253 .nla_policy = udplite_timeout_nla_policy, 296 .nla_policy = udplite_timeout_nla_policy,
254 }, 297 },
255#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ 298#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
256#ifdef CONFIG_SYSCTL 299 .net_id = &udplite_net_id,
257 .ctl_table_users = &udplite_sysctl_table_users, 300 .init_net = udplite_init_net,
258 .ctl_table_header = &udplite_sysctl_header,
259 .ctl_table = udplite_sysctl_table,
260#endif
261}; 301};
262 302
263static struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite6 __read_mostly = 303static struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite6 __read_mostly =
@@ -288,34 +328,55 @@ static struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite6 __read_mostly =
288 .nla_policy = udplite_timeout_nla_policy, 328 .nla_policy = udplite_timeout_nla_policy,
289 }, 329 },
290#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ 330#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
291#ifdef CONFIG_SYSCTL 331 .net_id = &udplite_net_id,
292 .ctl_table_users = &udplite_sysctl_table_users, 332 .init_net = udplite_init_net,
293 .ctl_table_header = &udplite_sysctl_header,
294 .ctl_table = udplite_sysctl_table,
295#endif
296}; 333};
297 334
298static int __init nf_conntrack_proto_udplite_init(void) 335static int udplite_net_init(struct net *net)
299{ 336{
300 int err; 337 int ret = 0;
301 338
302 err = nf_conntrack_l4proto_register(&nf_conntrack_l4proto_udplite4); 339 ret = nf_conntrack_l4proto_register(net,
303 if (err < 0) 340 &nf_conntrack_l4proto_udplite4);
304 goto err1; 341 if (ret < 0) {
305 err = nf_conntrack_l4proto_register(&nf_conntrack_l4proto_udplite6); 342 pr_err("nf_conntrack_l4proto_udplite4 :protocol register failed.\n");
306 if (err < 0) 343 goto out;
307 goto err2; 344 }
345 ret = nf_conntrack_l4proto_register(net,
346 &nf_conntrack_l4proto_udplite6);
347 if (ret < 0) {
348 pr_err("nf_conntrack_l4proto_udplite4 :protocol register failed.\n");
349 goto cleanup_udplite4;
350 }
308 return 0; 351 return 0;
309err2: 352
310 nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_udplite4); 353cleanup_udplite4:
311err1: 354 nf_conntrack_l4proto_unregister(net, &nf_conntrack_l4proto_udplite4);
312 return err; 355out:
356 return ret;
357}
358
359static void udplite_net_exit(struct net *net)
360{
361 nf_conntrack_l4proto_unregister(net, &nf_conntrack_l4proto_udplite6);
362 nf_conntrack_l4proto_unregister(net, &nf_conntrack_l4proto_udplite4);
363}
364
365static struct pernet_operations udplite_net_ops = {
366 .init = udplite_net_init,
367 .exit = udplite_net_exit,
368 .id = &udplite_net_id,
369 .size = sizeof(struct udplite_net),
370};
371
372static int __init nf_conntrack_proto_udplite_init(void)
373{
374 return register_pernet_subsys(&udplite_net_ops);
313} 375}
314 376
315static void __exit nf_conntrack_proto_udplite_exit(void) 377static void __exit nf_conntrack_proto_udplite_exit(void)
316{ 378{
317 nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_udplite6); 379 unregister_pernet_subsys(&udplite_net_ops);
318 nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_udplite4);
319} 380}
320 381
321module_init(nf_conntrack_proto_udplite_init); 382module_init(nf_conntrack_proto_udplite_init);
diff --git a/net/netfilter/nf_conntrack_sane.c b/net/netfilter/nf_conntrack_sane.c
index 8501823b3f9..295429f3908 100644
--- a/net/netfilter/nf_conntrack_sane.c
+++ b/net/netfilter/nf_conntrack_sane.c
@@ -69,13 +69,12 @@ static int help(struct sk_buff *skb,
69 void *sb_ptr; 69 void *sb_ptr;
70 int ret = NF_ACCEPT; 70 int ret = NF_ACCEPT;
71 int dir = CTINFO2DIR(ctinfo); 71 int dir = CTINFO2DIR(ctinfo);
72 struct nf_ct_sane_master *ct_sane_info; 72 struct nf_ct_sane_master *ct_sane_info = nfct_help_data(ct);
73 struct nf_conntrack_expect *exp; 73 struct nf_conntrack_expect *exp;
74 struct nf_conntrack_tuple *tuple; 74 struct nf_conntrack_tuple *tuple;
75 struct sane_request *req; 75 struct sane_request *req;
76 struct sane_reply_net_start *reply; 76 struct sane_reply_net_start *reply;
77 77
78 ct_sane_info = &nfct_help(ct)->help.ct_sane_info;
79 /* Until there's been traffic both ways, don't look in packets. */ 78 /* Until there's been traffic both ways, don't look in packets. */
80 if (ctinfo != IP_CT_ESTABLISHED && 79 if (ctinfo != IP_CT_ESTABLISHED &&
81 ctinfo != IP_CT_ESTABLISHED_REPLY) 80 ctinfo != IP_CT_ESTABLISHED_REPLY)
@@ -163,7 +162,6 @@ out:
163} 162}
164 163
165static struct nf_conntrack_helper sane[MAX_PORTS][2] __read_mostly; 164static struct nf_conntrack_helper sane[MAX_PORTS][2] __read_mostly;
166static char sane_names[MAX_PORTS][2][sizeof("sane-65535")] __read_mostly;
167 165
168static const struct nf_conntrack_expect_policy sane_exp_policy = { 166static const struct nf_conntrack_expect_policy sane_exp_policy = {
169 .max_expected = 1, 167 .max_expected = 1,
@@ -190,7 +188,6 @@ static void nf_conntrack_sane_fini(void)
190static int __init nf_conntrack_sane_init(void) 188static int __init nf_conntrack_sane_init(void)
191{ 189{
192 int i, j = -1, ret = 0; 190 int i, j = -1, ret = 0;
193 char *tmpname;
194 191
195 sane_buffer = kmalloc(65536, GFP_KERNEL); 192 sane_buffer = kmalloc(65536, GFP_KERNEL);
196 if (!sane_buffer) 193 if (!sane_buffer)
@@ -205,17 +202,16 @@ static int __init nf_conntrack_sane_init(void)
205 sane[i][0].tuple.src.l3num = PF_INET; 202 sane[i][0].tuple.src.l3num = PF_INET;
206 sane[i][1].tuple.src.l3num = PF_INET6; 203 sane[i][1].tuple.src.l3num = PF_INET6;
207 for (j = 0; j < 2; j++) { 204 for (j = 0; j < 2; j++) {
205 sane[i][j].data_len = sizeof(struct nf_ct_sane_master);
208 sane[i][j].tuple.src.u.tcp.port = htons(ports[i]); 206 sane[i][j].tuple.src.u.tcp.port = htons(ports[i]);
209 sane[i][j].tuple.dst.protonum = IPPROTO_TCP; 207 sane[i][j].tuple.dst.protonum = IPPROTO_TCP;
210 sane[i][j].expect_policy = &sane_exp_policy; 208 sane[i][j].expect_policy = &sane_exp_policy;
211 sane[i][j].me = THIS_MODULE; 209 sane[i][j].me = THIS_MODULE;
212 sane[i][j].help = help; 210 sane[i][j].help = help;
213 tmpname = &sane_names[i][j][0];
214 if (ports[i] == SANE_PORT) 211 if (ports[i] == SANE_PORT)
215 sprintf(tmpname, "sane"); 212 sprintf(sane[i][j].name, "sane");
216 else 213 else
217 sprintf(tmpname, "sane-%d", ports[i]); 214 sprintf(sane[i][j].name, "sane-%d", ports[i]);
218 sane[i][j].name = tmpname;
219 215
220 pr_debug("nf_ct_sane: registering helper for pf: %d " 216 pr_debug("nf_ct_sane: registering helper for pf: %d "
221 "port: %d\n", 217 "port: %d\n",
diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c
index 93faf6a3a63..758a1bacc12 100644
--- a/net/netfilter/nf_conntrack_sip.c
+++ b/net/netfilter/nf_conntrack_sip.c
@@ -1075,12 +1075,12 @@ static int process_invite_response(struct sk_buff *skb, unsigned int dataoff,
1075{ 1075{
1076 enum ip_conntrack_info ctinfo; 1076 enum ip_conntrack_info ctinfo;
1077 struct nf_conn *ct = nf_ct_get(skb, &ctinfo); 1077 struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
1078 struct nf_conn_help *help = nfct_help(ct); 1078 struct nf_ct_sip_master *ct_sip_info = nfct_help_data(ct);
1079 1079
1080 if ((code >= 100 && code <= 199) || 1080 if ((code >= 100 && code <= 199) ||
1081 (code >= 200 && code <= 299)) 1081 (code >= 200 && code <= 299))
1082 return process_sdp(skb, dataoff, dptr, datalen, cseq); 1082 return process_sdp(skb, dataoff, dptr, datalen, cseq);
1083 else if (help->help.ct_sip_info.invite_cseq == cseq) 1083 else if (ct_sip_info->invite_cseq == cseq)
1084 flush_expectations(ct, true); 1084 flush_expectations(ct, true);
1085 return NF_ACCEPT; 1085 return NF_ACCEPT;
1086} 1086}
@@ -1091,12 +1091,12 @@ static int process_update_response(struct sk_buff *skb, unsigned int dataoff,
1091{ 1091{
1092 enum ip_conntrack_info ctinfo; 1092 enum ip_conntrack_info ctinfo;
1093 struct nf_conn *ct = nf_ct_get(skb, &ctinfo); 1093 struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
1094 struct nf_conn_help *help = nfct_help(ct); 1094 struct nf_ct_sip_master *ct_sip_info = nfct_help_data(ct);
1095 1095
1096 if ((code >= 100 && code <= 199) || 1096 if ((code >= 100 && code <= 199) ||
1097 (code >= 200 && code <= 299)) 1097 (code >= 200 && code <= 299))
1098 return process_sdp(skb, dataoff, dptr, datalen, cseq); 1098 return process_sdp(skb, dataoff, dptr, datalen, cseq);
1099 else if (help->help.ct_sip_info.invite_cseq == cseq) 1099 else if (ct_sip_info->invite_cseq == cseq)
1100 flush_expectations(ct, true); 1100 flush_expectations(ct, true);
1101 return NF_ACCEPT; 1101 return NF_ACCEPT;
1102} 1102}
@@ -1107,12 +1107,12 @@ static int process_prack_response(struct sk_buff *skb, unsigned int dataoff,
1107{ 1107{
1108 enum ip_conntrack_info ctinfo; 1108 enum ip_conntrack_info ctinfo;
1109 struct nf_conn *ct = nf_ct_get(skb, &ctinfo); 1109 struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
1110 struct nf_conn_help *help = nfct_help(ct); 1110 struct nf_ct_sip_master *ct_sip_info = nfct_help_data(ct);
1111 1111
1112 if ((code >= 100 && code <= 199) || 1112 if ((code >= 100 && code <= 199) ||
1113 (code >= 200 && code <= 299)) 1113 (code >= 200 && code <= 299))
1114 return process_sdp(skb, dataoff, dptr, datalen, cseq); 1114 return process_sdp(skb, dataoff, dptr, datalen, cseq);
1115 else if (help->help.ct_sip_info.invite_cseq == cseq) 1115 else if (ct_sip_info->invite_cseq == cseq)
1116 flush_expectations(ct, true); 1116 flush_expectations(ct, true);
1117 return NF_ACCEPT; 1117 return NF_ACCEPT;
1118} 1118}
@@ -1123,13 +1123,13 @@ static int process_invite_request(struct sk_buff *skb, unsigned int dataoff,
1123{ 1123{
1124 enum ip_conntrack_info ctinfo; 1124 enum ip_conntrack_info ctinfo;
1125 struct nf_conn *ct = nf_ct_get(skb, &ctinfo); 1125 struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
1126 struct nf_conn_help *help = nfct_help(ct); 1126 struct nf_ct_sip_master *ct_sip_info = nfct_help_data(ct);
1127 unsigned int ret; 1127 unsigned int ret;
1128 1128
1129 flush_expectations(ct, true); 1129 flush_expectations(ct, true);
1130 ret = process_sdp(skb, dataoff, dptr, datalen, cseq); 1130 ret = process_sdp(skb, dataoff, dptr, datalen, cseq);
1131 if (ret == NF_ACCEPT) 1131 if (ret == NF_ACCEPT)
1132 help->help.ct_sip_info.invite_cseq = cseq; 1132 ct_sip_info->invite_cseq = cseq;
1133 return ret; 1133 return ret;
1134} 1134}
1135 1135
@@ -1154,7 +1154,7 @@ static int process_register_request(struct sk_buff *skb, unsigned int dataoff,
1154{ 1154{
1155 enum ip_conntrack_info ctinfo; 1155 enum ip_conntrack_info ctinfo;
1156 struct nf_conn *ct = nf_ct_get(skb, &ctinfo); 1156 struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
1157 struct nf_conn_help *help = nfct_help(ct); 1157 struct nf_ct_sip_master *ct_sip_info = nfct_help_data(ct);
1158 enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); 1158 enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
1159 unsigned int matchoff, matchlen; 1159 unsigned int matchoff, matchlen;
1160 struct nf_conntrack_expect *exp; 1160 struct nf_conntrack_expect *exp;
@@ -1235,7 +1235,7 @@ static int process_register_request(struct sk_buff *skb, unsigned int dataoff,
1235 1235
1236store_cseq: 1236store_cseq:
1237 if (ret == NF_ACCEPT) 1237 if (ret == NF_ACCEPT)
1238 help->help.ct_sip_info.register_cseq = cseq; 1238 ct_sip_info->register_cseq = cseq;
1239 return ret; 1239 return ret;
1240} 1240}
1241 1241
@@ -1245,7 +1245,7 @@ static int process_register_response(struct sk_buff *skb, unsigned int dataoff,
1245{ 1245{
1246 enum ip_conntrack_info ctinfo; 1246 enum ip_conntrack_info ctinfo;
1247 struct nf_conn *ct = nf_ct_get(skb, &ctinfo); 1247 struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
1248 struct nf_conn_help *help = nfct_help(ct); 1248 struct nf_ct_sip_master *ct_sip_info = nfct_help_data(ct);
1249 enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); 1249 enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
1250 union nf_inet_addr addr; 1250 union nf_inet_addr addr;
1251 __be16 port; 1251 __be16 port;
@@ -1262,7 +1262,7 @@ static int process_register_response(struct sk_buff *skb, unsigned int dataoff,
1262 * responses, so we store the sequence number of the last valid 1262 * responses, so we store the sequence number of the last valid
1263 * request and compare it here. 1263 * request and compare it here.
1264 */ 1264 */
1265 if (help->help.ct_sip_info.register_cseq != cseq) 1265 if (ct_sip_info->register_cseq != cseq)
1266 return NF_ACCEPT; 1266 return NF_ACCEPT;
1267 1267
1268 if (code >= 100 && code <= 199) 1268 if (code >= 100 && code <= 199)
@@ -1556,7 +1556,6 @@ static void nf_conntrack_sip_fini(void)
1556static int __init nf_conntrack_sip_init(void) 1556static int __init nf_conntrack_sip_init(void)
1557{ 1557{
1558 int i, j, ret; 1558 int i, j, ret;
1559 char *tmpname;
1560 1559
1561 if (ports_c == 0) 1560 if (ports_c == 0)
1562 ports[ports_c++] = SIP_PORT; 1561 ports[ports_c++] = SIP_PORT;
@@ -1579,17 +1578,16 @@ static int __init nf_conntrack_sip_init(void)
1579 sip[i][3].help = sip_help_tcp; 1578 sip[i][3].help = sip_help_tcp;
1580 1579
1581 for (j = 0; j < ARRAY_SIZE(sip[i]); j++) { 1580 for (j = 0; j < ARRAY_SIZE(sip[i]); j++) {
1581 sip[i][j].data_len = sizeof(struct nf_ct_sip_master);
1582 sip[i][j].tuple.src.u.udp.port = htons(ports[i]); 1582 sip[i][j].tuple.src.u.udp.port = htons(ports[i]);
1583 sip[i][j].expect_policy = sip_exp_policy; 1583 sip[i][j].expect_policy = sip_exp_policy;
1584 sip[i][j].expect_class_max = SIP_EXPECT_MAX; 1584 sip[i][j].expect_class_max = SIP_EXPECT_MAX;
1585 sip[i][j].me = THIS_MODULE; 1585 sip[i][j].me = THIS_MODULE;
1586 1586
1587 tmpname = &sip_names[i][j][0];
1588 if (ports[i] == SIP_PORT) 1587 if (ports[i] == SIP_PORT)
1589 sprintf(tmpname, "sip"); 1588 sprintf(sip_names[i][j], "sip");
1590 else 1589 else
1591 sprintf(tmpname, "sip-%u", i); 1590 sprintf(sip_names[i][j], "sip-%u", i);
1592 sip[i][j].name = tmpname;
1593 1591
1594 pr_debug("port #%u: %u\n", i, ports[i]); 1592 pr_debug("port #%u: %u\n", i, ports[i]);
1595 1593
diff --git a/net/netfilter/nf_conntrack_tftp.c b/net/netfilter/nf_conntrack_tftp.c
index 75466fd72f4..81fc61c0526 100644
--- a/net/netfilter/nf_conntrack_tftp.c
+++ b/net/netfilter/nf_conntrack_tftp.c
@@ -92,7 +92,6 @@ static int tftp_help(struct sk_buff *skb,
92} 92}
93 93
94static struct nf_conntrack_helper tftp[MAX_PORTS][2] __read_mostly; 94static struct nf_conntrack_helper tftp[MAX_PORTS][2] __read_mostly;
95static char tftp_names[MAX_PORTS][2][sizeof("tftp-65535")] __read_mostly;
96 95
97static const struct nf_conntrack_expect_policy tftp_exp_policy = { 96static const struct nf_conntrack_expect_policy tftp_exp_policy = {
98 .max_expected = 1, 97 .max_expected = 1,
@@ -112,7 +111,6 @@ static void nf_conntrack_tftp_fini(void)
112static int __init nf_conntrack_tftp_init(void) 111static int __init nf_conntrack_tftp_init(void)
113{ 112{
114 int i, j, ret; 113 int i, j, ret;
115 char *tmpname;
116 114
117 if (ports_c == 0) 115 if (ports_c == 0)
118 ports[ports_c++] = TFTP_PORT; 116 ports[ports_c++] = TFTP_PORT;
@@ -129,12 +127,10 @@ static int __init nf_conntrack_tftp_init(void)
129 tftp[i][j].me = THIS_MODULE; 127 tftp[i][j].me = THIS_MODULE;
130 tftp[i][j].help = tftp_help; 128 tftp[i][j].help = tftp_help;
131 129
132 tmpname = &tftp_names[i][j][0];
133 if (ports[i] == TFTP_PORT) 130 if (ports[i] == TFTP_PORT)
134 sprintf(tmpname, "tftp"); 131 sprintf(tftp[i][j].name, "tftp");
135 else 132 else
136 sprintf(tmpname, "tftp-%u", i); 133 sprintf(tftp[i][j].name, "tftp-%u", i);
137 tftp[i][j].name = tmpname;
138 134
139 ret = nf_conntrack_helper_register(&tftp[i][j]); 135 ret = nf_conntrack_helper_register(&tftp[i][j]);
140 if (ret) { 136 if (ret) {
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
index 791d56bbd74..a26503342e7 100644
--- a/net/netfilter/nfnetlink.c
+++ b/net/netfilter/nfnetlink.c
@@ -39,6 +39,15 @@ static char __initdata nfversion[] = "0.30";
39static const struct nfnetlink_subsystem __rcu *subsys_table[NFNL_SUBSYS_COUNT]; 39static const struct nfnetlink_subsystem __rcu *subsys_table[NFNL_SUBSYS_COUNT];
40static DEFINE_MUTEX(nfnl_mutex); 40static DEFINE_MUTEX(nfnl_mutex);
41 41
42static const int nfnl_group2type[NFNLGRP_MAX+1] = {
43 [NFNLGRP_CONNTRACK_NEW] = NFNL_SUBSYS_CTNETLINK,
44 [NFNLGRP_CONNTRACK_UPDATE] = NFNL_SUBSYS_CTNETLINK,
45 [NFNLGRP_CONNTRACK_DESTROY] = NFNL_SUBSYS_CTNETLINK,
46 [NFNLGRP_CONNTRACK_EXP_NEW] = NFNL_SUBSYS_CTNETLINK_EXP,
47 [NFNLGRP_CONNTRACK_EXP_UPDATE] = NFNL_SUBSYS_CTNETLINK_EXP,
48 [NFNLGRP_CONNTRACK_EXP_DESTROY] = NFNL_SUBSYS_CTNETLINK_EXP,
49};
50
42void nfnl_lock(void) 51void nfnl_lock(void)
43{ 52{
44 mutex_lock(&nfnl_mutex); 53 mutex_lock(&nfnl_mutex);
@@ -186,9 +195,11 @@ replay:
186 lockdep_is_held(&nfnl_mutex)) != ss || 195 lockdep_is_held(&nfnl_mutex)) != ss ||
187 nfnetlink_find_client(type, ss) != nc) 196 nfnetlink_find_client(type, ss) != nc)
188 err = -EAGAIN; 197 err = -EAGAIN;
189 else 198 else if (nc->call)
190 err = nc->call(net->nfnl, skb, nlh, 199 err = nc->call(net->nfnl, skb, nlh,
191 (const struct nlattr **)cda); 200 (const struct nlattr **)cda);
201 else
202 err = -EINVAL;
192 nfnl_unlock(); 203 nfnl_unlock();
193 } 204 }
194 if (err == -EAGAIN) 205 if (err == -EAGAIN)
@@ -202,12 +213,35 @@ static void nfnetlink_rcv(struct sk_buff *skb)
202 netlink_rcv_skb(skb, &nfnetlink_rcv_msg); 213 netlink_rcv_skb(skb, &nfnetlink_rcv_msg);
203} 214}
204 215
216#ifdef CONFIG_MODULES
217static void nfnetlink_bind(int group)
218{
219 const struct nfnetlink_subsystem *ss;
220 int type = nfnl_group2type[group];
221
222 rcu_read_lock();
223 ss = nfnetlink_get_subsys(type);
224 if (!ss) {
225 rcu_read_unlock();
226 request_module("nfnetlink-subsys-%d", type);
227 return;
228 }
229 rcu_read_unlock();
230}
231#endif
232
205static int __net_init nfnetlink_net_init(struct net *net) 233static int __net_init nfnetlink_net_init(struct net *net)
206{ 234{
207 struct sock *nfnl; 235 struct sock *nfnl;
236 struct netlink_kernel_cfg cfg = {
237 .groups = NFNLGRP_MAX,
238 .input = nfnetlink_rcv,
239#ifdef CONFIG_MODULES
240 .bind = nfnetlink_bind,
241#endif
242 };
208 243
209 nfnl = netlink_kernel_create(net, NETLINK_NETFILTER, NFNLGRP_MAX, 244 nfnl = netlink_kernel_create(net, NETLINK_NETFILTER, THIS_MODULE, &cfg);
210 nfnetlink_rcv, NULL, THIS_MODULE);
211 if (!nfnl) 245 if (!nfnl)
212 return -ENOMEM; 246 return -ENOMEM;
213 net->nfnl_stash = nfnl; 247 net->nfnl_stash = nfnl;
diff --git a/net/netfilter/nfnetlink_cthelper.c b/net/netfilter/nfnetlink_cthelper.c
new file mode 100644
index 00000000000..d6836193d47
--- /dev/null
+++ b/net/netfilter/nfnetlink_cthelper.c
@@ -0,0 +1,672 @@
1/*
2 * (C) 2012 Pablo Neira Ayuso <pablo@netfilter.org>
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation (or any later at your option).
7 *
8 * This software has been sponsored by Vyatta Inc. <http://www.vyatta.com>
9 */
10#include <linux/init.h>
11#include <linux/module.h>
12#include <linux/kernel.h>
13#include <linux/skbuff.h>
14#include <linux/netlink.h>
15#include <linux/rculist.h>
16#include <linux/slab.h>
17#include <linux/types.h>
18#include <linux/list.h>
19#include <linux/errno.h>
20#include <net/netlink.h>
21#include <net/sock.h>
22
23#include <net/netfilter/nf_conntrack_helper.h>
24#include <net/netfilter/nf_conntrack_expect.h>
25#include <net/netfilter/nf_conntrack_ecache.h>
26
27#include <linux/netfilter/nfnetlink.h>
28#include <linux/netfilter/nfnetlink_conntrack.h>
29#include <linux/netfilter/nfnetlink_cthelper.h>
30
31MODULE_LICENSE("GPL");
32MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
33MODULE_DESCRIPTION("nfnl_cthelper: User-space connection tracking helpers");
34
35static int
36nfnl_userspace_cthelper(struct sk_buff *skb, unsigned int protoff,
37 struct nf_conn *ct, enum ip_conntrack_info ctinfo)
38{
39 const struct nf_conn_help *help;
40 struct nf_conntrack_helper *helper;
41
42 help = nfct_help(ct);
43 if (help == NULL)
44 return NF_DROP;
45
46 /* rcu_read_lock()ed by nf_hook_slow */
47 helper = rcu_dereference(help->helper);
48 if (helper == NULL)
49 return NF_DROP;
50
51 /* This is an user-space helper not yet configured, skip. */
52 if ((helper->flags &
53 (NF_CT_HELPER_F_USERSPACE | NF_CT_HELPER_F_CONFIGURED)) ==
54 NF_CT_HELPER_F_USERSPACE)
55 return NF_ACCEPT;
56
57 /* If the user-space helper is not available, don't block traffic. */
58 return NF_QUEUE_NR(helper->queue_num) | NF_VERDICT_FLAG_QUEUE_BYPASS;
59}
60
61static const struct nla_policy nfnl_cthelper_tuple_pol[NFCTH_TUPLE_MAX+1] = {
62 [NFCTH_TUPLE_L3PROTONUM] = { .type = NLA_U16, },
63 [NFCTH_TUPLE_L4PROTONUM] = { .type = NLA_U8, },
64};
65
66static int
67nfnl_cthelper_parse_tuple(struct nf_conntrack_tuple *tuple,
68 const struct nlattr *attr)
69{
70 struct nlattr *tb[NFCTH_TUPLE_MAX+1];
71
72 nla_parse_nested(tb, NFCTH_TUPLE_MAX, attr, nfnl_cthelper_tuple_pol);
73
74 if (!tb[NFCTH_TUPLE_L3PROTONUM] || !tb[NFCTH_TUPLE_L4PROTONUM])
75 return -EINVAL;
76
77 tuple->src.l3num = ntohs(nla_get_u16(tb[NFCTH_TUPLE_L3PROTONUM]));
78 tuple->dst.protonum = nla_get_u8(tb[NFCTH_TUPLE_L4PROTONUM]);
79
80 return 0;
81}
82
83static int
84nfnl_cthelper_from_nlattr(struct nlattr *attr, struct nf_conn *ct)
85{
86 const struct nf_conn_help *help = nfct_help(ct);
87
88 if (help->helper->data_len == 0)
89 return -EINVAL;
90
91 memcpy(&help->data, nla_data(attr), help->helper->data_len);
92 return 0;
93}
94
95static int
96nfnl_cthelper_to_nlattr(struct sk_buff *skb, const struct nf_conn *ct)
97{
98 const struct nf_conn_help *help = nfct_help(ct);
99
100 if (help->helper->data_len &&
101 nla_put(skb, CTA_HELP_INFO, help->helper->data_len, &help->data))
102 goto nla_put_failure;
103
104 return 0;
105
106nla_put_failure:
107 return -ENOSPC;
108}
109
110static const struct nla_policy nfnl_cthelper_expect_pol[NFCTH_POLICY_MAX+1] = {
111 [NFCTH_POLICY_NAME] = { .type = NLA_NUL_STRING,
112 .len = NF_CT_HELPER_NAME_LEN-1 },
113 [NFCTH_POLICY_EXPECT_MAX] = { .type = NLA_U32, },
114 [NFCTH_POLICY_EXPECT_TIMEOUT] = { .type = NLA_U32, },
115};
116
117static int
118nfnl_cthelper_expect_policy(struct nf_conntrack_expect_policy *expect_policy,
119 const struct nlattr *attr)
120{
121 struct nlattr *tb[NFCTH_POLICY_MAX+1];
122
123 nla_parse_nested(tb, NFCTH_POLICY_MAX, attr, nfnl_cthelper_expect_pol);
124
125 if (!tb[NFCTH_POLICY_NAME] ||
126 !tb[NFCTH_POLICY_EXPECT_MAX] ||
127 !tb[NFCTH_POLICY_EXPECT_TIMEOUT])
128 return -EINVAL;
129
130 strncpy(expect_policy->name,
131 nla_data(tb[NFCTH_POLICY_NAME]), NF_CT_HELPER_NAME_LEN);
132 expect_policy->max_expected =
133 ntohl(nla_get_be32(tb[NFCTH_POLICY_EXPECT_MAX]));
134 expect_policy->timeout =
135 ntohl(nla_get_be32(tb[NFCTH_POLICY_EXPECT_TIMEOUT]));
136
137 return 0;
138}
139
140static const struct nla_policy
141nfnl_cthelper_expect_policy_set[NFCTH_POLICY_SET_MAX+1] = {
142 [NFCTH_POLICY_SET_NUM] = { .type = NLA_U32, },
143};
144
145static int
146nfnl_cthelper_parse_expect_policy(struct nf_conntrack_helper *helper,
147 const struct nlattr *attr)
148{
149 int i, ret;
150 struct nf_conntrack_expect_policy *expect_policy;
151 struct nlattr *tb[NFCTH_POLICY_SET_MAX+1];
152
153 nla_parse_nested(tb, NFCTH_POLICY_SET_MAX, attr,
154 nfnl_cthelper_expect_policy_set);
155
156 if (!tb[NFCTH_POLICY_SET_NUM])
157 return -EINVAL;
158
159 helper->expect_class_max =
160 ntohl(nla_get_be32(tb[NFCTH_POLICY_SET_NUM]));
161
162 if (helper->expect_class_max != 0 &&
163 helper->expect_class_max > NF_CT_MAX_EXPECT_CLASSES)
164 return -EOVERFLOW;
165
166 expect_policy = kzalloc(sizeof(struct nf_conntrack_expect_policy) *
167 helper->expect_class_max, GFP_KERNEL);
168 if (expect_policy == NULL)
169 return -ENOMEM;
170
171 for (i=0; i<helper->expect_class_max; i++) {
172 if (!tb[NFCTH_POLICY_SET+i])
173 goto err;
174
175 ret = nfnl_cthelper_expect_policy(&expect_policy[i],
176 tb[NFCTH_POLICY_SET+i]);
177 if (ret < 0)
178 goto err;
179 }
180 helper->expect_policy = expect_policy;
181 return 0;
182err:
183 kfree(expect_policy);
184 return -EINVAL;
185}
186
187static int
188nfnl_cthelper_create(const struct nlattr * const tb[],
189 struct nf_conntrack_tuple *tuple)
190{
191 struct nf_conntrack_helper *helper;
192 int ret;
193
194 if (!tb[NFCTH_TUPLE] || !tb[NFCTH_POLICY] || !tb[NFCTH_PRIV_DATA_LEN])
195 return -EINVAL;
196
197 helper = kzalloc(sizeof(struct nf_conntrack_helper), GFP_KERNEL);
198 if (helper == NULL)
199 return -ENOMEM;
200
201 ret = nfnl_cthelper_parse_expect_policy(helper, tb[NFCTH_POLICY]);
202 if (ret < 0)
203 goto err;
204
205 strncpy(helper->name, nla_data(tb[NFCTH_NAME]), NF_CT_HELPER_NAME_LEN);
206 helper->data_len = ntohl(nla_get_be32(tb[NFCTH_PRIV_DATA_LEN]));
207 helper->flags |= NF_CT_HELPER_F_USERSPACE;
208 memcpy(&helper->tuple, tuple, sizeof(struct nf_conntrack_tuple));
209
210 helper->me = THIS_MODULE;
211 helper->help = nfnl_userspace_cthelper;
212 helper->from_nlattr = nfnl_cthelper_from_nlattr;
213 helper->to_nlattr = nfnl_cthelper_to_nlattr;
214
215 /* Default to queue number zero, this can be updated at any time. */
216 if (tb[NFCTH_QUEUE_NUM])
217 helper->queue_num = ntohl(nla_get_be32(tb[NFCTH_QUEUE_NUM]));
218
219 if (tb[NFCTH_STATUS]) {
220 int status = ntohl(nla_get_be32(tb[NFCTH_STATUS]));
221
222 switch(status) {
223 case NFCT_HELPER_STATUS_ENABLED:
224 helper->flags |= NF_CT_HELPER_F_CONFIGURED;
225 break;
226 case NFCT_HELPER_STATUS_DISABLED:
227 helper->flags &= ~NF_CT_HELPER_F_CONFIGURED;
228 break;
229 }
230 }
231
232 ret = nf_conntrack_helper_register(helper);
233 if (ret < 0)
234 goto err;
235
236 return 0;
237err:
238 kfree(helper);
239 return ret;
240}
241
242static int
243nfnl_cthelper_update(const struct nlattr * const tb[],
244 struct nf_conntrack_helper *helper)
245{
246 int ret;
247
248 if (tb[NFCTH_PRIV_DATA_LEN])
249 return -EBUSY;
250
251 if (tb[NFCTH_POLICY]) {
252 ret = nfnl_cthelper_parse_expect_policy(helper,
253 tb[NFCTH_POLICY]);
254 if (ret < 0)
255 return ret;
256 }
257 if (tb[NFCTH_QUEUE_NUM])
258 helper->queue_num = ntohl(nla_get_be32(tb[NFCTH_QUEUE_NUM]));
259
260 if (tb[NFCTH_STATUS]) {
261 int status = ntohl(nla_get_be32(tb[NFCTH_STATUS]));
262
263 switch(status) {
264 case NFCT_HELPER_STATUS_ENABLED:
265 helper->flags |= NF_CT_HELPER_F_CONFIGURED;
266 break;
267 case NFCT_HELPER_STATUS_DISABLED:
268 helper->flags &= ~NF_CT_HELPER_F_CONFIGURED;
269 break;
270 }
271 }
272 return 0;
273}
274
275static int
276nfnl_cthelper_new(struct sock *nfnl, struct sk_buff *skb,
277 const struct nlmsghdr *nlh, const struct nlattr * const tb[])
278{
279 const char *helper_name;
280 struct nf_conntrack_helper *cur, *helper = NULL;
281 struct nf_conntrack_tuple tuple;
282 struct hlist_node *n;
283 int ret = 0, i;
284
285 if (!tb[NFCTH_NAME] || !tb[NFCTH_TUPLE])
286 return -EINVAL;
287
288 helper_name = nla_data(tb[NFCTH_NAME]);
289
290 ret = nfnl_cthelper_parse_tuple(&tuple, tb[NFCTH_TUPLE]);
291 if (ret < 0)
292 return ret;
293
294 rcu_read_lock();
295 for (i = 0; i < nf_ct_helper_hsize && !helper; i++) {
296 hlist_for_each_entry_rcu(cur, n, &nf_ct_helper_hash[i], hnode) {
297
298 /* skip non-userspace conntrack helpers. */
299 if (!(cur->flags & NF_CT_HELPER_F_USERSPACE))
300 continue;
301
302 if (strncmp(cur->name, helper_name,
303 NF_CT_HELPER_NAME_LEN) != 0)
304 continue;
305
306 if ((tuple.src.l3num != cur->tuple.src.l3num ||
307 tuple.dst.protonum != cur->tuple.dst.protonum))
308 continue;
309
310 if (nlh->nlmsg_flags & NLM_F_EXCL) {
311 ret = -EEXIST;
312 goto err;
313 }
314 helper = cur;
315 break;
316 }
317 }
318 rcu_read_unlock();
319
320 if (helper == NULL)
321 ret = nfnl_cthelper_create(tb, &tuple);
322 else
323 ret = nfnl_cthelper_update(tb, helper);
324
325 return ret;
326err:
327 rcu_read_unlock();
328 return ret;
329}
330
331static int
332nfnl_cthelper_dump_tuple(struct sk_buff *skb,
333 struct nf_conntrack_helper *helper)
334{
335 struct nlattr *nest_parms;
336
337 nest_parms = nla_nest_start(skb, NFCTH_TUPLE | NLA_F_NESTED);
338 if (nest_parms == NULL)
339 goto nla_put_failure;
340
341 if (nla_put_be16(skb, NFCTH_TUPLE_L3PROTONUM,
342 htons(helper->tuple.src.l3num)))
343 goto nla_put_failure;
344
345 if (nla_put_u8(skb, NFCTH_TUPLE_L4PROTONUM, helper->tuple.dst.protonum))
346 goto nla_put_failure;
347
348 nla_nest_end(skb, nest_parms);
349 return 0;
350
351nla_put_failure:
352 return -1;
353}
354
355static int
356nfnl_cthelper_dump_policy(struct sk_buff *skb,
357 struct nf_conntrack_helper *helper)
358{
359 int i;
360 struct nlattr *nest_parms1, *nest_parms2;
361
362 nest_parms1 = nla_nest_start(skb, NFCTH_POLICY | NLA_F_NESTED);
363 if (nest_parms1 == NULL)
364 goto nla_put_failure;
365
366 if (nla_put_be32(skb, NFCTH_POLICY_SET_NUM,
367 htonl(helper->expect_class_max)))
368 goto nla_put_failure;
369
370 for (i=0; i<helper->expect_class_max; i++) {
371 nest_parms2 = nla_nest_start(skb,
372 (NFCTH_POLICY_SET+i) | NLA_F_NESTED);
373 if (nest_parms2 == NULL)
374 goto nla_put_failure;
375
376 if (nla_put_string(skb, NFCTH_POLICY_NAME,
377 helper->expect_policy[i].name))
378 goto nla_put_failure;
379
380 if (nla_put_be32(skb, NFCTH_POLICY_EXPECT_MAX,
381 htonl(helper->expect_policy[i].max_expected)))
382 goto nla_put_failure;
383
384 if (nla_put_be32(skb, NFCTH_POLICY_EXPECT_TIMEOUT,
385 htonl(helper->expect_policy[i].timeout)))
386 goto nla_put_failure;
387
388 nla_nest_end(skb, nest_parms2);
389 }
390 nla_nest_end(skb, nest_parms1);
391 return 0;
392
393nla_put_failure:
394 return -1;
395}
396
397static int
398nfnl_cthelper_fill_info(struct sk_buff *skb, u32 pid, u32 seq, u32 type,
399 int event, struct nf_conntrack_helper *helper)
400{
401 struct nlmsghdr *nlh;
402 struct nfgenmsg *nfmsg;
403 unsigned int flags = pid ? NLM_F_MULTI : 0;
404 int status;
405
406 event |= NFNL_SUBSYS_CTHELPER << 8;
407 nlh = nlmsg_put(skb, pid, seq, event, sizeof(*nfmsg), flags);
408 if (nlh == NULL)
409 goto nlmsg_failure;
410
411 nfmsg = nlmsg_data(nlh);
412 nfmsg->nfgen_family = AF_UNSPEC;
413 nfmsg->version = NFNETLINK_V0;
414 nfmsg->res_id = 0;
415
416 if (nla_put_string(skb, NFCTH_NAME, helper->name))
417 goto nla_put_failure;
418
419 if (nla_put_be32(skb, NFCTH_QUEUE_NUM, htonl(helper->queue_num)))
420 goto nla_put_failure;
421
422 if (nfnl_cthelper_dump_tuple(skb, helper) < 0)
423 goto nla_put_failure;
424
425 if (nfnl_cthelper_dump_policy(skb, helper) < 0)
426 goto nla_put_failure;
427
428 if (nla_put_be32(skb, NFCTH_PRIV_DATA_LEN, htonl(helper->data_len)))
429 goto nla_put_failure;
430
431 if (helper->flags & NF_CT_HELPER_F_CONFIGURED)
432 status = NFCT_HELPER_STATUS_ENABLED;
433 else
434 status = NFCT_HELPER_STATUS_DISABLED;
435
436 if (nla_put_be32(skb, NFCTH_STATUS, htonl(status)))
437 goto nla_put_failure;
438
439 nlmsg_end(skb, nlh);
440 return skb->len;
441
442nlmsg_failure:
443nla_put_failure:
444 nlmsg_cancel(skb, nlh);
445 return -1;
446}
447
448static int
449nfnl_cthelper_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
450{
451 struct nf_conntrack_helper *cur, *last;
452 struct hlist_node *n;
453
454 rcu_read_lock();
455 last = (struct nf_conntrack_helper *)cb->args[1];
456 for (; cb->args[0] < nf_ct_helper_hsize; cb->args[0]++) {
457restart:
458 hlist_for_each_entry_rcu(cur, n,
459 &nf_ct_helper_hash[cb->args[0]], hnode) {
460
461 /* skip non-userspace conntrack helpers. */
462 if (!(cur->flags & NF_CT_HELPER_F_USERSPACE))
463 continue;
464
465 if (cb->args[1]) {
466 if (cur != last)
467 continue;
468 cb->args[1] = 0;
469 }
470 if (nfnl_cthelper_fill_info(skb,
471 NETLINK_CB(cb->skb).pid,
472 cb->nlh->nlmsg_seq,
473 NFNL_MSG_TYPE(cb->nlh->nlmsg_type),
474 NFNL_MSG_CTHELPER_NEW, cur) < 0) {
475 cb->args[1] = (unsigned long)cur;
476 goto out;
477 }
478 }
479 }
480 if (cb->args[1]) {
481 cb->args[1] = 0;
482 goto restart;
483 }
484out:
485 rcu_read_unlock();
486 return skb->len;
487}
488
489static int
490nfnl_cthelper_get(struct sock *nfnl, struct sk_buff *skb,
491 const struct nlmsghdr *nlh, const struct nlattr * const tb[])
492{
493 int ret = -ENOENT, i;
494 struct nf_conntrack_helper *cur;
495 struct hlist_node *n;
496 struct sk_buff *skb2;
497 char *helper_name = NULL;
498 struct nf_conntrack_tuple tuple;
499 bool tuple_set = false;
500
501 if (nlh->nlmsg_flags & NLM_F_DUMP) {
502 struct netlink_dump_control c = {
503 .dump = nfnl_cthelper_dump_table,
504 };
505 return netlink_dump_start(nfnl, skb, nlh, &c);
506 }
507
508 if (tb[NFCTH_NAME])
509 helper_name = nla_data(tb[NFCTH_NAME]);
510
511 if (tb[NFCTH_TUPLE]) {
512 ret = nfnl_cthelper_parse_tuple(&tuple, tb[NFCTH_TUPLE]);
513 if (ret < 0)
514 return ret;
515
516 tuple_set = true;
517 }
518
519 for (i = 0; i < nf_ct_helper_hsize; i++) {
520 hlist_for_each_entry_rcu(cur, n, &nf_ct_helper_hash[i], hnode) {
521
522 /* skip non-userspace conntrack helpers. */
523 if (!(cur->flags & NF_CT_HELPER_F_USERSPACE))
524 continue;
525
526 if (helper_name && strncmp(cur->name, helper_name,
527 NF_CT_HELPER_NAME_LEN) != 0) {
528 continue;
529 }
530 if (tuple_set &&
531 (tuple.src.l3num != cur->tuple.src.l3num ||
532 tuple.dst.protonum != cur->tuple.dst.protonum))
533 continue;
534
535 skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
536 if (skb2 == NULL) {
537 ret = -ENOMEM;
538 break;
539 }
540
541 ret = nfnl_cthelper_fill_info(skb2, NETLINK_CB(skb).pid,
542 nlh->nlmsg_seq,
543 NFNL_MSG_TYPE(nlh->nlmsg_type),
544 NFNL_MSG_CTHELPER_NEW, cur);
545 if (ret <= 0) {
546 kfree_skb(skb2);
547 break;
548 }
549
550 ret = netlink_unicast(nfnl, skb2, NETLINK_CB(skb).pid,
551 MSG_DONTWAIT);
552 if (ret > 0)
553 ret = 0;
554
555 /* this avoids a loop in nfnetlink. */
556 return ret == -EAGAIN ? -ENOBUFS : ret;
557 }
558 }
559 return ret;
560}
561
562static int
563nfnl_cthelper_del(struct sock *nfnl, struct sk_buff *skb,
564 const struct nlmsghdr *nlh, const struct nlattr * const tb[])
565{
566 char *helper_name = NULL;
567 struct nf_conntrack_helper *cur;
568 struct hlist_node *n, *tmp;
569 struct nf_conntrack_tuple tuple;
570 bool tuple_set = false, found = false;
571 int i, j = 0, ret;
572
573 if (tb[NFCTH_NAME])
574 helper_name = nla_data(tb[NFCTH_NAME]);
575
576 if (tb[NFCTH_TUPLE]) {
577 ret = nfnl_cthelper_parse_tuple(&tuple, tb[NFCTH_TUPLE]);
578 if (ret < 0)
579 return ret;
580
581 tuple_set = true;
582 }
583
584 for (i = 0; i < nf_ct_helper_hsize; i++) {
585 hlist_for_each_entry_safe(cur, n, tmp, &nf_ct_helper_hash[i],
586 hnode) {
587 /* skip non-userspace conntrack helpers. */
588 if (!(cur->flags & NF_CT_HELPER_F_USERSPACE))
589 continue;
590
591 j++;
592
593 if (helper_name && strncmp(cur->name, helper_name,
594 NF_CT_HELPER_NAME_LEN) != 0) {
595 continue;
596 }
597 if (tuple_set &&
598 (tuple.src.l3num != cur->tuple.src.l3num ||
599 tuple.dst.protonum != cur->tuple.dst.protonum))
600 continue;
601
602 found = true;
603 nf_conntrack_helper_unregister(cur);
604 }
605 }
606 /* Make sure we return success if we flush and there is no helpers */
607 return (found || j == 0) ? 0 : -ENOENT;
608}
609
610static const struct nla_policy nfnl_cthelper_policy[NFCTH_MAX+1] = {
611 [NFCTH_NAME] = { .type = NLA_NUL_STRING,
612 .len = NF_CT_HELPER_NAME_LEN-1 },
613 [NFCTH_QUEUE_NUM] = { .type = NLA_U32, },
614};
615
616static const struct nfnl_callback nfnl_cthelper_cb[NFNL_MSG_CTHELPER_MAX] = {
617 [NFNL_MSG_CTHELPER_NEW] = { .call = nfnl_cthelper_new,
618 .attr_count = NFCTH_MAX,
619 .policy = nfnl_cthelper_policy },
620 [NFNL_MSG_CTHELPER_GET] = { .call = nfnl_cthelper_get,
621 .attr_count = NFCTH_MAX,
622 .policy = nfnl_cthelper_policy },
623 [NFNL_MSG_CTHELPER_DEL] = { .call = nfnl_cthelper_del,
624 .attr_count = NFCTH_MAX,
625 .policy = nfnl_cthelper_policy },
626};
627
628static const struct nfnetlink_subsystem nfnl_cthelper_subsys = {
629 .name = "cthelper",
630 .subsys_id = NFNL_SUBSYS_CTHELPER,
631 .cb_count = NFNL_MSG_CTHELPER_MAX,
632 .cb = nfnl_cthelper_cb,
633};
634
635MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTHELPER);
636
637static int __init nfnl_cthelper_init(void)
638{
639 int ret;
640
641 ret = nfnetlink_subsys_register(&nfnl_cthelper_subsys);
642 if (ret < 0) {
643 pr_err("nfnl_cthelper: cannot register with nfnetlink.\n");
644 goto err_out;
645 }
646 return 0;
647err_out:
648 return ret;
649}
650
651static void __exit nfnl_cthelper_exit(void)
652{
653 struct nf_conntrack_helper *cur;
654 struct hlist_node *n, *tmp;
655 int i;
656
657 nfnetlink_subsys_unregister(&nfnl_cthelper_subsys);
658
659 for (i=0; i<nf_ct_helper_hsize; i++) {
660 hlist_for_each_entry_safe(cur, n, tmp, &nf_ct_helper_hash[i],
661 hnode) {
662 /* skip non-userspace conntrack helpers. */
663 if (!(cur->flags & NF_CT_HELPER_F_USERSPACE))
664 continue;
665
666 nf_conntrack_helper_unregister(cur);
667 }
668 }
669}
670
671module_init(nfnl_cthelper_init);
672module_exit(nfnl_cthelper_exit);
diff --git a/net/netfilter/nfnetlink_cttimeout.c b/net/netfilter/nfnetlink_cttimeout.c
index 3e655288d1d..cdecbc8fe96 100644
--- a/net/netfilter/nfnetlink_cttimeout.c
+++ b/net/netfilter/nfnetlink_cttimeout.c
@@ -49,8 +49,9 @@ static const struct nla_policy cttimeout_nla_policy[CTA_TIMEOUT_MAX+1] = {
49 49
50static int 50static int
51ctnl_timeout_parse_policy(struct ctnl_timeout *timeout, 51ctnl_timeout_parse_policy(struct ctnl_timeout *timeout,
52 struct nf_conntrack_l4proto *l4proto, 52 struct nf_conntrack_l4proto *l4proto,
53 const struct nlattr *attr) 53 struct net *net,
54 const struct nlattr *attr)
54{ 55{
55 int ret = 0; 56 int ret = 0;
56 57
@@ -60,7 +61,8 @@ ctnl_timeout_parse_policy(struct ctnl_timeout *timeout,
60 nla_parse_nested(tb, l4proto->ctnl_timeout.nlattr_max, 61 nla_parse_nested(tb, l4proto->ctnl_timeout.nlattr_max,
61 attr, l4proto->ctnl_timeout.nla_policy); 62 attr, l4proto->ctnl_timeout.nla_policy);
62 63
63 ret = l4proto->ctnl_timeout.nlattr_to_obj(tb, &timeout->data); 64 ret = l4proto->ctnl_timeout.nlattr_to_obj(tb, net,
65 &timeout->data);
64 } 66 }
65 return ret; 67 return ret;
66} 68}
@@ -74,6 +76,7 @@ cttimeout_new_timeout(struct sock *ctnl, struct sk_buff *skb,
74 __u8 l4num; 76 __u8 l4num;
75 struct nf_conntrack_l4proto *l4proto; 77 struct nf_conntrack_l4proto *l4proto;
76 struct ctnl_timeout *timeout, *matching = NULL; 78 struct ctnl_timeout *timeout, *matching = NULL;
79 struct net *net = sock_net(skb->sk);
77 char *name; 80 char *name;
78 int ret; 81 int ret;
79 82
@@ -117,7 +120,7 @@ cttimeout_new_timeout(struct sock *ctnl, struct sk_buff *skb,
117 goto err_proto_put; 120 goto err_proto_put;
118 } 121 }
119 122
120 ret = ctnl_timeout_parse_policy(matching, l4proto, 123 ret = ctnl_timeout_parse_policy(matching, l4proto, net,
121 cda[CTA_TIMEOUT_DATA]); 124 cda[CTA_TIMEOUT_DATA]);
122 return ret; 125 return ret;
123 } 126 }
@@ -132,7 +135,7 @@ cttimeout_new_timeout(struct sock *ctnl, struct sk_buff *skb,
132 goto err_proto_put; 135 goto err_proto_put;
133 } 136 }
134 137
135 ret = ctnl_timeout_parse_policy(timeout, l4proto, 138 ret = ctnl_timeout_parse_policy(timeout, l4proto, net,
136 cda[CTA_TIMEOUT_DATA]); 139 cda[CTA_TIMEOUT_DATA]);
137 if (ret < 0) 140 if (ret < 0)
138 goto err; 141 goto err;
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index 3c3cfc0cc9b..169ab59ed9d 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -326,18 +326,20 @@ __nfulnl_send(struct nfulnl_instance *inst)
326{ 326{
327 int status = -1; 327 int status = -1;
328 328
329 if (inst->qlen > 1) 329 if (inst->qlen > 1) {
330 NLMSG_PUT(inst->skb, 0, 0, 330 struct nlmsghdr *nlh = nlmsg_put(inst->skb, 0, 0,
331 NLMSG_DONE, 331 NLMSG_DONE,
332 sizeof(struct nfgenmsg)); 332 sizeof(struct nfgenmsg),
333 333 0);
334 if (!nlh)
335 goto out;
336 }
334 status = nfnetlink_unicast(inst->skb, &init_net, inst->peer_pid, 337 status = nfnetlink_unicast(inst->skb, &init_net, inst->peer_pid,
335 MSG_DONTWAIT); 338 MSG_DONTWAIT);
336 339
337 inst->qlen = 0; 340 inst->qlen = 0;
338 inst->skb = NULL; 341 inst->skb = NULL;
339 342out:
340nlmsg_failure:
341 return status; 343 return status;
342} 344}
343 345
@@ -380,10 +382,12 @@ __build_packet_message(struct nfulnl_instance *inst,
380 struct nfgenmsg *nfmsg; 382 struct nfgenmsg *nfmsg;
381 sk_buff_data_t old_tail = inst->skb->tail; 383 sk_buff_data_t old_tail = inst->skb->tail;
382 384
383 nlh = NLMSG_PUT(inst->skb, 0, 0, 385 nlh = nlmsg_put(inst->skb, 0, 0,
384 NFNL_SUBSYS_ULOG << 8 | NFULNL_MSG_PACKET, 386 NFNL_SUBSYS_ULOG << 8 | NFULNL_MSG_PACKET,
385 sizeof(struct nfgenmsg)); 387 sizeof(struct nfgenmsg), 0);
386 nfmsg = NLMSG_DATA(nlh); 388 if (!nlh)
389 return -1;
390 nfmsg = nlmsg_data(nlh);
387 nfmsg->nfgen_family = pf; 391 nfmsg->nfgen_family = pf;
388 nfmsg->version = NFNETLINK_V0; 392 nfmsg->version = NFNETLINK_V0;
389 nfmsg->res_id = htons(inst->group_num); 393 nfmsg->res_id = htons(inst->group_num);
@@ -526,7 +530,7 @@ __build_packet_message(struct nfulnl_instance *inst,
526 530
527 if (skb_tailroom(inst->skb) < nla_total_size(data_len)) { 531 if (skb_tailroom(inst->skb) < nla_total_size(data_len)) {
528 printk(KERN_WARNING "nfnetlink_log: no tailroom!\n"); 532 printk(KERN_WARNING "nfnetlink_log: no tailroom!\n");
529 goto nlmsg_failure; 533 return -1;
530 } 534 }
531 535
532 nla = (struct nlattr *)skb_put(inst->skb, nla_total_size(data_len)); 536 nla = (struct nlattr *)skb_put(inst->skb, nla_total_size(data_len));
@@ -540,7 +544,6 @@ __build_packet_message(struct nfulnl_instance *inst,
540 nlh->nlmsg_len = inst->skb->tail - old_tail; 544 nlh->nlmsg_len = inst->skb->tail - old_tail;
541 return 0; 545 return 0;
542 546
543nlmsg_failure:
544nla_put_failure: 547nla_put_failure:
545 PRINTR(KERN_ERR "nfnetlink_log: error creating log nlmsg\n"); 548 PRINTR(KERN_ERR "nfnetlink_log: error creating log nlmsg\n");
546 return -1; 549 return -1;
@@ -745,7 +748,7 @@ nfulnl_recv_config(struct sock *ctnl, struct sk_buff *skb,
745 const struct nlmsghdr *nlh, 748 const struct nlmsghdr *nlh,
746 const struct nlattr * const nfula[]) 749 const struct nlattr * const nfula[])
747{ 750{
748 struct nfgenmsg *nfmsg = NLMSG_DATA(nlh); 751 struct nfgenmsg *nfmsg = nlmsg_data(nlh);
749 u_int16_t group_num = ntohs(nfmsg->res_id); 752 u_int16_t group_num = ntohs(nfmsg->res_id);
750 struct nfulnl_instance *inst; 753 struct nfulnl_instance *inst;
751 struct nfulnl_msg_config_cmd *cmd = NULL; 754 struct nfulnl_msg_config_cmd *cmd = NULL;
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue_core.c
index 4162437b836..c0496a55ad0 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue_core.c
@@ -30,6 +30,7 @@
30#include <linux/list.h> 30#include <linux/list.h>
31#include <net/sock.h> 31#include <net/sock.h>
32#include <net/netfilter/nf_queue.h> 32#include <net/netfilter/nf_queue.h>
33#include <net/netfilter/nfnetlink_queue.h>
33 34
34#include <linux/atomic.h> 35#include <linux/atomic.h>
35 36
@@ -52,6 +53,7 @@ struct nfqnl_instance {
52 53
53 u_int16_t queue_num; /* number of this queue */ 54 u_int16_t queue_num; /* number of this queue */
54 u_int8_t copy_mode; 55 u_int8_t copy_mode;
56 u_int32_t flags; /* Set using NFQA_CFG_FLAGS */
55/* 57/*
56 * Following fields are dirtied for each queued packet, 58 * Following fields are dirtied for each queued packet,
57 * keep them in same cache line if possible. 59 * keep them in same cache line if possible.
@@ -232,6 +234,8 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,
232 struct sk_buff *entskb = entry->skb; 234 struct sk_buff *entskb = entry->skb;
233 struct net_device *indev; 235 struct net_device *indev;
234 struct net_device *outdev; 236 struct net_device *outdev;
237 struct nf_conn *ct = NULL;
238 enum ip_conntrack_info uninitialized_var(ctinfo);
235 239
236 size = NLMSG_SPACE(sizeof(struct nfgenmsg)) 240 size = NLMSG_SPACE(sizeof(struct nfgenmsg))
237 + nla_total_size(sizeof(struct nfqnl_msg_packet_hdr)) 241 + nla_total_size(sizeof(struct nfqnl_msg_packet_hdr))
@@ -265,16 +269,22 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,
265 break; 269 break;
266 } 270 }
267 271
272 if (queue->flags & NFQA_CFG_F_CONNTRACK)
273 ct = nfqnl_ct_get(entskb, &size, &ctinfo);
268 274
269 skb = alloc_skb(size, GFP_ATOMIC); 275 skb = alloc_skb(size, GFP_ATOMIC);
270 if (!skb) 276 if (!skb)
271 goto nlmsg_failure; 277 return NULL;
272 278
273 old_tail = skb->tail; 279 old_tail = skb->tail;
274 nlh = NLMSG_PUT(skb, 0, 0, 280 nlh = nlmsg_put(skb, 0, 0,
275 NFNL_SUBSYS_QUEUE << 8 | NFQNL_MSG_PACKET, 281 NFNL_SUBSYS_QUEUE << 8 | NFQNL_MSG_PACKET,
276 sizeof(struct nfgenmsg)); 282 sizeof(struct nfgenmsg), 0);
277 nfmsg = NLMSG_DATA(nlh); 283 if (!nlh) {
284 kfree_skb(skb);
285 return NULL;
286 }
287 nfmsg = nlmsg_data(nlh);
278 nfmsg->nfgen_family = entry->pf; 288 nfmsg->nfgen_family = entry->pf;
279 nfmsg->version = NFNETLINK_V0; 289 nfmsg->version = NFNETLINK_V0;
280 nfmsg->res_id = htons(queue->queue_num); 290 nfmsg->res_id = htons(queue->queue_num);
@@ -377,7 +387,8 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,
377 387
378 if (skb_tailroom(skb) < nla_total_size(data_len)) { 388 if (skb_tailroom(skb) < nla_total_size(data_len)) {
379 printk(KERN_WARNING "nf_queue: no tailroom!\n"); 389 printk(KERN_WARNING "nf_queue: no tailroom!\n");
380 goto nlmsg_failure; 390 kfree_skb(skb);
391 return NULL;
381 } 392 }
382 393
383 nla = (struct nlattr *)skb_put(skb, nla_total_size(data_len)); 394 nla = (struct nlattr *)skb_put(skb, nla_total_size(data_len));
@@ -388,10 +399,12 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,
388 BUG(); 399 BUG();
389 } 400 }
390 401
402 if (ct && nfqnl_ct_put(skb, ct, ctinfo) < 0)
403 goto nla_put_failure;
404
391 nlh->nlmsg_len = skb->tail - old_tail; 405 nlh->nlmsg_len = skb->tail - old_tail;
392 return skb; 406 return skb;
393 407
394nlmsg_failure:
395nla_put_failure: 408nla_put_failure:
396 if (skb) 409 if (skb)
397 kfree_skb(skb); 410 kfree_skb(skb);
@@ -406,6 +419,7 @@ nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
406 struct nfqnl_instance *queue; 419 struct nfqnl_instance *queue;
407 int err = -ENOBUFS; 420 int err = -ENOBUFS;
408 __be32 *packet_id_ptr; 421 __be32 *packet_id_ptr;
422 int failopen = 0;
409 423
410 /* rcu_read_lock()ed by nf_hook_slow() */ 424 /* rcu_read_lock()ed by nf_hook_slow() */
411 queue = instance_lookup(queuenum); 425 queue = instance_lookup(queuenum);
@@ -431,9 +445,14 @@ nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
431 goto err_out_free_nskb; 445 goto err_out_free_nskb;
432 } 446 }
433 if (queue->queue_total >= queue->queue_maxlen) { 447 if (queue->queue_total >= queue->queue_maxlen) {
434 queue->queue_dropped++; 448 if (queue->flags & NFQA_CFG_F_FAIL_OPEN) {
435 net_warn_ratelimited("nf_queue: full at %d entries, dropping packets(s)\n", 449 failopen = 1;
436 queue->queue_total); 450 err = 0;
451 } else {
452 queue->queue_dropped++;
453 net_warn_ratelimited("nf_queue: full at %d entries, dropping packets(s)\n",
454 queue->queue_total);
455 }
437 goto err_out_free_nskb; 456 goto err_out_free_nskb;
438 } 457 }
439 entry->id = ++queue->id_sequence; 458 entry->id = ++queue->id_sequence;
@@ -455,17 +474,17 @@ err_out_free_nskb:
455 kfree_skb(nskb); 474 kfree_skb(nskb);
456err_out_unlock: 475err_out_unlock:
457 spin_unlock_bh(&queue->lock); 476 spin_unlock_bh(&queue->lock);
477 if (failopen)
478 nf_reinject(entry, NF_ACCEPT);
458err_out: 479err_out:
459 return err; 480 return err;
460} 481}
461 482
462static int 483static int
463nfqnl_mangle(void *data, int data_len, struct nf_queue_entry *e) 484nfqnl_mangle(void *data, int data_len, struct nf_queue_entry *e, int diff)
464{ 485{
465 struct sk_buff *nskb; 486 struct sk_buff *nskb;
466 int diff;
467 487
468 diff = data_len - e->skb->len;
469 if (diff < 0) { 488 if (diff < 0) {
470 if (pskb_trim(e->skb, data_len)) 489 if (pskb_trim(e->skb, data_len))
471 return -ENOMEM; 490 return -ENOMEM;
@@ -623,6 +642,7 @@ static const struct nla_policy nfqa_verdict_policy[NFQA_MAX+1] = {
623 [NFQA_VERDICT_HDR] = { .len = sizeof(struct nfqnl_msg_verdict_hdr) }, 642 [NFQA_VERDICT_HDR] = { .len = sizeof(struct nfqnl_msg_verdict_hdr) },
624 [NFQA_MARK] = { .type = NLA_U32 }, 643 [NFQA_MARK] = { .type = NLA_U32 },
625 [NFQA_PAYLOAD] = { .type = NLA_UNSPEC }, 644 [NFQA_PAYLOAD] = { .type = NLA_UNSPEC },
645 [NFQA_CT] = { .type = NLA_UNSPEC },
626}; 646};
627 647
628static const struct nla_policy nfqa_verdict_batch_policy[NFQA_MAX+1] = { 648static const struct nla_policy nfqa_verdict_batch_policy[NFQA_MAX+1] = {
@@ -670,7 +690,7 @@ nfqnl_recv_verdict_batch(struct sock *ctnl, struct sk_buff *skb,
670 const struct nlmsghdr *nlh, 690 const struct nlmsghdr *nlh,
671 const struct nlattr * const nfqa[]) 691 const struct nlattr * const nfqa[])
672{ 692{
673 struct nfgenmsg *nfmsg = NLMSG_DATA(nlh); 693 struct nfgenmsg *nfmsg = nlmsg_data(nlh);
674 struct nf_queue_entry *entry, *tmp; 694 struct nf_queue_entry *entry, *tmp;
675 unsigned int verdict, maxid; 695 unsigned int verdict, maxid;
676 struct nfqnl_msg_verdict_hdr *vhdr; 696 struct nfqnl_msg_verdict_hdr *vhdr;
@@ -716,13 +736,15 @@ nfqnl_recv_verdict(struct sock *ctnl, struct sk_buff *skb,
716 const struct nlmsghdr *nlh, 736 const struct nlmsghdr *nlh,
717 const struct nlattr * const nfqa[]) 737 const struct nlattr * const nfqa[])
718{ 738{
719 struct nfgenmsg *nfmsg = NLMSG_DATA(nlh); 739 struct nfgenmsg *nfmsg = nlmsg_data(nlh);
720 u_int16_t queue_num = ntohs(nfmsg->res_id); 740 u_int16_t queue_num = ntohs(nfmsg->res_id);
721 741
722 struct nfqnl_msg_verdict_hdr *vhdr; 742 struct nfqnl_msg_verdict_hdr *vhdr;
723 struct nfqnl_instance *queue; 743 struct nfqnl_instance *queue;
724 unsigned int verdict; 744 unsigned int verdict;
725 struct nf_queue_entry *entry; 745 struct nf_queue_entry *entry;
746 enum ip_conntrack_info uninitialized_var(ctinfo);
747 struct nf_conn *ct = NULL;
726 748
727 queue = instance_lookup(queue_num); 749 queue = instance_lookup(queue_num);
728 if (!queue) 750 if (!queue)
@@ -741,11 +763,22 @@ nfqnl_recv_verdict(struct sock *ctnl, struct sk_buff *skb,
741 if (entry == NULL) 763 if (entry == NULL)
742 return -ENOENT; 764 return -ENOENT;
743 765
766 rcu_read_lock();
767 if (nfqa[NFQA_CT] && (queue->flags & NFQA_CFG_F_CONNTRACK))
768 ct = nfqnl_ct_parse(entry->skb, nfqa[NFQA_CT], &ctinfo);
769
744 if (nfqa[NFQA_PAYLOAD]) { 770 if (nfqa[NFQA_PAYLOAD]) {
771 u16 payload_len = nla_len(nfqa[NFQA_PAYLOAD]);
772 int diff = payload_len - entry->skb->len;
773
745 if (nfqnl_mangle(nla_data(nfqa[NFQA_PAYLOAD]), 774 if (nfqnl_mangle(nla_data(nfqa[NFQA_PAYLOAD]),
746 nla_len(nfqa[NFQA_PAYLOAD]), entry) < 0) 775 payload_len, entry, diff) < 0)
747 verdict = NF_DROP; 776 verdict = NF_DROP;
777
778 if (ct)
779 nfqnl_ct_seq_adjust(skb, ct, ctinfo, diff);
748 } 780 }
781 rcu_read_unlock();
749 782
750 if (nfqa[NFQA_MARK]) 783 if (nfqa[NFQA_MARK])
751 entry->skb->mark = ntohl(nla_get_be32(nfqa[NFQA_MARK])); 784 entry->skb->mark = ntohl(nla_get_be32(nfqa[NFQA_MARK]));
@@ -777,7 +810,7 @@ nfqnl_recv_config(struct sock *ctnl, struct sk_buff *skb,
777 const struct nlmsghdr *nlh, 810 const struct nlmsghdr *nlh,
778 const struct nlattr * const nfqa[]) 811 const struct nlattr * const nfqa[])
779{ 812{
780 struct nfgenmsg *nfmsg = NLMSG_DATA(nlh); 813 struct nfgenmsg *nfmsg = nlmsg_data(nlh);
781 u_int16_t queue_num = ntohs(nfmsg->res_id); 814 u_int16_t queue_num = ntohs(nfmsg->res_id);
782 struct nfqnl_instance *queue; 815 struct nfqnl_instance *queue;
783 struct nfqnl_msg_config_cmd *cmd = NULL; 816 struct nfqnl_msg_config_cmd *cmd = NULL;
@@ -858,6 +891,36 @@ nfqnl_recv_config(struct sock *ctnl, struct sk_buff *skb,
858 spin_unlock_bh(&queue->lock); 891 spin_unlock_bh(&queue->lock);
859 } 892 }
860 893
894 if (nfqa[NFQA_CFG_FLAGS]) {
895 __u32 flags, mask;
896
897 if (!queue) {
898 ret = -ENODEV;
899 goto err_out_unlock;
900 }
901
902 if (!nfqa[NFQA_CFG_MASK]) {
903 /* A mask is needed to specify which flags are being
904 * changed.
905 */
906 ret = -EINVAL;
907 goto err_out_unlock;
908 }
909
910 flags = ntohl(nla_get_be32(nfqa[NFQA_CFG_FLAGS]));
911 mask = ntohl(nla_get_be32(nfqa[NFQA_CFG_MASK]));
912
913 if (flags >= NFQA_CFG_F_MAX) {
914 ret = -EOPNOTSUPP;
915 goto err_out_unlock;
916 }
917
918 spin_lock_bh(&queue->lock);
919 queue->flags &= ~mask;
920 queue->flags |= flags & mask;
921 spin_unlock_bh(&queue->lock);
922 }
923
861err_out_unlock: 924err_out_unlock:
862 rcu_read_unlock(); 925 rcu_read_unlock();
863 return ret; 926 return ret;
diff --git a/net/netfilter/nfnetlink_queue_ct.c b/net/netfilter/nfnetlink_queue_ct.c
new file mode 100644
index 00000000000..ab61d66bc0b
--- /dev/null
+++ b/net/netfilter/nfnetlink_queue_ct.c
@@ -0,0 +1,98 @@
1/*
2 * (C) 2012 by Pablo Neira Ayuso <pablo@netfilter.org>
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 */
9
10#include <linux/skbuff.h>
11#include <linux/netfilter.h>
12#include <linux/netfilter/nfnetlink.h>
13#include <linux/netfilter/nfnetlink_queue.h>
14#include <net/netfilter/nf_conntrack.h>
15#include <net/netfilter/nfnetlink_queue.h>
16
17struct nf_conn *nfqnl_ct_get(struct sk_buff *entskb, size_t *size,
18 enum ip_conntrack_info *ctinfo)
19{
20 struct nfq_ct_hook *nfq_ct;
21 struct nf_conn *ct;
22
23 /* rcu_read_lock()ed by __nf_queue already. */
24 nfq_ct = rcu_dereference(nfq_ct_hook);
25 if (nfq_ct == NULL)
26 return NULL;
27
28 ct = nf_ct_get(entskb, ctinfo);
29 if (ct) {
30 if (!nf_ct_is_untracked(ct))
31 *size += nfq_ct->build_size(ct);
32 else
33 ct = NULL;
34 }
35 return ct;
36}
37
38struct nf_conn *
39nfqnl_ct_parse(const struct sk_buff *skb, const struct nlattr *attr,
40 enum ip_conntrack_info *ctinfo)
41{
42 struct nfq_ct_hook *nfq_ct;
43 struct nf_conn *ct;
44
45 /* rcu_read_lock()ed by __nf_queue already. */
46 nfq_ct = rcu_dereference(nfq_ct_hook);
47 if (nfq_ct == NULL)
48 return NULL;
49
50 ct = nf_ct_get(skb, ctinfo);
51 if (ct && !nf_ct_is_untracked(ct))
52 nfq_ct->parse(attr, ct);
53
54 return ct;
55}
56
57int nfqnl_ct_put(struct sk_buff *skb, struct nf_conn *ct,
58 enum ip_conntrack_info ctinfo)
59{
60 struct nfq_ct_hook *nfq_ct;
61 struct nlattr *nest_parms;
62 u_int32_t tmp;
63
64 nfq_ct = rcu_dereference(nfq_ct_hook);
65 if (nfq_ct == NULL)
66 return 0;
67
68 nest_parms = nla_nest_start(skb, NFQA_CT | NLA_F_NESTED);
69 if (!nest_parms)
70 goto nla_put_failure;
71
72 if (nfq_ct->build(skb, ct) < 0)
73 goto nla_put_failure;
74
75 nla_nest_end(skb, nest_parms);
76
77 tmp = ctinfo;
78 if (nla_put_be32(skb, NFQA_CT_INFO, htonl(tmp)))
79 goto nla_put_failure;
80
81 return 0;
82
83nla_put_failure:
84 return -1;
85}
86
87void nfqnl_ct_seq_adjust(struct sk_buff *skb, struct nf_conn *ct,
88 enum ip_conntrack_info ctinfo, int diff)
89{
90 struct nfq_ct_nat_hook *nfq_nat_ct;
91
92 nfq_nat_ct = rcu_dereference(nfq_ct_nat_hook);
93 if (nfq_nat_ct == NULL)
94 return;
95
96 if ((ct->status & IPS_NAT_MASK) && diff)
97 nfq_nat_ct->seq_adjust(skb, ct, ctinfo, diff);
98}
diff --git a/net/netfilter/xt_CT.c b/net/netfilter/xt_CT.c
index a51de9b052b..116018560c6 100644
--- a/net/netfilter/xt_CT.c
+++ b/net/netfilter/xt_CT.c
@@ -112,6 +112,8 @@ static int xt_ct_tg_check_v0(const struct xt_tgchk_param *par)
112 goto err3; 112 goto err3;
113 113
114 if (info->helper[0]) { 114 if (info->helper[0]) {
115 struct nf_conntrack_helper *helper;
116
115 ret = -ENOENT; 117 ret = -ENOENT;
116 proto = xt_ct_find_proto(par); 118 proto = xt_ct_find_proto(par);
117 if (!proto) { 119 if (!proto) {
@@ -120,19 +122,21 @@ static int xt_ct_tg_check_v0(const struct xt_tgchk_param *par)
120 goto err3; 122 goto err3;
121 } 123 }
122 124
123 ret = -ENOMEM;
124 help = nf_ct_helper_ext_add(ct, GFP_KERNEL);
125 if (help == NULL)
126 goto err3;
127
128 ret = -ENOENT; 125 ret = -ENOENT;
129 help->helper = nf_conntrack_helper_try_module_get(info->helper, 126 helper = nf_conntrack_helper_try_module_get(info->helper,
130 par->family, 127 par->family,
131 proto); 128 proto);
132 if (help->helper == NULL) { 129 if (helper == NULL) {
133 pr_info("No such helper \"%s\"\n", info->helper); 130 pr_info("No such helper \"%s\"\n", info->helper);
134 goto err3; 131 goto err3;
135 } 132 }
133
134 ret = -ENOMEM;
135 help = nf_ct_helper_ext_add(ct, helper, GFP_KERNEL);
136 if (help == NULL)
137 goto err3;
138
139 help->helper = helper;
136 } 140 }
137 141
138 __set_bit(IPS_TEMPLATE_BIT, &ct->status); 142 __set_bit(IPS_TEMPLATE_BIT, &ct->status);
@@ -202,6 +206,8 @@ static int xt_ct_tg_check_v1(const struct xt_tgchk_param *par)
202 goto err3; 206 goto err3;
203 207
204 if (info->helper[0]) { 208 if (info->helper[0]) {
209 struct nf_conntrack_helper *helper;
210
205 ret = -ENOENT; 211 ret = -ENOENT;
206 proto = xt_ct_find_proto(par); 212 proto = xt_ct_find_proto(par);
207 if (!proto) { 213 if (!proto) {
@@ -210,19 +216,21 @@ static int xt_ct_tg_check_v1(const struct xt_tgchk_param *par)
210 goto err3; 216 goto err3;
211 } 217 }
212 218
213 ret = -ENOMEM;
214 help = nf_ct_helper_ext_add(ct, GFP_KERNEL);
215 if (help == NULL)
216 goto err3;
217
218 ret = -ENOENT; 219 ret = -ENOENT;
219 help->helper = nf_conntrack_helper_try_module_get(info->helper, 220 helper = nf_conntrack_helper_try_module_get(info->helper,
220 par->family, 221 par->family,
221 proto); 222 proto);
222 if (help->helper == NULL) { 223 if (helper == NULL) {
223 pr_info("No such helper \"%s\"\n", info->helper); 224 pr_info("No such helper \"%s\"\n", info->helper);
224 goto err3; 225 goto err3;
225 } 226 }
227
228 ret = -ENOMEM;
229 help = nf_ct_helper_ext_add(ct, helper, GFP_KERNEL);
230 if (help == NULL)
231 goto err3;
232
233 help->helper = helper;
226 } 234 }
227 235
228#ifdef CONFIG_NF_CONNTRACK_TIMEOUT 236#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
diff --git a/net/netfilter/xt_NFQUEUE.c b/net/netfilter/xt_NFQUEUE.c
index 95237c89607..7babe7d6871 100644
--- a/net/netfilter/xt_NFQUEUE.c
+++ b/net/netfilter/xt_NFQUEUE.c
@@ -41,26 +41,36 @@ nfqueue_tg(struct sk_buff *skb, const struct xt_action_param *par)
41static u32 hash_v4(const struct sk_buff *skb) 41static u32 hash_v4(const struct sk_buff *skb)
42{ 42{
43 const struct iphdr *iph = ip_hdr(skb); 43 const struct iphdr *iph = ip_hdr(skb);
44 __be32 ipaddr;
45 44
46 /* packets in either direction go into same queue */ 45 /* packets in either direction go into same queue */
47 ipaddr = iph->saddr ^ iph->daddr; 46 if (iph->saddr < iph->daddr)
47 return jhash_3words((__force u32)iph->saddr,
48 (__force u32)iph->daddr, iph->protocol, jhash_initval);
48 49
49 return jhash_2words((__force u32)ipaddr, iph->protocol, jhash_initval); 50 return jhash_3words((__force u32)iph->daddr,
51 (__force u32)iph->saddr, iph->protocol, jhash_initval);
50} 52}
51 53
52#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES) 54#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
53static u32 hash_v6(const struct sk_buff *skb) 55static u32 hash_v6(const struct sk_buff *skb)
54{ 56{
55 const struct ipv6hdr *ip6h = ipv6_hdr(skb); 57 const struct ipv6hdr *ip6h = ipv6_hdr(skb);
56 __be32 addr[4]; 58 u32 a, b, c;
59
60 if (ip6h->saddr.s6_addr32[3] < ip6h->daddr.s6_addr32[3]) {
61 a = (__force u32) ip6h->saddr.s6_addr32[3];
62 b = (__force u32) ip6h->daddr.s6_addr32[3];
63 } else {
64 b = (__force u32) ip6h->saddr.s6_addr32[3];
65 a = (__force u32) ip6h->daddr.s6_addr32[3];
66 }
57 67
58 addr[0] = ip6h->saddr.s6_addr32[0] ^ ip6h->daddr.s6_addr32[0]; 68 if (ip6h->saddr.s6_addr32[1] < ip6h->daddr.s6_addr32[1])
59 addr[1] = ip6h->saddr.s6_addr32[1] ^ ip6h->daddr.s6_addr32[1]; 69 c = (__force u32) ip6h->saddr.s6_addr32[1];
60 addr[2] = ip6h->saddr.s6_addr32[2] ^ ip6h->daddr.s6_addr32[2]; 70 else
61 addr[3] = ip6h->saddr.s6_addr32[3] ^ ip6h->daddr.s6_addr32[3]; 71 c = (__force u32) ip6h->daddr.s6_addr32[1];
62 72
63 return jhash2((__force u32 *)addr, ARRAY_SIZE(addr), jhash_initval); 73 return jhash_3words(a, b, c, jhash_initval);
64} 74}
65#endif 75#endif
66 76
diff --git a/net/netfilter/xt_TPROXY.c b/net/netfilter/xt_TPROXY.c
index 146033a86de..d7f195388f6 100644
--- a/net/netfilter/xt_TPROXY.c
+++ b/net/netfilter/xt_TPROXY.c
@@ -69,7 +69,7 @@ tproxy_laddr4(struct sk_buff *skb, __be32 user_laddr, __be32 daddr)
69} 69}
70 70
71/** 71/**
72 * tproxy_handle_time_wait4() - handle IPv4 TCP TIME_WAIT reopen redirections 72 * tproxy_handle_time_wait4 - handle IPv4 TCP TIME_WAIT reopen redirections
73 * @skb: The skb being processed. 73 * @skb: The skb being processed.
74 * @laddr: IPv4 address to redirect to or zero. 74 * @laddr: IPv4 address to redirect to or zero.
75 * @lport: TCP port to redirect to or zero. 75 * @lport: TCP port to redirect to or zero.
@@ -220,7 +220,7 @@ tproxy_laddr6(struct sk_buff *skb, const struct in6_addr *user_laddr,
220} 220}
221 221
222/** 222/**
223 * tproxy_handle_time_wait6() - handle IPv6 TCP TIME_WAIT reopen redirections 223 * tproxy_handle_time_wait6 - handle IPv6 TCP TIME_WAIT reopen redirections
224 * @skb: The skb being processed. 224 * @skb: The skb being processed.
225 * @tproto: Transport protocol. 225 * @tproto: Transport protocol.
226 * @thoff: Transport protocol header offset. 226 * @thoff: Transport protocol header offset.
diff --git a/net/netfilter/xt_connlimit.c b/net/netfilter/xt_connlimit.c
index c6d5a83450c..70b5591a258 100644
--- a/net/netfilter/xt_connlimit.c
+++ b/net/netfilter/xt_connlimit.c
@@ -274,38 +274,25 @@ static void connlimit_mt_destroy(const struct xt_mtdtor_param *par)
274 kfree(info->data); 274 kfree(info->data);
275} 275}
276 276
277static struct xt_match connlimit_mt_reg[] __read_mostly = { 277static struct xt_match connlimit_mt_reg __read_mostly = {
278 { 278 .name = "connlimit",
279 .name = "connlimit", 279 .revision = 1,
280 .revision = 0, 280 .family = NFPROTO_UNSPEC,
281 .family = NFPROTO_UNSPEC, 281 .checkentry = connlimit_mt_check,
282 .checkentry = connlimit_mt_check, 282 .match = connlimit_mt,
283 .match = connlimit_mt, 283 .matchsize = sizeof(struct xt_connlimit_info),
284 .matchsize = sizeof(struct xt_connlimit_info), 284 .destroy = connlimit_mt_destroy,
285 .destroy = connlimit_mt_destroy, 285 .me = THIS_MODULE,
286 .me = THIS_MODULE,
287 },
288 {
289 .name = "connlimit",
290 .revision = 1,
291 .family = NFPROTO_UNSPEC,
292 .checkentry = connlimit_mt_check,
293 .match = connlimit_mt,
294 .matchsize = sizeof(struct xt_connlimit_info),
295 .destroy = connlimit_mt_destroy,
296 .me = THIS_MODULE,
297 },
298}; 286};
299 287
300static int __init connlimit_mt_init(void) 288static int __init connlimit_mt_init(void)
301{ 289{
302 return xt_register_matches(connlimit_mt_reg, 290 return xt_register_match(&connlimit_mt_reg);
303 ARRAY_SIZE(connlimit_mt_reg));
304} 291}
305 292
306static void __exit connlimit_mt_exit(void) 293static void __exit connlimit_mt_exit(void)
307{ 294{
308 xt_unregister_matches(connlimit_mt_reg, ARRAY_SIZE(connlimit_mt_reg)); 295 xt_unregister_match(&connlimit_mt_reg);
309} 296}
310 297
311module_init(connlimit_mt_init); 298module_init(connlimit_mt_init);
diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c
index fc0d6dbe5d1..ae2ad1eec8d 100644
--- a/net/netfilter/xt_recent.c
+++ b/net/netfilter/xt_recent.c
@@ -75,6 +75,7 @@ struct recent_entry {
75struct recent_table { 75struct recent_table {
76 struct list_head list; 76 struct list_head list;
77 char name[XT_RECENT_NAME_LEN]; 77 char name[XT_RECENT_NAME_LEN];
78 union nf_inet_addr mask;
78 unsigned int refcnt; 79 unsigned int refcnt;
79 unsigned int entries; 80 unsigned int entries;
80 struct list_head lru_list; 81 struct list_head lru_list;
@@ -228,10 +229,10 @@ recent_mt(const struct sk_buff *skb, struct xt_action_param *par)
228{ 229{
229 struct net *net = dev_net(par->in ? par->in : par->out); 230 struct net *net = dev_net(par->in ? par->in : par->out);
230 struct recent_net *recent_net = recent_pernet(net); 231 struct recent_net *recent_net = recent_pernet(net);
231 const struct xt_recent_mtinfo *info = par->matchinfo; 232 const struct xt_recent_mtinfo_v1 *info = par->matchinfo;
232 struct recent_table *t; 233 struct recent_table *t;
233 struct recent_entry *e; 234 struct recent_entry *e;
234 union nf_inet_addr addr = {}; 235 union nf_inet_addr addr = {}, addr_mask;
235 u_int8_t ttl; 236 u_int8_t ttl;
236 bool ret = info->invert; 237 bool ret = info->invert;
237 238
@@ -261,12 +262,15 @@ recent_mt(const struct sk_buff *skb, struct xt_action_param *par)
261 262
262 spin_lock_bh(&recent_lock); 263 spin_lock_bh(&recent_lock);
263 t = recent_table_lookup(recent_net, info->name); 264 t = recent_table_lookup(recent_net, info->name);
264 e = recent_entry_lookup(t, &addr, par->family, 265
266 nf_inet_addr_mask(&addr, &addr_mask, &t->mask);
267
268 e = recent_entry_lookup(t, &addr_mask, par->family,
265 (info->check_set & XT_RECENT_TTL) ? ttl : 0); 269 (info->check_set & XT_RECENT_TTL) ? ttl : 0);
266 if (e == NULL) { 270 if (e == NULL) {
267 if (!(info->check_set & XT_RECENT_SET)) 271 if (!(info->check_set & XT_RECENT_SET))
268 goto out; 272 goto out;
269 e = recent_entry_init(t, &addr, par->family, ttl); 273 e = recent_entry_init(t, &addr_mask, par->family, ttl);
270 if (e == NULL) 274 if (e == NULL)
271 par->hotdrop = true; 275 par->hotdrop = true;
272 ret = !ret; 276 ret = !ret;
@@ -306,10 +310,10 @@ out:
306 return ret; 310 return ret;
307} 311}
308 312
309static int recent_mt_check(const struct xt_mtchk_param *par) 313static int recent_mt_check(const struct xt_mtchk_param *par,
314 const struct xt_recent_mtinfo_v1 *info)
310{ 315{
311 struct recent_net *recent_net = recent_pernet(par->net); 316 struct recent_net *recent_net = recent_pernet(par->net);
312 const struct xt_recent_mtinfo *info = par->matchinfo;
313 struct recent_table *t; 317 struct recent_table *t;
314#ifdef CONFIG_PROC_FS 318#ifdef CONFIG_PROC_FS
315 struct proc_dir_entry *pde; 319 struct proc_dir_entry *pde;
@@ -361,6 +365,8 @@ static int recent_mt_check(const struct xt_mtchk_param *par)
361 goto out; 365 goto out;
362 } 366 }
363 t->refcnt = 1; 367 t->refcnt = 1;
368
369 memcpy(&t->mask, &info->mask, sizeof(t->mask));
364 strcpy(t->name, info->name); 370 strcpy(t->name, info->name);
365 INIT_LIST_HEAD(&t->lru_list); 371 INIT_LIST_HEAD(&t->lru_list);
366 for (i = 0; i < ip_list_hash_size; i++) 372 for (i = 0; i < ip_list_hash_size; i++)
@@ -385,10 +391,28 @@ out:
385 return ret; 391 return ret;
386} 392}
387 393
394static int recent_mt_check_v0(const struct xt_mtchk_param *par)
395{
396 const struct xt_recent_mtinfo_v0 *info_v0 = par->matchinfo;
397 struct xt_recent_mtinfo_v1 info_v1;
398
399 /* Copy revision 0 structure to revision 1 */
400 memcpy(&info_v1, info_v0, sizeof(struct xt_recent_mtinfo));
401 /* Set default mask to ensure backward compatible behaviour */
402 memset(info_v1.mask.all, 0xFF, sizeof(info_v1.mask.all));
403
404 return recent_mt_check(par, &info_v1);
405}
406
407static int recent_mt_check_v1(const struct xt_mtchk_param *par)
408{
409 return recent_mt_check(par, par->matchinfo);
410}
411
388static void recent_mt_destroy(const struct xt_mtdtor_param *par) 412static void recent_mt_destroy(const struct xt_mtdtor_param *par)
389{ 413{
390 struct recent_net *recent_net = recent_pernet(par->net); 414 struct recent_net *recent_net = recent_pernet(par->net);
391 const struct xt_recent_mtinfo *info = par->matchinfo; 415 const struct xt_recent_mtinfo_v1 *info = par->matchinfo;
392 struct recent_table *t; 416 struct recent_table *t;
393 417
394 mutex_lock(&recent_mutex); 418 mutex_lock(&recent_mutex);
@@ -625,7 +649,7 @@ static struct xt_match recent_mt_reg[] __read_mostly = {
625 .family = NFPROTO_IPV4, 649 .family = NFPROTO_IPV4,
626 .match = recent_mt, 650 .match = recent_mt,
627 .matchsize = sizeof(struct xt_recent_mtinfo), 651 .matchsize = sizeof(struct xt_recent_mtinfo),
628 .checkentry = recent_mt_check, 652 .checkentry = recent_mt_check_v0,
629 .destroy = recent_mt_destroy, 653 .destroy = recent_mt_destroy,
630 .me = THIS_MODULE, 654 .me = THIS_MODULE,
631 }, 655 },
@@ -635,10 +659,30 @@ static struct xt_match recent_mt_reg[] __read_mostly = {
635 .family = NFPROTO_IPV6, 659 .family = NFPROTO_IPV6,
636 .match = recent_mt, 660 .match = recent_mt,
637 .matchsize = sizeof(struct xt_recent_mtinfo), 661 .matchsize = sizeof(struct xt_recent_mtinfo),
638 .checkentry = recent_mt_check, 662 .checkentry = recent_mt_check_v0,
663 .destroy = recent_mt_destroy,
664 .me = THIS_MODULE,
665 },
666 {
667 .name = "recent",
668 .revision = 1,
669 .family = NFPROTO_IPV4,
670 .match = recent_mt,
671 .matchsize = sizeof(struct xt_recent_mtinfo_v1),
672 .checkentry = recent_mt_check_v1,
639 .destroy = recent_mt_destroy, 673 .destroy = recent_mt_destroy,
640 .me = THIS_MODULE, 674 .me = THIS_MODULE,
641 }, 675 },
676 {
677 .name = "recent",
678 .revision = 1,
679 .family = NFPROTO_IPV6,
680 .match = recent_mt,
681 .matchsize = sizeof(struct xt_recent_mtinfo_v1),
682 .checkentry = recent_mt_check_v1,
683 .destroy = recent_mt_destroy,
684 .me = THIS_MODULE,
685 }
642}; 686};
643 687
644static int __init recent_mt_init(void) 688static int __init recent_mt_init(void)
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index b3025a603d5..5463969da45 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -80,6 +80,7 @@ struct netlink_sock {
80 struct mutex *cb_mutex; 80 struct mutex *cb_mutex;
81 struct mutex cb_def_mutex; 81 struct mutex cb_def_mutex;
82 void (*netlink_rcv)(struct sk_buff *skb); 82 void (*netlink_rcv)(struct sk_buff *skb);
83 void (*netlink_bind)(int group);
83 struct module *module; 84 struct module *module;
84}; 85};
85 86
@@ -124,6 +125,7 @@ struct netlink_table {
124 unsigned int groups; 125 unsigned int groups;
125 struct mutex *cb_mutex; 126 struct mutex *cb_mutex;
126 struct module *module; 127 struct module *module;
128 void (*bind)(int group);
127 int registered; 129 int registered;
128}; 130};
129 131
@@ -444,6 +446,7 @@ static int netlink_create(struct net *net, struct socket *sock, int protocol,
444 struct module *module = NULL; 446 struct module *module = NULL;
445 struct mutex *cb_mutex; 447 struct mutex *cb_mutex;
446 struct netlink_sock *nlk; 448 struct netlink_sock *nlk;
449 void (*bind)(int group);
447 int err = 0; 450 int err = 0;
448 451
449 sock->state = SS_UNCONNECTED; 452 sock->state = SS_UNCONNECTED;
@@ -468,6 +471,7 @@ static int netlink_create(struct net *net, struct socket *sock, int protocol,
468 else 471 else
469 err = -EPROTONOSUPPORT; 472 err = -EPROTONOSUPPORT;
470 cb_mutex = nl_table[protocol].cb_mutex; 473 cb_mutex = nl_table[protocol].cb_mutex;
474 bind = nl_table[protocol].bind;
471 netlink_unlock_table(); 475 netlink_unlock_table();
472 476
473 if (err < 0) 477 if (err < 0)
@@ -483,6 +487,7 @@ static int netlink_create(struct net *net, struct socket *sock, int protocol,
483 487
484 nlk = nlk_sk(sock->sk); 488 nlk = nlk_sk(sock->sk);
485 nlk->module = module; 489 nlk->module = module;
490 nlk->netlink_bind = bind;
486out: 491out:
487 return err; 492 return err;
488 493
@@ -683,6 +688,15 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
683 netlink_update_listeners(sk); 688 netlink_update_listeners(sk);
684 netlink_table_ungrab(); 689 netlink_table_ungrab();
685 690
691 if (nlk->netlink_bind && nlk->groups[0]) {
692 int i;
693
694 for (i=0; i<nlk->ngroups; i++) {
695 if (test_bit(i, nlk->groups))
696 nlk->netlink_bind(i);
697 }
698 }
699
686 return 0; 700 return 0;
687} 701}
688 702
@@ -1239,6 +1253,10 @@ static int netlink_setsockopt(struct socket *sock, int level, int optname,
1239 netlink_update_socket_mc(nlk, val, 1253 netlink_update_socket_mc(nlk, val,
1240 optname == NETLINK_ADD_MEMBERSHIP); 1254 optname == NETLINK_ADD_MEMBERSHIP);
1241 netlink_table_ungrab(); 1255 netlink_table_ungrab();
1256
1257 if (nlk->netlink_bind)
1258 nlk->netlink_bind(val);
1259
1242 err = 0; 1260 err = 0;
1243 break; 1261 break;
1244 } 1262 }
@@ -1503,14 +1521,16 @@ static void netlink_data_ready(struct sock *sk, int len)
1503 */ 1521 */
1504 1522
1505struct sock * 1523struct sock *
1506netlink_kernel_create(struct net *net, int unit, unsigned int groups, 1524netlink_kernel_create(struct net *net, int unit,
1507 void (*input)(struct sk_buff *skb), 1525 struct module *module,
1508 struct mutex *cb_mutex, struct module *module) 1526 struct netlink_kernel_cfg *cfg)
1509{ 1527{
1510 struct socket *sock; 1528 struct socket *sock;
1511 struct sock *sk; 1529 struct sock *sk;
1512 struct netlink_sock *nlk; 1530 struct netlink_sock *nlk;
1513 struct listeners *listeners = NULL; 1531 struct listeners *listeners = NULL;
1532 struct mutex *cb_mutex = cfg ? cfg->cb_mutex : NULL;
1533 unsigned int groups;
1514 1534
1515 BUG_ON(!nl_table); 1535 BUG_ON(!nl_table);
1516 1536
@@ -1532,16 +1552,18 @@ netlink_kernel_create(struct net *net, int unit, unsigned int groups,
1532 sk = sock->sk; 1552 sk = sock->sk;
1533 sk_change_net(sk, net); 1553 sk_change_net(sk, net);
1534 1554
1535 if (groups < 32) 1555 if (!cfg || cfg->groups < 32)
1536 groups = 32; 1556 groups = 32;
1557 else
1558 groups = cfg->groups;
1537 1559
1538 listeners = kzalloc(sizeof(*listeners) + NLGRPSZ(groups), GFP_KERNEL); 1560 listeners = kzalloc(sizeof(*listeners) + NLGRPSZ(groups), GFP_KERNEL);
1539 if (!listeners) 1561 if (!listeners)
1540 goto out_sock_release; 1562 goto out_sock_release;
1541 1563
1542 sk->sk_data_ready = netlink_data_ready; 1564 sk->sk_data_ready = netlink_data_ready;
1543 if (input) 1565 if (cfg && cfg->input)
1544 nlk_sk(sk)->netlink_rcv = input; 1566 nlk_sk(sk)->netlink_rcv = cfg->input;
1545 1567
1546 if (netlink_insert(sk, net, 0)) 1568 if (netlink_insert(sk, net, 0))
1547 goto out_sock_release; 1569 goto out_sock_release;
@@ -1555,6 +1577,7 @@ netlink_kernel_create(struct net *net, int unit, unsigned int groups,
1555 rcu_assign_pointer(nl_table[unit].listeners, listeners); 1577 rcu_assign_pointer(nl_table[unit].listeners, listeners);
1556 nl_table[unit].cb_mutex = cb_mutex; 1578 nl_table[unit].cb_mutex = cb_mutex;
1557 nl_table[unit].module = module; 1579 nl_table[unit].module = module;
1580 nl_table[unit].bind = cfg ? cfg->bind : NULL;
1558 nl_table[unit].registered = 1; 1581 nl_table[unit].registered = 1;
1559 } else { 1582 } else {
1560 kfree(listeners); 1583 kfree(listeners);
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index 2cc7c1ee769..62ebe3c6291 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -504,7 +504,7 @@ EXPORT_SYMBOL(genl_unregister_family);
504 * @pid: netlink pid the message is addressed to 504 * @pid: netlink pid the message is addressed to
505 * @seq: sequence number (usually the one of the sender) 505 * @seq: sequence number (usually the one of the sender)
506 * @family: generic netlink family 506 * @family: generic netlink family
507 * @flags netlink message flags 507 * @flags: netlink message flags
508 * @cmd: generic netlink command 508 * @cmd: generic netlink command
509 * 509 *
510 * Returns pointer to user specific header 510 * Returns pointer to user specific header
@@ -915,10 +915,14 @@ static struct genl_multicast_group notify_grp = {
915 915
916static int __net_init genl_pernet_init(struct net *net) 916static int __net_init genl_pernet_init(struct net *net)
917{ 917{
918 struct netlink_kernel_cfg cfg = {
919 .input = genl_rcv,
920 .cb_mutex = &genl_mutex,
921 };
922
918 /* we'll bump the group number right afterwards */ 923 /* we'll bump the group number right afterwards */
919 net->genl_sock = netlink_kernel_create(net, NETLINK_GENERIC, 0, 924 net->genl_sock = netlink_kernel_create(net, NETLINK_GENERIC,
920 genl_rcv, &genl_mutex, 925 THIS_MODULE, &cfg);
921 THIS_MODULE);
922 926
923 if (!net->genl_sock && net_eq(net, &init_net)) 927 if (!net->genl_sock && net_eq(net, &init_net))
924 panic("GENL: Cannot initialize generic netlink\n"); 928 panic("GENL: Cannot initialize generic netlink\n");
diff --git a/net/nfc/core.c b/net/nfc/core.c
index 9f6ce011d35..4177bb5104b 100644
--- a/net/nfc/core.c
+++ b/net/nfc/core.c
@@ -121,14 +121,14 @@ error:
121 * The device remains polling for targets until a target is found or 121 * The device remains polling for targets until a target is found or
122 * the nfc_stop_poll function is called. 122 * the nfc_stop_poll function is called.
123 */ 123 */
124int nfc_start_poll(struct nfc_dev *dev, u32 protocols) 124int nfc_start_poll(struct nfc_dev *dev, u32 im_protocols, u32 tm_protocols)
125{ 125{
126 int rc; 126 int rc;
127 127
128 pr_debug("dev_name=%s protocols=0x%x\n", 128 pr_debug("dev_name %s initiator protocols 0x%x target protocols 0x%x\n",
129 dev_name(&dev->dev), protocols); 129 dev_name(&dev->dev), im_protocols, tm_protocols);
130 130
131 if (!protocols) 131 if (!im_protocols && !tm_protocols)
132 return -EINVAL; 132 return -EINVAL;
133 133
134 device_lock(&dev->dev); 134 device_lock(&dev->dev);
@@ -143,9 +143,11 @@ int nfc_start_poll(struct nfc_dev *dev, u32 protocols)
143 goto error; 143 goto error;
144 } 144 }
145 145
146 rc = dev->ops->start_poll(dev, protocols); 146 rc = dev->ops->start_poll(dev, im_protocols, tm_protocols);
147 if (!rc) 147 if (!rc) {
148 dev->polling = true; 148 dev->polling = true;
149 dev->rf_mode = NFC_RF_NONE;
150 }
149 151
150error: 152error:
151 device_unlock(&dev->dev); 153 device_unlock(&dev->dev);
@@ -235,8 +237,10 @@ int nfc_dep_link_up(struct nfc_dev *dev, int target_index, u8 comm_mode)
235 } 237 }
236 238
237 rc = dev->ops->dep_link_up(dev, target, comm_mode, gb, gb_len); 239 rc = dev->ops->dep_link_up(dev, target, comm_mode, gb, gb_len);
238 if (!rc) 240 if (!rc) {
239 dev->active_target = target; 241 dev->active_target = target;
242 dev->rf_mode = NFC_RF_INITIATOR;
243 }
240 244
241error: 245error:
242 device_unlock(&dev->dev); 246 device_unlock(&dev->dev);
@@ -264,11 +268,6 @@ int nfc_dep_link_down(struct nfc_dev *dev)
264 goto error; 268 goto error;
265 } 269 }
266 270
267 if (dev->dep_rf_mode == NFC_RF_TARGET) {
268 rc = -EOPNOTSUPP;
269 goto error;
270 }
271
272 rc = dev->ops->dep_link_down(dev); 271 rc = dev->ops->dep_link_down(dev);
273 if (!rc) { 272 if (!rc) {
274 dev->dep_link_up = false; 273 dev->dep_link_up = false;
@@ -286,7 +285,6 @@ int nfc_dep_link_is_up(struct nfc_dev *dev, u32 target_idx,
286 u8 comm_mode, u8 rf_mode) 285 u8 comm_mode, u8 rf_mode)
287{ 286{
288 dev->dep_link_up = true; 287 dev->dep_link_up = true;
289 dev->dep_rf_mode = rf_mode;
290 288
291 nfc_llcp_mac_is_up(dev, target_idx, comm_mode, rf_mode); 289 nfc_llcp_mac_is_up(dev, target_idx, comm_mode, rf_mode);
292 290
@@ -330,6 +328,7 @@ int nfc_activate_target(struct nfc_dev *dev, u32 target_idx, u32 protocol)
330 rc = dev->ops->activate_target(dev, target, protocol); 328 rc = dev->ops->activate_target(dev, target, protocol);
331 if (!rc) { 329 if (!rc) {
332 dev->active_target = target; 330 dev->active_target = target;
331 dev->rf_mode = NFC_RF_INITIATOR;
333 332
334 if (dev->ops->check_presence) 333 if (dev->ops->check_presence)
335 mod_timer(&dev->check_pres_timer, jiffies + 334 mod_timer(&dev->check_pres_timer, jiffies +
@@ -409,27 +408,30 @@ int nfc_data_exchange(struct nfc_dev *dev, u32 target_idx, struct sk_buff *skb,
409 goto error; 408 goto error;
410 } 409 }
411 410
412 if (dev->active_target == NULL) { 411 if (dev->rf_mode == NFC_RF_INITIATOR && dev->active_target != NULL) {
413 rc = -ENOTCONN; 412 if (dev->active_target->idx != target_idx) {
414 kfree_skb(skb); 413 rc = -EADDRNOTAVAIL;
415 goto error; 414 kfree_skb(skb);
416 } 415 goto error;
416 }
417 417
418 if (dev->active_target->idx != target_idx) { 418 if (dev->ops->check_presence)
419 rc = -EADDRNOTAVAIL; 419 del_timer_sync(&dev->check_pres_timer);
420
421 rc = dev->ops->im_transceive(dev, dev->active_target, skb, cb,
422 cb_context);
423
424 if (!rc && dev->ops->check_presence)
425 mod_timer(&dev->check_pres_timer, jiffies +
426 msecs_to_jiffies(NFC_CHECK_PRES_FREQ_MS));
427 } else if (dev->rf_mode == NFC_RF_TARGET && dev->ops->tm_send != NULL) {
428 rc = dev->ops->tm_send(dev, skb);
429 } else {
430 rc = -ENOTCONN;
420 kfree_skb(skb); 431 kfree_skb(skb);
421 goto error; 432 goto error;
422 } 433 }
423 434
424 if (dev->ops->check_presence)
425 del_timer_sync(&dev->check_pres_timer);
426
427 rc = dev->ops->data_exchange(dev, dev->active_target, skb, cb,
428 cb_context);
429
430 if (!rc && dev->ops->check_presence)
431 mod_timer(&dev->check_pres_timer, jiffies +
432 msecs_to_jiffies(NFC_CHECK_PRES_FREQ_MS));
433 435
434error: 436error:
435 device_unlock(&dev->dev); 437 device_unlock(&dev->dev);
@@ -447,6 +449,63 @@ int nfc_set_remote_general_bytes(struct nfc_dev *dev, u8 *gb, u8 gb_len)
447} 449}
448EXPORT_SYMBOL(nfc_set_remote_general_bytes); 450EXPORT_SYMBOL(nfc_set_remote_general_bytes);
449 451
452u8 *nfc_get_local_general_bytes(struct nfc_dev *dev, size_t *gb_len)
453{
454 pr_debug("dev_name=%s\n", dev_name(&dev->dev));
455
456 return nfc_llcp_general_bytes(dev, gb_len);
457}
458EXPORT_SYMBOL(nfc_get_local_general_bytes);
459
460int nfc_tm_data_received(struct nfc_dev *dev, struct sk_buff *skb)
461{
462 /* Only LLCP target mode for now */
463 if (dev->dep_link_up == false) {
464 kfree_skb(skb);
465 return -ENOLINK;
466 }
467
468 return nfc_llcp_data_received(dev, skb);
469}
470EXPORT_SYMBOL(nfc_tm_data_received);
471
472int nfc_tm_activated(struct nfc_dev *dev, u32 protocol, u8 comm_mode,
473 u8 *gb, size_t gb_len)
474{
475 int rc;
476
477 device_lock(&dev->dev);
478
479 dev->polling = false;
480
481 if (gb != NULL) {
482 rc = nfc_set_remote_general_bytes(dev, gb, gb_len);
483 if (rc < 0)
484 goto out;
485 }
486
487 dev->rf_mode = NFC_RF_TARGET;
488
489 if (protocol == NFC_PROTO_NFC_DEP_MASK)
490 nfc_dep_link_is_up(dev, 0, comm_mode, NFC_RF_TARGET);
491
492 rc = nfc_genl_tm_activated(dev, protocol);
493
494out:
495 device_unlock(&dev->dev);
496
497 return rc;
498}
499EXPORT_SYMBOL(nfc_tm_activated);
500
501int nfc_tm_deactivated(struct nfc_dev *dev)
502{
503 dev->dep_link_up = false;
504
505 return nfc_genl_tm_deactivated(dev);
506}
507EXPORT_SYMBOL(nfc_tm_deactivated);
508
450/** 509/**
451 * nfc_alloc_send_skb - allocate a skb for data exchange responses 510 * nfc_alloc_send_skb - allocate a skb for data exchange responses
452 * 511 *
@@ -678,7 +737,7 @@ struct nfc_dev *nfc_allocate_device(struct nfc_ops *ops,
678 struct nfc_dev *dev; 737 struct nfc_dev *dev;
679 738
680 if (!ops->start_poll || !ops->stop_poll || !ops->activate_target || 739 if (!ops->start_poll || !ops->stop_poll || !ops->activate_target ||
681 !ops->deactivate_target || !ops->data_exchange) 740 !ops->deactivate_target || !ops->im_transceive)
682 return NULL; 741 return NULL;
683 742
684 if (!supported_protocols) 743 if (!supported_protocols)
diff --git a/net/nfc/hci/core.c b/net/nfc/hci/core.c
index e1a640d2b58..a8b0b71e8f8 100644
--- a/net/nfc/hci/core.c
+++ b/net/nfc/hci/core.c
@@ -481,12 +481,13 @@ static int hci_dev_down(struct nfc_dev *nfc_dev)
481 return 0; 481 return 0;
482} 482}
483 483
484static int hci_start_poll(struct nfc_dev *nfc_dev, u32 protocols) 484static int hci_start_poll(struct nfc_dev *nfc_dev,
485 u32 im_protocols, u32 tm_protocols)
485{ 486{
486 struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev); 487 struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev);
487 488
488 if (hdev->ops->start_poll) 489 if (hdev->ops->start_poll)
489 return hdev->ops->start_poll(hdev, protocols); 490 return hdev->ops->start_poll(hdev, im_protocols, tm_protocols);
490 else 491 else
491 return nfc_hci_send_event(hdev, NFC_HCI_RF_READER_A_GATE, 492 return nfc_hci_send_event(hdev, NFC_HCI_RF_READER_A_GATE,
492 NFC_HCI_EVT_READER_REQUESTED, NULL, 0); 493 NFC_HCI_EVT_READER_REQUESTED, NULL, 0);
@@ -511,9 +512,9 @@ static void hci_deactivate_target(struct nfc_dev *nfc_dev,
511{ 512{
512} 513}
513 514
514static int hci_data_exchange(struct nfc_dev *nfc_dev, struct nfc_target *target, 515static int hci_transceive(struct nfc_dev *nfc_dev, struct nfc_target *target,
515 struct sk_buff *skb, data_exchange_cb_t cb, 516 struct sk_buff *skb, data_exchange_cb_t cb,
516 void *cb_context) 517 void *cb_context)
517{ 518{
518 struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev); 519 struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev);
519 int r; 520 int r;
@@ -579,7 +580,7 @@ static struct nfc_ops hci_nfc_ops = {
579 .stop_poll = hci_stop_poll, 580 .stop_poll = hci_stop_poll,
580 .activate_target = hci_activate_target, 581 .activate_target = hci_activate_target,
581 .deactivate_target = hci_deactivate_target, 582 .deactivate_target = hci_deactivate_target,
582 .data_exchange = hci_data_exchange, 583 .im_transceive = hci_transceive,
583 .check_presence = hci_check_presence, 584 .check_presence = hci_check_presence,
584}; 585};
585 586
diff --git a/net/nfc/hci/shdlc.c b/net/nfc/hci/shdlc.c
index 5665dc6d893..6b836e6242b 100644
--- a/net/nfc/hci/shdlc.c
+++ b/net/nfc/hci/shdlc.c
@@ -765,14 +765,16 @@ static int nfc_shdlc_xmit(struct nfc_hci_dev *hdev, struct sk_buff *skb)
765 return 0; 765 return 0;
766} 766}
767 767
768static int nfc_shdlc_start_poll(struct nfc_hci_dev *hdev, u32 protocols) 768static int nfc_shdlc_start_poll(struct nfc_hci_dev *hdev,
769 u32 im_protocols, u32 tm_protocols)
769{ 770{
770 struct nfc_shdlc *shdlc = nfc_hci_get_clientdata(hdev); 771 struct nfc_shdlc *shdlc = nfc_hci_get_clientdata(hdev);
771 772
772 pr_debug("\n"); 773 pr_debug("\n");
773 774
774 if (shdlc->ops->start_poll) 775 if (shdlc->ops->start_poll)
775 return shdlc->ops->start_poll(shdlc, protocols); 776 return shdlc->ops->start_poll(shdlc,
777 im_protocols, tm_protocols);
776 778
777 return 0; 779 return 0;
778} 780}
diff --git a/net/nfc/llcp/commands.c b/net/nfc/llcp/commands.c
index bf8ae4f0b90..b982b5b890d 100644
--- a/net/nfc/llcp/commands.c
+++ b/net/nfc/llcp/commands.c
@@ -51,7 +51,7 @@ static u8 llcp_tlv8(u8 *tlv, u8 type)
51 return tlv[2]; 51 return tlv[2];
52} 52}
53 53
54static u8 llcp_tlv16(u8 *tlv, u8 type) 54static u16 llcp_tlv16(u8 *tlv, u8 type)
55{ 55{
56 if (tlv[0] != type || tlv[1] != llcp_tlv_length[tlv[0]]) 56 if (tlv[0] != type || tlv[1] != llcp_tlv_length[tlv[0]])
57 return 0; 57 return 0;
@@ -67,7 +67,7 @@ static u8 llcp_tlv_version(u8 *tlv)
67 67
68static u16 llcp_tlv_miux(u8 *tlv) 68static u16 llcp_tlv_miux(u8 *tlv)
69{ 69{
70 return llcp_tlv16(tlv, LLCP_TLV_MIUX) & 0x7f; 70 return llcp_tlv16(tlv, LLCP_TLV_MIUX) & 0x7ff;
71} 71}
72 72
73static u16 llcp_tlv_wks(u8 *tlv) 73static u16 llcp_tlv_wks(u8 *tlv)
@@ -117,8 +117,8 @@ u8 *nfc_llcp_build_tlv(u8 type, u8 *value, u8 value_length, u8 *tlv_length)
117 return tlv; 117 return tlv;
118} 118}
119 119
120int nfc_llcp_parse_tlv(struct nfc_llcp_local *local, 120int nfc_llcp_parse_gb_tlv(struct nfc_llcp_local *local,
121 u8 *tlv_array, u16 tlv_array_len) 121 u8 *tlv_array, u16 tlv_array_len)
122{ 122{
123 u8 *tlv = tlv_array, type, length, offset = 0; 123 u8 *tlv = tlv_array, type, length, offset = 0;
124 124
@@ -149,8 +149,45 @@ int nfc_llcp_parse_tlv(struct nfc_llcp_local *local,
149 case LLCP_TLV_OPT: 149 case LLCP_TLV_OPT:
150 local->remote_opt = llcp_tlv_opt(tlv); 150 local->remote_opt = llcp_tlv_opt(tlv);
151 break; 151 break;
152 default:
153 pr_err("Invalid gt tlv value 0x%x\n", type);
154 break;
155 }
156
157 offset += length + 2;
158 tlv += length + 2;
159 }
160
161 pr_debug("version 0x%x miu %d lto %d opt 0x%x wks 0x%x\n",
162 local->remote_version, local->remote_miu,
163 local->remote_lto, local->remote_opt,
164 local->remote_wks);
165
166 return 0;
167}
168
169int nfc_llcp_parse_connection_tlv(struct nfc_llcp_sock *sock,
170 u8 *tlv_array, u16 tlv_array_len)
171{
172 u8 *tlv = tlv_array, type, length, offset = 0;
173
174 pr_debug("TLV array length %d\n", tlv_array_len);
175
176 if (sock == NULL)
177 return -ENOTCONN;
178
179 while (offset < tlv_array_len) {
180 type = tlv[0];
181 length = tlv[1];
182
183 pr_debug("type 0x%x length %d\n", type, length);
184
185 switch (type) {
186 case LLCP_TLV_MIUX:
187 sock->miu = llcp_tlv_miux(tlv) + 128;
188 break;
152 case LLCP_TLV_RW: 189 case LLCP_TLV_RW:
153 local->remote_rw = llcp_tlv_rw(tlv); 190 sock->rw = llcp_tlv_rw(tlv);
154 break; 191 break;
155 case LLCP_TLV_SN: 192 case LLCP_TLV_SN:
156 break; 193 break;
@@ -163,10 +200,7 @@ int nfc_llcp_parse_tlv(struct nfc_llcp_local *local,
163 tlv += length + 2; 200 tlv += length + 2;
164 } 201 }
165 202
166 pr_debug("version 0x%x miu %d lto %d opt 0x%x wks 0x%x rw %d\n", 203 pr_debug("sock %p rw %d miu %d\n", sock, sock->rw, sock->miu);
167 local->remote_version, local->remote_miu,
168 local->remote_lto, local->remote_opt,
169 local->remote_wks, local->remote_rw);
170 204
171 return 0; 205 return 0;
172} 206}
@@ -474,7 +508,7 @@ int nfc_llcp_send_i_frame(struct nfc_llcp_sock *sock,
474 508
475 while (remaining_len > 0) { 509 while (remaining_len > 0) {
476 510
477 frag_len = min_t(size_t, local->remote_miu, remaining_len); 511 frag_len = min_t(size_t, sock->miu, remaining_len);
478 512
479 pr_debug("Fragment %zd bytes remaining %zd", 513 pr_debug("Fragment %zd bytes remaining %zd",
480 frag_len, remaining_len); 514 frag_len, remaining_len);
diff --git a/net/nfc/llcp/llcp.c b/net/nfc/llcp/llcp.c
index 42994fac26d..5d503eeb15a 100644
--- a/net/nfc/llcp/llcp.c
+++ b/net/nfc/llcp/llcp.c
@@ -31,47 +31,41 @@ static u8 llcp_magic[3] = {0x46, 0x66, 0x6d};
31 31
32static struct list_head llcp_devices; 32static struct list_head llcp_devices;
33 33
34static void nfc_llcp_socket_release(struct nfc_llcp_local *local) 34void nfc_llcp_sock_link(struct llcp_sock_list *l, struct sock *sk)
35{ 35{
36 struct nfc_llcp_sock *parent, *s, *n; 36 write_lock(&l->lock);
37 struct sock *sk, *parent_sk; 37 sk_add_node(sk, &l->head);
38 int i; 38 write_unlock(&l->lock);
39 39}
40 mutex_lock(&local->socket_lock);
41
42 for (i = 0; i < LLCP_MAX_SAP; i++) {
43 parent = local->sockets[i];
44 if (parent == NULL)
45 continue;
46
47 /* Release all child sockets */
48 list_for_each_entry_safe(s, n, &parent->list, list) {
49 list_del_init(&s->list);
50 sk = &s->sk;
51
52 lock_sock(sk);
53
54 if (sk->sk_state == LLCP_CONNECTED)
55 nfc_put_device(s->dev);
56 40
57 sk->sk_state = LLCP_CLOSED; 41void nfc_llcp_sock_unlink(struct llcp_sock_list *l, struct sock *sk)
42{
43 write_lock(&l->lock);
44 sk_del_node_init(sk);
45 write_unlock(&l->lock);
46}
58 47
59 release_sock(sk); 48static void nfc_llcp_socket_release(struct nfc_llcp_local *local)
49{
50 struct sock *sk;
51 struct hlist_node *node, *tmp;
52 struct nfc_llcp_sock *llcp_sock;
60 53
61 sock_orphan(sk); 54 write_lock(&local->sockets.lock);
62 55
63 s->local = NULL; 56 sk_for_each_safe(sk, node, tmp, &local->sockets.head) {
64 } 57 llcp_sock = nfc_llcp_sock(sk);
65 58
66 parent_sk = &parent->sk; 59 lock_sock(sk);
67 60
68 lock_sock(parent_sk); 61 if (sk->sk_state == LLCP_CONNECTED)
62 nfc_put_device(llcp_sock->dev);
69 63
70 if (parent_sk->sk_state == LLCP_LISTEN) { 64 if (sk->sk_state == LLCP_LISTEN) {
71 struct nfc_llcp_sock *lsk, *n; 65 struct nfc_llcp_sock *lsk, *n;
72 struct sock *accept_sk; 66 struct sock *accept_sk;
73 67
74 list_for_each_entry_safe(lsk, n, &parent->accept_queue, 68 list_for_each_entry_safe(lsk, n, &llcp_sock->accept_queue,
75 accept_queue) { 69 accept_queue) {
76 accept_sk = &lsk->sk; 70 accept_sk = &lsk->sk;
77 lock_sock(accept_sk); 71 lock_sock(accept_sk);
@@ -83,24 +77,53 @@ static void nfc_llcp_socket_release(struct nfc_llcp_local *local)
83 release_sock(accept_sk); 77 release_sock(accept_sk);
84 78
85 sock_orphan(accept_sk); 79 sock_orphan(accept_sk);
86
87 lsk->local = NULL;
88 } 80 }
89 } 81 }
90 82
91 if (parent_sk->sk_state == LLCP_CONNECTED) 83 sk->sk_state = LLCP_CLOSED;
92 nfc_put_device(parent->dev);
93
94 parent_sk->sk_state = LLCP_CLOSED;
95 84
96 release_sock(parent_sk); 85 release_sock(sk);
97 86
98 sock_orphan(parent_sk); 87 sock_orphan(sk);
99 88
100 parent->local = NULL; 89 sk_del_node_init(sk);
101 } 90 }
102 91
103 mutex_unlock(&local->socket_lock); 92 write_unlock(&local->sockets.lock);
93}
94
95struct nfc_llcp_local *nfc_llcp_local_get(struct nfc_llcp_local *local)
96{
97 kref_get(&local->ref);
98
99 return local;
100}
101
102static void local_release(struct kref *ref)
103{
104 struct nfc_llcp_local *local;
105
106 local = container_of(ref, struct nfc_llcp_local, ref);
107
108 list_del(&local->list);
109 nfc_llcp_socket_release(local);
110 del_timer_sync(&local->link_timer);
111 skb_queue_purge(&local->tx_queue);
112 destroy_workqueue(local->tx_wq);
113 destroy_workqueue(local->rx_wq);
114 destroy_workqueue(local->timeout_wq);
115 kfree_skb(local->rx_pending);
116 kfree(local);
117}
118
119int nfc_llcp_local_put(struct nfc_llcp_local *local)
120{
121 WARN_ON(local == NULL);
122
123 if (local == NULL)
124 return 0;
125
126 return kref_put(&local->ref, local_release);
104} 127}
105 128
106static void nfc_llcp_clear_sdp(struct nfc_llcp_local *local) 129static void nfc_llcp_clear_sdp(struct nfc_llcp_local *local)
@@ -384,31 +407,9 @@ int nfc_llcp_set_remote_gb(struct nfc_dev *dev, u8 *gb, u8 gb_len)
384 return -EINVAL; 407 return -EINVAL;
385 } 408 }
386 409
387 return nfc_llcp_parse_tlv(local, 410 return nfc_llcp_parse_gb_tlv(local,
388 &local->remote_gb[3], 411 &local->remote_gb[3],
389 local->remote_gb_len - 3); 412 local->remote_gb_len - 3);
390}
391
392static void nfc_llcp_tx_work(struct work_struct *work)
393{
394 struct nfc_llcp_local *local = container_of(work, struct nfc_llcp_local,
395 tx_work);
396 struct sk_buff *skb;
397
398 skb = skb_dequeue(&local->tx_queue);
399 if (skb != NULL) {
400 pr_debug("Sending pending skb\n");
401 print_hex_dump(KERN_DEBUG, "LLCP Tx: ", DUMP_PREFIX_OFFSET,
402 16, 1, skb->data, skb->len, true);
403
404 nfc_data_exchange(local->dev, local->target_idx,
405 skb, nfc_llcp_recv, local);
406 } else {
407 nfc_llcp_send_symm(local->dev);
408 }
409
410 mod_timer(&local->link_timer,
411 jiffies + msecs_to_jiffies(local->remote_lto));
412} 413}
413 414
414static u8 nfc_llcp_dsap(struct sk_buff *pdu) 415static u8 nfc_llcp_dsap(struct sk_buff *pdu)
@@ -443,46 +444,146 @@ static void nfc_llcp_set_nrns(struct nfc_llcp_sock *sock, struct sk_buff *pdu)
443 sock->recv_ack_n = (sock->recv_n - 1) % 16; 444 sock->recv_ack_n = (sock->recv_n - 1) % 16;
444} 445}
445 446
447static void nfc_llcp_tx_work(struct work_struct *work)
448{
449 struct nfc_llcp_local *local = container_of(work, struct nfc_llcp_local,
450 tx_work);
451 struct sk_buff *skb;
452 struct sock *sk;
453 struct nfc_llcp_sock *llcp_sock;
454
455 skb = skb_dequeue(&local->tx_queue);
456 if (skb != NULL) {
457 sk = skb->sk;
458 llcp_sock = nfc_llcp_sock(sk);
459 if (llcp_sock != NULL) {
460 int ret;
461
462 pr_debug("Sending pending skb\n");
463 print_hex_dump(KERN_DEBUG, "LLCP Tx: ",
464 DUMP_PREFIX_OFFSET, 16, 1,
465 skb->data, skb->len, true);
466
467 ret = nfc_data_exchange(local->dev, local->target_idx,
468 skb, nfc_llcp_recv, local);
469
470 if (!ret && nfc_llcp_ptype(skb) == LLCP_PDU_I) {
471 skb = skb_get(skb);
472 skb_queue_tail(&llcp_sock->tx_pending_queue,
473 skb);
474 }
475 } else {
476 nfc_llcp_send_symm(local->dev);
477 }
478 } else {
479 nfc_llcp_send_symm(local->dev);
480 }
481
482 mod_timer(&local->link_timer,
483 jiffies + msecs_to_jiffies(2 * local->remote_lto));
484}
485
486static struct nfc_llcp_sock *nfc_llcp_connecting_sock_get(struct nfc_llcp_local *local,
487 u8 ssap)
488{
489 struct sock *sk;
490 struct nfc_llcp_sock *llcp_sock;
491 struct hlist_node *node;
492
493 read_lock(&local->connecting_sockets.lock);
494
495 sk_for_each(sk, node, &local->connecting_sockets.head) {
496 llcp_sock = nfc_llcp_sock(sk);
497
498 if (llcp_sock->ssap == ssap) {
499 sock_hold(&llcp_sock->sk);
500 goto out;
501 }
502 }
503
504 llcp_sock = NULL;
505
506out:
507 read_unlock(&local->connecting_sockets.lock);
508
509 return llcp_sock;
510}
511
446static struct nfc_llcp_sock *nfc_llcp_sock_get(struct nfc_llcp_local *local, 512static struct nfc_llcp_sock *nfc_llcp_sock_get(struct nfc_llcp_local *local,
447 u8 ssap, u8 dsap) 513 u8 ssap, u8 dsap)
448{ 514{
449 struct nfc_llcp_sock *sock, *llcp_sock, *n; 515 struct sock *sk;
516 struct hlist_node *node;
517 struct nfc_llcp_sock *llcp_sock;
450 518
451 pr_debug("ssap dsap %d %d\n", ssap, dsap); 519 pr_debug("ssap dsap %d %d\n", ssap, dsap);
452 520
453 if (ssap == 0 && dsap == 0) 521 if (ssap == 0 && dsap == 0)
454 return NULL; 522 return NULL;
455 523
456 mutex_lock(&local->socket_lock); 524 read_lock(&local->sockets.lock);
457 sock = local->sockets[ssap];
458 if (sock == NULL) {
459 mutex_unlock(&local->socket_lock);
460 return NULL;
461 }
462 525
463 pr_debug("root dsap %d (%d)\n", sock->dsap, dsap); 526 llcp_sock = NULL;
464 527
465 if (sock->dsap == dsap) { 528 sk_for_each(sk, node, &local->sockets.head) {
466 sock_hold(&sock->sk); 529 llcp_sock = nfc_llcp_sock(sk);
467 mutex_unlock(&local->socket_lock); 530
468 return sock; 531 if (llcp_sock->ssap == ssap &&
532 llcp_sock->dsap == dsap)
533 break;
469 } 534 }
470 535
471 list_for_each_entry_safe(llcp_sock, n, &sock->list, list) { 536 read_unlock(&local->sockets.lock);
472 pr_debug("llcp_sock %p sk %p dsap %d\n", llcp_sock, 537
473 &llcp_sock->sk, llcp_sock->dsap); 538 if (llcp_sock == NULL)
474 if (llcp_sock->dsap == dsap) { 539 return NULL;
475 sock_hold(&llcp_sock->sk); 540
476 mutex_unlock(&local->socket_lock); 541 sock_hold(&llcp_sock->sk);
477 return llcp_sock; 542
478 } 543 return llcp_sock;
544}
545
546static struct nfc_llcp_sock *nfc_llcp_sock_get_sn(struct nfc_llcp_local *local,
547 u8 *sn, size_t sn_len)
548{
549 struct sock *sk;
550 struct hlist_node *node;
551 struct nfc_llcp_sock *llcp_sock;
552
553 pr_debug("sn %zd\n", sn_len);
554
555 if (sn == NULL || sn_len == 0)
556 return NULL;
557
558 read_lock(&local->sockets.lock);
559
560 llcp_sock = NULL;
561
562 sk_for_each(sk, node, &local->sockets.head) {
563 llcp_sock = nfc_llcp_sock(sk);
564
565 if (llcp_sock->sk.sk_state != LLCP_LISTEN)
566 continue;
567
568 if (llcp_sock->service_name == NULL ||
569 llcp_sock->service_name_len == 0)
570 continue;
571
572 if (llcp_sock->service_name_len != sn_len)
573 continue;
574
575 if (memcmp(sn, llcp_sock->service_name, sn_len) == 0)
576 break;
479 } 577 }
480 578
481 pr_err("Could not find socket for %d %d\n", ssap, dsap); 579 read_unlock(&local->sockets.lock);
482 580
483 mutex_unlock(&local->socket_lock); 581 if (llcp_sock == NULL)
582 return NULL;
484 583
485 return NULL; 584 sock_hold(&llcp_sock->sk);
585
586 return llcp_sock;
486} 587}
487 588
488static void nfc_llcp_sock_put(struct nfc_llcp_sock *sock) 589static void nfc_llcp_sock_put(struct nfc_llcp_sock *sock)
@@ -518,35 +619,19 @@ static void nfc_llcp_recv_connect(struct nfc_llcp_local *local,
518{ 619{
519 struct sock *new_sk, *parent; 620 struct sock *new_sk, *parent;
520 struct nfc_llcp_sock *sock, *new_sock; 621 struct nfc_llcp_sock *sock, *new_sock;
521 u8 dsap, ssap, bound_sap, reason; 622 u8 dsap, ssap, reason;
522 623
523 dsap = nfc_llcp_dsap(skb); 624 dsap = nfc_llcp_dsap(skb);
524 ssap = nfc_llcp_ssap(skb); 625 ssap = nfc_llcp_ssap(skb);
525 626
526 pr_debug("%d %d\n", dsap, ssap); 627 pr_debug("%d %d\n", dsap, ssap);
527 628
528 nfc_llcp_parse_tlv(local, &skb->data[LLCP_HEADER_SIZE],
529 skb->len - LLCP_HEADER_SIZE);
530
531 if (dsap != LLCP_SAP_SDP) { 629 if (dsap != LLCP_SAP_SDP) {
532 bound_sap = dsap; 630 sock = nfc_llcp_sock_get(local, dsap, LLCP_SAP_SDP);
533 631 if (sock == NULL || sock->sk.sk_state != LLCP_LISTEN) {
534 mutex_lock(&local->socket_lock);
535 sock = local->sockets[dsap];
536 if (sock == NULL) {
537 mutex_unlock(&local->socket_lock);
538 reason = LLCP_DM_NOBOUND; 632 reason = LLCP_DM_NOBOUND;
539 goto fail; 633 goto fail;
540 } 634 }
541
542 sock_hold(&sock->sk);
543 mutex_unlock(&local->socket_lock);
544
545 lock_sock(&sock->sk);
546
547 if (sock->dsap == LLCP_SAP_SDP &&
548 sock->sk.sk_state == LLCP_LISTEN)
549 goto enqueue;
550 } else { 635 } else {
551 u8 *sn; 636 u8 *sn;
552 size_t sn_len; 637 size_t sn_len;
@@ -559,40 +644,15 @@ static void nfc_llcp_recv_connect(struct nfc_llcp_local *local,
559 644
560 pr_debug("Service name length %zu\n", sn_len); 645 pr_debug("Service name length %zu\n", sn_len);
561 646
562 mutex_lock(&local->socket_lock); 647 sock = nfc_llcp_sock_get_sn(local, sn, sn_len);
563 for (bound_sap = 0; bound_sap < LLCP_LOCAL_SAP_OFFSET; 648 if (sock == NULL) {
564 bound_sap++) { 649 reason = LLCP_DM_NOBOUND;
565 sock = local->sockets[bound_sap]; 650 goto fail;
566 if (sock == NULL)
567 continue;
568
569 if (sock->service_name == NULL ||
570 sock->service_name_len == 0)
571 continue;
572
573 if (sock->service_name_len != sn_len)
574 continue;
575
576 if (sock->dsap == LLCP_SAP_SDP &&
577 sock->sk.sk_state == LLCP_LISTEN &&
578 !memcmp(sn, sock->service_name, sn_len)) {
579 pr_debug("Found service name at SAP %d\n",
580 bound_sap);
581 sock_hold(&sock->sk);
582 mutex_unlock(&local->socket_lock);
583
584 lock_sock(&sock->sk);
585
586 goto enqueue;
587 }
588 } 651 }
589 mutex_unlock(&local->socket_lock);
590 } 652 }
591 653
592 reason = LLCP_DM_NOBOUND; 654 lock_sock(&sock->sk);
593 goto fail;
594 655
595enqueue:
596 parent = &sock->sk; 656 parent = &sock->sk;
597 657
598 if (sk_acceptq_is_full(parent)) { 658 if (sk_acceptq_is_full(parent)) {
@@ -612,15 +672,19 @@ enqueue:
612 672
613 new_sock = nfc_llcp_sock(new_sk); 673 new_sock = nfc_llcp_sock(new_sk);
614 new_sock->dev = local->dev; 674 new_sock->dev = local->dev;
615 new_sock->local = local; 675 new_sock->local = nfc_llcp_local_get(local);
676 new_sock->miu = local->remote_miu;
616 new_sock->nfc_protocol = sock->nfc_protocol; 677 new_sock->nfc_protocol = sock->nfc_protocol;
617 new_sock->ssap = bound_sap; 678 new_sock->ssap = sock->ssap;
618 new_sock->dsap = ssap; 679 new_sock->dsap = ssap;
619 new_sock->parent = parent; 680 new_sock->parent = parent;
620 681
682 nfc_llcp_parse_connection_tlv(new_sock, &skb->data[LLCP_HEADER_SIZE],
683 skb->len - LLCP_HEADER_SIZE);
684
621 pr_debug("new sock %p sk %p\n", new_sock, &new_sock->sk); 685 pr_debug("new sock %p sk %p\n", new_sock, &new_sock->sk);
622 686
623 list_add_tail(&new_sock->list, &sock->list); 687 nfc_llcp_sock_link(&local->sockets, new_sk);
624 688
625 nfc_llcp_accept_enqueue(&sock->sk, new_sk); 689 nfc_llcp_accept_enqueue(&sock->sk, new_sk);
626 690
@@ -654,12 +718,12 @@ int nfc_llcp_queue_i_frames(struct nfc_llcp_sock *sock)
654 718
655 pr_debug("Remote ready %d tx queue len %d remote rw %d", 719 pr_debug("Remote ready %d tx queue len %d remote rw %d",
656 sock->remote_ready, skb_queue_len(&sock->tx_pending_queue), 720 sock->remote_ready, skb_queue_len(&sock->tx_pending_queue),
657 local->remote_rw); 721 sock->rw);
658 722
659 /* Try to queue some I frames for transmission */ 723 /* Try to queue some I frames for transmission */
660 while (sock->remote_ready && 724 while (sock->remote_ready &&
661 skb_queue_len(&sock->tx_pending_queue) < local->remote_rw) { 725 skb_queue_len(&sock->tx_pending_queue) < sock->rw) {
662 struct sk_buff *pdu, *pending_pdu; 726 struct sk_buff *pdu;
663 727
664 pdu = skb_dequeue(&sock->tx_queue); 728 pdu = skb_dequeue(&sock->tx_queue);
665 if (pdu == NULL) 729 if (pdu == NULL)
@@ -668,10 +732,7 @@ int nfc_llcp_queue_i_frames(struct nfc_llcp_sock *sock)
668 /* Update N(S)/N(R) */ 732 /* Update N(S)/N(R) */
669 nfc_llcp_set_nrns(sock, pdu); 733 nfc_llcp_set_nrns(sock, pdu);
670 734
671 pending_pdu = skb_clone(pdu, GFP_KERNEL);
672
673 skb_queue_tail(&local->tx_queue, pdu); 735 skb_queue_tail(&local->tx_queue, pdu);
674 skb_queue_tail(&sock->tx_pending_queue, pending_pdu);
675 nr_frames++; 736 nr_frames++;
676 } 737 }
677 738
@@ -728,11 +789,21 @@ static void nfc_llcp_recv_hdlc(struct nfc_llcp_local *local,
728 789
729 llcp_sock->send_ack_n = nr; 790 llcp_sock->send_ack_n = nr;
730 791
731 skb_queue_walk_safe(&llcp_sock->tx_pending_queue, s, tmp) 792 /* Remove and free all skbs until ns == nr */
732 if (nfc_llcp_ns(s) <= nr) { 793 skb_queue_walk_safe(&llcp_sock->tx_pending_queue, s, tmp) {
733 skb_unlink(s, &llcp_sock->tx_pending_queue); 794 skb_unlink(s, &llcp_sock->tx_pending_queue);
734 kfree_skb(s); 795 kfree_skb(s);
735 } 796
797 if (nfc_llcp_ns(s) == nr)
798 break;
799 }
800
801 /* Re-queue the remaining skbs for transmission */
802 skb_queue_reverse_walk_safe(&llcp_sock->tx_pending_queue,
803 s, tmp) {
804 skb_unlink(s, &llcp_sock->tx_pending_queue);
805 skb_queue_head(&local->tx_queue, s);
806 }
736 } 807 }
737 808
738 if (ptype == LLCP_PDU_RR) 809 if (ptype == LLCP_PDU_RR)
@@ -740,7 +811,7 @@ static void nfc_llcp_recv_hdlc(struct nfc_llcp_local *local,
740 else if (ptype == LLCP_PDU_RNR) 811 else if (ptype == LLCP_PDU_RNR)
741 llcp_sock->remote_ready = false; 812 llcp_sock->remote_ready = false;
742 813
743 if (nfc_llcp_queue_i_frames(llcp_sock) == 0) 814 if (nfc_llcp_queue_i_frames(llcp_sock) == 0 && ptype == LLCP_PDU_I)
744 nfc_llcp_send_rr(llcp_sock); 815 nfc_llcp_send_rr(llcp_sock);
745 816
746 release_sock(sk); 817 release_sock(sk);
@@ -791,11 +862,7 @@ static void nfc_llcp_recv_cc(struct nfc_llcp_local *local, struct sk_buff *skb)
791 dsap = nfc_llcp_dsap(skb); 862 dsap = nfc_llcp_dsap(skb);
792 ssap = nfc_llcp_ssap(skb); 863 ssap = nfc_llcp_ssap(skb);
793 864
794 llcp_sock = nfc_llcp_sock_get(local, dsap, ssap); 865 llcp_sock = nfc_llcp_connecting_sock_get(local, dsap);
795
796 if (llcp_sock == NULL)
797 llcp_sock = nfc_llcp_sock_get(local, dsap, LLCP_SAP_SDP);
798
799 if (llcp_sock == NULL) { 866 if (llcp_sock == NULL) {
800 pr_err("Invalid CC\n"); 867 pr_err("Invalid CC\n");
801 nfc_llcp_send_dm(local, dsap, ssap, LLCP_DM_NOCONN); 868 nfc_llcp_send_dm(local, dsap, ssap, LLCP_DM_NOCONN);
@@ -803,11 +870,15 @@ static void nfc_llcp_recv_cc(struct nfc_llcp_local *local, struct sk_buff *skb)
803 return; 870 return;
804 } 871 }
805 872
806 llcp_sock->dsap = ssap;
807 sk = &llcp_sock->sk; 873 sk = &llcp_sock->sk;
808 874
809 nfc_llcp_parse_tlv(local, &skb->data[LLCP_HEADER_SIZE], 875 /* Unlink from connecting and link to the client array */
810 skb->len - LLCP_HEADER_SIZE); 876 nfc_llcp_sock_unlink(&local->connecting_sockets, sk);
877 nfc_llcp_sock_link(&local->sockets, sk);
878 llcp_sock->dsap = ssap;
879
880 nfc_llcp_parse_connection_tlv(llcp_sock, &skb->data[LLCP_HEADER_SIZE],
881 skb->len - LLCP_HEADER_SIZE);
811 882
812 sk->sk_state = LLCP_CONNECTED; 883 sk->sk_state = LLCP_CONNECTED;
813 sk->sk_state_change(sk); 884 sk->sk_state_change(sk);
@@ -891,6 +962,21 @@ void nfc_llcp_recv(void *data, struct sk_buff *skb, int err)
891 return; 962 return;
892} 963}
893 964
965int nfc_llcp_data_received(struct nfc_dev *dev, struct sk_buff *skb)
966{
967 struct nfc_llcp_local *local;
968
969 local = nfc_llcp_find_local(dev);
970 if (local == NULL)
971 return -ENODEV;
972
973 local->rx_pending = skb_get(skb);
974 del_timer(&local->link_timer);
975 queue_work(local->rx_wq, &local->rx_work);
976
977 return 0;
978}
979
894void nfc_llcp_mac_is_down(struct nfc_dev *dev) 980void nfc_llcp_mac_is_down(struct nfc_dev *dev)
895{ 981{
896 struct nfc_llcp_local *local; 982 struct nfc_llcp_local *local;
@@ -943,8 +1029,8 @@ int nfc_llcp_register_device(struct nfc_dev *ndev)
943 1029
944 local->dev = ndev; 1030 local->dev = ndev;
945 INIT_LIST_HEAD(&local->list); 1031 INIT_LIST_HEAD(&local->list);
1032 kref_init(&local->ref);
946 mutex_init(&local->sdp_lock); 1033 mutex_init(&local->sdp_lock);
947 mutex_init(&local->socket_lock);
948 init_timer(&local->link_timer); 1034 init_timer(&local->link_timer);
949 local->link_timer.data = (unsigned long) local; 1035 local->link_timer.data = (unsigned long) local;
950 local->link_timer.function = nfc_llcp_symm_timer; 1036 local->link_timer.function = nfc_llcp_symm_timer;
@@ -984,11 +1070,13 @@ int nfc_llcp_register_device(struct nfc_dev *ndev)
984 goto err_rx_wq; 1070 goto err_rx_wq;
985 } 1071 }
986 1072
1073 local->sockets.lock = __RW_LOCK_UNLOCKED(local->sockets.lock);
1074 local->connecting_sockets.lock = __RW_LOCK_UNLOCKED(local->connecting_sockets.lock);
1075
987 nfc_llcp_build_gb(local); 1076 nfc_llcp_build_gb(local);
988 1077
989 local->remote_miu = LLCP_DEFAULT_MIU; 1078 local->remote_miu = LLCP_DEFAULT_MIU;
990 local->remote_lto = LLCP_DEFAULT_LTO; 1079 local->remote_lto = LLCP_DEFAULT_LTO;
991 local->remote_rw = LLCP_DEFAULT_RW;
992 1080
993 list_add(&llcp_devices, &local->list); 1081 list_add(&llcp_devices, &local->list);
994 1082
@@ -1015,14 +1103,7 @@ void nfc_llcp_unregister_device(struct nfc_dev *dev)
1015 return; 1103 return;
1016 } 1104 }
1017 1105
1018 list_del(&local->list); 1106 nfc_llcp_local_put(local);
1019 nfc_llcp_socket_release(local);
1020 del_timer_sync(&local->link_timer);
1021 skb_queue_purge(&local->tx_queue);
1022 destroy_workqueue(local->tx_wq);
1023 destroy_workqueue(local->rx_wq);
1024 kfree_skb(local->rx_pending);
1025 kfree(local);
1026} 1107}
1027 1108
1028int __init nfc_llcp_init(void) 1109int __init nfc_llcp_init(void)
diff --git a/net/nfc/llcp/llcp.h b/net/nfc/llcp/llcp.h
index 50680ce5ae4..7286c86982f 100644
--- a/net/nfc/llcp/llcp.h
+++ b/net/nfc/llcp/llcp.h
@@ -40,12 +40,18 @@ enum llcp_state {
40 40
41struct nfc_llcp_sock; 41struct nfc_llcp_sock;
42 42
43struct llcp_sock_list {
44 struct hlist_head head;
45 rwlock_t lock;
46};
47
43struct nfc_llcp_local { 48struct nfc_llcp_local {
44 struct list_head list; 49 struct list_head list;
45 struct nfc_dev *dev; 50 struct nfc_dev *dev;
46 51
52 struct kref ref;
53
47 struct mutex sdp_lock; 54 struct mutex sdp_lock;
48 struct mutex socket_lock;
49 55
50 struct timer_list link_timer; 56 struct timer_list link_timer;
51 struct sk_buff_head tx_queue; 57 struct sk_buff_head tx_queue;
@@ -77,24 +83,26 @@ struct nfc_llcp_local {
77 u16 remote_lto; 83 u16 remote_lto;
78 u8 remote_opt; 84 u8 remote_opt;
79 u16 remote_wks; 85 u16 remote_wks;
80 u8 remote_rw;
81 86
82 /* sockets array */ 87 /* sockets array */
83 struct nfc_llcp_sock *sockets[LLCP_MAX_SAP]; 88 struct llcp_sock_list sockets;
89 struct llcp_sock_list connecting_sockets;
84}; 90};
85 91
86struct nfc_llcp_sock { 92struct nfc_llcp_sock {
87 struct sock sk; 93 struct sock sk;
88 struct list_head list;
89 struct nfc_dev *dev; 94 struct nfc_dev *dev;
90 struct nfc_llcp_local *local; 95 struct nfc_llcp_local *local;
91 u32 target_idx; 96 u32 target_idx;
92 u32 nfc_protocol; 97 u32 nfc_protocol;
93 98
99 /* Link parameters */
94 u8 ssap; 100 u8 ssap;
95 u8 dsap; 101 u8 dsap;
96 char *service_name; 102 char *service_name;
97 size_t service_name_len; 103 size_t service_name_len;
104 u8 rw;
105 u16 miu;
98 106
99 /* Link variables */ 107 /* Link variables */
100 u8 send_n; 108 u8 send_n;
@@ -164,7 +172,11 @@ struct nfc_llcp_sock {
164#define LLCP_DM_REJ 0x03 172#define LLCP_DM_REJ 0x03
165 173
166 174
175void nfc_llcp_sock_link(struct llcp_sock_list *l, struct sock *s);
176void nfc_llcp_sock_unlink(struct llcp_sock_list *l, struct sock *s);
167struct nfc_llcp_local *nfc_llcp_find_local(struct nfc_dev *dev); 177struct nfc_llcp_local *nfc_llcp_find_local(struct nfc_dev *dev);
178struct nfc_llcp_local *nfc_llcp_local_get(struct nfc_llcp_local *local);
179int nfc_llcp_local_put(struct nfc_llcp_local *local);
168u8 nfc_llcp_get_sdp_ssap(struct nfc_llcp_local *local, 180u8 nfc_llcp_get_sdp_ssap(struct nfc_llcp_local *local,
169 struct nfc_llcp_sock *sock); 181 struct nfc_llcp_sock *sock);
170u8 nfc_llcp_get_local_ssap(struct nfc_llcp_local *local); 182u8 nfc_llcp_get_local_ssap(struct nfc_llcp_local *local);
@@ -179,8 +191,10 @@ void nfc_llcp_accept_enqueue(struct sock *parent, struct sock *sk);
179struct sock *nfc_llcp_accept_dequeue(struct sock *sk, struct socket *newsock); 191struct sock *nfc_llcp_accept_dequeue(struct sock *sk, struct socket *newsock);
180 192
181/* TLV API */ 193/* TLV API */
182int nfc_llcp_parse_tlv(struct nfc_llcp_local *local, 194int nfc_llcp_parse_gb_tlv(struct nfc_llcp_local *local,
183 u8 *tlv_array, u16 tlv_array_len); 195 u8 *tlv_array, u16 tlv_array_len);
196int nfc_llcp_parse_connection_tlv(struct nfc_llcp_sock *sock,
197 u8 *tlv_array, u16 tlv_array_len);
184 198
185/* Commands API */ 199/* Commands API */
186void nfc_llcp_recv(void *data, struct sk_buff *skb, int err); 200void nfc_llcp_recv(void *data, struct sk_buff *skb, int err);
diff --git a/net/nfc/llcp/sock.c b/net/nfc/llcp/sock.c
index e06d458fc71..05ca5a68007 100644
--- a/net/nfc/llcp/sock.c
+++ b/net/nfc/llcp/sock.c
@@ -111,7 +111,7 @@ static int llcp_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
111 } 111 }
112 112
113 llcp_sock->dev = dev; 113 llcp_sock->dev = dev;
114 llcp_sock->local = local; 114 llcp_sock->local = nfc_llcp_local_get(local);
115 llcp_sock->nfc_protocol = llcp_addr.nfc_protocol; 115 llcp_sock->nfc_protocol = llcp_addr.nfc_protocol;
116 llcp_sock->service_name_len = min_t(unsigned int, 116 llcp_sock->service_name_len = min_t(unsigned int,
117 llcp_addr.service_name_len, 117 llcp_addr.service_name_len,
@@ -124,7 +124,7 @@ static int llcp_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
124 if (llcp_sock->ssap == LLCP_MAX_SAP) 124 if (llcp_sock->ssap == LLCP_MAX_SAP)
125 goto put_dev; 125 goto put_dev;
126 126
127 local->sockets[llcp_sock->ssap] = llcp_sock; 127 nfc_llcp_sock_link(&local->sockets, sk);
128 128
129 pr_debug("Socket bound to SAP %d\n", llcp_sock->ssap); 129 pr_debug("Socket bound to SAP %d\n", llcp_sock->ssap);
130 130
@@ -382,15 +382,6 @@ static int llcp_sock_release(struct socket *sock)
382 goto out; 382 goto out;
383 } 383 }
384 384
385 mutex_lock(&local->socket_lock);
386
387 if (llcp_sock == local->sockets[llcp_sock->ssap])
388 local->sockets[llcp_sock->ssap] = NULL;
389 else
390 list_del_init(&llcp_sock->list);
391
392 mutex_unlock(&local->socket_lock);
393
394 lock_sock(sk); 385 lock_sock(sk);
395 386
396 /* Send a DISC */ 387 /* Send a DISC */
@@ -415,14 +406,12 @@ static int llcp_sock_release(struct socket *sock)
415 } 406 }
416 } 407 }
417 408
418 /* Freeing the SAP */ 409 nfc_llcp_put_ssap(llcp_sock->local, llcp_sock->ssap);
419 if ((sk->sk_state == LLCP_CONNECTED
420 && llcp_sock->ssap > LLCP_LOCAL_SAP_OFFSET) ||
421 sk->sk_state == LLCP_BOUND || sk->sk_state == LLCP_LISTEN)
422 nfc_llcp_put_ssap(llcp_sock->local, llcp_sock->ssap);
423 410
424 release_sock(sk); 411 release_sock(sk);
425 412
413 nfc_llcp_sock_unlink(&local->sockets, sk);
414
426out: 415out:
427 sock_orphan(sk); 416 sock_orphan(sk);
428 sock_put(sk); 417 sock_put(sk);
@@ -490,7 +479,8 @@ static int llcp_sock_connect(struct socket *sock, struct sockaddr *_addr,
490 } 479 }
491 480
492 llcp_sock->dev = dev; 481 llcp_sock->dev = dev;
493 llcp_sock->local = local; 482 llcp_sock->local = nfc_llcp_local_get(local);
483 llcp_sock->miu = llcp_sock->local->remote_miu;
494 llcp_sock->ssap = nfc_llcp_get_local_ssap(local); 484 llcp_sock->ssap = nfc_llcp_get_local_ssap(local);
495 if (llcp_sock->ssap == LLCP_SAP_MAX) { 485 if (llcp_sock->ssap == LLCP_SAP_MAX) {
496 ret = -ENOMEM; 486 ret = -ENOMEM;
@@ -508,21 +498,26 @@ static int llcp_sock_connect(struct socket *sock, struct sockaddr *_addr,
508 llcp_sock->service_name_len, 498 llcp_sock->service_name_len,
509 GFP_KERNEL); 499 GFP_KERNEL);
510 500
511 local->sockets[llcp_sock->ssap] = llcp_sock; 501 nfc_llcp_sock_link(&local->connecting_sockets, sk);
512 502
513 ret = nfc_llcp_send_connect(llcp_sock); 503 ret = nfc_llcp_send_connect(llcp_sock);
514 if (ret) 504 if (ret)
515 goto put_dev; 505 goto sock_unlink;
516 506
517 ret = sock_wait_state(sk, LLCP_CONNECTED, 507 ret = sock_wait_state(sk, LLCP_CONNECTED,
518 sock_sndtimeo(sk, flags & O_NONBLOCK)); 508 sock_sndtimeo(sk, flags & O_NONBLOCK));
519 if (ret) 509 if (ret)
520 goto put_dev; 510 goto sock_unlink;
521 511
522 release_sock(sk); 512 release_sock(sk);
523 513
524 return 0; 514 return 0;
525 515
516sock_unlink:
517 nfc_llcp_put_ssap(local, llcp_sock->ssap);
518
519 nfc_llcp_sock_unlink(&local->connecting_sockets, sk);
520
526put_dev: 521put_dev:
527 nfc_put_device(dev); 522 nfc_put_device(dev);
528 523
@@ -687,13 +682,14 @@ struct sock *nfc_llcp_sock_alloc(struct socket *sock, int type, gfp_t gfp)
687 682
688 llcp_sock->ssap = 0; 683 llcp_sock->ssap = 0;
689 llcp_sock->dsap = LLCP_SAP_SDP; 684 llcp_sock->dsap = LLCP_SAP_SDP;
685 llcp_sock->rw = LLCP_DEFAULT_RW;
686 llcp_sock->miu = LLCP_DEFAULT_MIU;
690 llcp_sock->send_n = llcp_sock->send_ack_n = 0; 687 llcp_sock->send_n = llcp_sock->send_ack_n = 0;
691 llcp_sock->recv_n = llcp_sock->recv_ack_n = 0; 688 llcp_sock->recv_n = llcp_sock->recv_ack_n = 0;
692 llcp_sock->remote_ready = 1; 689 llcp_sock->remote_ready = 1;
693 skb_queue_head_init(&llcp_sock->tx_queue); 690 skb_queue_head_init(&llcp_sock->tx_queue);
694 skb_queue_head_init(&llcp_sock->tx_pending_queue); 691 skb_queue_head_init(&llcp_sock->tx_pending_queue);
695 skb_queue_head_init(&llcp_sock->tx_backlog_queue); 692 skb_queue_head_init(&llcp_sock->tx_backlog_queue);
696 INIT_LIST_HEAD(&llcp_sock->list);
697 INIT_LIST_HEAD(&llcp_sock->accept_queue); 693 INIT_LIST_HEAD(&llcp_sock->accept_queue);
698 694
699 if (sock != NULL) 695 if (sock != NULL)
@@ -704,8 +700,6 @@ struct sock *nfc_llcp_sock_alloc(struct socket *sock, int type, gfp_t gfp)
704 700
705void nfc_llcp_sock_free(struct nfc_llcp_sock *sock) 701void nfc_llcp_sock_free(struct nfc_llcp_sock *sock)
706{ 702{
707 struct nfc_llcp_local *local = sock->local;
708
709 kfree(sock->service_name); 703 kfree(sock->service_name);
710 704
711 skb_queue_purge(&sock->tx_queue); 705 skb_queue_purge(&sock->tx_queue);
@@ -714,12 +708,9 @@ void nfc_llcp_sock_free(struct nfc_llcp_sock *sock)
714 708
715 list_del_init(&sock->accept_queue); 709 list_del_init(&sock->accept_queue);
716 710
717 if (local != NULL && sock == local->sockets[sock->ssap])
718 local->sockets[sock->ssap] = NULL;
719 else
720 list_del_init(&sock->list);
721
722 sock->parent = NULL; 711 sock->parent = NULL;
712
713 nfc_llcp_local_put(sock->local);
723} 714}
724 715
725static int llcp_sock_create(struct net *net, struct socket *sock, 716static int llcp_sock_create(struct net *net, struct socket *sock,
diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c
index d560e6f1307..766a02b1dfa 100644
--- a/net/nfc/nci/core.c
+++ b/net/nfc/nci/core.c
@@ -387,7 +387,8 @@ static int nci_dev_down(struct nfc_dev *nfc_dev)
387 return nci_close_device(ndev); 387 return nci_close_device(ndev);
388} 388}
389 389
390static int nci_start_poll(struct nfc_dev *nfc_dev, __u32 protocols) 390static int nci_start_poll(struct nfc_dev *nfc_dev,
391 __u32 im_protocols, __u32 tm_protocols)
391{ 392{
392 struct nci_dev *ndev = nfc_get_drvdata(nfc_dev); 393 struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
393 int rc; 394 int rc;
@@ -413,11 +414,11 @@ static int nci_start_poll(struct nfc_dev *nfc_dev, __u32 protocols)
413 return -EBUSY; 414 return -EBUSY;
414 } 415 }
415 416
416 rc = nci_request(ndev, nci_rf_discover_req, protocols, 417 rc = nci_request(ndev, nci_rf_discover_req, im_protocols,
417 msecs_to_jiffies(NCI_RF_DISC_TIMEOUT)); 418 msecs_to_jiffies(NCI_RF_DISC_TIMEOUT));
418 419
419 if (!rc) 420 if (!rc)
420 ndev->poll_prots = protocols; 421 ndev->poll_prots = im_protocols;
421 422
422 return rc; 423 return rc;
423} 424}
@@ -521,9 +522,9 @@ static void nci_deactivate_target(struct nfc_dev *nfc_dev,
521 } 522 }
522} 523}
523 524
524static int nci_data_exchange(struct nfc_dev *nfc_dev, struct nfc_target *target, 525static int nci_transceive(struct nfc_dev *nfc_dev, struct nfc_target *target,
525 struct sk_buff *skb, 526 struct sk_buff *skb,
526 data_exchange_cb_t cb, void *cb_context) 527 data_exchange_cb_t cb, void *cb_context)
527{ 528{
528 struct nci_dev *ndev = nfc_get_drvdata(nfc_dev); 529 struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
529 int rc; 530 int rc;
@@ -556,7 +557,7 @@ static struct nfc_ops nci_nfc_ops = {
556 .stop_poll = nci_stop_poll, 557 .stop_poll = nci_stop_poll,
557 .activate_target = nci_activate_target, 558 .activate_target = nci_activate_target,
558 .deactivate_target = nci_deactivate_target, 559 .deactivate_target = nci_deactivate_target,
559 .data_exchange = nci_data_exchange, 560 .im_transceive = nci_transceive,
560}; 561};
561 562
562/* ---- Interface to NCI drivers ---- */ 563/* ---- Interface to NCI drivers ---- */
diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c
index 581d419083a..f4f07f9b61c 100644
--- a/net/nfc/netlink.c
+++ b/net/nfc/netlink.c
@@ -49,6 +49,8 @@ static const struct nla_policy nfc_genl_policy[NFC_ATTR_MAX + 1] = {
49 [NFC_ATTR_COMM_MODE] = { .type = NLA_U8 }, 49 [NFC_ATTR_COMM_MODE] = { .type = NLA_U8 },
50 [NFC_ATTR_RF_MODE] = { .type = NLA_U8 }, 50 [NFC_ATTR_RF_MODE] = { .type = NLA_U8 },
51 [NFC_ATTR_DEVICE_POWERED] = { .type = NLA_U8 }, 51 [NFC_ATTR_DEVICE_POWERED] = { .type = NLA_U8 },
52 [NFC_ATTR_IM_PROTOCOLS] = { .type = NLA_U32 },
53 [NFC_ATTR_TM_PROTOCOLS] = { .type = NLA_U32 },
52}; 54};
53 55
54static int nfc_genl_send_target(struct sk_buff *msg, struct nfc_target *target, 56static int nfc_genl_send_target(struct sk_buff *msg, struct nfc_target *target,
@@ -165,7 +167,7 @@ int nfc_genl_targets_found(struct nfc_dev *dev)
165 167
166 dev->genl_data.poll_req_pid = 0; 168 dev->genl_data.poll_req_pid = 0;
167 169
168 msg = nlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC); 170 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
169 if (!msg) 171 if (!msg)
170 return -ENOMEM; 172 return -ENOMEM;
171 173
@@ -193,7 +195,7 @@ int nfc_genl_target_lost(struct nfc_dev *dev, u32 target_idx)
193 struct sk_buff *msg; 195 struct sk_buff *msg;
194 void *hdr; 196 void *hdr;
195 197
196 msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); 198 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
197 if (!msg) 199 if (!msg)
198 return -ENOMEM; 200 return -ENOMEM;
199 201
@@ -219,12 +221,74 @@ free_msg:
219 return -EMSGSIZE; 221 return -EMSGSIZE;
220} 222}
221 223
224int nfc_genl_tm_activated(struct nfc_dev *dev, u32 protocol)
225{
226 struct sk_buff *msg;
227 void *hdr;
228
229 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
230 if (!msg)
231 return -ENOMEM;
232
233 hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0,
234 NFC_EVENT_TM_ACTIVATED);
235 if (!hdr)
236 goto free_msg;
237
238 if (nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx))
239 goto nla_put_failure;
240 if (nla_put_u32(msg, NFC_ATTR_TM_PROTOCOLS, protocol))
241 goto nla_put_failure;
242
243 genlmsg_end(msg, hdr);
244
245 genlmsg_multicast(msg, 0, nfc_genl_event_mcgrp.id, GFP_KERNEL);
246
247 return 0;
248
249nla_put_failure:
250 genlmsg_cancel(msg, hdr);
251free_msg:
252 nlmsg_free(msg);
253 return -EMSGSIZE;
254}
255
256int nfc_genl_tm_deactivated(struct nfc_dev *dev)
257{
258 struct sk_buff *msg;
259 void *hdr;
260
261 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
262 if (!msg)
263 return -ENOMEM;
264
265 hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0,
266 NFC_EVENT_TM_DEACTIVATED);
267 if (!hdr)
268 goto free_msg;
269
270 if (nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx))
271 goto nla_put_failure;
272
273 genlmsg_end(msg, hdr);
274
275 genlmsg_multicast(msg, 0, nfc_genl_event_mcgrp.id, GFP_KERNEL);
276
277 return 0;
278
279nla_put_failure:
280 genlmsg_cancel(msg, hdr);
281free_msg:
282 nlmsg_free(msg);
283 return -EMSGSIZE;
284}
285
222int nfc_genl_device_added(struct nfc_dev *dev) 286int nfc_genl_device_added(struct nfc_dev *dev)
223{ 287{
224 struct sk_buff *msg; 288 struct sk_buff *msg;
225 void *hdr; 289 void *hdr;
226 290
227 msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); 291 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
228 if (!msg) 292 if (!msg)
229 return -ENOMEM; 293 return -ENOMEM;
230 294
@@ -257,7 +321,7 @@ int nfc_genl_device_removed(struct nfc_dev *dev)
257 struct sk_buff *msg; 321 struct sk_buff *msg;
258 void *hdr; 322 void *hdr;
259 323
260 msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); 324 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
261 if (!msg) 325 if (!msg)
262 return -ENOMEM; 326 return -ENOMEM;
263 327
@@ -370,7 +434,7 @@ int nfc_genl_dep_link_up_event(struct nfc_dev *dev, u32 target_idx,
370 434
371 pr_debug("DEP link is up\n"); 435 pr_debug("DEP link is up\n");
372 436
373 msg = nlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC); 437 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
374 if (!msg) 438 if (!msg)
375 return -ENOMEM; 439 return -ENOMEM;
376 440
@@ -409,7 +473,7 @@ int nfc_genl_dep_link_down_event(struct nfc_dev *dev)
409 473
410 pr_debug("DEP link is down\n"); 474 pr_debug("DEP link is down\n");
411 475
412 msg = nlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC); 476 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
413 if (!msg) 477 if (!msg)
414 return -ENOMEM; 478 return -ENOMEM;
415 479
@@ -450,7 +514,7 @@ static int nfc_genl_get_device(struct sk_buff *skb, struct genl_info *info)
450 if (!dev) 514 if (!dev)
451 return -ENODEV; 515 return -ENODEV;
452 516
453 msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); 517 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
454 if (!msg) { 518 if (!msg) {
455 rc = -ENOMEM; 519 rc = -ENOMEM;
456 goto out_putdev; 520 goto out_putdev;
@@ -519,16 +583,25 @@ static int nfc_genl_start_poll(struct sk_buff *skb, struct genl_info *info)
519 struct nfc_dev *dev; 583 struct nfc_dev *dev;
520 int rc; 584 int rc;
521 u32 idx; 585 u32 idx;
522 u32 protocols; 586 u32 im_protocols = 0, tm_protocols = 0;
523 587
524 pr_debug("Poll start\n"); 588 pr_debug("Poll start\n");
525 589
526 if (!info->attrs[NFC_ATTR_DEVICE_INDEX] || 590 if (!info->attrs[NFC_ATTR_DEVICE_INDEX] ||
527 !info->attrs[NFC_ATTR_PROTOCOLS]) 591 ((!info->attrs[NFC_ATTR_IM_PROTOCOLS] &&
592 !info->attrs[NFC_ATTR_PROTOCOLS]) &&
593 !info->attrs[NFC_ATTR_TM_PROTOCOLS]))
528 return -EINVAL; 594 return -EINVAL;
529 595
530 idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]); 596 idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]);
531 protocols = nla_get_u32(info->attrs[NFC_ATTR_PROTOCOLS]); 597
598 if (info->attrs[NFC_ATTR_TM_PROTOCOLS])
599 tm_protocols = nla_get_u32(info->attrs[NFC_ATTR_TM_PROTOCOLS]);
600
601 if (info->attrs[NFC_ATTR_IM_PROTOCOLS])
602 im_protocols = nla_get_u32(info->attrs[NFC_ATTR_IM_PROTOCOLS]);
603 else if (info->attrs[NFC_ATTR_PROTOCOLS])
604 im_protocols = nla_get_u32(info->attrs[NFC_ATTR_PROTOCOLS]);
532 605
533 dev = nfc_get_device(idx); 606 dev = nfc_get_device(idx);
534 if (!dev) 607 if (!dev)
@@ -536,7 +609,7 @@ static int nfc_genl_start_poll(struct sk_buff *skb, struct genl_info *info)
536 609
537 mutex_lock(&dev->genl_data.genl_data_mutex); 610 mutex_lock(&dev->genl_data.genl_data_mutex);
538 611
539 rc = nfc_start_poll(dev, protocols); 612 rc = nfc_start_poll(dev, im_protocols, tm_protocols);
540 if (!rc) 613 if (!rc)
541 dev->genl_data.poll_req_pid = info->snd_pid; 614 dev->genl_data.poll_req_pid = info->snd_pid;
542 615
diff --git a/net/nfc/nfc.h b/net/nfc/nfc.h
index 3dd4232ae66..c5e42b79a41 100644
--- a/net/nfc/nfc.h
+++ b/net/nfc/nfc.h
@@ -55,6 +55,7 @@ int nfc_llcp_register_device(struct nfc_dev *dev);
55void nfc_llcp_unregister_device(struct nfc_dev *dev); 55void nfc_llcp_unregister_device(struct nfc_dev *dev);
56int nfc_llcp_set_remote_gb(struct nfc_dev *dev, u8 *gb, u8 gb_len); 56int nfc_llcp_set_remote_gb(struct nfc_dev *dev, u8 *gb, u8 gb_len);
57u8 *nfc_llcp_general_bytes(struct nfc_dev *dev, size_t *general_bytes_len); 57u8 *nfc_llcp_general_bytes(struct nfc_dev *dev, size_t *general_bytes_len);
58int nfc_llcp_data_received(struct nfc_dev *dev, struct sk_buff *skb);
58int __init nfc_llcp_init(void); 59int __init nfc_llcp_init(void);
59void nfc_llcp_exit(void); 60void nfc_llcp_exit(void);
60 61
@@ -90,6 +91,12 @@ static inline u8 *nfc_llcp_general_bytes(struct nfc_dev *dev, size_t *gb_len)
90 return NULL; 91 return NULL;
91} 92}
92 93
94static inline int nfc_llcp_data_received(struct nfc_dev *dev,
95 struct sk_buff *skb)
96{
97 return 0;
98}
99
93static inline int nfc_llcp_init(void) 100static inline int nfc_llcp_init(void)
94{ 101{
95 return 0; 102 return 0;
@@ -128,6 +135,9 @@ int nfc_genl_dep_link_up_event(struct nfc_dev *dev, u32 target_idx,
128 u8 comm_mode, u8 rf_mode); 135 u8 comm_mode, u8 rf_mode);
129int nfc_genl_dep_link_down_event(struct nfc_dev *dev); 136int nfc_genl_dep_link_down_event(struct nfc_dev *dev);
130 137
138int nfc_genl_tm_activated(struct nfc_dev *dev, u32 protocol);
139int nfc_genl_tm_deactivated(struct nfc_dev *dev);
140
131struct nfc_dev *nfc_get_device(unsigned int idx); 141struct nfc_dev *nfc_get_device(unsigned int idx);
132 142
133static inline void nfc_put_device(struct nfc_dev *dev) 143static inline void nfc_put_device(struct nfc_dev *dev)
@@ -158,7 +168,7 @@ int nfc_dev_up(struct nfc_dev *dev);
158 168
159int nfc_dev_down(struct nfc_dev *dev); 169int nfc_dev_down(struct nfc_dev *dev);
160 170
161int nfc_start_poll(struct nfc_dev *dev, u32 protocols); 171int nfc_start_poll(struct nfc_dev *dev, u32 im_protocols, u32 tm_protocols);
162 172
163int nfc_stop_poll(struct nfc_dev *dev); 173int nfc_stop_poll(struct nfc_dev *dev);
164 174
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 0f661745df0..ceaca7c134a 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -531,6 +531,7 @@ static int prb_calc_retire_blk_tmo(struct packet_sock *po,
531 unsigned int mbits = 0, msec = 0, div = 0, tmo = 0; 531 unsigned int mbits = 0, msec = 0, div = 0, tmo = 0;
532 struct ethtool_cmd ecmd; 532 struct ethtool_cmd ecmd;
533 int err; 533 int err;
534 u32 speed;
534 535
535 rtnl_lock(); 536 rtnl_lock();
536 dev = __dev_get_by_index(sock_net(&po->sk), po->ifindex); 537 dev = __dev_get_by_index(sock_net(&po->sk), po->ifindex);
@@ -539,25 +540,18 @@ static int prb_calc_retire_blk_tmo(struct packet_sock *po,
539 return DEFAULT_PRB_RETIRE_TOV; 540 return DEFAULT_PRB_RETIRE_TOV;
540 } 541 }
541 err = __ethtool_get_settings(dev, &ecmd); 542 err = __ethtool_get_settings(dev, &ecmd);
543 speed = ethtool_cmd_speed(&ecmd);
542 rtnl_unlock(); 544 rtnl_unlock();
543 if (!err) { 545 if (!err) {
544 switch (ecmd.speed) {
545 case SPEED_10000:
546 msec = 1;
547 div = 10000/1000;
548 break;
549 case SPEED_1000:
550 msec = 1;
551 div = 1000/1000;
552 break;
553 /* 546 /*
554 * If the link speed is so slow you don't really 547 * If the link speed is so slow you don't really
555 * need to worry about perf anyways 548 * need to worry about perf anyways
556 */ 549 */
557 case SPEED_100: 550 if (speed < SPEED_1000 || speed == SPEED_UNKNOWN) {
558 case SPEED_10:
559 default:
560 return DEFAULT_PRB_RETIRE_TOV; 551 return DEFAULT_PRB_RETIRE_TOV;
552 } else {
553 msec = 1;
554 div = speed / 1000;
561 } 555 }
562 } 556 }
563 557
@@ -592,7 +586,7 @@ static void init_prb_bdqc(struct packet_sock *po,
592 p1->knxt_seq_num = 1; 586 p1->knxt_seq_num = 1;
593 p1->pkbdq = pg_vec; 587 p1->pkbdq = pg_vec;
594 pbd = (struct tpacket_block_desc *)pg_vec[0].buffer; 588 pbd = (struct tpacket_block_desc *)pg_vec[0].buffer;
595 p1->pkblk_start = (char *)pg_vec[0].buffer; 589 p1->pkblk_start = pg_vec[0].buffer;
596 p1->kblk_size = req_u->req3.tp_block_size; 590 p1->kblk_size = req_u->req3.tp_block_size;
597 p1->knum_blocks = req_u->req3.tp_block_nr; 591 p1->knum_blocks = req_u->req3.tp_block_nr;
598 p1->hdrlen = po->tp_hdrlen; 592 p1->hdrlen = po->tp_hdrlen;
@@ -824,8 +818,7 @@ static void prb_open_block(struct tpacket_kbdq_core *pkc1,
824 h1->ts_first_pkt.ts_sec = ts.tv_sec; 818 h1->ts_first_pkt.ts_sec = ts.tv_sec;
825 h1->ts_first_pkt.ts_nsec = ts.tv_nsec; 819 h1->ts_first_pkt.ts_nsec = ts.tv_nsec;
826 pkc1->pkblk_start = (char *)pbd1; 820 pkc1->pkblk_start = (char *)pbd1;
827 pkc1->nxt_offset = (char *)(pkc1->pkblk_start + 821 pkc1->nxt_offset = pkc1->pkblk_start + BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
828 BLK_PLUS_PRIV(pkc1->blk_sizeof_priv));
829 BLOCK_O2FP(pbd1) = (__u32)BLK_PLUS_PRIV(pkc1->blk_sizeof_priv); 822 BLOCK_O2FP(pbd1) = (__u32)BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
830 BLOCK_O2PRIV(pbd1) = BLK_HDR_LEN; 823 BLOCK_O2PRIV(pbd1) = BLK_HDR_LEN;
831 pbd1->version = pkc1->version; 824 pbd1->version = pkc1->version;
@@ -1018,7 +1011,7 @@ static void *__packet_lookup_frame_in_block(struct packet_sock *po,
1018 struct tpacket_block_desc *pbd; 1011 struct tpacket_block_desc *pbd;
1019 char *curr, *end; 1012 char *curr, *end;
1020 1013
1021 pkc = GET_PBDQC_FROM_RB(((struct packet_ring_buffer *)&po->rx_ring)); 1014 pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
1022 pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc); 1015 pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
1023 1016
1024 /* Queue is frozen when user space is lagging behind */ 1017 /* Queue is frozen when user space is lagging behind */
@@ -1044,7 +1037,7 @@ static void *__packet_lookup_frame_in_block(struct packet_sock *po,
1044 smp_mb(); 1037 smp_mb();
1045 curr = pkc->nxt_offset; 1038 curr = pkc->nxt_offset;
1046 pkc->skb = skb; 1039 pkc->skb = skb;
1047 end = (char *) ((char *)pbd + pkc->kblk_size); 1040 end = (char *)pbd + pkc->kblk_size;
1048 1041
1049 /* first try the current block */ 1042 /* first try the current block */
1050 if (curr+TOTAL_PKT_LEN_INCL_ALIGN(len) < end) { 1043 if (curr+TOTAL_PKT_LEN_INCL_ALIGN(len) < end) {
@@ -1476,7 +1469,7 @@ static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock,
1476 * Find the device first to size check it 1469 * Find the device first to size check it
1477 */ 1470 */
1478 1471
1479 saddr->spkt_device[13] = 0; 1472 saddr->spkt_device[sizeof(saddr->spkt_device) - 1] = 0;
1480retry: 1473retry:
1481 rcu_read_lock(); 1474 rcu_read_lock();
1482 dev = dev_get_by_name_rcu(sock_net(sk), saddr->spkt_device); 1475 dev = dev_get_by_name_rcu(sock_net(sk), saddr->spkt_device);
diff --git a/net/rds/page.c b/net/rds/page.c
index 2499cd10842..9005a2c920e 100644
--- a/net/rds/page.c
+++ b/net/rds/page.c
@@ -74,11 +74,12 @@ int rds_page_copy_user(struct page *page, unsigned long offset,
74} 74}
75EXPORT_SYMBOL_GPL(rds_page_copy_user); 75EXPORT_SYMBOL_GPL(rds_page_copy_user);
76 76
77/* 77/**
78 * Message allocation uses this to build up regions of a message. 78 * rds_page_remainder_alloc - build up regions of a message.
79 * 79 *
80 * @bytes - the number of bytes needed. 80 * @scat: Scatter list for message
81 * @gfp - the waiting behaviour of the allocation 81 * @bytes: the number of bytes needed.
82 * @gfp: the waiting behaviour of the allocation
82 * 83 *
83 * @gfp is always ored with __GFP_HIGHMEM. Callers must be prepared to 84 * @gfp is always ored with __GFP_HIGHMEM. Callers must be prepared to
84 * kmap the pages, etc. 85 * kmap the pages, etc.
diff --git a/net/rfkill/core.c b/net/rfkill/core.c
index f974961754c..752b72360eb 100644
--- a/net/rfkill/core.c
+++ b/net/rfkill/core.c
@@ -325,7 +325,7 @@ static void __rfkill_switch_all(const enum rfkill_type type, bool blocked)
325 325
326 rfkill_global_states[type].cur = blocked; 326 rfkill_global_states[type].cur = blocked;
327 list_for_each_entry(rfkill, &rfkill_list, node) { 327 list_for_each_entry(rfkill, &rfkill_list, node) {
328 if (rfkill->type != type) 328 if (rfkill->type != type && type != RFKILL_TYPE_ALL)
329 continue; 329 continue;
330 330
331 rfkill_set_block(rfkill, blocked); 331 rfkill_set_block(rfkill, blocked);
diff --git a/net/rxrpc/ar-error.c b/net/rxrpc/ar-error.c
index 5d6b572a670..a9206087b4d 100644
--- a/net/rxrpc/ar-error.c
+++ b/net/rxrpc/ar-error.c
@@ -81,10 +81,6 @@ void rxrpc_UDP_error_report(struct sock *sk)
81 _net("I/F MTU %u", mtu); 81 _net("I/F MTU %u", mtu);
82 } 82 }
83 83
84 /* ip_rt_frag_needed() may have eaten the info */
85 if (mtu == 0)
86 mtu = ntohs(icmp_hdr(skb)->un.frag.mtu);
87
88 if (mtu == 0) { 84 if (mtu == 0) {
89 /* they didn't give us a size, estimate one */ 85 /* they didn't give us a size, estimate one */
90 if (mtu > 1500) { 86 if (mtu > 1500) {
diff --git a/net/rxrpc/ar-output.c b/net/rxrpc/ar-output.c
index 16ae88762d0..e1ac183d50b 100644
--- a/net/rxrpc/ar-output.c
+++ b/net/rxrpc/ar-output.c
@@ -242,7 +242,7 @@ int rxrpc_kernel_send_data(struct rxrpc_call *call, struct msghdr *msg,
242 242
243EXPORT_SYMBOL(rxrpc_kernel_send_data); 243EXPORT_SYMBOL(rxrpc_kernel_send_data);
244 244
245/* 245/**
246 * rxrpc_kernel_abort_call - Allow a kernel service to abort a call 246 * rxrpc_kernel_abort_call - Allow a kernel service to abort a call
247 * @call: The call to be aborted 247 * @call: The call to be aborted
248 * @abort_code: The abort code to stick into the ABORT packet 248 * @abort_code: The abort code to stick into the ABORT packet
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index e7a8976bf25..4a5d2bd4f78 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -507,6 +507,16 @@ config NET_EMATCH_TEXT
507 To compile this code as a module, choose M here: the 507 To compile this code as a module, choose M here: the
508 module will be called em_text. 508 module will be called em_text.
509 509
510config NET_EMATCH_CANID
511 tristate "CAN Identifier"
512 depends on NET_EMATCH && CAN
513 ---help---
514 Say Y here if you want to be able to classify CAN frames based
515 on CAN Identifier.
516
517 To compile this code as a module, choose M here: the
518 module will be called em_canid.
519
510config NET_CLS_ACT 520config NET_CLS_ACT
511 bool "Actions" 521 bool "Actions"
512 ---help--- 522 ---help---
diff --git a/net/sched/Makefile b/net/sched/Makefile
index 5940a1992f0..bcada751b4e 100644
--- a/net/sched/Makefile
+++ b/net/sched/Makefile
@@ -55,3 +55,4 @@ obj-$(CONFIG_NET_EMATCH_NBYTE) += em_nbyte.o
55obj-$(CONFIG_NET_EMATCH_U32) += em_u32.o 55obj-$(CONFIG_NET_EMATCH_U32) += em_u32.o
56obj-$(CONFIG_NET_EMATCH_META) += em_meta.o 56obj-$(CONFIG_NET_EMATCH_META) += em_meta.o
57obj-$(CONFIG_NET_EMATCH_TEXT) += em_text.o 57obj-$(CONFIG_NET_EMATCH_TEXT) += em_text.o
58obj-$(CONFIG_NET_EMATCH_CANID) += em_canid.o
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 5cfb160df06..e3d2c78cb52 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -652,27 +652,27 @@ tca_get_fill(struct sk_buff *skb, struct tc_action *a, u32 pid, u32 seq,
652 unsigned char *b = skb_tail_pointer(skb); 652 unsigned char *b = skb_tail_pointer(skb);
653 struct nlattr *nest; 653 struct nlattr *nest;
654 654
655 nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*t), flags); 655 nlh = nlmsg_put(skb, pid, seq, event, sizeof(*t), flags);
656 656 if (!nlh)
657 t = NLMSG_DATA(nlh); 657 goto out_nlmsg_trim;
658 t = nlmsg_data(nlh);
658 t->tca_family = AF_UNSPEC; 659 t->tca_family = AF_UNSPEC;
659 t->tca__pad1 = 0; 660 t->tca__pad1 = 0;
660 t->tca__pad2 = 0; 661 t->tca__pad2 = 0;
661 662
662 nest = nla_nest_start(skb, TCA_ACT_TAB); 663 nest = nla_nest_start(skb, TCA_ACT_TAB);
663 if (nest == NULL) 664 if (nest == NULL)
664 goto nla_put_failure; 665 goto out_nlmsg_trim;
665 666
666 if (tcf_action_dump(skb, a, bind, ref) < 0) 667 if (tcf_action_dump(skb, a, bind, ref) < 0)
667 goto nla_put_failure; 668 goto out_nlmsg_trim;
668 669
669 nla_nest_end(skb, nest); 670 nla_nest_end(skb, nest);
670 671
671 nlh->nlmsg_len = skb_tail_pointer(skb) - b; 672 nlh->nlmsg_len = skb_tail_pointer(skb) - b;
672 return skb->len; 673 return skb->len;
673 674
674nla_put_failure: 675out_nlmsg_trim:
675nlmsg_failure:
676 nlmsg_trim(skb, b); 676 nlmsg_trim(skb, b);
677 return -1; 677 return -1;
678} 678}
@@ -799,19 +799,21 @@ static int tca_action_flush(struct net *net, struct nlattr *nla,
799 if (a->ops == NULL) 799 if (a->ops == NULL)
800 goto err_out; 800 goto err_out;
801 801
802 nlh = NLMSG_PUT(skb, pid, n->nlmsg_seq, RTM_DELACTION, sizeof(*t)); 802 nlh = nlmsg_put(skb, pid, n->nlmsg_seq, RTM_DELACTION, sizeof(*t), 0);
803 t = NLMSG_DATA(nlh); 803 if (!nlh)
804 goto out_module_put;
805 t = nlmsg_data(nlh);
804 t->tca_family = AF_UNSPEC; 806 t->tca_family = AF_UNSPEC;
805 t->tca__pad1 = 0; 807 t->tca__pad1 = 0;
806 t->tca__pad2 = 0; 808 t->tca__pad2 = 0;
807 809
808 nest = nla_nest_start(skb, TCA_ACT_TAB); 810 nest = nla_nest_start(skb, TCA_ACT_TAB);
809 if (nest == NULL) 811 if (nest == NULL)
810 goto nla_put_failure; 812 goto out_module_put;
811 813
812 err = a->ops->walk(skb, &dcb, RTM_DELACTION, a); 814 err = a->ops->walk(skb, &dcb, RTM_DELACTION, a);
813 if (err < 0) 815 if (err < 0)
814 goto nla_put_failure; 816 goto out_module_put;
815 if (err == 0) 817 if (err == 0)
816 goto noflush_out; 818 goto noflush_out;
817 819
@@ -828,8 +830,7 @@ static int tca_action_flush(struct net *net, struct nlattr *nla,
828 830
829 return err; 831 return err;
830 832
831nla_put_failure: 833out_module_put:
832nlmsg_failure:
833 module_put(a->ops->owner); 834 module_put(a->ops->owner);
834err_out: 835err_out:
835noflush_out: 836noflush_out:
@@ -919,18 +920,20 @@ static int tcf_add_notify(struct net *net, struct tc_action *a,
919 920
920 b = skb_tail_pointer(skb); 921 b = skb_tail_pointer(skb);
921 922
922 nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*t), flags); 923 nlh = nlmsg_put(skb, pid, seq, event, sizeof(*t), flags);
923 t = NLMSG_DATA(nlh); 924 if (!nlh)
925 goto out_kfree_skb;
926 t = nlmsg_data(nlh);
924 t->tca_family = AF_UNSPEC; 927 t->tca_family = AF_UNSPEC;
925 t->tca__pad1 = 0; 928 t->tca__pad1 = 0;
926 t->tca__pad2 = 0; 929 t->tca__pad2 = 0;
927 930
928 nest = nla_nest_start(skb, TCA_ACT_TAB); 931 nest = nla_nest_start(skb, TCA_ACT_TAB);
929 if (nest == NULL) 932 if (nest == NULL)
930 goto nla_put_failure; 933 goto out_kfree_skb;
931 934
932 if (tcf_action_dump(skb, a, 0, 0) < 0) 935 if (tcf_action_dump(skb, a, 0, 0) < 0)
933 goto nla_put_failure; 936 goto out_kfree_skb;
934 937
935 nla_nest_end(skb, nest); 938 nla_nest_end(skb, nest);
936 939
@@ -942,8 +945,7 @@ static int tcf_add_notify(struct net *net, struct tc_action *a,
942 err = 0; 945 err = 0;
943 return err; 946 return err;
944 947
945nla_put_failure: 948out_kfree_skb:
946nlmsg_failure:
947 kfree_skb(skb); 949 kfree_skb(skb);
948 return -1; 950 return -1;
949} 951}
@@ -1062,7 +1064,7 @@ tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb)
1062 struct tc_action_ops *a_o; 1064 struct tc_action_ops *a_o;
1063 struct tc_action a; 1065 struct tc_action a;
1064 int ret = 0; 1066 int ret = 0;
1065 struct tcamsg *t = (struct tcamsg *) NLMSG_DATA(cb->nlh); 1067 struct tcamsg *t = (struct tcamsg *) nlmsg_data(cb->nlh);
1066 struct nlattr *kind = find_dump_kind(cb->nlh); 1068 struct nlattr *kind = find_dump_kind(cb->nlh);
1067 1069
1068 if (kind == NULL) { 1070 if (kind == NULL) {
@@ -1080,23 +1082,25 @@ tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb)
1080 if (a_o->walk == NULL) { 1082 if (a_o->walk == NULL) {
1081 WARN(1, "tc_dump_action: %s !capable of dumping table\n", 1083 WARN(1, "tc_dump_action: %s !capable of dumping table\n",
1082 a_o->kind); 1084 a_o->kind);
1083 goto nla_put_failure; 1085 goto out_module_put;
1084 } 1086 }
1085 1087
1086 nlh = NLMSG_PUT(skb, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq, 1088 nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq,
1087 cb->nlh->nlmsg_type, sizeof(*t)); 1089 cb->nlh->nlmsg_type, sizeof(*t), 0);
1088 t = NLMSG_DATA(nlh); 1090 if (!nlh)
1091 goto out_module_put;
1092 t = nlmsg_data(nlh);
1089 t->tca_family = AF_UNSPEC; 1093 t->tca_family = AF_UNSPEC;
1090 t->tca__pad1 = 0; 1094 t->tca__pad1 = 0;
1091 t->tca__pad2 = 0; 1095 t->tca__pad2 = 0;
1092 1096
1093 nest = nla_nest_start(skb, TCA_ACT_TAB); 1097 nest = nla_nest_start(skb, TCA_ACT_TAB);
1094 if (nest == NULL) 1098 if (nest == NULL)
1095 goto nla_put_failure; 1099 goto out_module_put;
1096 1100
1097 ret = a_o->walk(skb, cb, RTM_GETACTION, &a); 1101 ret = a_o->walk(skb, cb, RTM_GETACTION, &a);
1098 if (ret < 0) 1102 if (ret < 0)
1099 goto nla_put_failure; 1103 goto out_module_put;
1100 1104
1101 if (ret > 0) { 1105 if (ret > 0) {
1102 nla_nest_end(skb, nest); 1106 nla_nest_end(skb, nest);
@@ -1110,8 +1114,7 @@ tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb)
1110 module_put(a_o->owner); 1114 module_put(a_o->owner);
1111 return skb->len; 1115 return skb->len;
1112 1116
1113nla_put_failure: 1117out_module_put:
1114nlmsg_failure:
1115 module_put(a_o->owner); 1118 module_put(a_o->owner);
1116 nlmsg_trim(skb, b); 1119 nlmsg_trim(skb, b);
1117 return skb->len; 1120 return skb->len;
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index f452f696b4b..6dd1131f2ec 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -140,7 +140,7 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
140 int tp_created = 0; 140 int tp_created = 0;
141 141
142replay: 142replay:
143 t = NLMSG_DATA(n); 143 t = nlmsg_data(n);
144 protocol = TC_H_MIN(t->tcm_info); 144 protocol = TC_H_MIN(t->tcm_info);
145 prio = TC_H_MAJ(t->tcm_info); 145 prio = TC_H_MAJ(t->tcm_info);
146 nprio = prio; 146 nprio = prio;
@@ -349,8 +349,10 @@ static int tcf_fill_node(struct sk_buff *skb, struct tcf_proto *tp,
349 struct nlmsghdr *nlh; 349 struct nlmsghdr *nlh;
350 unsigned char *b = skb_tail_pointer(skb); 350 unsigned char *b = skb_tail_pointer(skb);
351 351
352 nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*tcm), flags); 352 nlh = nlmsg_put(skb, pid, seq, event, sizeof(*tcm), flags);
353 tcm = NLMSG_DATA(nlh); 353 if (!nlh)
354 goto out_nlmsg_trim;
355 tcm = nlmsg_data(nlh);
354 tcm->tcm_family = AF_UNSPEC; 356 tcm->tcm_family = AF_UNSPEC;
355 tcm->tcm__pad1 = 0; 357 tcm->tcm__pad1 = 0;
356 tcm->tcm__pad2 = 0; 358 tcm->tcm__pad2 = 0;
@@ -368,7 +370,7 @@ static int tcf_fill_node(struct sk_buff *skb, struct tcf_proto *tp,
368 nlh->nlmsg_len = skb_tail_pointer(skb) - b; 370 nlh->nlmsg_len = skb_tail_pointer(skb) - b;
369 return skb->len; 371 return skb->len;
370 372
371nlmsg_failure: 373out_nlmsg_trim:
372nla_put_failure: 374nla_put_failure:
373 nlmsg_trim(skb, b); 375 nlmsg_trim(skb, b);
374 return -1; 376 return -1;
@@ -418,7 +420,7 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
418 struct net_device *dev; 420 struct net_device *dev;
419 struct Qdisc *q; 421 struct Qdisc *q;
420 struct tcf_proto *tp, **chain; 422 struct tcf_proto *tp, **chain;
421 struct tcmsg *tcm = (struct tcmsg *)NLMSG_DATA(cb->nlh); 423 struct tcmsg *tcm = nlmsg_data(cb->nlh);
422 unsigned long cl = 0; 424 unsigned long cl = 0;
423 const struct Qdisc_class_ops *cops; 425 const struct Qdisc_class_ops *cops;
424 struct tcf_dump_args arg; 426 struct tcf_dump_args arg;
diff --git a/net/sched/em_canid.c b/net/sched/em_canid.c
new file mode 100644
index 00000000000..bfd34e4c1af
--- /dev/null
+++ b/net/sched/em_canid.c
@@ -0,0 +1,240 @@
1/*
2 * em_canid.c Ematch rule to match CAN frames according to their CAN IDs
3 *
4 * This program is free software; you can distribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 *
9 * Idea: Oliver Hartkopp <oliver.hartkopp@volkswagen.de>
10 * Copyright: (c) 2011 Czech Technical University in Prague
11 * (c) 2011 Volkswagen Group Research
12 * Authors: Michal Sojka <sojkam1@fel.cvut.cz>
13 * Pavel Pisa <pisa@cmp.felk.cvut.cz>
14 * Rostislav Lisovy <lisovy@gmail.cz>
15 * Funded by: Volkswagen Group Research
16 */
17
18#include <linux/slab.h>
19#include <linux/module.h>
20#include <linux/types.h>
21#include <linux/kernel.h>
22#include <linux/string.h>
23#include <linux/skbuff.h>
24#include <net/pkt_cls.h>
25#include <linux/can.h>
26
27#define EM_CAN_RULES_MAX 500
28
29struct canid_match {
30 /* For each SFF CAN ID (11 bit) there is one record in this bitfield */
31 DECLARE_BITMAP(match_sff, (1 << CAN_SFF_ID_BITS));
32
33 int rules_count;
34 int sff_rules_count;
35 int eff_rules_count;
36
37 /*
38 * Raw rules copied from netlink message; Used for sending
39 * information to userspace (when 'tc filter show' is invoked)
40 * AND when matching EFF frames
41 */
42 struct can_filter rules_raw[];
43};
44
45/**
46 * em_canid_get_id() - Extracts Can ID out of the sk_buff structure.
47 */
48static canid_t em_canid_get_id(struct sk_buff *skb)
49{
50 /* CAN ID is stored within the data field */
51 struct can_frame *cf = (struct can_frame *)skb->data;
52
53 return cf->can_id;
54}
55
56static void em_canid_sff_match_add(struct canid_match *cm, u32 can_id,
57 u32 can_mask)
58{
59 int i;
60
61 /*
62 * Limit can_mask and can_id to SFF range to
63 * protect against write after end of array
64 */
65 can_mask &= CAN_SFF_MASK;
66 can_id &= can_mask;
67
68 /* Single frame */
69 if (can_mask == CAN_SFF_MASK) {
70 set_bit(can_id, cm->match_sff);
71 return;
72 }
73
74 /* All frames */
75 if (can_mask == 0) {
76 bitmap_fill(cm->match_sff, (1 << CAN_SFF_ID_BITS));
77 return;
78 }
79
80 /*
81 * Individual frame filter.
82 * Add record (set bit to 1) for each ID that
83 * conforms particular rule
84 */
85 for (i = 0; i < (1 << CAN_SFF_ID_BITS); i++) {
86 if ((i & can_mask) == can_id)
87 set_bit(i, cm->match_sff);
88 }
89}
90
91static inline struct canid_match *em_canid_priv(struct tcf_ematch *m)
92{
93 return (struct canid_match *)m->data;
94}
95
96static int em_canid_match(struct sk_buff *skb, struct tcf_ematch *m,
97 struct tcf_pkt_info *info)
98{
99 struct canid_match *cm = em_canid_priv(m);
100 canid_t can_id;
101 int match = 0;
102 int i;
103 const struct can_filter *lp;
104
105 can_id = em_canid_get_id(skb);
106
107 if (can_id & CAN_EFF_FLAG) {
108 for (i = 0, lp = cm->rules_raw;
109 i < cm->eff_rules_count; i++, lp++) {
110 if (!(((lp->can_id ^ can_id) & lp->can_mask))) {
111 match = 1;
112 break;
113 }
114 }
115 } else { /* SFF */
116 can_id &= CAN_SFF_MASK;
117 match = (test_bit(can_id, cm->match_sff) ? 1 : 0);
118 }
119
120 return match;
121}
122
123static int em_canid_change(struct tcf_proto *tp, void *data, int len,
124 struct tcf_ematch *m)
125{
126 struct can_filter *conf = data; /* Array with rules */
127 struct canid_match *cm;
128 struct canid_match *cm_old = (struct canid_match *)m->data;
129 int i;
130
131 if (!len)
132 return -EINVAL;
133
134 if (len % sizeof(struct can_filter))
135 return -EINVAL;
136
137 if (len > sizeof(struct can_filter) * EM_CAN_RULES_MAX)
138 return -EINVAL;
139
140 cm = kzalloc(sizeof(struct canid_match) + len, GFP_KERNEL);
141 if (!cm)
142 return -ENOMEM;
143
144 cm->rules_count = len / sizeof(struct can_filter);
145
146 /*
147 * We need two for() loops for copying rules into two contiguous
148 * areas in rules_raw to process all eff rules with a simple loop.
149 * NB: The configuration interface supports sff and eff rules.
150 * We do not support filters here that match for the same can_id
151 * provided in a SFF and EFF frame (e.g. 0x123 / 0x80000123).
152 * For this (unusual case) two filters have to be specified. The
153 * SFF/EFF separation is done with the CAN_EFF_FLAG in the can_id.
154 */
155
156 /* Fill rules_raw with EFF rules first */
157 for (i = 0; i < cm->rules_count; i++) {
158 if (conf[i].can_id & CAN_EFF_FLAG) {
159 memcpy(cm->rules_raw + cm->eff_rules_count,
160 &conf[i],
161 sizeof(struct can_filter));
162
163 cm->eff_rules_count++;
164 }
165 }
166
167 /* append SFF frame rules */
168 for (i = 0; i < cm->rules_count; i++) {
169 if (!(conf[i].can_id & CAN_EFF_FLAG)) {
170 memcpy(cm->rules_raw
171 + cm->eff_rules_count
172 + cm->sff_rules_count,
173 &conf[i], sizeof(struct can_filter));
174
175 cm->sff_rules_count++;
176
177 em_canid_sff_match_add(cm,
178 conf[i].can_id, conf[i].can_mask);
179 }
180 }
181
182 m->datalen = sizeof(struct canid_match) + len;
183 m->data = (unsigned long)cm;
184
185 if (cm_old != NULL) {
186 pr_err("canid: Configuring an existing ematch!\n");
187 kfree(cm_old);
188 }
189
190 return 0;
191}
192
193static void em_canid_destroy(struct tcf_proto *tp, struct tcf_ematch *m)
194{
195 struct canid_match *cm = em_canid_priv(m);
196
197 kfree(cm);
198}
199
200static int em_canid_dump(struct sk_buff *skb, struct tcf_ematch *m)
201{
202 struct canid_match *cm = em_canid_priv(m);
203
204 /*
205 * When configuring this ematch 'rules_count' is set not to exceed
206 * 'rules_raw' array size
207 */
208 if (nla_put_nohdr(skb, sizeof(struct can_filter) * cm->rules_count,
209 &cm->rules_raw) < 0)
210 return -EMSGSIZE;
211
212 return 0;
213}
214
215static struct tcf_ematch_ops em_canid_ops = {
216 .kind = TCF_EM_CANID,
217 .change = em_canid_change,
218 .match = em_canid_match,
219 .destroy = em_canid_destroy,
220 .dump = em_canid_dump,
221 .owner = THIS_MODULE,
222 .link = LIST_HEAD_INIT(em_canid_ops.link)
223};
224
225static int __init init_em_canid(void)
226{
227 return tcf_em_register(&em_canid_ops);
228}
229
230static void __exit exit_em_canid(void)
231{
232 tcf_em_unregister(&em_canid_ops);
233}
234
235MODULE_LICENSE("GPL");
236
237module_init(init_em_canid);
238module_exit(exit_em_canid);
239
240MODULE_ALIAS_TCF_EMATCH(TCF_EM_CANID);
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 085ce53d570..a08b4ab3e42 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -973,7 +973,7 @@ check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w)
973static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg) 973static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
974{ 974{
975 struct net *net = sock_net(skb->sk); 975 struct net *net = sock_net(skb->sk);
976 struct tcmsg *tcm = NLMSG_DATA(n); 976 struct tcmsg *tcm = nlmsg_data(n);
977 struct nlattr *tca[TCA_MAX + 1]; 977 struct nlattr *tca[TCA_MAX + 1];
978 struct net_device *dev; 978 struct net_device *dev;
979 u32 clid = tcm->tcm_parent; 979 u32 clid = tcm->tcm_parent;
@@ -1046,7 +1046,7 @@ static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
1046 1046
1047replay: 1047replay:
1048 /* Reinit, just in case something touches this. */ 1048 /* Reinit, just in case something touches this. */
1049 tcm = NLMSG_DATA(n); 1049 tcm = nlmsg_data(n);
1050 clid = tcm->tcm_parent; 1050 clid = tcm->tcm_parent;
1051 q = p = NULL; 1051 q = p = NULL;
1052 1052
@@ -1193,8 +1193,10 @@ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
1193 struct gnet_dump d; 1193 struct gnet_dump d;
1194 struct qdisc_size_table *stab; 1194 struct qdisc_size_table *stab;
1195 1195
1196 nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*tcm), flags); 1196 nlh = nlmsg_put(skb, pid, seq, event, sizeof(*tcm), flags);
1197 tcm = NLMSG_DATA(nlh); 1197 if (!nlh)
1198 goto out_nlmsg_trim;
1199 tcm = nlmsg_data(nlh);
1198 tcm->tcm_family = AF_UNSPEC; 1200 tcm->tcm_family = AF_UNSPEC;
1199 tcm->tcm__pad1 = 0; 1201 tcm->tcm__pad1 = 0;
1200 tcm->tcm__pad2 = 0; 1202 tcm->tcm__pad2 = 0;
@@ -1230,7 +1232,7 @@ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
1230 nlh->nlmsg_len = skb_tail_pointer(skb) - b; 1232 nlh->nlmsg_len = skb_tail_pointer(skb) - b;
1231 return skb->len; 1233 return skb->len;
1232 1234
1233nlmsg_failure: 1235out_nlmsg_trim:
1234nla_put_failure: 1236nla_put_failure:
1235 nlmsg_trim(skb, b); 1237 nlmsg_trim(skb, b);
1236 return -1; 1238 return -1;
@@ -1366,7 +1368,7 @@ done:
1366static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg) 1368static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
1367{ 1369{
1368 struct net *net = sock_net(skb->sk); 1370 struct net *net = sock_net(skb->sk);
1369 struct tcmsg *tcm = NLMSG_DATA(n); 1371 struct tcmsg *tcm = nlmsg_data(n);
1370 struct nlattr *tca[TCA_MAX + 1]; 1372 struct nlattr *tca[TCA_MAX + 1];
1371 struct net_device *dev; 1373 struct net_device *dev;
1372 struct Qdisc *q = NULL; 1374 struct Qdisc *q = NULL;
@@ -1498,8 +1500,10 @@ static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q,
1498 struct gnet_dump d; 1500 struct gnet_dump d;
1499 const struct Qdisc_class_ops *cl_ops = q->ops->cl_ops; 1501 const struct Qdisc_class_ops *cl_ops = q->ops->cl_ops;
1500 1502
1501 nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*tcm), flags); 1503 nlh = nlmsg_put(skb, pid, seq, event, sizeof(*tcm), flags);
1502 tcm = NLMSG_DATA(nlh); 1504 if (!nlh)
1505 goto out_nlmsg_trim;
1506 tcm = nlmsg_data(nlh);
1503 tcm->tcm_family = AF_UNSPEC; 1507 tcm->tcm_family = AF_UNSPEC;
1504 tcm->tcm__pad1 = 0; 1508 tcm->tcm__pad1 = 0;
1505 tcm->tcm__pad2 = 0; 1509 tcm->tcm__pad2 = 0;
@@ -1525,7 +1529,7 @@ static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q,
1525 nlh->nlmsg_len = skb_tail_pointer(skb) - b; 1529 nlh->nlmsg_len = skb_tail_pointer(skb) - b;
1526 return skb->len; 1530 return skb->len;
1527 1531
1528nlmsg_failure: 1532out_nlmsg_trim:
1529nla_put_failure: 1533nla_put_failure:
1530 nlmsg_trim(skb, b); 1534 nlmsg_trim(skb, b);
1531 return -1; 1535 return -1;
@@ -1616,7 +1620,7 @@ static int tc_dump_tclass_root(struct Qdisc *root, struct sk_buff *skb,
1616 1620
1617static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb) 1621static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
1618{ 1622{
1619 struct tcmsg *tcm = (struct tcmsg *)NLMSG_DATA(cb->nlh); 1623 struct tcmsg *tcm = nlmsg_data(cb->nlh);
1620 struct net *net = sock_net(skb->sk); 1624 struct net *net = sock_net(skb->sk);
1621 struct netdev_queue *dev_queue; 1625 struct netdev_queue *dev_queue;
1622 struct net_device *dev; 1626 struct net_device *dev;
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index ca0c29695d5..47416716294 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -67,7 +67,6 @@ struct teql_master {
67struct teql_sched_data { 67struct teql_sched_data {
68 struct Qdisc *next; 68 struct Qdisc *next;
69 struct teql_master *m; 69 struct teql_master *m;
70 struct neighbour *ncache;
71 struct sk_buff_head q; 70 struct sk_buff_head q;
72}; 71};
73 72
@@ -134,7 +133,6 @@ teql_reset(struct Qdisc *sch)
134 133
135 skb_queue_purge(&dat->q); 134 skb_queue_purge(&dat->q);
136 sch->q.qlen = 0; 135 sch->q.qlen = 0;
137 teql_neigh_release(xchg(&dat->ncache, NULL));
138} 136}
139 137
140static void 138static void
@@ -166,7 +164,6 @@ teql_destroy(struct Qdisc *sch)
166 } 164 }
167 } 165 }
168 skb_queue_purge(&dat->q); 166 skb_queue_purge(&dat->q);
169 teql_neigh_release(xchg(&dat->ncache, NULL));
170 break; 167 break;
171 } 168 }
172 169
@@ -225,21 +222,25 @@ static int teql_qdisc_init(struct Qdisc *sch, struct nlattr *opt)
225static int 222static int
226__teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res, 223__teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res,
227 struct net_device *dev, struct netdev_queue *txq, 224 struct net_device *dev, struct netdev_queue *txq,
228 struct neighbour *mn) 225 struct dst_entry *dst)
229{ 226{
230 struct teql_sched_data *q = qdisc_priv(txq->qdisc); 227 struct neighbour *n;
231 struct neighbour *n = q->ncache; 228 int err = 0;
232 229
233 if (mn->tbl == NULL) 230 n = dst_neigh_lookup_skb(dst, skb);
234 return -EINVAL; 231 if (!n)
235 if (n && n->tbl == mn->tbl && 232 return -ENOENT;
236 memcmp(n->primary_key, mn->primary_key, mn->tbl->key_len) == 0) { 233
237 atomic_inc(&n->refcnt); 234 if (dst->dev != dev) {
238 } else { 235 struct neighbour *mn;
239 n = __neigh_lookup_errno(mn->tbl, mn->primary_key, dev); 236
240 if (IS_ERR(n)) 237 mn = __neigh_lookup_errno(n->tbl, n->primary_key, dev);
241 return PTR_ERR(n); 238 neigh_release(n);
239 if (IS_ERR(mn))
240 return PTR_ERR(mn);
241 n = mn;
242 } 242 }
243
243 if (neigh_event_send(n, skb_res) == 0) { 244 if (neigh_event_send(n, skb_res) == 0) {
244 int err; 245 int err;
245 char haddr[MAX_ADDR_LEN]; 246 char haddr[MAX_ADDR_LEN];
@@ -248,15 +249,13 @@ __teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res,
248 err = dev_hard_header(skb, dev, ntohs(skb->protocol), haddr, 249 err = dev_hard_header(skb, dev, ntohs(skb->protocol), haddr,
249 NULL, skb->len); 250 NULL, skb->len);
250 251
251 if (err < 0) { 252 if (err < 0)
252 neigh_release(n); 253 err = -EINVAL;
253 return -EINVAL; 254 } else {
254 } 255 err = (skb_res == NULL) ? -EAGAIN : 1;
255 teql_neigh_release(xchg(&q->ncache, n));
256 return 0;
257 } 256 }
258 neigh_release(n); 257 neigh_release(n);
259 return (skb_res == NULL) ? -EAGAIN : 1; 258 return err;
260} 259}
261 260
262static inline int teql_resolve(struct sk_buff *skb, 261static inline int teql_resolve(struct sk_buff *skb,
@@ -265,7 +264,6 @@ static inline int teql_resolve(struct sk_buff *skb,
265 struct netdev_queue *txq) 264 struct netdev_queue *txq)
266{ 265{
267 struct dst_entry *dst = skb_dst(skb); 266 struct dst_entry *dst = skb_dst(skb);
268 struct neighbour *mn;
269 int res; 267 int res;
270 268
271 if (txq->qdisc == &noop_qdisc) 269 if (txq->qdisc == &noop_qdisc)
@@ -275,8 +273,7 @@ static inline int teql_resolve(struct sk_buff *skb,
275 return 0; 273 return 0;
276 274
277 rcu_read_lock(); 275 rcu_read_lock();
278 mn = dst_get_neighbour_noref(dst); 276 res = __teql_resolve(skb, skb_res, dev, txq, dst);
279 res = mn ? __teql_resolve(skb, skb_res, dev, txq, mn) : 0;
280 rcu_read_unlock(); 277 rcu_read_unlock();
281 278
282 return res; 279 return res;
diff --git a/net/sctp/output.c b/net/sctp/output.c
index 6ae47acaaec..539f35d07f4 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -64,6 +64,8 @@
64#include <net/sctp/checksum.h> 64#include <net/sctp/checksum.h>
65 65
66/* Forward declarations for private helpers. */ 66/* Forward declarations for private helpers. */
67static sctp_xmit_t __sctp_packet_append_chunk(struct sctp_packet *packet,
68 struct sctp_chunk *chunk);
67static sctp_xmit_t sctp_packet_can_append_data(struct sctp_packet *packet, 69static sctp_xmit_t sctp_packet_can_append_data(struct sctp_packet *packet,
68 struct sctp_chunk *chunk); 70 struct sctp_chunk *chunk);
69static void sctp_packet_append_data(struct sctp_packet *packet, 71static void sctp_packet_append_data(struct sctp_packet *packet,
@@ -224,7 +226,10 @@ static sctp_xmit_t sctp_packet_bundle_auth(struct sctp_packet *pkt,
224 if (!auth) 226 if (!auth)
225 return retval; 227 return retval;
226 228
227 retval = sctp_packet_append_chunk(pkt, auth); 229 retval = __sctp_packet_append_chunk(pkt, auth);
230
231 if (retval != SCTP_XMIT_OK)
232 sctp_chunk_free(auth);
228 233
229 return retval; 234 return retval;
230} 235}
@@ -256,48 +261,31 @@ static sctp_xmit_t sctp_packet_bundle_sack(struct sctp_packet *pkt,
256 asoc->a_rwnd = asoc->rwnd; 261 asoc->a_rwnd = asoc->rwnd;
257 sack = sctp_make_sack(asoc); 262 sack = sctp_make_sack(asoc);
258 if (sack) { 263 if (sack) {
259 retval = sctp_packet_append_chunk(pkt, sack); 264 retval = __sctp_packet_append_chunk(pkt, sack);
265 if (retval != SCTP_XMIT_OK) {
266 sctp_chunk_free(sack);
267 goto out;
268 }
260 asoc->peer.sack_needed = 0; 269 asoc->peer.sack_needed = 0;
261 if (del_timer(timer)) 270 if (del_timer(timer))
262 sctp_association_put(asoc); 271 sctp_association_put(asoc);
263 } 272 }
264 } 273 }
265 } 274 }
275out:
266 return retval; 276 return retval;
267} 277}
268 278
279
269/* Append a chunk to the offered packet reporting back any inability to do 280/* Append a chunk to the offered packet reporting back any inability to do
270 * so. 281 * so.
271 */ 282 */
272sctp_xmit_t sctp_packet_append_chunk(struct sctp_packet *packet, 283static sctp_xmit_t __sctp_packet_append_chunk(struct sctp_packet *packet,
273 struct sctp_chunk *chunk) 284 struct sctp_chunk *chunk)
274{ 285{
275 sctp_xmit_t retval = SCTP_XMIT_OK; 286 sctp_xmit_t retval = SCTP_XMIT_OK;
276 __u16 chunk_len = WORD_ROUND(ntohs(chunk->chunk_hdr->length)); 287 __u16 chunk_len = WORD_ROUND(ntohs(chunk->chunk_hdr->length));
277 288
278 SCTP_DEBUG_PRINTK("%s: packet:%p chunk:%p\n", __func__, packet,
279 chunk);
280
281 /* Data chunks are special. Before seeing what else we can
282 * bundle into this packet, check to see if we are allowed to
283 * send this DATA.
284 */
285 if (sctp_chunk_is_data(chunk)) {
286 retval = sctp_packet_can_append_data(packet, chunk);
287 if (retval != SCTP_XMIT_OK)
288 goto finish;
289 }
290
291 /* Try to bundle AUTH chunk */
292 retval = sctp_packet_bundle_auth(packet, chunk);
293 if (retval != SCTP_XMIT_OK)
294 goto finish;
295
296 /* Try to bundle SACK chunk */
297 retval = sctp_packet_bundle_sack(packet, chunk);
298 if (retval != SCTP_XMIT_OK)
299 goto finish;
300
301 /* Check to see if this chunk will fit into the packet */ 289 /* Check to see if this chunk will fit into the packet */
302 retval = sctp_packet_will_fit(packet, chunk, chunk_len); 290 retval = sctp_packet_will_fit(packet, chunk, chunk_len);
303 if (retval != SCTP_XMIT_OK) 291 if (retval != SCTP_XMIT_OK)
@@ -339,6 +327,43 @@ finish:
339 return retval; 327 return retval;
340} 328}
341 329
330/* Append a chunk to the offered packet reporting back any inability to do
331 * so.
332 */
333sctp_xmit_t sctp_packet_append_chunk(struct sctp_packet *packet,
334 struct sctp_chunk *chunk)
335{
336 sctp_xmit_t retval = SCTP_XMIT_OK;
337
338 SCTP_DEBUG_PRINTK("%s: packet:%p chunk:%p\n", __func__, packet,
339 chunk);
340
341 /* Data chunks are special. Before seeing what else we can
342 * bundle into this packet, check to see if we are allowed to
343 * send this DATA.
344 */
345 if (sctp_chunk_is_data(chunk)) {
346 retval = sctp_packet_can_append_data(packet, chunk);
347 if (retval != SCTP_XMIT_OK)
348 goto finish;
349 }
350
351 /* Try to bundle AUTH chunk */
352 retval = sctp_packet_bundle_auth(packet, chunk);
353 if (retval != SCTP_XMIT_OK)
354 goto finish;
355
356 /* Try to bundle SACK chunk */
357 retval = sctp_packet_bundle_sack(packet, chunk);
358 if (retval != SCTP_XMIT_OK)
359 goto finish;
360
361 retval = __sctp_packet_append_chunk(packet, chunk);
362
363finish:
364 return retval;
365}
366
342/* All packets are sent to the network through this function from 367/* All packets are sent to the network through this function from
343 * sctp_outq_tail(). 368 * sctp_outq_tail().
344 * 369 *
diff --git a/net/sunrpc/backchannel_rqst.c b/net/sunrpc/backchannel_rqst.c
index 31def68a0f6..5a3d675d2f2 100644
--- a/net/sunrpc/backchannel_rqst.c
+++ b/net/sunrpc/backchannel_rqst.c
@@ -176,13 +176,14 @@ out_free:
176} 176}
177EXPORT_SYMBOL_GPL(xprt_setup_backchannel); 177EXPORT_SYMBOL_GPL(xprt_setup_backchannel);
178 178
179/* 179/**
180 * Destroys the backchannel preallocated structures. 180 * xprt_destroy_backchannel - Destroys the backchannel preallocated structures.
181 * @xprt: the transport holding the preallocated strucures
182 * @max_reqs the maximum number of preallocated structures to destroy
183 *
181 * Since these structures may have been allocated by multiple calls 184 * Since these structures may have been allocated by multiple calls
182 * to xprt_setup_backchannel, we only destroy up to the maximum number 185 * to xprt_setup_backchannel, we only destroy up to the maximum number
183 * of reqs specified by the caller. 186 * of reqs specified by the caller.
184 * @xprt: the transport holding the preallocated strucures
185 * @max_reqs the maximum number of preallocated structures to destroy
186 */ 187 */
187void xprt_destroy_backchannel(struct rpc_xprt *xprt, unsigned int max_reqs) 188void xprt_destroy_backchannel(struct rpc_xprt *xprt, unsigned int max_reqs)
188{ 189{
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index f56f045778a..00eb859b7de 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -385,7 +385,7 @@ out_no_rpciod:
385 return ERR_PTR(err); 385 return ERR_PTR(err);
386} 386}
387 387
388/* 388/**
389 * rpc_create - create an RPC client and transport with one call 389 * rpc_create - create an RPC client and transport with one call
390 * @args: rpc_clnt create argument structure 390 * @args: rpc_clnt create argument structure
391 * 391 *
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index a6de09de5d2..18bc130255a 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -43,6 +43,7 @@
43#include <net/tcp_states.h> 43#include <net/tcp_states.h>
44#include <asm/uaccess.h> 44#include <asm/uaccess.h>
45#include <asm/ioctls.h> 45#include <asm/ioctls.h>
46#include <trace/events/skb.h>
46 47
47#include <linux/sunrpc/types.h> 48#include <linux/sunrpc/types.h>
48#include <linux/sunrpc/clnt.h> 49#include <linux/sunrpc/clnt.h>
@@ -619,6 +620,8 @@ static int svc_udp_recvfrom(struct svc_rqst *rqstp)
619 if (!svc_udp_get_dest_address(rqstp, cmh)) { 620 if (!svc_udp_get_dest_address(rqstp, cmh)) {
620 net_warn_ratelimited("svc: received unknown control message %d/%d; dropping RPC reply datagram\n", 621 net_warn_ratelimited("svc: received unknown control message %d/%d; dropping RPC reply datagram\n",
621 cmh->cmsg_level, cmh->cmsg_type); 622 cmh->cmsg_level, cmh->cmsg_type);
623out_free:
624 trace_kfree_skb(skb, svc_udp_recvfrom);
622 skb_free_datagram_locked(svsk->sk_sk, skb); 625 skb_free_datagram_locked(svsk->sk_sk, skb);
623 return 0; 626 return 0;
624 } 627 }
@@ -630,8 +633,7 @@ static int svc_udp_recvfrom(struct svc_rqst *rqstp)
630 if (csum_partial_copy_to_xdr(&rqstp->rq_arg, skb)) { 633 if (csum_partial_copy_to_xdr(&rqstp->rq_arg, skb)) {
631 local_bh_enable(); 634 local_bh_enable();
632 /* checksum error */ 635 /* checksum error */
633 skb_free_datagram_locked(svsk->sk_sk, skb); 636 goto out_free;
634 return 0;
635 } 637 }
636 local_bh_enable(); 638 local_bh_enable();
637 skb_free_datagram_locked(svsk->sk_sk, skb); 639 skb_free_datagram_locked(svsk->sk_sk, skb);
@@ -640,10 +642,8 @@ static int svc_udp_recvfrom(struct svc_rqst *rqstp)
640 rqstp->rq_arg.head[0].iov_base = skb->data + 642 rqstp->rq_arg.head[0].iov_base = skb->data +
641 sizeof(struct udphdr); 643 sizeof(struct udphdr);
642 rqstp->rq_arg.head[0].iov_len = len; 644 rqstp->rq_arg.head[0].iov_len = len;
643 if (skb_checksum_complete(skb)) { 645 if (skb_checksum_complete(skb))
644 skb_free_datagram_locked(svsk->sk_sk, skb); 646 goto out_free;
645 return 0;
646 }
647 rqstp->rq_xprt_ctxt = skb; 647 rqstp->rq_xprt_ctxt = skb;
648 } 648 }
649 649
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
index fddcccfcdf7..0cf165580d8 100644
--- a/net/sunrpc/xdr.c
+++ b/net/sunrpc/xdr.c
@@ -180,7 +180,9 @@ EXPORT_SYMBOL_GPL(xdr_inline_pages);
180 180
181/* 181/*
182 * Helper routines for doing 'memmove' like operations on a struct xdr_buf 182 * Helper routines for doing 'memmove' like operations on a struct xdr_buf
183 * 183 */
184
185/**
184 * _shift_data_right_pages 186 * _shift_data_right_pages
185 * @pages: vector of pages containing both the source and dest memory area. 187 * @pages: vector of pages containing both the source and dest memory area.
186 * @pgto_base: page vector address of destination 188 * @pgto_base: page vector address of destination
@@ -242,7 +244,7 @@ _shift_data_right_pages(struct page **pages, size_t pgto_base,
242 } while ((len -= copy) != 0); 244 } while ((len -= copy) != 0);
243} 245}
244 246
245/* 247/**
246 * _copy_to_pages 248 * _copy_to_pages
247 * @pages: array of pages 249 * @pages: array of pages
248 * @pgbase: page vector address of destination 250 * @pgbase: page vector address of destination
@@ -286,7 +288,7 @@ _copy_to_pages(struct page **pages, size_t pgbase, const char *p, size_t len)
286 flush_dcache_page(*pgto); 288 flush_dcache_page(*pgto);
287} 289}
288 290
289/* 291/**
290 * _copy_from_pages 292 * _copy_from_pages
291 * @p: pointer to destination 293 * @p: pointer to destination
292 * @pages: array of pages 294 * @pages: array of pages
@@ -326,7 +328,7 @@ _copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len)
326} 328}
327EXPORT_SYMBOL_GPL(_copy_from_pages); 329EXPORT_SYMBOL_GPL(_copy_from_pages);
328 330
329/* 331/**
330 * xdr_shrink_bufhead 332 * xdr_shrink_bufhead
331 * @buf: xdr_buf 333 * @buf: xdr_buf
332 * @len: bytes to remove from buf->head[0] 334 * @len: bytes to remove from buf->head[0]
@@ -399,7 +401,7 @@ xdr_shrink_bufhead(struct xdr_buf *buf, size_t len)
399 buf->len = buf->buflen; 401 buf->len = buf->buflen;
400} 402}
401 403
402/* 404/**
403 * xdr_shrink_pagelen 405 * xdr_shrink_pagelen
404 * @buf: xdr_buf 406 * @buf: xdr_buf
405 * @len: bytes to remove from buf->pages 407 * @len: bytes to remove from buf->pages
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 3c83035cdaa..a5a402a7d21 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -531,7 +531,7 @@ void xprt_set_retrans_timeout_def(struct rpc_task *task)
531} 531}
532EXPORT_SYMBOL_GPL(xprt_set_retrans_timeout_def); 532EXPORT_SYMBOL_GPL(xprt_set_retrans_timeout_def);
533 533
534/* 534/**
535 * xprt_set_retrans_timeout_rtt - set a request's retransmit timeout 535 * xprt_set_retrans_timeout_rtt - set a request's retransmit timeout
536 * @task: task whose timeout is to be set 536 * @task: task whose timeout is to be set
537 * 537 *
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 890b03f8d87..62d0dac8f78 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -1014,9 +1014,6 @@ static void xs_udp_data_ready(struct sock *sk, int len)
1014 1014
1015 UDPX_INC_STATS_BH(sk, UDP_MIB_INDATAGRAMS); 1015 UDPX_INC_STATS_BH(sk, UDP_MIB_INDATAGRAMS);
1016 1016
1017 /* Something worked... */
1018 dst_confirm(skb_dst(skb));
1019
1020 xprt_adjust_cwnd(task, copied); 1017 xprt_adjust_cwnd(task, copied);
1021 xprt_complete_rqst(task, copied); 1018 xprt_complete_rqst(task, copied);
1022 1019
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index 2625f5ebe3e..d9df34fbd7c 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -162,7 +162,7 @@ static void bclink_update_last_sent(struct tipc_node *node, u32 seqno)
162} 162}
163 163
164 164
165/* 165/**
166 * tipc_bclink_retransmit_to - get most recent node to request retransmission 166 * tipc_bclink_retransmit_to - get most recent node to request retransmission
167 * 167 *
168 * Called with bc_lock locked 168 * Called with bc_lock locked
@@ -270,7 +270,7 @@ exit:
270 spin_unlock_bh(&bc_lock); 270 spin_unlock_bh(&bc_lock);
271} 271}
272 272
273/* 273/**
274 * tipc_bclink_update_link_state - update broadcast link state 274 * tipc_bclink_update_link_state - update broadcast link state
275 * 275 *
276 * tipc_net_lock and node lock set 276 * tipc_net_lock and node lock set
@@ -330,7 +330,7 @@ void tipc_bclink_update_link_state(struct tipc_node *n_ptr, u32 last_sent)
330 } 330 }
331} 331}
332 332
333/* 333/**
334 * bclink_peek_nack - monitor retransmission requests sent by other nodes 334 * bclink_peek_nack - monitor retransmission requests sent by other nodes
335 * 335 *
336 * Delay any upcoming NACK by this node if another node has already 336 * Delay any upcoming NACK by this node if another node has already
@@ -381,7 +381,7 @@ exit:
381 return res; 381 return res;
382} 382}
383 383
384/* 384/**
385 * bclink_accept_pkt - accept an incoming, in-sequence broadcast packet 385 * bclink_accept_pkt - accept an incoming, in-sequence broadcast packet
386 * 386 *
387 * Called with both sending node's lock and bc_lock taken. 387 * Called with both sending node's lock and bc_lock taken.
@@ -406,7 +406,7 @@ static void bclink_accept_pkt(struct tipc_node *node, u32 seqno)
406 } 406 }
407} 407}
408 408
409/* 409/**
410 * tipc_bclink_recv_pkt - receive a broadcast packet, and deliver upwards 410 * tipc_bclink_recv_pkt - receive a broadcast packet, and deliver upwards
411 * 411 *
412 * tipc_net_lock is read_locked, no other locks set 412 * tipc_net_lock is read_locked, no other locks set
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index a297e3a2e3e..86b703f5509 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -172,8 +172,8 @@ struct sk_buff *tipc_media_get_names(void)
172 172
173/** 173/**
174 * bearer_name_validate - validate & (optionally) deconstruct bearer name 174 * bearer_name_validate - validate & (optionally) deconstruct bearer name
175 * @name - ptr to bearer name string 175 * @name: ptr to bearer name string
176 * @name_parts - ptr to area for bearer name components (or NULL if not needed) 176 * @name_parts: ptr to area for bearer name components (or NULL if not needed)
177 * 177 *
178 * Returns 1 if bearer name is valid, otherwise 0. 178 * Returns 1 if bearer name is valid, otherwise 0.
179 */ 179 */
@@ -520,8 +520,7 @@ exit:
520} 520}
521 521
522/** 522/**
523 * tipc_block_bearer(): Block the bearer with the given name, 523 * tipc_block_bearer - Block the bearer with the given name, and reset all its links
524 * and reset all its links
525 */ 524 */
526int tipc_block_bearer(const char *name) 525int tipc_block_bearer(const char *name)
527{ 526{
diff --git a/net/tipc/bearer.h b/net/tipc/bearer.h
index e3b2be37fb3..4680de118af 100644
--- a/net/tipc/bearer.h
+++ b/net/tipc/bearer.h
@@ -57,7 +57,7 @@
57 */ 57 */
58#define TIPC_MEDIA_TYPE_ETH 1 58#define TIPC_MEDIA_TYPE_ETH 1
59 59
60/* 60/**
61 * struct tipc_media_addr - destination address used by TIPC bearers 61 * struct tipc_media_addr - destination address used by TIPC bearers
62 * @value: address info (format defined by media) 62 * @value: address info (format defined by media)
63 * @media_id: TIPC media type identifier 63 * @media_id: TIPC media type identifier
diff --git a/net/tipc/link.c b/net/tipc/link.c
index 7a614f43549..f6bf4830ddf 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -153,8 +153,8 @@ int tipc_link_is_active(struct tipc_link *l_ptr)
153 153
154/** 154/**
155 * link_name_validate - validate & (optionally) deconstruct tipc_link name 155 * link_name_validate - validate & (optionally) deconstruct tipc_link name
156 * @name - ptr to link name string 156 * @name: ptr to link name string
157 * @name_parts - ptr to area for link name components (or NULL if not needed) 157 * @name_parts: ptr to area for link name components (or NULL if not needed)
158 * 158 *
159 * Returns 1 if link name is valid, otherwise 0. 159 * Returns 1 if link name is valid, otherwise 0.
160 */ 160 */
@@ -944,7 +944,7 @@ int tipc_link_send(struct sk_buff *buf, u32 dest, u32 selector)
944 return res; 944 return res;
945} 945}
946 946
947/* 947/**
948 * tipc_link_send_names - send name table entries to new neighbor 948 * tipc_link_send_names - send name table entries to new neighbor
949 * 949 *
950 * Send routine for bulk delivery of name table messages when contact 950 * Send routine for bulk delivery of name table messages when contact
@@ -1787,7 +1787,7 @@ cont:
1787 read_unlock_bh(&tipc_net_lock); 1787 read_unlock_bh(&tipc_net_lock);
1788} 1788}
1789 1789
1790/* 1790/**
1791 * tipc_link_defer_pkt - Add out-of-sequence message to deferred reception queue 1791 * tipc_link_defer_pkt - Add out-of-sequence message to deferred reception queue
1792 * 1792 *
1793 * Returns increase in queue length (i.e. 0 or 1) 1793 * Returns increase in queue length (i.e. 0 or 1)
@@ -2635,8 +2635,8 @@ void tipc_link_set_queue_limits(struct tipc_link *l_ptr, u32 window)
2635 2635
2636/** 2636/**
2637 * link_find_link - locate link by name 2637 * link_find_link - locate link by name
2638 * @name - ptr to link name string 2638 * @name: ptr to link name string
2639 * @node - ptr to area to be filled with ptr to associated node 2639 * @node: ptr to area to be filled with ptr to associated node
2640 * 2640 *
2641 * Caller must hold 'tipc_net_lock' to ensure node and bearer are not deleted; 2641 * Caller must hold 'tipc_net_lock' to ensure node and bearer are not deleted;
2642 * this also prevents link deletion. 2642 * this also prevents link deletion.
@@ -2671,8 +2671,8 @@ static struct tipc_link *link_find_link(const char *name,
2671/** 2671/**
2672 * link_value_is_valid -- validate proposed link tolerance/priority/window 2672 * link_value_is_valid -- validate proposed link tolerance/priority/window
2673 * 2673 *
2674 * @cmd - value type (TIPC_CMD_SET_LINK_*) 2674 * @cmd: value type (TIPC_CMD_SET_LINK_*)
2675 * @new_value - the new value 2675 * @new_value: the new value
2676 * 2676 *
2677 * Returns 1 if value is within range, 0 if not. 2677 * Returns 1 if value is within range, 0 if not.
2678 */ 2678 */
@@ -2693,9 +2693,9 @@ static int link_value_is_valid(u16 cmd, u32 new_value)
2693 2693
2694/** 2694/**
2695 * link_cmd_set_value - change priority/tolerance/window for link/bearer/media 2695 * link_cmd_set_value - change priority/tolerance/window for link/bearer/media
2696 * @name - ptr to link, bearer, or media name 2696 * @name: ptr to link, bearer, or media name
2697 * @new_value - new value of link, bearer, or media setting 2697 * @new_value: new value of link, bearer, or media setting
2698 * @cmd - which link, bearer, or media attribute to set (TIPC_CMD_SET_LINK_*) 2698 * @cmd: which link, bearer, or media attribute to set (TIPC_CMD_SET_LINK_*)
2699 * 2699 *
2700 * Caller must hold 'tipc_net_lock' to ensure link/bearer/media is not deleted. 2700 * Caller must hold 'tipc_net_lock' to ensure link/bearer/media is not deleted.
2701 * 2701 *
diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c
index 010f24a59da..13fb9d559ea 100644
--- a/net/tipc/name_table.c
+++ b/net/tipc/name_table.c
@@ -191,7 +191,7 @@ static void nameseq_delete_empty(struct name_seq *seq)
191 } 191 }
192} 192}
193 193
194/* 194/**
195 * nameseq_find_subseq - find sub-sequence (if any) matching a name instance 195 * nameseq_find_subseq - find sub-sequence (if any) matching a name instance
196 * 196 *
197 * Very time-critical, so binary searches through sub-sequence array. 197 * Very time-critical, so binary searches through sub-sequence array.
@@ -435,7 +435,7 @@ found:
435} 435}
436 436
437/** 437/**
438 * tipc_nameseq_subscribe: attach a subscription, and issue 438 * tipc_nameseq_subscribe - attach a subscription, and issue
439 * the prescribed number of events if there is any sub- 439 * the prescribed number of events if there is any sub-
440 * sequence overlapping with the requested sequence 440 * sequence overlapping with the requested sequence
441 */ 441 */
@@ -520,7 +520,7 @@ struct publication *tipc_nametbl_remove_publ(u32 type, u32 lower,
520 return publ; 520 return publ;
521} 521}
522 522
523/* 523/**
524 * tipc_nametbl_translate - perform name translation 524 * tipc_nametbl_translate - perform name translation
525 * 525 *
526 * On entry, 'destnode' is the search domain used during translation. 526 * On entry, 'destnode' is the search domain used during translation.
@@ -751,7 +751,7 @@ void tipc_nametbl_unsubscribe(struct tipc_subscription *s)
751 751
752 752
753/** 753/**
754 * subseq_list: print specified sub-sequence contents into the given buffer 754 * subseq_list - print specified sub-sequence contents into the given buffer
755 */ 755 */
756static void subseq_list(struct sub_seq *sseq, struct print_buf *buf, u32 depth, 756static void subseq_list(struct sub_seq *sseq, struct print_buf *buf, u32 depth,
757 u32 index) 757 u32 index)
@@ -787,7 +787,7 @@ static void subseq_list(struct sub_seq *sseq, struct print_buf *buf, u32 depth,
787} 787}
788 788
789/** 789/**
790 * nameseq_list: print specified name sequence contents into the given buffer 790 * nameseq_list - print specified name sequence contents into the given buffer
791 */ 791 */
792static void nameseq_list(struct name_seq *seq, struct print_buf *buf, u32 depth, 792static void nameseq_list(struct name_seq *seq, struct print_buf *buf, u32 depth,
793 u32 type, u32 lowbound, u32 upbound, u32 index) 793 u32 type, u32 lowbound, u32 upbound, u32 index)
diff --git a/net/tipc/port.c b/net/tipc/port.c
index 2ad37a4db37..70bf78bd5b7 100644
--- a/net/tipc/port.c
+++ b/net/tipc/port.c
@@ -69,7 +69,7 @@ static u32 port_peerport(struct tipc_port *p_ptr)
69 return msg_destport(&p_ptr->phdr); 69 return msg_destport(&p_ptr->phdr);
70} 70}
71 71
72/* 72/**
73 * tipc_port_peer_msg - verify message was sent by connected port's peer 73 * tipc_port_peer_msg - verify message was sent by connected port's peer
74 * 74 *
75 * Handles cases where the node's network address has changed from 75 * Handles cases where the node's network address has changed from
@@ -909,8 +909,8 @@ int tipc_createport(void *usr_handle,
909 warn("Port creation failed, no memory\n"); 909 warn("Port creation failed, no memory\n");
910 return -ENOMEM; 910 return -ENOMEM;
911 } 911 }
912 p_ptr = (struct tipc_port *)tipc_createport_raw(NULL, port_dispatcher, 912 p_ptr = tipc_createport_raw(NULL, port_dispatcher, port_wakeup,
913 port_wakeup, importance); 913 importance);
914 if (!p_ptr) { 914 if (!p_ptr) {
915 kfree(up_ptr); 915 kfree(up_ptr);
916 return -ENOMEM; 916 return -ENOMEM;
@@ -1078,8 +1078,7 @@ int tipc_disconnect_port(struct tipc_port *tp_ptr)
1078 if (tp_ptr->connected) { 1078 if (tp_ptr->connected) {
1079 tp_ptr->connected = 0; 1079 tp_ptr->connected = 0;
1080 /* let timer expire on it's own to avoid deadlock! */ 1080 /* let timer expire on it's own to avoid deadlock! */
1081 tipc_nodesub_unsubscribe( 1081 tipc_nodesub_unsubscribe(&tp_ptr->subscription);
1082 &((struct tipc_port *)tp_ptr)->subscription);
1083 res = 0; 1082 res = 0;
1084 } else { 1083 } else {
1085 res = -ENOTCONN; 1084 res = -ENOTCONN;
@@ -1099,7 +1098,7 @@ int tipc_disconnect(u32 ref)
1099 p_ptr = tipc_port_lock(ref); 1098 p_ptr = tipc_port_lock(ref);
1100 if (!p_ptr) 1099 if (!p_ptr)
1101 return -EINVAL; 1100 return -EINVAL;
1102 res = tipc_disconnect_port((struct tipc_port *)p_ptr); 1101 res = tipc_disconnect_port(p_ptr);
1103 tipc_port_unlock(p_ptr); 1102 tipc_port_unlock(p_ptr);
1104 return res; 1103 return res;
1105} 1104}
diff --git a/net/tipc/port.h b/net/tipc/port.h
index 98cbec9c453..4660e306579 100644
--- a/net/tipc/port.h
+++ b/net/tipc/port.h
@@ -79,6 +79,7 @@ typedef void (*tipc_continue_event) (void *usr_handle, u32 portref);
79 * struct user_port - TIPC user port (used with native API) 79 * struct user_port - TIPC user port (used with native API)
80 * @usr_handle: user-specified field 80 * @usr_handle: user-specified field
81 * @ref: object reference to associated TIPC port 81 * @ref: object reference to associated TIPC port
82 *
82 * <various callback routines> 83 * <various callback routines>
83 */ 84 */
84struct user_port { 85struct user_port {
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 5577a447f53..1ebb49f3ddb 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -54,7 +54,7 @@ struct tipc_sock {
54}; 54};
55 55
56#define tipc_sk(sk) ((struct tipc_sock *)(sk)) 56#define tipc_sk(sk) ((struct tipc_sock *)(sk))
57#define tipc_sk_port(sk) ((struct tipc_port *)(tipc_sk(sk)->p)) 57#define tipc_sk_port(sk) (tipc_sk(sk)->p)
58 58
59#define tipc_rx_ready(sock) (!skb_queue_empty(&sock->sk->sk_receive_queue) || \ 59#define tipc_rx_ready(sock) (!skb_queue_empty(&sock->sk->sk_receive_queue) || \
60 (sock->state == SS_DISCONNECTING)) 60 (sock->state == SS_DISCONNECTING))
@@ -1699,9 +1699,8 @@ static int getsockopt(struct socket *sock,
1699 return put_user(sizeof(value), ol); 1699 return put_user(sizeof(value), ol);
1700} 1700}
1701 1701
1702/** 1702/* Protocol switches for the various types of TIPC sockets */
1703 * Protocol switches for the various types of TIPC sockets 1703
1704 */
1705static const struct proto_ops msg_ops = { 1704static const struct proto_ops msg_ops = {
1706 .owner = THIS_MODULE, 1705 .owner = THIS_MODULE,
1707 .family = AF_TIPC, 1706 .family = AF_TIPC,
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 641f2e47f16..79981d97bc9 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -115,15 +115,24 @@
115#include <net/checksum.h> 115#include <net/checksum.h>
116#include <linux/security.h> 116#include <linux/security.h>
117 117
118struct hlist_head unix_socket_table[UNIX_HASH_SIZE + 1]; 118struct hlist_head unix_socket_table[2 * UNIX_HASH_SIZE];
119EXPORT_SYMBOL_GPL(unix_socket_table); 119EXPORT_SYMBOL_GPL(unix_socket_table);
120DEFINE_SPINLOCK(unix_table_lock); 120DEFINE_SPINLOCK(unix_table_lock);
121EXPORT_SYMBOL_GPL(unix_table_lock); 121EXPORT_SYMBOL_GPL(unix_table_lock);
122static atomic_long_t unix_nr_socks; 122static atomic_long_t unix_nr_socks;
123 123
124#define unix_sockets_unbound (&unix_socket_table[UNIX_HASH_SIZE])
125 124
126#define UNIX_ABSTRACT(sk) (unix_sk(sk)->addr->hash != UNIX_HASH_SIZE) 125static struct hlist_head *unix_sockets_unbound(void *addr)
126{
127 unsigned long hash = (unsigned long)addr;
128
129 hash ^= hash >> 16;
130 hash ^= hash >> 8;
131 hash %= UNIX_HASH_SIZE;
132 return &unix_socket_table[UNIX_HASH_SIZE + hash];
133}
134
135#define UNIX_ABSTRACT(sk) (unix_sk(sk)->addr->hash < UNIX_HASH_SIZE)
127 136
128#ifdef CONFIG_SECURITY_NETWORK 137#ifdef CONFIG_SECURITY_NETWORK
129static void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb) 138static void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
@@ -645,7 +654,7 @@ static struct sock *unix_create1(struct net *net, struct socket *sock)
645 INIT_LIST_HEAD(&u->link); 654 INIT_LIST_HEAD(&u->link);
646 mutex_init(&u->readlock); /* single task reading lock */ 655 mutex_init(&u->readlock); /* single task reading lock */
647 init_waitqueue_head(&u->peer_wait); 656 init_waitqueue_head(&u->peer_wait);
648 unix_insert_socket(unix_sockets_unbound, sk); 657 unix_insert_socket(unix_sockets_unbound(sk), sk);
649out: 658out:
650 if (sk == NULL) 659 if (sk == NULL)
651 atomic_long_dec(&unix_nr_socks); 660 atomic_long_dec(&unix_nr_socks);
@@ -2239,47 +2248,54 @@ static unsigned int unix_dgram_poll(struct file *file, struct socket *sock,
2239} 2248}
2240 2249
2241#ifdef CONFIG_PROC_FS 2250#ifdef CONFIG_PROC_FS
2242static struct sock *first_unix_socket(int *i) 2251
2252#define BUCKET_SPACE (BITS_PER_LONG - (UNIX_HASH_BITS + 1) - 1)
2253
2254#define get_bucket(x) ((x) >> BUCKET_SPACE)
2255#define get_offset(x) ((x) & ((1L << BUCKET_SPACE) - 1))
2256#define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o))
2257
2258static struct sock *unix_from_bucket(struct seq_file *seq, loff_t *pos)
2243{ 2259{
2244 for (*i = 0; *i <= UNIX_HASH_SIZE; (*i)++) { 2260 unsigned long offset = get_offset(*pos);
2245 if (!hlist_empty(&unix_socket_table[*i])) 2261 unsigned long bucket = get_bucket(*pos);
2246 return __sk_head(&unix_socket_table[*i]); 2262 struct sock *sk;
2263 unsigned long count = 0;
2264
2265 for (sk = sk_head(&unix_socket_table[bucket]); sk; sk = sk_next(sk)) {
2266 if (sock_net(sk) != seq_file_net(seq))
2267 continue;
2268 if (++count == offset)
2269 break;
2247 } 2270 }
2248 return NULL; 2271
2272 return sk;
2249} 2273}
2250 2274
2251static struct sock *next_unix_socket(int *i, struct sock *s) 2275static struct sock *unix_next_socket(struct seq_file *seq,
2276 struct sock *sk,
2277 loff_t *pos)
2252{ 2278{
2253 struct sock *next = sk_next(s); 2279 unsigned long bucket;
2254 /* More in this chain? */ 2280
2255 if (next) 2281 while (sk > (struct sock *)SEQ_START_TOKEN) {
2256 return next; 2282 sk = sk_next(sk);
2257 /* Look for next non-empty chain. */ 2283 if (!sk)
2258 for ((*i)++; *i <= UNIX_HASH_SIZE; (*i)++) { 2284 goto next_bucket;
2259 if (!hlist_empty(&unix_socket_table[*i])) 2285 if (sock_net(sk) == seq_file_net(seq))
2260 return __sk_head(&unix_socket_table[*i]); 2286 return sk;
2261 } 2287 }
2262 return NULL;
2263}
2264 2288
2265struct unix_iter_state { 2289 do {
2266 struct seq_net_private p; 2290 sk = unix_from_bucket(seq, pos);
2267 int i; 2291 if (sk)
2268}; 2292 return sk;
2269 2293
2270static struct sock *unix_seq_idx(struct seq_file *seq, loff_t pos) 2294next_bucket:
2271{ 2295 bucket = get_bucket(*pos) + 1;
2272 struct unix_iter_state *iter = seq->private; 2296 *pos = set_bucket_offset(bucket, 1);
2273 loff_t off = 0; 2297 } while (bucket < ARRAY_SIZE(unix_socket_table));
2274 struct sock *s;
2275 2298
2276 for (s = first_unix_socket(&iter->i); s; s = next_unix_socket(&iter->i, s)) {
2277 if (sock_net(s) != seq_file_net(seq))
2278 continue;
2279 if (off == pos)
2280 return s;
2281 ++off;
2282 }
2283 return NULL; 2299 return NULL;
2284} 2300}
2285 2301
@@ -2287,22 +2303,20 @@ static void *unix_seq_start(struct seq_file *seq, loff_t *pos)
2287 __acquires(unix_table_lock) 2303 __acquires(unix_table_lock)
2288{ 2304{
2289 spin_lock(&unix_table_lock); 2305 spin_lock(&unix_table_lock);
2290 return *pos ? unix_seq_idx(seq, *pos - 1) : SEQ_START_TOKEN; 2306
2307 if (!*pos)
2308 return SEQ_START_TOKEN;
2309
2310 if (get_bucket(*pos) >= ARRAY_SIZE(unix_socket_table))
2311 return NULL;
2312
2313 return unix_next_socket(seq, NULL, pos);
2291} 2314}
2292 2315
2293static void *unix_seq_next(struct seq_file *seq, void *v, loff_t *pos) 2316static void *unix_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2294{ 2317{
2295 struct unix_iter_state *iter = seq->private;
2296 struct sock *sk = v;
2297 ++*pos; 2318 ++*pos;
2298 2319 return unix_next_socket(seq, v, pos);
2299 if (v == SEQ_START_TOKEN)
2300 sk = first_unix_socket(&iter->i);
2301 else
2302 sk = next_unix_socket(&iter->i, sk);
2303 while (sk && (sock_net(sk) != seq_file_net(seq)))
2304 sk = next_unix_socket(&iter->i, sk);
2305 return sk;
2306} 2320}
2307 2321
2308static void unix_seq_stop(struct seq_file *seq, void *v) 2322static void unix_seq_stop(struct seq_file *seq, void *v)
@@ -2365,7 +2379,7 @@ static const struct seq_operations unix_seq_ops = {
2365static int unix_seq_open(struct inode *inode, struct file *file) 2379static int unix_seq_open(struct inode *inode, struct file *file)
2366{ 2380{
2367 return seq_open_net(inode, file, &unix_seq_ops, 2381 return seq_open_net(inode, file, &unix_seq_ops,
2368 sizeof(struct unix_iter_state)); 2382 sizeof(struct seq_net_private));
2369} 2383}
2370 2384
2371static const struct file_operations unix_seq_fops = { 2385static const struct file_operations unix_seq_fops = {
diff --git a/net/unix/diag.c b/net/unix/diag.c
index 47d3002737f..a74864eedfc 100644
--- a/net/unix/diag.c
+++ b/net/unix/diag.c
@@ -8,40 +8,31 @@
8#include <net/af_unix.h> 8#include <net/af_unix.h>
9#include <net/tcp_states.h> 9#include <net/tcp_states.h>
10 10
11#define UNIX_DIAG_PUT(skb, attrtype, attrlen) \
12 RTA_DATA(__RTA_PUT(skb, attrtype, attrlen))
13
14static int sk_diag_dump_name(struct sock *sk, struct sk_buff *nlskb) 11static int sk_diag_dump_name(struct sock *sk, struct sk_buff *nlskb)
15{ 12{
16 struct unix_address *addr = unix_sk(sk)->addr; 13 struct unix_address *addr = unix_sk(sk)->addr;
17 char *s;
18
19 if (addr) {
20 s = UNIX_DIAG_PUT(nlskb, UNIX_DIAG_NAME, addr->len - sizeof(short));
21 memcpy(s, addr->name->sun_path, addr->len - sizeof(short));
22 }
23 14
24 return 0; 15 if (!addr)
16 return 0;
25 17
26rtattr_failure: 18 return nla_put(nlskb, UNIX_DIAG_NAME, addr->len - sizeof(short),
27 return -EMSGSIZE; 19 addr->name->sun_path);
28} 20}
29 21
30static int sk_diag_dump_vfs(struct sock *sk, struct sk_buff *nlskb) 22static int sk_diag_dump_vfs(struct sock *sk, struct sk_buff *nlskb)
31{ 23{
32 struct dentry *dentry = unix_sk(sk)->path.dentry; 24 struct dentry *dentry = unix_sk(sk)->path.dentry;
33 struct unix_diag_vfs *uv;
34 25
35 if (dentry) { 26 if (dentry) {
36 uv = UNIX_DIAG_PUT(nlskb, UNIX_DIAG_VFS, sizeof(*uv)); 27 struct unix_diag_vfs uv = {
37 uv->udiag_vfs_ino = dentry->d_inode->i_ino; 28 .udiag_vfs_ino = dentry->d_inode->i_ino,
38 uv->udiag_vfs_dev = dentry->d_sb->s_dev; 29 .udiag_vfs_dev = dentry->d_sb->s_dev,
30 };
31
32 return nla_put(nlskb, UNIX_DIAG_VFS, sizeof(uv), &uv);
39 } 33 }
40 34
41 return 0; 35 return 0;
42
43rtattr_failure:
44 return -EMSGSIZE;
45} 36}
46 37
47static int sk_diag_dump_peer(struct sock *sk, struct sk_buff *nlskb) 38static int sk_diag_dump_peer(struct sock *sk, struct sk_buff *nlskb)
@@ -56,24 +47,28 @@ static int sk_diag_dump_peer(struct sock *sk, struct sk_buff *nlskb)
56 unix_state_unlock(peer); 47 unix_state_unlock(peer);
57 sock_put(peer); 48 sock_put(peer);
58 49
59 RTA_PUT_U32(nlskb, UNIX_DIAG_PEER, ino); 50 return nla_put_u32(nlskb, UNIX_DIAG_PEER, ino);
60 } 51 }
61 52
62 return 0; 53 return 0;
63rtattr_failure:
64 return -EMSGSIZE;
65} 54}
66 55
67static int sk_diag_dump_icons(struct sock *sk, struct sk_buff *nlskb) 56static int sk_diag_dump_icons(struct sock *sk, struct sk_buff *nlskb)
68{ 57{
69 struct sk_buff *skb; 58 struct sk_buff *skb;
59 struct nlattr *attr;
70 u32 *buf; 60 u32 *buf;
71 int i; 61 int i;
72 62
73 if (sk->sk_state == TCP_LISTEN) { 63 if (sk->sk_state == TCP_LISTEN) {
74 spin_lock(&sk->sk_receive_queue.lock); 64 spin_lock(&sk->sk_receive_queue.lock);
75 buf = UNIX_DIAG_PUT(nlskb, UNIX_DIAG_ICONS, 65
76 sk->sk_receive_queue.qlen * sizeof(u32)); 66 attr = nla_reserve(nlskb, UNIX_DIAG_ICONS,
67 sk->sk_receive_queue.qlen * sizeof(u32));
68 if (!attr)
69 goto errout;
70
71 buf = nla_data(attr);
77 i = 0; 72 i = 0;
78 skb_queue_walk(&sk->sk_receive_queue, skb) { 73 skb_queue_walk(&sk->sk_receive_queue, skb) {
79 struct sock *req, *peer; 74 struct sock *req, *peer;
@@ -94,43 +89,38 @@ static int sk_diag_dump_icons(struct sock *sk, struct sk_buff *nlskb)
94 89
95 return 0; 90 return 0;
96 91
97rtattr_failure: 92errout:
98 spin_unlock(&sk->sk_receive_queue.lock); 93 spin_unlock(&sk->sk_receive_queue.lock);
99 return -EMSGSIZE; 94 return -EMSGSIZE;
100} 95}
101 96
102static int sk_diag_show_rqlen(struct sock *sk, struct sk_buff *nlskb) 97static int sk_diag_show_rqlen(struct sock *sk, struct sk_buff *nlskb)
103{ 98{
104 struct unix_diag_rqlen *rql; 99 struct unix_diag_rqlen rql;
105
106 rql = UNIX_DIAG_PUT(nlskb, UNIX_DIAG_RQLEN, sizeof(*rql));
107 100
108 if (sk->sk_state == TCP_LISTEN) { 101 if (sk->sk_state == TCP_LISTEN) {
109 rql->udiag_rqueue = sk->sk_receive_queue.qlen; 102 rql.udiag_rqueue = sk->sk_receive_queue.qlen;
110 rql->udiag_wqueue = sk->sk_max_ack_backlog; 103 rql.udiag_wqueue = sk->sk_max_ack_backlog;
111 } else { 104 } else {
112 rql->udiag_rqueue = (__u32)unix_inq_len(sk); 105 rql.udiag_rqueue = (u32) unix_inq_len(sk);
113 rql->udiag_wqueue = (__u32)unix_outq_len(sk); 106 rql.udiag_wqueue = (u32) unix_outq_len(sk);
114 } 107 }
115 108
116 return 0; 109 return nla_put(nlskb, UNIX_DIAG_RQLEN, sizeof(rql), &rql);
117
118rtattr_failure:
119 return -EMSGSIZE;
120} 110}
121 111
122static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct unix_diag_req *req, 112static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct unix_diag_req *req,
123 u32 pid, u32 seq, u32 flags, int sk_ino) 113 u32 pid, u32 seq, u32 flags, int sk_ino)
124{ 114{
125 unsigned char *b = skb_tail_pointer(skb);
126 struct nlmsghdr *nlh; 115 struct nlmsghdr *nlh;
127 struct unix_diag_msg *rep; 116 struct unix_diag_msg *rep;
128 117
129 nlh = NLMSG_PUT(skb, pid, seq, SOCK_DIAG_BY_FAMILY, sizeof(*rep)); 118 nlh = nlmsg_put(skb, pid, seq, SOCK_DIAG_BY_FAMILY, sizeof(*rep),
130 nlh->nlmsg_flags = flags; 119 flags);
131 120 if (!nlh)
132 rep = NLMSG_DATA(nlh); 121 return -EMSGSIZE;
133 122
123 rep = nlmsg_data(nlh);
134 rep->udiag_family = AF_UNIX; 124 rep->udiag_family = AF_UNIX;
135 rep->udiag_type = sk->sk_type; 125 rep->udiag_type = sk->sk_type;
136 rep->udiag_state = sk->sk_state; 126 rep->udiag_state = sk->sk_state;
@@ -139,33 +129,32 @@ static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct unix_diag_r
139 129
140 if ((req->udiag_show & UDIAG_SHOW_NAME) && 130 if ((req->udiag_show & UDIAG_SHOW_NAME) &&
141 sk_diag_dump_name(sk, skb)) 131 sk_diag_dump_name(sk, skb))
142 goto nlmsg_failure; 132 goto out_nlmsg_trim;
143 133
144 if ((req->udiag_show & UDIAG_SHOW_VFS) && 134 if ((req->udiag_show & UDIAG_SHOW_VFS) &&
145 sk_diag_dump_vfs(sk, skb)) 135 sk_diag_dump_vfs(sk, skb))
146 goto nlmsg_failure; 136 goto out_nlmsg_trim;
147 137
148 if ((req->udiag_show & UDIAG_SHOW_PEER) && 138 if ((req->udiag_show & UDIAG_SHOW_PEER) &&
149 sk_diag_dump_peer(sk, skb)) 139 sk_diag_dump_peer(sk, skb))
150 goto nlmsg_failure; 140 goto out_nlmsg_trim;
151 141
152 if ((req->udiag_show & UDIAG_SHOW_ICONS) && 142 if ((req->udiag_show & UDIAG_SHOW_ICONS) &&
153 sk_diag_dump_icons(sk, skb)) 143 sk_diag_dump_icons(sk, skb))
154 goto nlmsg_failure; 144 goto out_nlmsg_trim;
155 145
156 if ((req->udiag_show & UDIAG_SHOW_RQLEN) && 146 if ((req->udiag_show & UDIAG_SHOW_RQLEN) &&
157 sk_diag_show_rqlen(sk, skb)) 147 sk_diag_show_rqlen(sk, skb))
158 goto nlmsg_failure; 148 goto out_nlmsg_trim;
159 149
160 if ((req->udiag_show & UDIAG_SHOW_MEMINFO) && 150 if ((req->udiag_show & UDIAG_SHOW_MEMINFO) &&
161 sock_diag_put_meminfo(sk, skb, UNIX_DIAG_MEMINFO)) 151 sock_diag_put_meminfo(sk, skb, UNIX_DIAG_MEMINFO))
162 goto nlmsg_failure; 152 goto out_nlmsg_trim;
163 153
164 nlh->nlmsg_len = skb_tail_pointer(skb) - b; 154 return nlmsg_end(skb, nlh);
165 return skb->len;
166 155
167nlmsg_failure: 156out_nlmsg_trim:
168 nlmsg_trim(skb, b); 157 nlmsg_cancel(skb, nlh);
169 return -EMSGSIZE; 158 return -EMSGSIZE;
170} 159}
171 160
@@ -189,13 +178,15 @@ static int unix_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
189 struct unix_diag_req *req; 178 struct unix_diag_req *req;
190 int num, s_num, slot, s_slot; 179 int num, s_num, slot, s_slot;
191 180
192 req = NLMSG_DATA(cb->nlh); 181 req = nlmsg_data(cb->nlh);
193 182
194 s_slot = cb->args[0]; 183 s_slot = cb->args[0];
195 num = s_num = cb->args[1]; 184 num = s_num = cb->args[1];
196 185
197 spin_lock(&unix_table_lock); 186 spin_lock(&unix_table_lock);
198 for (slot = s_slot; slot <= UNIX_HASH_SIZE; s_num = 0, slot++) { 187 for (slot = s_slot;
188 slot < ARRAY_SIZE(unix_socket_table);
189 s_num = 0, slot++) {
199 struct sock *sk; 190 struct sock *sk;
200 struct hlist_node *node; 191 struct hlist_node *node;
201 192
@@ -228,7 +219,7 @@ static struct sock *unix_lookup_by_ino(int ino)
228 struct sock *sk; 219 struct sock *sk;
229 220
230 spin_lock(&unix_table_lock); 221 spin_lock(&unix_table_lock);
231 for (i = 0; i <= UNIX_HASH_SIZE; i++) { 222 for (i = 0; i < ARRAY_SIZE(unix_socket_table); i++) {
232 struct hlist_node *node; 223 struct hlist_node *node;
233 224
234 sk_for_each(sk, node, &unix_socket_table[i]) 225 sk_for_each(sk, node, &unix_socket_table[i])
@@ -268,15 +259,14 @@ static int unix_diag_get_exact(struct sk_buff *in_skb,
268 extra_len = 256; 259 extra_len = 256;
269again: 260again:
270 err = -ENOMEM; 261 err = -ENOMEM;
271 rep = alloc_skb(NLMSG_SPACE((sizeof(struct unix_diag_msg) + extra_len)), 262 rep = nlmsg_new(sizeof(struct unix_diag_msg) + extra_len, GFP_KERNEL);
272 GFP_KERNEL);
273 if (!rep) 263 if (!rep)
274 goto out; 264 goto out;
275 265
276 err = sk_diag_fill(sk, rep, req, NETLINK_CB(in_skb).pid, 266 err = sk_diag_fill(sk, rep, req, NETLINK_CB(in_skb).pid,
277 nlh->nlmsg_seq, 0, req->udiag_ino); 267 nlh->nlmsg_seq, 0, req->udiag_ino);
278 if (err < 0) { 268 if (err < 0) {
279 kfree_skb(rep); 269 nlmsg_free(rep);
280 extra_len += 256; 270 extra_len += 256;
281 if (extra_len >= PAGE_SIZE) 271 if (extra_len >= PAGE_SIZE)
282 goto out; 272 goto out;
@@ -307,7 +297,7 @@ static int unix_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
307 }; 297 };
308 return netlink_dump_start(sock_diag_nlsk, skb, h, &c); 298 return netlink_dump_start(sock_diag_nlsk, skb, h, &c);
309 } else 299 } else
310 return unix_diag_get_exact(skb, h, (struct unix_diag_req *)NLMSG_DATA(h)); 300 return unix_diag_get_exact(skb, h, nlmsg_data(h));
311} 301}
312 302
313static const struct sock_diag_handler unix_diag_handler = { 303static const struct sock_diag_handler unix_diag_handler = {
diff --git a/net/wireless/Kconfig b/net/wireless/Kconfig
index 2e4444fedbe..4d2b1ec6516 100644
--- a/net/wireless/Kconfig
+++ b/net/wireless/Kconfig
@@ -114,24 +114,10 @@ config CFG80211_WEXT
114 bool "cfg80211 wireless extensions compatibility" 114 bool "cfg80211 wireless extensions compatibility"
115 depends on CFG80211 115 depends on CFG80211
116 select WEXT_CORE 116 select WEXT_CORE
117 default y
118 help 117 help
119 Enable this option if you need old userspace for wireless 118 Enable this option if you need old userspace for wireless
120 extensions with cfg80211-based drivers. 119 extensions with cfg80211-based drivers.
121 120
122config WIRELESS_EXT_SYSFS
123 bool "Wireless extensions sysfs files"
124 depends on WEXT_CORE && SYSFS
125 help
126 This option enables the deprecated wireless statistics
127 files in /sys/class/net/*/wireless/. The same information
128 is available via the ioctls as well.
129
130 Say N. If you know you have ancient tools requiring it,
131 like very old versions of hal (prior to 0.5.12 release),
132 say Y and update the tools as soon as possible as this
133 option will be removed soon.
134
135config LIB80211 121config LIB80211
136 tristate "Common routines for IEEE802.11 drivers" 122 tristate "Common routines for IEEE802.11 drivers"
137 default n 123 default n
diff --git a/net/wireless/chan.c b/net/wireless/chan.c
index 884801ac4dd..c1999e45a07 100644
--- a/net/wireless/chan.c
+++ b/net/wireless/chan.c
@@ -60,7 +60,7 @@ bool cfg80211_can_beacon_sec_chan(struct wiphy *wiphy,
60 diff = -20; 60 diff = -20;
61 break; 61 break;
62 default: 62 default:
63 return false; 63 return true;
64 } 64 }
65 65
66 sec_chan = ieee80211_get_channel(wiphy, chan->center_freq + diff); 66 sec_chan = ieee80211_get_channel(wiphy, chan->center_freq + diff);
@@ -78,60 +78,17 @@ bool cfg80211_can_beacon_sec_chan(struct wiphy *wiphy,
78} 78}
79EXPORT_SYMBOL(cfg80211_can_beacon_sec_chan); 79EXPORT_SYMBOL(cfg80211_can_beacon_sec_chan);
80 80
81int cfg80211_set_freq(struct cfg80211_registered_device *rdev, 81int cfg80211_set_monitor_channel(struct cfg80211_registered_device *rdev,
82 struct wireless_dev *wdev, int freq, 82 int freq, enum nl80211_channel_type chantype)
83 enum nl80211_channel_type channel_type)
84{ 83{
85 struct ieee80211_channel *chan; 84 struct ieee80211_channel *chan;
86 int result;
87
88 if (wdev && wdev->iftype == NL80211_IFTYPE_MONITOR)
89 wdev = NULL;
90
91 if (wdev) {
92 ASSERT_WDEV_LOCK(wdev);
93
94 if (!netif_running(wdev->netdev))
95 return -ENETDOWN;
96 }
97 85
98 if (!rdev->ops->set_channel) 86 if (!rdev->ops->set_monitor_channel)
99 return -EOPNOTSUPP; 87 return -EOPNOTSUPP;
100 88
101 chan = rdev_freq_to_chan(rdev, freq, channel_type); 89 chan = rdev_freq_to_chan(rdev, freq, chantype);
102 if (!chan) 90 if (!chan)
103 return -EINVAL; 91 return -EINVAL;
104 92
105 /* Both channels should be able to initiate communication */ 93 return rdev->ops->set_monitor_channel(&rdev->wiphy, chan, chantype);
106 if (wdev && (wdev->iftype == NL80211_IFTYPE_ADHOC ||
107 wdev->iftype == NL80211_IFTYPE_AP ||
108 wdev->iftype == NL80211_IFTYPE_AP_VLAN ||
109 wdev->iftype == NL80211_IFTYPE_MESH_POINT ||
110 wdev->iftype == NL80211_IFTYPE_P2P_GO)) {
111 switch (channel_type) {
112 case NL80211_CHAN_HT40PLUS:
113 case NL80211_CHAN_HT40MINUS:
114 if (!cfg80211_can_beacon_sec_chan(&rdev->wiphy, chan,
115 channel_type)) {
116 printk(KERN_DEBUG
117 "cfg80211: Secondary channel not "
118 "allowed to initiate communication\n");
119 return -EINVAL;
120 }
121 break;
122 default:
123 break;
124 }
125 }
126
127 result = rdev->ops->set_channel(&rdev->wiphy,
128 wdev ? wdev->netdev : NULL,
129 chan, channel_type);
130 if (result)
131 return result;
132
133 if (wdev)
134 wdev->channel = chan;
135
136 return 0;
137} 94}
diff --git a/net/wireless/core.c b/net/wireless/core.c
index a87d4355297..907f62c80e2 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -96,69 +96,6 @@ struct wiphy *wiphy_idx_to_wiphy(int wiphy_idx)
96 return &rdev->wiphy; 96 return &rdev->wiphy;
97} 97}
98 98
99/* requires cfg80211_mutex to be held! */
100struct cfg80211_registered_device *
101__cfg80211_rdev_from_info(struct genl_info *info)
102{
103 int ifindex;
104 struct cfg80211_registered_device *bywiphyidx = NULL, *byifidx = NULL;
105 struct net_device *dev;
106 int err = -EINVAL;
107
108 assert_cfg80211_lock();
109
110 if (info->attrs[NL80211_ATTR_WIPHY]) {
111 bywiphyidx = cfg80211_rdev_by_wiphy_idx(
112 nla_get_u32(info->attrs[NL80211_ATTR_WIPHY]));
113 err = -ENODEV;
114 }
115
116 if (info->attrs[NL80211_ATTR_IFINDEX]) {
117 ifindex = nla_get_u32(info->attrs[NL80211_ATTR_IFINDEX]);
118 dev = dev_get_by_index(genl_info_net(info), ifindex);
119 if (dev) {
120 if (dev->ieee80211_ptr)
121 byifidx =
122 wiphy_to_dev(dev->ieee80211_ptr->wiphy);
123 dev_put(dev);
124 }
125 err = -ENODEV;
126 }
127
128 if (bywiphyidx && byifidx) {
129 if (bywiphyidx != byifidx)
130 return ERR_PTR(-EINVAL);
131 else
132 return bywiphyidx; /* == byifidx */
133 }
134 if (bywiphyidx)
135 return bywiphyidx;
136
137 if (byifidx)
138 return byifidx;
139
140 return ERR_PTR(err);
141}
142
143struct cfg80211_registered_device *
144cfg80211_get_dev_from_info(struct genl_info *info)
145{
146 struct cfg80211_registered_device *rdev;
147
148 mutex_lock(&cfg80211_mutex);
149 rdev = __cfg80211_rdev_from_info(info);
150
151 /* if it is not an error we grab the lock on
152 * it to assure it won't be going away while
153 * we operate on it */
154 if (!IS_ERR(rdev))
155 mutex_lock(&rdev->mtx);
156
157 mutex_unlock(&cfg80211_mutex);
158
159 return rdev;
160}
161
162struct cfg80211_registered_device * 99struct cfg80211_registered_device *
163cfg80211_get_dev_from_ifindex(struct net *net, int ifindex) 100cfg80211_get_dev_from_ifindex(struct net *net, int ifindex)
164{ 101{
diff --git a/net/wireless/core.h b/net/wireless/core.h
index 8523f387867..609a579255a 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -159,32 +159,6 @@ static inline void cfg80211_unhold_bss(struct cfg80211_internal_bss *bss)
159struct cfg80211_registered_device *cfg80211_rdev_by_wiphy_idx(int wiphy_idx); 159struct cfg80211_registered_device *cfg80211_rdev_by_wiphy_idx(int wiphy_idx);
160int get_wiphy_idx(struct wiphy *wiphy); 160int get_wiphy_idx(struct wiphy *wiphy);
161 161
162struct cfg80211_registered_device *
163__cfg80211_rdev_from_info(struct genl_info *info);
164
165/*
166 * This function returns a pointer to the driver
167 * that the genl_info item that is passed refers to.
168 * If successful, it returns non-NULL and also locks
169 * the driver's mutex!
170 *
171 * This means that you need to call cfg80211_unlock_rdev()
172 * before being allowed to acquire &cfg80211_mutex!
173 *
174 * This is necessary because we need to lock the global
175 * mutex to get an item off the list safely, and then
176 * we lock the rdev mutex so it doesn't go away under us.
177 *
178 * We don't want to keep cfg80211_mutex locked
179 * for all the time in order to allow requests on
180 * other interfaces to go through at the same time.
181 *
182 * The result of this can be a PTR_ERR and hence must
183 * be checked with IS_ERR() for errors.
184 */
185extern struct cfg80211_registered_device *
186cfg80211_get_dev_from_info(struct genl_info *info);
187
188/* requires cfg80211_rdev_mutex to be held! */ 162/* requires cfg80211_rdev_mutex to be held! */
189struct wiphy *wiphy_idx_to_wiphy(int wiphy_idx); 163struct wiphy *wiphy_idx_to_wiphy(int wiphy_idx);
190 164
@@ -303,14 +277,17 @@ extern const struct mesh_config default_mesh_config;
303extern const struct mesh_setup default_mesh_setup; 277extern const struct mesh_setup default_mesh_setup;
304int __cfg80211_join_mesh(struct cfg80211_registered_device *rdev, 278int __cfg80211_join_mesh(struct cfg80211_registered_device *rdev,
305 struct net_device *dev, 279 struct net_device *dev,
306 const struct mesh_setup *setup, 280 struct mesh_setup *setup,
307 const struct mesh_config *conf); 281 const struct mesh_config *conf);
308int cfg80211_join_mesh(struct cfg80211_registered_device *rdev, 282int cfg80211_join_mesh(struct cfg80211_registered_device *rdev,
309 struct net_device *dev, 283 struct net_device *dev,
310 const struct mesh_setup *setup, 284 struct mesh_setup *setup,
311 const struct mesh_config *conf); 285 const struct mesh_config *conf);
312int cfg80211_leave_mesh(struct cfg80211_registered_device *rdev, 286int cfg80211_leave_mesh(struct cfg80211_registered_device *rdev,
313 struct net_device *dev); 287 struct net_device *dev);
288int cfg80211_set_mesh_freq(struct cfg80211_registered_device *rdev,
289 struct wireless_dev *wdev, int freq,
290 enum nl80211_channel_type channel_type);
314 291
315/* MLME */ 292/* MLME */
316int __cfg80211_mlme_auth(struct cfg80211_registered_device *rdev, 293int __cfg80211_mlme_auth(struct cfg80211_registered_device *rdev,
@@ -441,9 +418,8 @@ cfg80211_can_add_interface(struct cfg80211_registered_device *rdev,
441struct ieee80211_channel * 418struct ieee80211_channel *
442rdev_freq_to_chan(struct cfg80211_registered_device *rdev, 419rdev_freq_to_chan(struct cfg80211_registered_device *rdev,
443 int freq, enum nl80211_channel_type channel_type); 420 int freq, enum nl80211_channel_type channel_type);
444int cfg80211_set_freq(struct cfg80211_registered_device *rdev, 421int cfg80211_set_monitor_channel(struct cfg80211_registered_device *rdev,
445 struct wireless_dev *wdev, int freq, 422 int freq, enum nl80211_channel_type chantype);
446 enum nl80211_channel_type channel_type);
447 423
448int ieee80211_get_ratemask(struct ieee80211_supported_band *sband, 424int ieee80211_get_ratemask(struct ieee80211_supported_band *sband,
449 const u8 *rates, unsigned int n_rates, 425 const u8 *rates, unsigned int n_rates,
diff --git a/net/wireless/mesh.c b/net/wireless/mesh.c
index 2749cb86b46..3b73b07486c 100644
--- a/net/wireless/mesh.c
+++ b/net/wireless/mesh.c
@@ -14,6 +14,9 @@
14 14
15#define MESH_PATH_TIMEOUT 5000 15#define MESH_PATH_TIMEOUT 5000
16#define MESH_RANN_INTERVAL 5000 16#define MESH_RANN_INTERVAL 5000
17#define MESH_PATH_TO_ROOT_TIMEOUT 6000
18#define MESH_ROOT_INTERVAL 5000
19#define MESH_ROOT_CONFIRMATION_INTERVAL 2000
17 20
18/* 21/*
19 * Minimum interval between two consecutive PREQs originated by the same 22 * Minimum interval between two consecutive PREQs originated by the same
@@ -62,9 +65,15 @@ const struct mesh_config default_mesh_config = {
62 .dot11MeshForwarding = true, 65 .dot11MeshForwarding = true,
63 .rssi_threshold = MESH_RSSI_THRESHOLD, 66 .rssi_threshold = MESH_RSSI_THRESHOLD,
64 .ht_opmode = IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED, 67 .ht_opmode = IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED,
68 .dot11MeshHWMPactivePathToRootTimeout = MESH_PATH_TO_ROOT_TIMEOUT,
69 .dot11MeshHWMProotInterval = MESH_ROOT_INTERVAL,
70 .dot11MeshHWMPconfirmationInterval = MESH_ROOT_CONFIRMATION_INTERVAL,
65}; 71};
66 72
67const struct mesh_setup default_mesh_setup = { 73const struct mesh_setup default_mesh_setup = {
74 /* cfg80211_join_mesh() will pick a channel if needed */
75 .channel = NULL,
76 .channel_type = NL80211_CHAN_NO_HT,
68 .sync_method = IEEE80211_SYNC_METHOD_NEIGHBOR_OFFSET, 77 .sync_method = IEEE80211_SYNC_METHOD_NEIGHBOR_OFFSET,
69 .path_sel_proto = IEEE80211_PATH_PROTOCOL_HWMP, 78 .path_sel_proto = IEEE80211_PATH_PROTOCOL_HWMP,
70 .path_metric = IEEE80211_PATH_METRIC_AIRTIME, 79 .path_metric = IEEE80211_PATH_METRIC_AIRTIME,
@@ -75,7 +84,7 @@ const struct mesh_setup default_mesh_setup = {
75 84
76int __cfg80211_join_mesh(struct cfg80211_registered_device *rdev, 85int __cfg80211_join_mesh(struct cfg80211_registered_device *rdev,
77 struct net_device *dev, 86 struct net_device *dev,
78 const struct mesh_setup *setup, 87 struct mesh_setup *setup,
79 const struct mesh_config *conf) 88 const struct mesh_config *conf)
80{ 89{
81 struct wireless_dev *wdev = dev->ieee80211_ptr; 90 struct wireless_dev *wdev = dev->ieee80211_ptr;
@@ -101,6 +110,51 @@ int __cfg80211_join_mesh(struct cfg80211_registered_device *rdev,
101 if (!rdev->ops->join_mesh) 110 if (!rdev->ops->join_mesh)
102 return -EOPNOTSUPP; 111 return -EOPNOTSUPP;
103 112
113 if (!setup->channel) {
114 /* if no channel explicitly given, use preset channel */
115 setup->channel = wdev->preset_chan;
116 setup->channel_type = wdev->preset_chantype;
117 }
118
119 if (!setup->channel) {
120 /* if we don't have that either, use the first usable channel */
121 enum ieee80211_band band;
122
123 for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
124 struct ieee80211_supported_band *sband;
125 struct ieee80211_channel *chan;
126 int i;
127
128 sband = rdev->wiphy.bands[band];
129 if (!sband)
130 continue;
131
132 for (i = 0; i < sband->n_channels; i++) {
133 chan = &sband->channels[i];
134 if (chan->flags & (IEEE80211_CHAN_NO_IBSS |
135 IEEE80211_CHAN_PASSIVE_SCAN |
136 IEEE80211_CHAN_DISABLED |
137 IEEE80211_CHAN_RADAR))
138 continue;
139 setup->channel = chan;
140 break;
141 }
142
143 if (setup->channel)
144 break;
145 }
146
147 /* no usable channel ... */
148 if (!setup->channel)
149 return -EINVAL;
150
151 setup->channel_type = NL80211_CHAN_NO_HT;
152 }
153
154 if (!cfg80211_can_beacon_sec_chan(&rdev->wiphy, setup->channel,
155 setup->channel_type))
156 return -EINVAL;
157
104 err = rdev->ops->join_mesh(&rdev->wiphy, dev, conf, setup); 158 err = rdev->ops->join_mesh(&rdev->wiphy, dev, conf, setup);
105 if (!err) { 159 if (!err) {
106 memcpy(wdev->ssid, setup->mesh_id, setup->mesh_id_len); 160 memcpy(wdev->ssid, setup->mesh_id, setup->mesh_id_len);
@@ -112,7 +166,7 @@ int __cfg80211_join_mesh(struct cfg80211_registered_device *rdev,
112 166
113int cfg80211_join_mesh(struct cfg80211_registered_device *rdev, 167int cfg80211_join_mesh(struct cfg80211_registered_device *rdev,
114 struct net_device *dev, 168 struct net_device *dev,
115 const struct mesh_setup *setup, 169 struct mesh_setup *setup,
116 const struct mesh_config *conf) 170 const struct mesh_config *conf)
117{ 171{
118 struct wireless_dev *wdev = dev->ieee80211_ptr; 172 struct wireless_dev *wdev = dev->ieee80211_ptr;
@@ -125,6 +179,45 @@ int cfg80211_join_mesh(struct cfg80211_registered_device *rdev,
125 return err; 179 return err;
126} 180}
127 181
182int cfg80211_set_mesh_freq(struct cfg80211_registered_device *rdev,
183 struct wireless_dev *wdev, int freq,
184 enum nl80211_channel_type channel_type)
185{
186 struct ieee80211_channel *channel;
187
188 channel = rdev_freq_to_chan(rdev, freq, channel_type);
189 if (!channel || !cfg80211_can_beacon_sec_chan(&rdev->wiphy,
190 channel,
191 channel_type)) {
192 return -EINVAL;
193 }
194
195 /*
196 * Workaround for libertas (only!), it puts the interface
197 * into mesh mode but doesn't implement join_mesh. Instead,
198 * it is configured via sysfs and then joins the mesh when
199 * you set the channel. Note that the libertas mesh isn't
200 * compatible with 802.11 mesh.
201 */
202 if (rdev->ops->libertas_set_mesh_channel) {
203 if (channel_type != NL80211_CHAN_NO_HT)
204 return -EINVAL;
205
206 if (!netif_running(wdev->netdev))
207 return -ENETDOWN;
208 return rdev->ops->libertas_set_mesh_channel(&rdev->wiphy,
209 wdev->netdev,
210 channel);
211 }
212
213 if (wdev->mesh_id_len)
214 return -EBUSY;
215
216 wdev->preset_chan = channel;
217 wdev->preset_chantype = channel_type;
218 return 0;
219}
220
128void cfg80211_notify_new_peer_candidate(struct net_device *dev, 221void cfg80211_notify_new_peer_candidate(struct net_device *dev,
129 const u8 *macaddr, const u8* ie, u8 ie_len, gfp_t gfp) 222 const u8 *macaddr, const u8* ie, u8 ie_len, gfp_t gfp)
130{ 223{
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
index eb90988bbd3..da4406f1192 100644
--- a/net/wireless/mlme.c
+++ b/net/wireless/mlme.c
@@ -947,8 +947,6 @@ void cfg80211_ch_switch_notify(struct net_device *dev, int freq,
947 if (WARN_ON(!chan)) 947 if (WARN_ON(!chan))
948 goto out; 948 goto out;
949 949
950 wdev->channel = chan;
951
952 nl80211_ch_switch_notify(rdev, dev, freq, type, GFP_KERNEL); 950 nl80211_ch_switch_notify(rdev, dev, freq, type, GFP_KERNEL);
953out: 951out:
954 wdev_unlock(wdev); 952 wdev_unlock(wdev);
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 206465dc0ca..3b508eaf2d0 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -70,6 +70,94 @@ static int get_rdev_dev_by_ifindex(struct net *netns, struct nlattr **attrs,
70 return 0; 70 return 0;
71} 71}
72 72
73static struct cfg80211_registered_device *
74__cfg80211_rdev_from_attrs(struct net *netns, struct nlattr **attrs)
75{
76 struct cfg80211_registered_device *rdev = NULL, *tmp;
77 struct net_device *netdev;
78
79 assert_cfg80211_lock();
80
81 if (!attrs[NL80211_ATTR_WIPHY] &&
82 !attrs[NL80211_ATTR_IFINDEX])
83 return ERR_PTR(-EINVAL);
84
85 if (attrs[NL80211_ATTR_WIPHY])
86 rdev = cfg80211_rdev_by_wiphy_idx(
87 nla_get_u32(attrs[NL80211_ATTR_WIPHY]));
88
89 if (attrs[NL80211_ATTR_IFINDEX]) {
90 int ifindex = nla_get_u32(attrs[NL80211_ATTR_IFINDEX]);
91 netdev = dev_get_by_index(netns, ifindex);
92 if (netdev) {
93 if (netdev->ieee80211_ptr)
94 tmp = wiphy_to_dev(
95 netdev->ieee80211_ptr->wiphy);
96 else
97 tmp = NULL;
98
99 dev_put(netdev);
100
101 /* not wireless device -- return error */
102 if (!tmp)
103 return ERR_PTR(-EINVAL);
104
105 /* mismatch -- return error */
106 if (rdev && tmp != rdev)
107 return ERR_PTR(-EINVAL);
108
109 rdev = tmp;
110 }
111 }
112
113 if (!rdev)
114 return ERR_PTR(-ENODEV);
115
116 if (netns != wiphy_net(&rdev->wiphy))
117 return ERR_PTR(-ENODEV);
118
119 return rdev;
120}
121
122/*
123 * This function returns a pointer to the driver
124 * that the genl_info item that is passed refers to.
125 * If successful, it returns non-NULL and also locks
126 * the driver's mutex!
127 *
128 * This means that you need to call cfg80211_unlock_rdev()
129 * before being allowed to acquire &cfg80211_mutex!
130 *
131 * This is necessary because we need to lock the global
132 * mutex to get an item off the list safely, and then
133 * we lock the rdev mutex so it doesn't go away under us.
134 *
135 * We don't want to keep cfg80211_mutex locked
136 * for all the time in order to allow requests on
137 * other interfaces to go through at the same time.
138 *
139 * The result of this can be a PTR_ERR and hence must
140 * be checked with IS_ERR() for errors.
141 */
142static struct cfg80211_registered_device *
143cfg80211_get_dev_from_info(struct net *netns, struct genl_info *info)
144{
145 struct cfg80211_registered_device *rdev;
146
147 mutex_lock(&cfg80211_mutex);
148 rdev = __cfg80211_rdev_from_attrs(netns, info->attrs);
149
150 /* if it is not an error we grab the lock on
151 * it to assure it won't be going away while
152 * we operate on it */
153 if (!IS_ERR(rdev))
154 mutex_lock(&rdev->mtx);
155
156 mutex_unlock(&cfg80211_mutex);
157
158 return rdev;
159}
160
73/* policy for the attributes */ 161/* policy for the attributes */
74static const struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] = { 162static const struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] = {
75 [NL80211_ATTR_WIPHY] = { .type = NLA_U32 }, 163 [NL80211_ATTR_WIPHY] = { .type = NLA_U32 },
@@ -115,7 +203,7 @@ static const struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] = {
115 [NL80211_ATTR_STA_VLAN] = { .type = NLA_U32 }, 203 [NL80211_ATTR_STA_VLAN] = { .type = NLA_U32 },
116 [NL80211_ATTR_MNTR_FLAGS] = { /* NLA_NESTED can't be empty */ }, 204 [NL80211_ATTR_MNTR_FLAGS] = { /* NLA_NESTED can't be empty */ },
117 [NL80211_ATTR_MESH_ID] = { .type = NLA_BINARY, 205 [NL80211_ATTR_MESH_ID] = { .type = NLA_BINARY,
118 .len = IEEE80211_MAX_MESH_ID_LEN }, 206 .len = IEEE80211_MAX_MESH_ID_LEN },
119 [NL80211_ATTR_MPATH_NEXT_HOP] = { .type = NLA_U32 }, 207 [NL80211_ATTR_MPATH_NEXT_HOP] = { .type = NLA_U32 },
120 208
121 [NL80211_ATTR_REG_ALPHA2] = { .type = NLA_STRING, .len = 2 }, 209 [NL80211_ATTR_REG_ALPHA2] = { .type = NLA_STRING, .len = 2 },
@@ -250,8 +338,9 @@ nl80211_rekey_policy[NUM_NL80211_REKEY_DATA] = {
250 338
251static const struct nla_policy 339static const struct nla_policy
252nl80211_match_policy[NL80211_SCHED_SCAN_MATCH_ATTR_MAX + 1] = { 340nl80211_match_policy[NL80211_SCHED_SCAN_MATCH_ATTR_MAX + 1] = {
253 [NL80211_ATTR_SCHED_SCAN_MATCH_SSID] = { .type = NLA_BINARY, 341 [NL80211_SCHED_SCAN_MATCH_ATTR_SSID] = { .type = NLA_BINARY,
254 .len = IEEE80211_MAX_SSID_LEN }, 342 .len = IEEE80211_MAX_SSID_LEN },
343 [NL80211_SCHED_SCAN_MATCH_ATTR_RSSI] = { .type = NLA_U32 },
255}; 344};
256 345
257/* ifidx get helper */ 346/* ifidx get helper */
@@ -921,7 +1010,12 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
921 if (nla_put_u32(msg, i, NL80211_CMD_SET_WIPHY_NETNS)) 1010 if (nla_put_u32(msg, i, NL80211_CMD_SET_WIPHY_NETNS))
922 goto nla_put_failure; 1011 goto nla_put_failure;
923 } 1012 }
924 CMD(set_channel, SET_CHANNEL); 1013 if (dev->ops->set_monitor_channel || dev->ops->start_ap ||
1014 dev->ops->join_mesh) {
1015 i++;
1016 if (nla_put_u32(msg, i, NL80211_CMD_SET_CHANNEL))
1017 goto nla_put_failure;
1018 }
925 CMD(set_wds_peer, SET_WDS_PEER); 1019 CMD(set_wds_peer, SET_WDS_PEER);
926 if (dev->wiphy.flags & WIPHY_FLAG_SUPPORTS_TDLS) { 1020 if (dev->wiphy.flags & WIPHY_FLAG_SUPPORTS_TDLS) {
927 CMD(tdls_mgmt, TDLS_MGMT); 1021 CMD(tdls_mgmt, TDLS_MGMT);
@@ -1162,18 +1256,22 @@ static int parse_txq_params(struct nlattr *tb[],
1162static bool nl80211_can_set_dev_channel(struct wireless_dev *wdev) 1256static bool nl80211_can_set_dev_channel(struct wireless_dev *wdev)
1163{ 1257{
1164 /* 1258 /*
1165 * You can only set the channel explicitly for AP, mesh 1259 * You can only set the channel explicitly for WDS interfaces,
1166 * and WDS type interfaces; all others have their channel 1260 * all others have their channel managed via their respective
1167 * managed via their respective "establish a connection" 1261 * "establish a connection" command (connect, join, ...)
1168 * command (connect, join, ...) 1262 *
1263 * For AP/GO and mesh mode, the channel can be set with the
1264 * channel userspace API, but is only stored and passed to the
1265 * low-level driver when the AP starts or the mesh is joined.
1266 * This is for backward compatibility, userspace can also give
1267 * the channel in the start-ap or join-mesh commands instead.
1169 * 1268 *
1170 * Monitors are special as they are normally slaved to 1269 * Monitors are special as they are normally slaved to
1171 * whatever else is going on, so they behave as though 1270 * whatever else is going on, so they have their own special
1172 * you tried setting the wiphy channel itself. 1271 * operation to set the monitor channel if possible.
1173 */ 1272 */
1174 return !wdev || 1273 return !wdev ||
1175 wdev->iftype == NL80211_IFTYPE_AP || 1274 wdev->iftype == NL80211_IFTYPE_AP ||
1176 wdev->iftype == NL80211_IFTYPE_WDS ||
1177 wdev->iftype == NL80211_IFTYPE_MESH_POINT || 1275 wdev->iftype == NL80211_IFTYPE_MESH_POINT ||
1178 wdev->iftype == NL80211_IFTYPE_MONITOR || 1276 wdev->iftype == NL80211_IFTYPE_MONITOR ||
1179 wdev->iftype == NL80211_IFTYPE_P2P_GO; 1277 wdev->iftype == NL80211_IFTYPE_P2P_GO;
@@ -1204,9 +1302,14 @@ static int __nl80211_set_channel(struct cfg80211_registered_device *rdev,
1204 struct wireless_dev *wdev, 1302 struct wireless_dev *wdev,
1205 struct genl_info *info) 1303 struct genl_info *info)
1206{ 1304{
1305 struct ieee80211_channel *channel;
1207 enum nl80211_channel_type channel_type = NL80211_CHAN_NO_HT; 1306 enum nl80211_channel_type channel_type = NL80211_CHAN_NO_HT;
1208 u32 freq; 1307 u32 freq;
1209 int result; 1308 int result;
1309 enum nl80211_iftype iftype = NL80211_IFTYPE_MONITOR;
1310
1311 if (wdev)
1312 iftype = wdev->iftype;
1210 1313
1211 if (!info->attrs[NL80211_ATTR_WIPHY_FREQ]) 1314 if (!info->attrs[NL80211_ATTR_WIPHY_FREQ])
1212 return -EINVAL; 1315 return -EINVAL;
@@ -1221,12 +1324,32 @@ static int __nl80211_set_channel(struct cfg80211_registered_device *rdev,
1221 freq = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]); 1324 freq = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]);
1222 1325
1223 mutex_lock(&rdev->devlist_mtx); 1326 mutex_lock(&rdev->devlist_mtx);
1224 if (wdev) { 1327 switch (iftype) {
1225 wdev_lock(wdev); 1328 case NL80211_IFTYPE_AP:
1226 result = cfg80211_set_freq(rdev, wdev, freq, channel_type); 1329 case NL80211_IFTYPE_P2P_GO:
1227 wdev_unlock(wdev); 1330 if (wdev->beacon_interval) {
1228 } else { 1331 result = -EBUSY;
1229 result = cfg80211_set_freq(rdev, NULL, freq, channel_type); 1332 break;
1333 }
1334 channel = rdev_freq_to_chan(rdev, freq, channel_type);
1335 if (!channel || !cfg80211_can_beacon_sec_chan(&rdev->wiphy,
1336 channel,
1337 channel_type)) {
1338 result = -EINVAL;
1339 break;
1340 }
1341 wdev->preset_chan = channel;
1342 wdev->preset_chantype = channel_type;
1343 result = 0;
1344 break;
1345 case NL80211_IFTYPE_MESH_POINT:
1346 result = cfg80211_set_mesh_freq(rdev, wdev, freq, channel_type);
1347 break;
1348 case NL80211_IFTYPE_MONITOR:
1349 result = cfg80211_set_monitor_channel(rdev, freq, channel_type);
1350 break;
1351 default:
1352 result = -EINVAL;
1230 } 1353 }
1231 mutex_unlock(&rdev->devlist_mtx); 1354 mutex_unlock(&rdev->devlist_mtx);
1232 1355
@@ -1300,7 +1423,8 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
1300 } 1423 }
1301 1424
1302 if (!netdev) { 1425 if (!netdev) {
1303 rdev = __cfg80211_rdev_from_info(info); 1426 rdev = __cfg80211_rdev_from_attrs(genl_info_net(info),
1427 info->attrs);
1304 if (IS_ERR(rdev)) { 1428 if (IS_ERR(rdev)) {
1305 mutex_unlock(&cfg80211_mutex); 1429 mutex_unlock(&cfg80211_mutex);
1306 return PTR_ERR(rdev); 1430 return PTR_ERR(rdev);
@@ -1310,8 +1434,7 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
1310 result = 0; 1434 result = 0;
1311 1435
1312 mutex_lock(&rdev->mtx); 1436 mutex_lock(&rdev->mtx);
1313 } else if (netif_running(netdev) && 1437 } else if (nl80211_can_set_dev_channel(netdev->ieee80211_ptr))
1314 nl80211_can_set_dev_channel(netdev->ieee80211_ptr))
1315 wdev = netdev->ieee80211_ptr; 1438 wdev = netdev->ieee80211_ptr;
1316 else 1439 else
1317 wdev = NULL; 1440 wdev = NULL;
@@ -2213,6 +2336,33 @@ static int nl80211_parse_beacon(struct genl_info *info,
2213 return 0; 2336 return 0;
2214} 2337}
2215 2338
2339static bool nl80211_get_ap_channel(struct cfg80211_registered_device *rdev,
2340 struct cfg80211_ap_settings *params)
2341{
2342 struct wireless_dev *wdev;
2343 bool ret = false;
2344
2345 mutex_lock(&rdev->devlist_mtx);
2346
2347 list_for_each_entry(wdev, &rdev->netdev_list, list) {
2348 if (wdev->iftype != NL80211_IFTYPE_AP &&
2349 wdev->iftype != NL80211_IFTYPE_P2P_GO)
2350 continue;
2351
2352 if (!wdev->preset_chan)
2353 continue;
2354
2355 params->channel = wdev->preset_chan;
2356 params->channel_type = wdev->preset_chantype;
2357 ret = true;
2358 break;
2359 }
2360
2361 mutex_unlock(&rdev->devlist_mtx);
2362
2363 return ret;
2364}
2365
2216static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info) 2366static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
2217{ 2367{
2218 struct cfg80211_registered_device *rdev = info->user_ptr[0]; 2368 struct cfg80211_registered_device *rdev = info->user_ptr[0];
@@ -2299,9 +2449,35 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
2299 info->attrs[NL80211_ATTR_INACTIVITY_TIMEOUT]); 2449 info->attrs[NL80211_ATTR_INACTIVITY_TIMEOUT]);
2300 } 2450 }
2301 2451
2452 if (info->attrs[NL80211_ATTR_WIPHY_FREQ]) {
2453 enum nl80211_channel_type channel_type = NL80211_CHAN_NO_HT;
2454
2455 if (info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE] &&
2456 !nl80211_valid_channel_type(info, &channel_type))
2457 return -EINVAL;
2458
2459 params.channel = rdev_freq_to_chan(rdev,
2460 nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]),
2461 channel_type);
2462 if (!params.channel)
2463 return -EINVAL;
2464 params.channel_type = channel_type;
2465 } else if (wdev->preset_chan) {
2466 params.channel = wdev->preset_chan;
2467 params.channel_type = wdev->preset_chantype;
2468 } else if (!nl80211_get_ap_channel(rdev, &params))
2469 return -EINVAL;
2470
2471 if (!cfg80211_can_beacon_sec_chan(&rdev->wiphy, params.channel,
2472 params.channel_type))
2473 return -EINVAL;
2474
2302 err = rdev->ops->start_ap(&rdev->wiphy, dev, &params); 2475 err = rdev->ops->start_ap(&rdev->wiphy, dev, &params);
2303 if (!err) 2476 if (!err) {
2477 wdev->preset_chan = params.channel;
2478 wdev->preset_chantype = params.channel_type;
2304 wdev->beacon_interval = params.beacon_interval; 2479 wdev->beacon_interval = params.beacon_interval;
2480 }
2305 return err; 2481 return err;
2306} 2482}
2307 2483
@@ -3413,7 +3589,13 @@ static int nl80211_get_mesh_config(struct sk_buff *skb,
3413 nla_put_u32(msg, NL80211_MESHCONF_RSSI_THRESHOLD, 3589 nla_put_u32(msg, NL80211_MESHCONF_RSSI_THRESHOLD,
3414 cur_params.rssi_threshold) || 3590 cur_params.rssi_threshold) ||
3415 nla_put_u32(msg, NL80211_MESHCONF_HT_OPMODE, 3591 nla_put_u32(msg, NL80211_MESHCONF_HT_OPMODE,
3416 cur_params.ht_opmode)) 3592 cur_params.ht_opmode) ||
3593 nla_put_u32(msg, NL80211_MESHCONF_HWMP_PATH_TO_ROOT_TIMEOUT,
3594 cur_params.dot11MeshHWMPactivePathToRootTimeout) ||
3595 nla_put_u16(msg, NL80211_MESHCONF_HWMP_ROOT_INTERVAL,
3596 cur_params.dot11MeshHWMProotInterval) ||
3597 nla_put_u16(msg, NL80211_MESHCONF_HWMP_CONFIRMATION_INTERVAL,
3598 cur_params.dot11MeshHWMPconfirmationInterval))
3417 goto nla_put_failure; 3599 goto nla_put_failure;
3418 nla_nest_end(msg, pinfoattr); 3600 nla_nest_end(msg, pinfoattr);
3419 genlmsg_end(msg, hdr); 3601 genlmsg_end(msg, hdr);
@@ -3436,7 +3618,6 @@ static const struct nla_policy nl80211_meshconf_params_policy[NL80211_MESHCONF_A
3436 [NL80211_MESHCONF_ELEMENT_TTL] = { .type = NLA_U8 }, 3618 [NL80211_MESHCONF_ELEMENT_TTL] = { .type = NLA_U8 },
3437 [NL80211_MESHCONF_AUTO_OPEN_PLINKS] = { .type = NLA_U8 }, 3619 [NL80211_MESHCONF_AUTO_OPEN_PLINKS] = { .type = NLA_U8 },
3438 [NL80211_MESHCONF_SYNC_OFFSET_MAX_NEIGHBOR] = { .type = NLA_U32 }, 3620 [NL80211_MESHCONF_SYNC_OFFSET_MAX_NEIGHBOR] = { .type = NLA_U32 },
3439
3440 [NL80211_MESHCONF_HWMP_MAX_PREQ_RETRIES] = { .type = NLA_U8 }, 3621 [NL80211_MESHCONF_HWMP_MAX_PREQ_RETRIES] = { .type = NLA_U8 },
3441 [NL80211_MESHCONF_PATH_REFRESH_TIME] = { .type = NLA_U32 }, 3622 [NL80211_MESHCONF_PATH_REFRESH_TIME] = { .type = NLA_U32 },
3442 [NL80211_MESHCONF_MIN_DISCOVERY_TIMEOUT] = { .type = NLA_U16 }, 3623 [NL80211_MESHCONF_MIN_DISCOVERY_TIMEOUT] = { .type = NLA_U16 },
@@ -3448,8 +3629,11 @@ static const struct nla_policy nl80211_meshconf_params_policy[NL80211_MESHCONF_A
3448 [NL80211_MESHCONF_HWMP_RANN_INTERVAL] = { .type = NLA_U16 }, 3629 [NL80211_MESHCONF_HWMP_RANN_INTERVAL] = { .type = NLA_U16 },
3449 [NL80211_MESHCONF_GATE_ANNOUNCEMENTS] = { .type = NLA_U8 }, 3630 [NL80211_MESHCONF_GATE_ANNOUNCEMENTS] = { .type = NLA_U8 },
3450 [NL80211_MESHCONF_FORWARDING] = { .type = NLA_U8 }, 3631 [NL80211_MESHCONF_FORWARDING] = { .type = NLA_U8 },
3451 [NL80211_MESHCONF_RSSI_THRESHOLD] = { .type = NLA_U32}, 3632 [NL80211_MESHCONF_RSSI_THRESHOLD] = { .type = NLA_U32 },
3452 [NL80211_MESHCONF_HT_OPMODE] = { .type = NLA_U16}, 3633 [NL80211_MESHCONF_HT_OPMODE] = { .type = NLA_U16 },
3634 [NL80211_MESHCONF_HWMP_PATH_TO_ROOT_TIMEOUT] = { .type = NLA_U32 },
3635 [NL80211_MESHCONF_HWMP_ROOT_INTERVAL] = { .type = NLA_U16 },
3636 [NL80211_MESHCONF_HWMP_CONFIRMATION_INTERVAL] = { .type = NLA_U16 },
3453}; 3637};
3454 3638
3455static const struct nla_policy 3639static const struct nla_policy
@@ -3459,7 +3643,7 @@ static const struct nla_policy
3459 [NL80211_MESH_SETUP_ENABLE_VENDOR_METRIC] = { .type = NLA_U8 }, 3643 [NL80211_MESH_SETUP_ENABLE_VENDOR_METRIC] = { .type = NLA_U8 },
3460 [NL80211_MESH_SETUP_USERSPACE_AUTH] = { .type = NLA_FLAG }, 3644 [NL80211_MESH_SETUP_USERSPACE_AUTH] = { .type = NLA_FLAG },
3461 [NL80211_MESH_SETUP_IE] = { .type = NLA_BINARY, 3645 [NL80211_MESH_SETUP_IE] = { .type = NLA_BINARY,
3462 .len = IEEE80211_MAX_DATA_LEN }, 3646 .len = IEEE80211_MAX_DATA_LEN },
3463 [NL80211_MESH_SETUP_USERSPACE_AMPE] = { .type = NLA_FLAG }, 3647 [NL80211_MESH_SETUP_USERSPACE_AMPE] = { .type = NLA_FLAG },
3464}; 3648};
3465 3649
@@ -3492,63 +3676,82 @@ do {\
3492 3676
3493 /* Fill in the params struct */ 3677 /* Fill in the params struct */
3494 FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshRetryTimeout, 3678 FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshRetryTimeout,
3495 mask, NL80211_MESHCONF_RETRY_TIMEOUT, nla_get_u16); 3679 mask, NL80211_MESHCONF_RETRY_TIMEOUT,
3680 nla_get_u16);
3496 FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshConfirmTimeout, 3681 FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshConfirmTimeout,
3497 mask, NL80211_MESHCONF_CONFIRM_TIMEOUT, nla_get_u16); 3682 mask, NL80211_MESHCONF_CONFIRM_TIMEOUT,
3683 nla_get_u16);
3498 FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHoldingTimeout, 3684 FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHoldingTimeout,
3499 mask, NL80211_MESHCONF_HOLDING_TIMEOUT, nla_get_u16); 3685 mask, NL80211_MESHCONF_HOLDING_TIMEOUT,
3686 nla_get_u16);
3500 FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshMaxPeerLinks, 3687 FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshMaxPeerLinks,
3501 mask, NL80211_MESHCONF_MAX_PEER_LINKS, nla_get_u16); 3688 mask, NL80211_MESHCONF_MAX_PEER_LINKS,
3689 nla_get_u16);
3502 FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshMaxRetries, 3690 FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshMaxRetries,
3503 mask, NL80211_MESHCONF_MAX_RETRIES, nla_get_u8); 3691 mask, NL80211_MESHCONF_MAX_RETRIES,
3692 nla_get_u8);
3504 FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshTTL, 3693 FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshTTL,
3505 mask, NL80211_MESHCONF_TTL, nla_get_u8); 3694 mask, NL80211_MESHCONF_TTL, nla_get_u8);
3506 FILL_IN_MESH_PARAM_IF_SET(tb, cfg, element_ttl, 3695 FILL_IN_MESH_PARAM_IF_SET(tb, cfg, element_ttl,
3507 mask, NL80211_MESHCONF_ELEMENT_TTL, nla_get_u8); 3696 mask, NL80211_MESHCONF_ELEMENT_TTL,
3697 nla_get_u8);
3508 FILL_IN_MESH_PARAM_IF_SET(tb, cfg, auto_open_plinks, 3698 FILL_IN_MESH_PARAM_IF_SET(tb, cfg, auto_open_plinks,
3509 mask, NL80211_MESHCONF_AUTO_OPEN_PLINKS, nla_get_u8); 3699 mask, NL80211_MESHCONF_AUTO_OPEN_PLINKS,
3510 FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshNbrOffsetMaxNeighbor, 3700 nla_get_u8);
3511 mask, NL80211_MESHCONF_SYNC_OFFSET_MAX_NEIGHBOR, 3701 FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshNbrOffsetMaxNeighbor, mask,
3512 nla_get_u32); 3702 NL80211_MESHCONF_SYNC_OFFSET_MAX_NEIGHBOR,
3703 nla_get_u32);
3513 FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPmaxPREQretries, 3704 FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPmaxPREQretries,
3514 mask, NL80211_MESHCONF_HWMP_MAX_PREQ_RETRIES, 3705 mask, NL80211_MESHCONF_HWMP_MAX_PREQ_RETRIES,
3515 nla_get_u8); 3706 nla_get_u8);
3516 FILL_IN_MESH_PARAM_IF_SET(tb, cfg, path_refresh_time, 3707 FILL_IN_MESH_PARAM_IF_SET(tb, cfg, path_refresh_time,
3517 mask, NL80211_MESHCONF_PATH_REFRESH_TIME, nla_get_u32); 3708 mask, NL80211_MESHCONF_PATH_REFRESH_TIME,
3709 nla_get_u32);
3518 FILL_IN_MESH_PARAM_IF_SET(tb, cfg, min_discovery_timeout, 3710 FILL_IN_MESH_PARAM_IF_SET(tb, cfg, min_discovery_timeout,
3519 mask, NL80211_MESHCONF_MIN_DISCOVERY_TIMEOUT, 3711 mask, NL80211_MESHCONF_MIN_DISCOVERY_TIMEOUT,
3520 nla_get_u16); 3712 nla_get_u16);
3521 FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPactivePathTimeout, 3713 FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPactivePathTimeout, mask,
3522 mask, NL80211_MESHCONF_HWMP_ACTIVE_PATH_TIMEOUT, 3714 NL80211_MESHCONF_HWMP_ACTIVE_PATH_TIMEOUT,
3523 nla_get_u32); 3715 nla_get_u32);
3524 FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPpreqMinInterval, 3716 FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPpreqMinInterval,
3525 mask, NL80211_MESHCONF_HWMP_PREQ_MIN_INTERVAL, 3717 mask, NL80211_MESHCONF_HWMP_PREQ_MIN_INTERVAL,
3526 nla_get_u16); 3718 nla_get_u16);
3527 FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPperrMinInterval, 3719 FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPperrMinInterval,
3528 mask, NL80211_MESHCONF_HWMP_PERR_MIN_INTERVAL, 3720 mask, NL80211_MESHCONF_HWMP_PERR_MIN_INTERVAL,
3529 nla_get_u16); 3721 nla_get_u16);
3530 FILL_IN_MESH_PARAM_IF_SET(tb, cfg,
3531 dot11MeshHWMPnetDiameterTraversalTime,
3532 mask, NL80211_MESHCONF_HWMP_NET_DIAM_TRVS_TIME,
3533 nla_get_u16);
3534 FILL_IN_MESH_PARAM_IF_SET(tb, cfg,
3535 dot11MeshHWMPRootMode, mask,
3536 NL80211_MESHCONF_HWMP_ROOTMODE,
3537 nla_get_u8);
3538 FILL_IN_MESH_PARAM_IF_SET(tb, cfg, 3722 FILL_IN_MESH_PARAM_IF_SET(tb, cfg,
3539 dot11MeshHWMPRannInterval, mask, 3723 dot11MeshHWMPnetDiameterTraversalTime, mask,
3540 NL80211_MESHCONF_HWMP_RANN_INTERVAL, 3724 NL80211_MESHCONF_HWMP_NET_DIAM_TRVS_TIME,
3541 nla_get_u16); 3725 nla_get_u16);
3726 FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPRootMode, mask,
3727 NL80211_MESHCONF_HWMP_ROOTMODE, nla_get_u8);
3728 FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPRannInterval, mask,
3729 NL80211_MESHCONF_HWMP_RANN_INTERVAL,
3730 nla_get_u16);
3542 FILL_IN_MESH_PARAM_IF_SET(tb, cfg, 3731 FILL_IN_MESH_PARAM_IF_SET(tb, cfg,
3543 dot11MeshGateAnnouncementProtocol, mask, 3732 dot11MeshGateAnnouncementProtocol, mask,
3544 NL80211_MESHCONF_GATE_ANNOUNCEMENTS, 3733 NL80211_MESHCONF_GATE_ANNOUNCEMENTS,
3545 nla_get_u8); 3734 nla_get_u8);
3546 FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshForwarding, 3735 FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshForwarding,
3547 mask, NL80211_MESHCONF_FORWARDING, nla_get_u8); 3736 mask, NL80211_MESHCONF_FORWARDING,
3737 nla_get_u8);
3548 FILL_IN_MESH_PARAM_IF_SET(tb, cfg, rssi_threshold, 3738 FILL_IN_MESH_PARAM_IF_SET(tb, cfg, rssi_threshold,
3549 mask, NL80211_MESHCONF_RSSI_THRESHOLD, nla_get_u32); 3739 mask, NL80211_MESHCONF_RSSI_THRESHOLD,
3740 nla_get_u32);
3550 FILL_IN_MESH_PARAM_IF_SET(tb, cfg, ht_opmode, 3741 FILL_IN_MESH_PARAM_IF_SET(tb, cfg, ht_opmode,
3551 mask, NL80211_MESHCONF_HT_OPMODE, nla_get_u16); 3742 mask, NL80211_MESHCONF_HT_OPMODE,
3743 nla_get_u16);
3744 FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPactivePathToRootTimeout,
3745 mask,
3746 NL80211_MESHCONF_HWMP_PATH_TO_ROOT_TIMEOUT,
3747 nla_get_u32);
3748 FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMProotInterval,
3749 mask, NL80211_MESHCONF_HWMP_ROOT_INTERVAL,
3750 nla_get_u16);
3751 FILL_IN_MESH_PARAM_IF_SET(tb, cfg,
3752 dot11MeshHWMPconfirmationInterval, mask,
3753 NL80211_MESHCONF_HWMP_CONFIRMATION_INTERVAL,
3754 nla_get_u16);
3552 if (mask_out) 3755 if (mask_out)
3553 *mask_out = mask; 3756 *mask_out = mask;
3554 3757
@@ -4185,12 +4388,12 @@ static int nl80211_start_sched_scan(struct sk_buff *skb,
4185 nla_for_each_nested(attr, 4388 nla_for_each_nested(attr,
4186 info->attrs[NL80211_ATTR_SCHED_SCAN_MATCH], 4389 info->attrs[NL80211_ATTR_SCHED_SCAN_MATCH],
4187 tmp) { 4390 tmp) {
4188 struct nlattr *ssid; 4391 struct nlattr *ssid, *rssi;
4189 4392
4190 nla_parse(tb, NL80211_SCHED_SCAN_MATCH_ATTR_MAX, 4393 nla_parse(tb, NL80211_SCHED_SCAN_MATCH_ATTR_MAX,
4191 nla_data(attr), nla_len(attr), 4394 nla_data(attr), nla_len(attr),
4192 nl80211_match_policy); 4395 nl80211_match_policy);
4193 ssid = tb[NL80211_ATTR_SCHED_SCAN_MATCH_SSID]; 4396 ssid = tb[NL80211_SCHED_SCAN_MATCH_ATTR_SSID];
4194 if (ssid) { 4397 if (ssid) {
4195 if (nla_len(ssid) > IEEE80211_MAX_SSID_LEN) { 4398 if (nla_len(ssid) > IEEE80211_MAX_SSID_LEN) {
4196 err = -EINVAL; 4399 err = -EINVAL;
@@ -4201,6 +4404,12 @@ static int nl80211_start_sched_scan(struct sk_buff *skb,
4201 request->match_sets[i].ssid.ssid_len = 4404 request->match_sets[i].ssid.ssid_len =
4202 nla_len(ssid); 4405 nla_len(ssid);
4203 } 4406 }
4407 rssi = tb[NL80211_SCHED_SCAN_MATCH_ATTR_RSSI];
4408 if (rssi)
4409 request->rssi_thold = nla_get_u32(rssi);
4410 else
4411 request->rssi_thold =
4412 NL80211_SCAN_RSSI_THOLD_OFF;
4204 i++; 4413 i++;
4205 } 4414 }
4206 } 4415 }
@@ -5058,21 +5267,18 @@ static int nl80211_testmode_dump(struct sk_buff *skb,
5058 nl80211_policy); 5267 nl80211_policy);
5059 if (err) 5268 if (err)
5060 return err; 5269 return err;
5061 if (nl80211_fam.attrbuf[NL80211_ATTR_WIPHY]) {
5062 phy_idx = nla_get_u32(
5063 nl80211_fam.attrbuf[NL80211_ATTR_WIPHY]);
5064 } else {
5065 struct net_device *netdev;
5066 5270
5067 err = get_rdev_dev_by_ifindex(sock_net(skb->sk), 5271 mutex_lock(&cfg80211_mutex);
5068 nl80211_fam.attrbuf, 5272 rdev = __cfg80211_rdev_from_attrs(sock_net(skb->sk),
5069 &rdev, &netdev); 5273 nl80211_fam.attrbuf);
5070 if (err) 5274 if (IS_ERR(rdev)) {
5071 return err; 5275 mutex_unlock(&cfg80211_mutex);
5072 dev_put(netdev); 5276 return PTR_ERR(rdev);
5073 phy_idx = rdev->wiphy_idx;
5074 cfg80211_unlock_rdev(rdev);
5075 } 5277 }
5278 phy_idx = rdev->wiphy_idx;
5279 rdev = NULL;
5280 mutex_unlock(&cfg80211_mutex);
5281
5076 if (nl80211_fam.attrbuf[NL80211_ATTR_TESTDATA]) 5282 if (nl80211_fam.attrbuf[NL80211_ATTR_TESTDATA])
5077 cb->args[1] = 5283 cb->args[1] =
5078 (long)nl80211_fam.attrbuf[NL80211_ATTR_TESTDATA]; 5284 (long)nl80211_fam.attrbuf[NL80211_ATTR_TESTDATA];
@@ -5489,18 +5695,18 @@ static int nl80211_remain_on_channel(struct sk_buff *skb,
5489 5695
5490 duration = nla_get_u32(info->attrs[NL80211_ATTR_DURATION]); 5696 duration = nla_get_u32(info->attrs[NL80211_ATTR_DURATION]);
5491 5697
5698 if (!rdev->ops->remain_on_channel ||
5699 !(rdev->wiphy.flags & WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL))
5700 return -EOPNOTSUPP;
5701
5492 /* 5702 /*
5493 * We should be on that channel for at least one jiffie, 5703 * We should be on that channel for at least a minimum amount of
5494 * and more than 5 seconds seems excessive. 5704 * time (10ms) but no longer than the driver supports.
5495 */ 5705 */
5496 if (!duration || !msecs_to_jiffies(duration) || 5706 if (duration < NL80211_MIN_REMAIN_ON_CHANNEL_TIME ||
5497 duration > rdev->wiphy.max_remain_on_channel_duration) 5707 duration > rdev->wiphy.max_remain_on_channel_duration)
5498 return -EINVAL; 5708 return -EINVAL;
5499 5709
5500 if (!rdev->ops->remain_on_channel ||
5501 !(rdev->wiphy.flags & WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL))
5502 return -EOPNOTSUPP;
5503
5504 if (info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE] && 5710 if (info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE] &&
5505 !nl80211_valid_channel_type(info, &channel_type)) 5711 !nl80211_valid_channel_type(info, &channel_type))
5506 return -EINVAL; 5712 return -EINVAL;
@@ -5771,6 +5977,15 @@ static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info)
5771 if (!(rdev->wiphy.flags & WIPHY_FLAG_OFFCHAN_TX)) 5977 if (!(rdev->wiphy.flags & WIPHY_FLAG_OFFCHAN_TX))
5772 return -EINVAL; 5978 return -EINVAL;
5773 wait = nla_get_u32(info->attrs[NL80211_ATTR_DURATION]); 5979 wait = nla_get_u32(info->attrs[NL80211_ATTR_DURATION]);
5980
5981 /*
5982 * We should wait on the channel for at least a minimum amount
5983 * of time (10ms) but no longer than the driver supports.
5984 */
5985 if (wait < NL80211_MIN_REMAIN_ON_CHANNEL_TIME ||
5986 wait > rdev->wiphy.max_remain_on_channel_duration)
5987 return -EINVAL;
5988
5774 } 5989 }
5775 5990
5776 if (info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]) { 5991 if (info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]) {
@@ -6032,6 +6247,24 @@ static int nl80211_join_mesh(struct sk_buff *skb, struct genl_info *info)
6032 return err; 6247 return err;
6033 } 6248 }
6034 6249
6250 if (info->attrs[NL80211_ATTR_WIPHY_FREQ]) {
6251 enum nl80211_channel_type channel_type = NL80211_CHAN_NO_HT;
6252
6253 if (info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE] &&
6254 !nl80211_valid_channel_type(info, &channel_type))
6255 return -EINVAL;
6256
6257 setup.channel = rdev_freq_to_chan(rdev,
6258 nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]),
6259 channel_type);
6260 if (!setup.channel)
6261 return -EINVAL;
6262 setup.channel_type = channel_type;
6263 } else {
6264 /* cfg80211_join_mesh() will sort it out */
6265 setup.channel = NULL;
6266 }
6267
6035 return cfg80211_join_mesh(rdev, dev, &setup, &cfg); 6268 return cfg80211_join_mesh(rdev, dev, &setup, &cfg);
6036} 6269}
6037 6270
@@ -6428,7 +6661,7 @@ static int nl80211_pre_doit(struct genl_ops *ops, struct sk_buff *skb,
6428 rtnl_lock(); 6661 rtnl_lock();
6429 6662
6430 if (ops->internal_flags & NL80211_FLAG_NEED_WIPHY) { 6663 if (ops->internal_flags & NL80211_FLAG_NEED_WIPHY) {
6431 rdev = cfg80211_get_dev_from_info(info); 6664 rdev = cfg80211_get_dev_from_info(genl_info_net(info), info);
6432 if (IS_ERR(rdev)) { 6665 if (IS_ERR(rdev)) {
6433 if (rtnl) 6666 if (rtnl)
6434 rtnl_unlock(); 6667 rtnl_unlock();
@@ -7127,7 +7360,7 @@ void nl80211_send_scan_start(struct cfg80211_registered_device *rdev,
7127{ 7360{
7128 struct sk_buff *msg; 7361 struct sk_buff *msg;
7129 7362
7130 msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); 7363 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
7131 if (!msg) 7364 if (!msg)
7132 return; 7365 return;
7133 7366
@@ -7203,7 +7436,7 @@ void nl80211_send_sched_scan(struct cfg80211_registered_device *rdev,
7203{ 7436{
7204 struct sk_buff *msg; 7437 struct sk_buff *msg;
7205 7438
7206 msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); 7439 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
7207 if (!msg) 7440 if (!msg)
7208 return; 7441 return;
7209 7442
@@ -7419,7 +7652,7 @@ void nl80211_send_connect_result(struct cfg80211_registered_device *rdev,
7419 struct sk_buff *msg; 7652 struct sk_buff *msg;
7420 void *hdr; 7653 void *hdr;
7421 7654
7422 msg = nlmsg_new(NLMSG_GOODSIZE, gfp); 7655 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
7423 if (!msg) 7656 if (!msg)
7424 return; 7657 return;
7425 7658
@@ -7459,7 +7692,7 @@ void nl80211_send_roamed(struct cfg80211_registered_device *rdev,
7459 struct sk_buff *msg; 7692 struct sk_buff *msg;
7460 void *hdr; 7693 void *hdr;
7461 7694
7462 msg = nlmsg_new(NLMSG_GOODSIZE, gfp); 7695 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
7463 if (!msg) 7696 if (!msg)
7464 return; 7697 return;
7465 7698
@@ -7497,7 +7730,7 @@ void nl80211_send_disconnected(struct cfg80211_registered_device *rdev,
7497 struct sk_buff *msg; 7730 struct sk_buff *msg;
7498 void *hdr; 7731 void *hdr;
7499 7732
7500 msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); 7733 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
7501 if (!msg) 7734 if (!msg)
7502 return; 7735 return;
7503 7736
@@ -7759,7 +7992,7 @@ void nl80211_send_sta_event(struct cfg80211_registered_device *rdev,
7759{ 7992{
7760 struct sk_buff *msg; 7993 struct sk_buff *msg;
7761 7994
7762 msg = nlmsg_new(NLMSG_GOODSIZE, gfp); 7995 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
7763 if (!msg) 7996 if (!msg)
7764 return; 7997 return;
7765 7998
@@ -7780,7 +8013,7 @@ void nl80211_send_sta_del_event(struct cfg80211_registered_device *rdev,
7780 struct sk_buff *msg; 8013 struct sk_buff *msg;
7781 void *hdr; 8014 void *hdr;
7782 8015
7783 msg = nlmsg_new(NLMSG_GOODSIZE, gfp); 8016 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
7784 if (!msg) 8017 if (!msg)
7785 return; 8018 return;
7786 8019
@@ -7943,7 +8176,7 @@ nl80211_send_cqm_rssi_notify(struct cfg80211_registered_device *rdev,
7943 struct nlattr *pinfoattr; 8176 struct nlattr *pinfoattr;
7944 void *hdr; 8177 void *hdr;
7945 8178
7946 msg = nlmsg_new(NLMSG_GOODSIZE, gfp); 8179 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
7947 if (!msg) 8180 if (!msg)
7948 return; 8181 return;
7949 8182
@@ -7986,7 +8219,7 @@ void nl80211_gtk_rekey_notify(struct cfg80211_registered_device *rdev,
7986 struct nlattr *rekey_attr; 8219 struct nlattr *rekey_attr;
7987 void *hdr; 8220 void *hdr;
7988 8221
7989 msg = nlmsg_new(NLMSG_GOODSIZE, gfp); 8222 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
7990 if (!msg) 8223 if (!msg)
7991 return; 8224 return;
7992 8225
@@ -8030,7 +8263,7 @@ void nl80211_pmksa_candidate_notify(struct cfg80211_registered_device *rdev,
8030 struct nlattr *attr; 8263 struct nlattr *attr;
8031 void *hdr; 8264 void *hdr;
8032 8265
8033 msg = nlmsg_new(NLMSG_GOODSIZE, gfp); 8266 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
8034 if (!msg) 8267 if (!msg)
8035 return; 8268 return;
8036 8269
@@ -8074,7 +8307,7 @@ void nl80211_ch_switch_notify(struct cfg80211_registered_device *rdev,
8074 struct sk_buff *msg; 8307 struct sk_buff *msg;
8075 void *hdr; 8308 void *hdr;
8076 8309
8077 msg = nlmsg_new(NLMSG_GOODSIZE, gfp); 8310 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
8078 if (!msg) 8311 if (!msg)
8079 return; 8312 return;
8080 8313
@@ -8109,7 +8342,7 @@ nl80211_send_cqm_pktloss_notify(struct cfg80211_registered_device *rdev,
8109 struct nlattr *pinfoattr; 8342 struct nlattr *pinfoattr;
8110 void *hdr; 8343 void *hdr;
8111 8344
8112 msg = nlmsg_new(NLMSG_GOODSIZE, gfp); 8345 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
8113 if (!msg) 8346 if (!msg)
8114 return; 8347 return;
8115 8348
@@ -8153,7 +8386,7 @@ void cfg80211_probe_status(struct net_device *dev, const u8 *addr,
8153 void *hdr; 8386 void *hdr;
8154 int err; 8387 int err;
8155 8388
8156 msg = nlmsg_new(NLMSG_GOODSIZE, gfp); 8389 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
8157 if (!msg) 8390 if (!msg)
8158 return; 8391 return;
8159 8392
diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c
index 6a6181a673c..bc879833b21 100644
--- a/net/wireless/wext-compat.c
+++ b/net/wireless/wext-compat.c
@@ -796,7 +796,15 @@ static int cfg80211_wext_siwfreq(struct net_device *dev,
796 case NL80211_IFTYPE_ADHOC: 796 case NL80211_IFTYPE_ADHOC:
797 return cfg80211_ibss_wext_siwfreq(dev, info, wextfreq, extra); 797 return cfg80211_ibss_wext_siwfreq(dev, info, wextfreq, extra);
798 case NL80211_IFTYPE_MONITOR: 798 case NL80211_IFTYPE_MONITOR:
799 case NL80211_IFTYPE_WDS: 799 freq = cfg80211_wext_freq(wdev->wiphy, wextfreq);
800 if (freq < 0)
801 return freq;
802 if (freq == 0)
803 return -EINVAL;
804 mutex_lock(&rdev->devlist_mtx);
805 err = cfg80211_set_monitor_channel(rdev, freq, NL80211_CHAN_NO_HT);
806 mutex_unlock(&rdev->devlist_mtx);
807 return err;
800 case NL80211_IFTYPE_MESH_POINT: 808 case NL80211_IFTYPE_MESH_POINT:
801 freq = cfg80211_wext_freq(wdev->wiphy, wextfreq); 809 freq = cfg80211_wext_freq(wdev->wiphy, wextfreq);
802 if (freq < 0) 810 if (freq < 0)
@@ -804,9 +812,8 @@ static int cfg80211_wext_siwfreq(struct net_device *dev,
804 if (freq == 0) 812 if (freq == 0)
805 return -EINVAL; 813 return -EINVAL;
806 mutex_lock(&rdev->devlist_mtx); 814 mutex_lock(&rdev->devlist_mtx);
807 wdev_lock(wdev); 815 err = cfg80211_set_mesh_freq(rdev, wdev, freq,
808 err = cfg80211_set_freq(rdev, wdev, freq, NL80211_CHAN_NO_HT); 816 NL80211_CHAN_NO_HT);
809 wdev_unlock(wdev);
810 mutex_unlock(&rdev->devlist_mtx); 817 mutex_unlock(&rdev->devlist_mtx);
811 return err; 818 return err;
812 default: 819 default:
@@ -839,11 +846,7 @@ static int cfg80211_wext_giwfreq(struct net_device *dev,
839 freq->e = 6; 846 freq->e = 6;
840 return 0; 847 return 0;
841 default: 848 default:
842 if (!wdev->channel) 849 return -EINVAL;
843 return -EINVAL;
844 freq->m = wdev->channel->center_freq;
845 freq->e = 6;
846 return 0;
847 } 850 }
848} 851}
849 852
diff --git a/net/wireless/wext-sme.c b/net/wireless/wext-sme.c
index 7decbd357d5..1f773f668d1 100644
--- a/net/wireless/wext-sme.c
+++ b/net/wireless/wext-sme.c
@@ -111,9 +111,15 @@ int cfg80211_mgd_wext_siwfreq(struct net_device *dev,
111 111
112 wdev->wext.connect.channel = chan; 112 wdev->wext.connect.channel = chan;
113 113
114 /* SSID is not set, we just want to switch channel */ 114 /*
115 * SSID is not set, we just want to switch monitor channel,
116 * this is really just backward compatibility, if the SSID
117 * is set then we use the channel to select the BSS to use
118 * to connect to instead. If we were connected on another
119 * channel we disconnected above and reconnect below.
120 */
115 if (chan && !wdev->wext.connect.ssid_len) { 121 if (chan && !wdev->wext.connect.ssid_len) {
116 err = cfg80211_set_freq(rdev, wdev, freq, NL80211_CHAN_NO_HT); 122 err = cfg80211_set_monitor_channel(rdev, freq, NL80211_CHAN_NO_HT);
117 goto out; 123 goto out;
118 } 124 }
119 125
diff --git a/net/x25/x25_route.c b/net/x25/x25_route.c
index cf636627005..277c8d2448d 100644
--- a/net/x25/x25_route.c
+++ b/net/x25/x25_route.c
@@ -66,7 +66,7 @@ out:
66 66
67/** 67/**
68 * __x25_remove_route - remove route from x25_route_list 68 * __x25_remove_route - remove route from x25_route_list
69 * @rt - route to remove 69 * @rt: route to remove
70 * 70 *
71 * Remove route from x25_route_list. If it was there. 71 * Remove route from x25_route_list. If it was there.
72 * Caller must hold x25_route_list_lock. 72 * Caller must hold x25_route_list_lock.
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index ccfbd328a69..6e97855b584 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -1500,9 +1500,6 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
1500 if (!dev) 1500 if (!dev)
1501 goto free_dst; 1501 goto free_dst;
1502 1502
1503 /* Copy neighbour for reachability confirmation */
1504 dst_set_neighbour(dst0, neigh_clone(dst_get_neighbour_noref(dst)));
1505
1506 xfrm_init_path((struct xfrm_dst *)dst0, dst, nfheader_len); 1503 xfrm_init_path((struct xfrm_dst *)dst0, dst, nfheader_len);
1507 xfrm_init_pmtu(dst_prev); 1504 xfrm_init_pmtu(dst_prev);
1508 1505
@@ -2404,9 +2401,11 @@ static unsigned int xfrm_mtu(const struct dst_entry *dst)
2404 return mtu ? : dst_mtu(dst->path); 2401 return mtu ? : dst_mtu(dst->path);
2405} 2402}
2406 2403
2407static struct neighbour *xfrm_neigh_lookup(const struct dst_entry *dst, const void *daddr) 2404static struct neighbour *xfrm_neigh_lookup(const struct dst_entry *dst,
2405 struct sk_buff *skb,
2406 const void *daddr)
2408{ 2407{
2409 return dst_neigh_lookup(dst->path, daddr); 2408 return dst->path->ops->neigh_lookup(dst, skb, daddr);
2410} 2409}
2411 2410
2412int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo) 2411int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 44293b3fd6a..e75d8e47f35 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -754,58 +754,67 @@ static int copy_to_user_state_extra(struct xfrm_state *x,
754 struct xfrm_usersa_info *p, 754 struct xfrm_usersa_info *p,
755 struct sk_buff *skb) 755 struct sk_buff *skb)
756{ 756{
757 copy_to_user_state(x, p); 757 int ret = 0;
758
759 if (x->coaddr &&
760 nla_put(skb, XFRMA_COADDR, sizeof(*x->coaddr), x->coaddr))
761 goto nla_put_failure;
762
763 if (x->lastused &&
764 nla_put_u64(skb, XFRMA_LASTUSED, x->lastused))
765 goto nla_put_failure;
766
767 if (x->aead &&
768 nla_put(skb, XFRMA_ALG_AEAD, aead_len(x->aead), x->aead))
769 goto nla_put_failure;
770
771 if (x->aalg &&
772 (copy_to_user_auth(x->aalg, skb) ||
773 nla_put(skb, XFRMA_ALG_AUTH_TRUNC,
774 xfrm_alg_auth_len(x->aalg), x->aalg)))
775 goto nla_put_failure;
776
777 if (x->ealg &&
778 nla_put(skb, XFRMA_ALG_CRYPT, xfrm_alg_len(x->ealg), x->ealg))
779 goto nla_put_failure;
780
781 if (x->calg &&
782 nla_put(skb, XFRMA_ALG_COMP, sizeof(*(x->calg)), x->calg))
783 goto nla_put_failure;
784
785 if (x->encap &&
786 nla_put(skb, XFRMA_ENCAP, sizeof(*x->encap), x->encap))
787 goto nla_put_failure;
788 758
789 if (x->tfcpad && 759 copy_to_user_state(x, p);
790 nla_put_u32(skb, XFRMA_TFCPAD, x->tfcpad))
791 goto nla_put_failure;
792
793 if (xfrm_mark_put(skb, &x->mark))
794 goto nla_put_failure;
795
796 if (x->replay_esn &&
797 nla_put(skb, XFRMA_REPLAY_ESN_VAL,
798 xfrm_replay_state_esn_len(x->replay_esn),
799 x->replay_esn))
800 goto nla_put_failure;
801
802 if (x->security && copy_sec_ctx(x->security, skb))
803 goto nla_put_failure;
804
805 return 0;
806 760
807nla_put_failure: 761 if (x->coaddr) {
808 return -EMSGSIZE; 762 ret = nla_put(skb, XFRMA_COADDR, sizeof(*x->coaddr), x->coaddr);
763 if (ret)
764 goto out;
765 }
766 if (x->lastused) {
767 ret = nla_put_u64(skb, XFRMA_LASTUSED, x->lastused);
768 if (ret)
769 goto out;
770 }
771 if (x->aead) {
772 ret = nla_put(skb, XFRMA_ALG_AEAD, aead_len(x->aead), x->aead);
773 if (ret)
774 goto out;
775 }
776 if (x->aalg) {
777 ret = copy_to_user_auth(x->aalg, skb);
778 if (!ret)
779 ret = nla_put(skb, XFRMA_ALG_AUTH_TRUNC,
780 xfrm_alg_auth_len(x->aalg), x->aalg);
781 if (ret)
782 goto out;
783 }
784 if (x->ealg) {
785 ret = nla_put(skb, XFRMA_ALG_CRYPT, xfrm_alg_len(x->ealg), x->ealg);
786 if (ret)
787 goto out;
788 }
789 if (x->calg) {
790 ret = nla_put(skb, XFRMA_ALG_COMP, sizeof(*(x->calg)), x->calg);
791 if (ret)
792 goto out;
793 }
794 if (x->encap) {
795 ret = nla_put(skb, XFRMA_ENCAP, sizeof(*x->encap), x->encap);
796 if (ret)
797 goto out;
798 }
799 if (x->tfcpad) {
800 ret = nla_put_u32(skb, XFRMA_TFCPAD, x->tfcpad);
801 if (ret)
802 goto out;
803 }
804 ret = xfrm_mark_put(skb, &x->mark);
805 if (ret)
806 goto out;
807 if (x->replay_esn) {
808 ret = nla_put(skb, XFRMA_REPLAY_ESN_VAL,
809 xfrm_replay_state_esn_len(x->replay_esn),
810 x->replay_esn);
811 if (ret)
812 goto out;
813 }
814 if (x->security)
815 ret = copy_sec_ctx(x->security, skb);
816out:
817 return ret;
809} 818}
810 819
811static int dump_one_state(struct xfrm_state *x, int count, void *ptr) 820static int dump_one_state(struct xfrm_state *x, int count, void *ptr)
@@ -825,15 +834,12 @@ static int dump_one_state(struct xfrm_state *x, int count, void *ptr)
825 p = nlmsg_data(nlh); 834 p = nlmsg_data(nlh);
826 835
827 err = copy_to_user_state_extra(x, p, skb); 836 err = copy_to_user_state_extra(x, p, skb);
828 if (err) 837 if (err) {
829 goto nla_put_failure; 838 nlmsg_cancel(skb, nlh);
830 839 return err;
840 }
831 nlmsg_end(skb, nlh); 841 nlmsg_end(skb, nlh);
832 return 0; 842 return 0;
833
834nla_put_failure:
835 nlmsg_cancel(skb, nlh);
836 return err;
837} 843}
838 844
839static int xfrm_dump_sa_done(struct netlink_callback *cb) 845static int xfrm_dump_sa_done(struct netlink_callback *cb)
@@ -904,6 +910,7 @@ static int build_spdinfo(struct sk_buff *skb, struct net *net,
904 struct xfrmu_spdinfo spc; 910 struct xfrmu_spdinfo spc;
905 struct xfrmu_spdhinfo sph; 911 struct xfrmu_spdhinfo sph;
906 struct nlmsghdr *nlh; 912 struct nlmsghdr *nlh;
913 int err;
907 u32 *f; 914 u32 *f;
908 915
909 nlh = nlmsg_put(skb, pid, seq, XFRM_MSG_NEWSPDINFO, sizeof(u32), 0); 916 nlh = nlmsg_put(skb, pid, seq, XFRM_MSG_NEWSPDINFO, sizeof(u32), 0);
@@ -922,15 +929,15 @@ static int build_spdinfo(struct sk_buff *skb, struct net *net,
922 sph.spdhcnt = si.spdhcnt; 929 sph.spdhcnt = si.spdhcnt;
923 sph.spdhmcnt = si.spdhmcnt; 930 sph.spdhmcnt = si.spdhmcnt;
924 931
925 if (nla_put(skb, XFRMA_SPD_INFO, sizeof(spc), &spc) || 932 err = nla_put(skb, XFRMA_SPD_INFO, sizeof(spc), &spc);
926 nla_put(skb, XFRMA_SPD_HINFO, sizeof(sph), &sph)) 933 if (!err)
927 goto nla_put_failure; 934 err = nla_put(skb, XFRMA_SPD_HINFO, sizeof(sph), &sph);
935 if (err) {
936 nlmsg_cancel(skb, nlh);
937 return err;
938 }
928 939
929 return nlmsg_end(skb, nlh); 940 return nlmsg_end(skb, nlh);
930
931nla_put_failure:
932 nlmsg_cancel(skb, nlh);
933 return -EMSGSIZE;
934} 941}
935 942
936static int xfrm_get_spdinfo(struct sk_buff *skb, struct nlmsghdr *nlh, 943static int xfrm_get_spdinfo(struct sk_buff *skb, struct nlmsghdr *nlh,
@@ -965,6 +972,7 @@ static int build_sadinfo(struct sk_buff *skb, struct net *net,
965 struct xfrmk_sadinfo si; 972 struct xfrmk_sadinfo si;
966 struct xfrmu_sadhinfo sh; 973 struct xfrmu_sadhinfo sh;
967 struct nlmsghdr *nlh; 974 struct nlmsghdr *nlh;
975 int err;
968 u32 *f; 976 u32 *f;
969 977
970 nlh = nlmsg_put(skb, pid, seq, XFRM_MSG_NEWSADINFO, sizeof(u32), 0); 978 nlh = nlmsg_put(skb, pid, seq, XFRM_MSG_NEWSADINFO, sizeof(u32), 0);
@@ -978,15 +986,15 @@ static int build_sadinfo(struct sk_buff *skb, struct net *net,
978 sh.sadhmcnt = si.sadhmcnt; 986 sh.sadhmcnt = si.sadhmcnt;
979 sh.sadhcnt = si.sadhcnt; 987 sh.sadhcnt = si.sadhcnt;
980 988
981 if (nla_put_u32(skb, XFRMA_SAD_CNT, si.sadcnt) || 989 err = nla_put_u32(skb, XFRMA_SAD_CNT, si.sadcnt);
982 nla_put(skb, XFRMA_SAD_HINFO, sizeof(sh), &sh)) 990 if (!err)
983 goto nla_put_failure; 991 err = nla_put(skb, XFRMA_SAD_HINFO, sizeof(sh), &sh);
992 if (err) {
993 nlmsg_cancel(skb, nlh);
994 return err;
995 }
984 996
985 return nlmsg_end(skb, nlh); 997 return nlmsg_end(skb, nlh);
986
987nla_put_failure:
988 nlmsg_cancel(skb, nlh);
989 return -EMSGSIZE;
990} 998}
991 999
992static int xfrm_get_sadinfo(struct sk_buff *skb, struct nlmsghdr *nlh, 1000static int xfrm_get_sadinfo(struct sk_buff *skb, struct nlmsghdr *nlh,
@@ -1439,9 +1447,8 @@ static inline int copy_to_user_state_sec_ctx(struct xfrm_state *x, struct sk_buf
1439 1447
1440static inline int copy_to_user_sec_ctx(struct xfrm_policy *xp, struct sk_buff *skb) 1448static inline int copy_to_user_sec_ctx(struct xfrm_policy *xp, struct sk_buff *skb)
1441{ 1449{
1442 if (xp->security) { 1450 if (xp->security)
1443 return copy_sec_ctx(xp->security, skb); 1451 return copy_sec_ctx(xp->security, skb);
1444 }
1445 return 0; 1452 return 0;
1446} 1453}
1447static inline size_t userpolicy_type_attrsize(void) 1454static inline size_t userpolicy_type_attrsize(void)
@@ -1477,6 +1484,7 @@ static int dump_one_policy(struct xfrm_policy *xp, int dir, int count, void *ptr
1477 struct sk_buff *in_skb = sp->in_skb; 1484 struct sk_buff *in_skb = sp->in_skb;
1478 struct sk_buff *skb = sp->out_skb; 1485 struct sk_buff *skb = sp->out_skb;
1479 struct nlmsghdr *nlh; 1486 struct nlmsghdr *nlh;
1487 int err;
1480 1488
1481 nlh = nlmsg_put(skb, NETLINK_CB(in_skb).pid, sp->nlmsg_seq, 1489 nlh = nlmsg_put(skb, NETLINK_CB(in_skb).pid, sp->nlmsg_seq,
1482 XFRM_MSG_NEWPOLICY, sizeof(*p), sp->nlmsg_flags); 1490 XFRM_MSG_NEWPOLICY, sizeof(*p), sp->nlmsg_flags);
@@ -1485,22 +1493,19 @@ static int dump_one_policy(struct xfrm_policy *xp, int dir, int count, void *ptr
1485 1493
1486 p = nlmsg_data(nlh); 1494 p = nlmsg_data(nlh);
1487 copy_to_user_policy(xp, p, dir); 1495 copy_to_user_policy(xp, p, dir);
1488 if (copy_to_user_tmpl(xp, skb) < 0) 1496 err = copy_to_user_tmpl(xp, skb);
1489 goto nlmsg_failure; 1497 if (!err)
1490 if (copy_to_user_sec_ctx(xp, skb)) 1498 err = copy_to_user_sec_ctx(xp, skb);
1491 goto nlmsg_failure; 1499 if (!err)
1492 if (copy_to_user_policy_type(xp->type, skb) < 0) 1500 err = copy_to_user_policy_type(xp->type, skb);
1493 goto nlmsg_failure; 1501 if (!err)
1494 if (xfrm_mark_put(skb, &xp->mark)) 1502 err = xfrm_mark_put(skb, &xp->mark);
1495 goto nla_put_failure; 1503 if (err) {
1496 1504 nlmsg_cancel(skb, nlh);
1505 return err;
1506 }
1497 nlmsg_end(skb, nlh); 1507 nlmsg_end(skb, nlh);
1498 return 0; 1508 return 0;
1499
1500nla_put_failure:
1501nlmsg_failure:
1502 nlmsg_cancel(skb, nlh);
1503 return -EMSGSIZE;
1504} 1509}
1505 1510
1506static int xfrm_dump_policy_done(struct netlink_callback *cb) 1511static int xfrm_dump_policy_done(struct netlink_callback *cb)
@@ -1688,6 +1693,7 @@ static int build_aevent(struct sk_buff *skb, struct xfrm_state *x, const struct
1688{ 1693{
1689 struct xfrm_aevent_id *id; 1694 struct xfrm_aevent_id *id;
1690 struct nlmsghdr *nlh; 1695 struct nlmsghdr *nlh;
1696 int err;
1691 1697
1692 nlh = nlmsg_put(skb, c->pid, c->seq, XFRM_MSG_NEWAE, sizeof(*id), 0); 1698 nlh = nlmsg_put(skb, c->pid, c->seq, XFRM_MSG_NEWAE, sizeof(*id), 0);
1693 if (nlh == NULL) 1699 if (nlh == NULL)
@@ -1703,35 +1709,39 @@ static int build_aevent(struct sk_buff *skb, struct xfrm_state *x, const struct
1703 id->flags = c->data.aevent; 1709 id->flags = c->data.aevent;
1704 1710
1705 if (x->replay_esn) { 1711 if (x->replay_esn) {
1706 if (nla_put(skb, XFRMA_REPLAY_ESN_VAL, 1712 err = nla_put(skb, XFRMA_REPLAY_ESN_VAL,
1707 xfrm_replay_state_esn_len(x->replay_esn), 1713 xfrm_replay_state_esn_len(x->replay_esn),
1708 x->replay_esn)) 1714 x->replay_esn);
1709 goto nla_put_failure;
1710 } else { 1715 } else {
1711 if (nla_put(skb, XFRMA_REPLAY_VAL, sizeof(x->replay), 1716 err = nla_put(skb, XFRMA_REPLAY_VAL, sizeof(x->replay),
1712 &x->replay)) 1717 &x->replay);
1713 goto nla_put_failure;
1714 } 1718 }
1715 if (nla_put(skb, XFRMA_LTIME_VAL, sizeof(x->curlft), &x->curlft)) 1719 if (err)
1716 goto nla_put_failure; 1720 goto out_cancel;
1717 1721 err = nla_put(skb, XFRMA_LTIME_VAL, sizeof(x->curlft), &x->curlft);
1718 if ((id->flags & XFRM_AE_RTHR) && 1722 if (err)
1719 nla_put_u32(skb, XFRMA_REPLAY_THRESH, x->replay_maxdiff)) 1723 goto out_cancel;
1720 goto nla_put_failure;
1721
1722 if ((id->flags & XFRM_AE_ETHR) &&
1723 nla_put_u32(skb, XFRMA_ETIMER_THRESH,
1724 x->replay_maxage * 10 / HZ))
1725 goto nla_put_failure;
1726 1724
1727 if (xfrm_mark_put(skb, &x->mark)) 1725 if (id->flags & XFRM_AE_RTHR) {
1728 goto nla_put_failure; 1726 err = nla_put_u32(skb, XFRMA_REPLAY_THRESH, x->replay_maxdiff);
1727 if (err)
1728 goto out_cancel;
1729 }
1730 if (id->flags & XFRM_AE_ETHR) {
1731 err = nla_put_u32(skb, XFRMA_ETIMER_THRESH,
1732 x->replay_maxage * 10 / HZ);
1733 if (err)
1734 goto out_cancel;
1735 }
1736 err = xfrm_mark_put(skb, &x->mark);
1737 if (err)
1738 goto out_cancel;
1729 1739
1730 return nlmsg_end(skb, nlh); 1740 return nlmsg_end(skb, nlh);
1731 1741
1732nla_put_failure: 1742out_cancel:
1733 nlmsg_cancel(skb, nlh); 1743 nlmsg_cancel(skb, nlh);
1734 return -EMSGSIZE; 1744 return err;
1735} 1745}
1736 1746
1737static int xfrm_get_ae(struct sk_buff *skb, struct nlmsghdr *nlh, 1747static int xfrm_get_ae(struct sk_buff *skb, struct nlmsghdr *nlh,
@@ -2155,7 +2165,7 @@ static int build_migrate(struct sk_buff *skb, const struct xfrm_migrate *m,
2155 const struct xfrm_migrate *mp; 2165 const struct xfrm_migrate *mp;
2156 struct xfrm_userpolicy_id *pol_id; 2166 struct xfrm_userpolicy_id *pol_id;
2157 struct nlmsghdr *nlh; 2167 struct nlmsghdr *nlh;
2158 int i; 2168 int i, err;
2159 2169
2160 nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_MIGRATE, sizeof(*pol_id), 0); 2170 nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_MIGRATE, sizeof(*pol_id), 0);
2161 if (nlh == NULL) 2171 if (nlh == NULL)
@@ -2167,21 +2177,25 @@ static int build_migrate(struct sk_buff *skb, const struct xfrm_migrate *m,
2167 memcpy(&pol_id->sel, sel, sizeof(pol_id->sel)); 2177 memcpy(&pol_id->sel, sel, sizeof(pol_id->sel));
2168 pol_id->dir = dir; 2178 pol_id->dir = dir;
2169 2179
2170 if (k != NULL && (copy_to_user_kmaddress(k, skb) < 0)) 2180 if (k != NULL) {
2171 goto nlmsg_failure; 2181 err = copy_to_user_kmaddress(k, skb);
2172 2182 if (err)
2173 if (copy_to_user_policy_type(type, skb) < 0) 2183 goto out_cancel;
2174 goto nlmsg_failure; 2184 }
2175 2185 err = copy_to_user_policy_type(type, skb);
2186 if (err)
2187 goto out_cancel;
2176 for (i = 0, mp = m ; i < num_migrate; i++, mp++) { 2188 for (i = 0, mp = m ; i < num_migrate; i++, mp++) {
2177 if (copy_to_user_migrate(mp, skb) < 0) 2189 err = copy_to_user_migrate(mp, skb);
2178 goto nlmsg_failure; 2190 if (err)
2191 goto out_cancel;
2179 } 2192 }
2180 2193
2181 return nlmsg_end(skb, nlh); 2194 return nlmsg_end(skb, nlh);
2182nlmsg_failure: 2195
2196out_cancel:
2183 nlmsg_cancel(skb, nlh); 2197 nlmsg_cancel(skb, nlh);
2184 return -EMSGSIZE; 2198 return err;
2185} 2199}
2186 2200
2187static int xfrm_send_migrate(const struct xfrm_selector *sel, u8 dir, u8 type, 2201static int xfrm_send_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
@@ -2354,6 +2368,7 @@ static int build_expire(struct sk_buff *skb, struct xfrm_state *x, const struct
2354{ 2368{
2355 struct xfrm_user_expire *ue; 2369 struct xfrm_user_expire *ue;
2356 struct nlmsghdr *nlh; 2370 struct nlmsghdr *nlh;
2371 int err;
2357 2372
2358 nlh = nlmsg_put(skb, c->pid, 0, XFRM_MSG_EXPIRE, sizeof(*ue), 0); 2373 nlh = nlmsg_put(skb, c->pid, 0, XFRM_MSG_EXPIRE, sizeof(*ue), 0);
2359 if (nlh == NULL) 2374 if (nlh == NULL)
@@ -2363,13 +2378,11 @@ static int build_expire(struct sk_buff *skb, struct xfrm_state *x, const struct
2363 copy_to_user_state(x, &ue->state); 2378 copy_to_user_state(x, &ue->state);
2364 ue->hard = (c->data.hard != 0) ? 1 : 0; 2379 ue->hard = (c->data.hard != 0) ? 1 : 0;
2365 2380
2366 if (xfrm_mark_put(skb, &x->mark)) 2381 err = xfrm_mark_put(skb, &x->mark);
2367 goto nla_put_failure; 2382 if (err)
2383 return err;
2368 2384
2369 return nlmsg_end(skb, nlh); 2385 return nlmsg_end(skb, nlh);
2370
2371nla_put_failure:
2372 return -EMSGSIZE;
2373} 2386}
2374 2387
2375static int xfrm_exp_state_notify(struct xfrm_state *x, const struct km_event *c) 2388static int xfrm_exp_state_notify(struct xfrm_state *x, const struct km_event *c)
@@ -2470,7 +2483,7 @@ static int xfrm_notify_sa(struct xfrm_state *x, const struct km_event *c)
2470 struct nlmsghdr *nlh; 2483 struct nlmsghdr *nlh;
2471 struct sk_buff *skb; 2484 struct sk_buff *skb;
2472 int len = xfrm_sa_len(x); 2485 int len = xfrm_sa_len(x);
2473 int headlen; 2486 int headlen, err;
2474 2487
2475 headlen = sizeof(*p); 2488 headlen = sizeof(*p);
2476 if (c->event == XFRM_MSG_DELSA) { 2489 if (c->event == XFRM_MSG_DELSA) {
@@ -2485,8 +2498,9 @@ static int xfrm_notify_sa(struct xfrm_state *x, const struct km_event *c)
2485 return -ENOMEM; 2498 return -ENOMEM;
2486 2499
2487 nlh = nlmsg_put(skb, c->pid, c->seq, c->event, headlen, 0); 2500 nlh = nlmsg_put(skb, c->pid, c->seq, c->event, headlen, 0);
2501 err = -EMSGSIZE;
2488 if (nlh == NULL) 2502 if (nlh == NULL)
2489 goto nla_put_failure; 2503 goto out_free_skb;
2490 2504
2491 p = nlmsg_data(nlh); 2505 p = nlmsg_data(nlh);
2492 if (c->event == XFRM_MSG_DELSA) { 2506 if (c->event == XFRM_MSG_DELSA) {
@@ -2499,24 +2513,23 @@ static int xfrm_notify_sa(struct xfrm_state *x, const struct km_event *c)
2499 id->proto = x->id.proto; 2513 id->proto = x->id.proto;
2500 2514
2501 attr = nla_reserve(skb, XFRMA_SA, sizeof(*p)); 2515 attr = nla_reserve(skb, XFRMA_SA, sizeof(*p));
2516 err = -EMSGSIZE;
2502 if (attr == NULL) 2517 if (attr == NULL)
2503 goto nla_put_failure; 2518 goto out_free_skb;
2504 2519
2505 p = nla_data(attr); 2520 p = nla_data(attr);
2506 } 2521 }
2507 2522 err = copy_to_user_state_extra(x, p, skb);
2508 if (copy_to_user_state_extra(x, p, skb)) 2523 if (err)
2509 goto nla_put_failure; 2524 goto out_free_skb;
2510 2525
2511 nlmsg_end(skb, nlh); 2526 nlmsg_end(skb, nlh);
2512 2527
2513 return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_SA, GFP_ATOMIC); 2528 return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_SA, GFP_ATOMIC);
2514 2529
2515nla_put_failure: 2530out_free_skb:
2516 /* Somebody screwed up with xfrm_sa_len! */
2517 WARN_ON(1);
2518 kfree_skb(skb); 2531 kfree_skb(skb);
2519 return -1; 2532 return err;
2520} 2533}
2521 2534
2522static int xfrm_send_state_notify(struct xfrm_state *x, const struct km_event *c) 2535static int xfrm_send_state_notify(struct xfrm_state *x, const struct km_event *c)
@@ -2557,9 +2570,10 @@ static int build_acquire(struct sk_buff *skb, struct xfrm_state *x,
2557 struct xfrm_tmpl *xt, struct xfrm_policy *xp, 2570 struct xfrm_tmpl *xt, struct xfrm_policy *xp,
2558 int dir) 2571 int dir)
2559{ 2572{
2573 __u32 seq = xfrm_get_acqseq();
2560 struct xfrm_user_acquire *ua; 2574 struct xfrm_user_acquire *ua;
2561 struct nlmsghdr *nlh; 2575 struct nlmsghdr *nlh;
2562 __u32 seq = xfrm_get_acqseq(); 2576 int err;
2563 2577
2564 nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_ACQUIRE, sizeof(*ua), 0); 2578 nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_ACQUIRE, sizeof(*ua), 0);
2565 if (nlh == NULL) 2579 if (nlh == NULL)
@@ -2575,21 +2589,19 @@ static int build_acquire(struct sk_buff *skb, struct xfrm_state *x,
2575 ua->calgos = xt->calgos; 2589 ua->calgos = xt->calgos;
2576 ua->seq = x->km.seq = seq; 2590 ua->seq = x->km.seq = seq;
2577 2591
2578 if (copy_to_user_tmpl(xp, skb) < 0) 2592 err = copy_to_user_tmpl(xp, skb);
2579 goto nlmsg_failure; 2593 if (!err)
2580 if (copy_to_user_state_sec_ctx(x, skb)) 2594 err = copy_to_user_state_sec_ctx(x, skb);
2581 goto nlmsg_failure; 2595 if (!err)
2582 if (copy_to_user_policy_type(xp->type, skb) < 0) 2596 err = copy_to_user_policy_type(xp->type, skb);
2583 goto nlmsg_failure; 2597 if (!err)
2584 if (xfrm_mark_put(skb, &xp->mark)) 2598 err = xfrm_mark_put(skb, &xp->mark);
2585 goto nla_put_failure; 2599 if (err) {
2600 nlmsg_cancel(skb, nlh);
2601 return err;
2602 }
2586 2603
2587 return nlmsg_end(skb, nlh); 2604 return nlmsg_end(skb, nlh);
2588
2589nla_put_failure:
2590nlmsg_failure:
2591 nlmsg_cancel(skb, nlh);
2592 return -EMSGSIZE;
2593} 2605}
2594 2606
2595static int xfrm_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *xt, 2607static int xfrm_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *xt,
@@ -2681,8 +2693,9 @@ static int build_polexpire(struct sk_buff *skb, struct xfrm_policy *xp,
2681 int dir, const struct km_event *c) 2693 int dir, const struct km_event *c)
2682{ 2694{
2683 struct xfrm_user_polexpire *upe; 2695 struct xfrm_user_polexpire *upe;
2684 struct nlmsghdr *nlh;
2685 int hard = c->data.hard; 2696 int hard = c->data.hard;
2697 struct nlmsghdr *nlh;
2698 int err;
2686 2699
2687 nlh = nlmsg_put(skb, c->pid, 0, XFRM_MSG_POLEXPIRE, sizeof(*upe), 0); 2700 nlh = nlmsg_put(skb, c->pid, 0, XFRM_MSG_POLEXPIRE, sizeof(*upe), 0);
2688 if (nlh == NULL) 2701 if (nlh == NULL)
@@ -2690,22 +2703,20 @@ static int build_polexpire(struct sk_buff *skb, struct xfrm_policy *xp,
2690 2703
2691 upe = nlmsg_data(nlh); 2704 upe = nlmsg_data(nlh);
2692 copy_to_user_policy(xp, &upe->pol, dir); 2705 copy_to_user_policy(xp, &upe->pol, dir);
2693 if (copy_to_user_tmpl(xp, skb) < 0) 2706 err = copy_to_user_tmpl(xp, skb);
2694 goto nlmsg_failure; 2707 if (!err)
2695 if (copy_to_user_sec_ctx(xp, skb)) 2708 err = copy_to_user_sec_ctx(xp, skb);
2696 goto nlmsg_failure; 2709 if (!err)
2697 if (copy_to_user_policy_type(xp->type, skb) < 0) 2710 err = copy_to_user_policy_type(xp->type, skb);
2698 goto nlmsg_failure; 2711 if (!err)
2699 if (xfrm_mark_put(skb, &xp->mark)) 2712 err = xfrm_mark_put(skb, &xp->mark);
2700 goto nla_put_failure; 2713 if (err) {
2714 nlmsg_cancel(skb, nlh);
2715 return err;
2716 }
2701 upe->hard = !!hard; 2717 upe->hard = !!hard;
2702 2718
2703 return nlmsg_end(skb, nlh); 2719 return nlmsg_end(skb, nlh);
2704
2705nla_put_failure:
2706nlmsg_failure:
2707 nlmsg_cancel(skb, nlh);
2708 return -EMSGSIZE;
2709} 2720}
2710 2721
2711static int xfrm_exp_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c) 2722static int xfrm_exp_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c)
@@ -2725,13 +2736,13 @@ static int xfrm_exp_policy_notify(struct xfrm_policy *xp, int dir, const struct
2725 2736
2726static int xfrm_notify_policy(struct xfrm_policy *xp, int dir, const struct km_event *c) 2737static int xfrm_notify_policy(struct xfrm_policy *xp, int dir, const struct km_event *c)
2727{ 2738{
2739 int len = nla_total_size(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr);
2728 struct net *net = xp_net(xp); 2740 struct net *net = xp_net(xp);
2729 struct xfrm_userpolicy_info *p; 2741 struct xfrm_userpolicy_info *p;
2730 struct xfrm_userpolicy_id *id; 2742 struct xfrm_userpolicy_id *id;
2731 struct nlmsghdr *nlh; 2743 struct nlmsghdr *nlh;
2732 struct sk_buff *skb; 2744 struct sk_buff *skb;
2733 int len = nla_total_size(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr); 2745 int headlen, err;
2734 int headlen;
2735 2746
2736 headlen = sizeof(*p); 2747 headlen = sizeof(*p);
2737 if (c->event == XFRM_MSG_DELPOLICY) { 2748 if (c->event == XFRM_MSG_DELPOLICY) {
@@ -2747,8 +2758,9 @@ static int xfrm_notify_policy(struct xfrm_policy *xp, int dir, const struct km_e
2747 return -ENOMEM; 2758 return -ENOMEM;
2748 2759
2749 nlh = nlmsg_put(skb, c->pid, c->seq, c->event, headlen, 0); 2760 nlh = nlmsg_put(skb, c->pid, c->seq, c->event, headlen, 0);
2761 err = -EMSGSIZE;
2750 if (nlh == NULL) 2762 if (nlh == NULL)
2751 goto nlmsg_failure; 2763 goto out_free_skb;
2752 2764
2753 p = nlmsg_data(nlh); 2765 p = nlmsg_data(nlh);
2754 if (c->event == XFRM_MSG_DELPOLICY) { 2766 if (c->event == XFRM_MSG_DELPOLICY) {
@@ -2763,29 +2775,29 @@ static int xfrm_notify_policy(struct xfrm_policy *xp, int dir, const struct km_e
2763 memcpy(&id->sel, &xp->selector, sizeof(id->sel)); 2775 memcpy(&id->sel, &xp->selector, sizeof(id->sel));
2764 2776
2765 attr = nla_reserve(skb, XFRMA_POLICY, sizeof(*p)); 2777 attr = nla_reserve(skb, XFRMA_POLICY, sizeof(*p));
2778 err = -EMSGSIZE;
2766 if (attr == NULL) 2779 if (attr == NULL)
2767 goto nlmsg_failure; 2780 goto out_free_skb;
2768 2781
2769 p = nla_data(attr); 2782 p = nla_data(attr);
2770 } 2783 }
2771 2784
2772 copy_to_user_policy(xp, p, dir); 2785 copy_to_user_policy(xp, p, dir);
2773 if (copy_to_user_tmpl(xp, skb) < 0) 2786 err = copy_to_user_tmpl(xp, skb);
2774 goto nlmsg_failure; 2787 if (!err)
2775 if (copy_to_user_policy_type(xp->type, skb) < 0) 2788 err = copy_to_user_policy_type(xp->type, skb);
2776 goto nlmsg_failure; 2789 if (!err)
2777 2790 err = xfrm_mark_put(skb, &xp->mark);
2778 if (xfrm_mark_put(skb, &xp->mark)) 2791 if (err)
2779 goto nla_put_failure; 2792 goto out_free_skb;
2780 2793
2781 nlmsg_end(skb, nlh); 2794 nlmsg_end(skb, nlh);
2782 2795
2783 return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_POLICY, GFP_ATOMIC); 2796 return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_POLICY, GFP_ATOMIC);
2784 2797
2785nla_put_failure: 2798out_free_skb:
2786nlmsg_failure:
2787 kfree_skb(skb); 2799 kfree_skb(skb);
2788 return -1; 2800 return err;
2789} 2801}
2790 2802
2791static int xfrm_notify_policy_flush(const struct km_event *c) 2803static int xfrm_notify_policy_flush(const struct km_event *c)
@@ -2793,24 +2805,27 @@ static int xfrm_notify_policy_flush(const struct km_event *c)
2793 struct net *net = c->net; 2805 struct net *net = c->net;
2794 struct nlmsghdr *nlh; 2806 struct nlmsghdr *nlh;
2795 struct sk_buff *skb; 2807 struct sk_buff *skb;
2808 int err;
2796 2809
2797 skb = nlmsg_new(userpolicy_type_attrsize(), GFP_ATOMIC); 2810 skb = nlmsg_new(userpolicy_type_attrsize(), GFP_ATOMIC);
2798 if (skb == NULL) 2811 if (skb == NULL)
2799 return -ENOMEM; 2812 return -ENOMEM;
2800 2813
2801 nlh = nlmsg_put(skb, c->pid, c->seq, XFRM_MSG_FLUSHPOLICY, 0, 0); 2814 nlh = nlmsg_put(skb, c->pid, c->seq, XFRM_MSG_FLUSHPOLICY, 0, 0);
2815 err = -EMSGSIZE;
2802 if (nlh == NULL) 2816 if (nlh == NULL)
2803 goto nlmsg_failure; 2817 goto out_free_skb;
2804 if (copy_to_user_policy_type(c->data.type, skb) < 0) 2818 err = copy_to_user_policy_type(c->data.type, skb);
2805 goto nlmsg_failure; 2819 if (err)
2820 goto out_free_skb;
2806 2821
2807 nlmsg_end(skb, nlh); 2822 nlmsg_end(skb, nlh);
2808 2823
2809 return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_POLICY, GFP_ATOMIC); 2824 return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_POLICY, GFP_ATOMIC);
2810 2825
2811nlmsg_failure: 2826out_free_skb:
2812 kfree_skb(skb); 2827 kfree_skb(skb);
2813 return -1; 2828 return err;
2814} 2829}
2815 2830
2816static int xfrm_send_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c) 2831static int xfrm_send_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c)
@@ -2853,15 +2868,14 @@ static int build_report(struct sk_buff *skb, u8 proto,
2853 ur->proto = proto; 2868 ur->proto = proto;
2854 memcpy(&ur->sel, sel, sizeof(ur->sel)); 2869 memcpy(&ur->sel, sel, sizeof(ur->sel));
2855 2870
2856 if (addr && 2871 if (addr) {
2857 nla_put(skb, XFRMA_COADDR, sizeof(*addr), addr)) 2872 int err = nla_put(skb, XFRMA_COADDR, sizeof(*addr), addr);
2858 goto nla_put_failure; 2873 if (err) {
2859 2874 nlmsg_cancel(skb, nlh);
2875 return err;
2876 }
2877 }
2860 return nlmsg_end(skb, nlh); 2878 return nlmsg_end(skb, nlh);
2861
2862nla_put_failure:
2863 nlmsg_cancel(skb, nlh);
2864 return -EMSGSIZE;
2865} 2879}
2866 2880
2867static int xfrm_send_report(struct net *net, u8 proto, 2881static int xfrm_send_report(struct net *net, u8 proto,
@@ -2945,9 +2959,12 @@ static struct xfrm_mgr netlink_mgr = {
2945static int __net_init xfrm_user_net_init(struct net *net) 2959static int __net_init xfrm_user_net_init(struct net *net)
2946{ 2960{
2947 struct sock *nlsk; 2961 struct sock *nlsk;
2962 struct netlink_kernel_cfg cfg = {
2963 .groups = XFRMNLGRP_MAX,
2964 .input = xfrm_netlink_rcv,
2965 };
2948 2966
2949 nlsk = netlink_kernel_create(net, NETLINK_XFRM, XFRMNLGRP_MAX, 2967 nlsk = netlink_kernel_create(net, NETLINK_XFRM, THIS_MODULE, &cfg);
2950 xfrm_netlink_rcv, NULL, THIS_MODULE);
2951 if (nlsk == NULL) 2968 if (nlsk == NULL)
2952 return -ENOMEM; 2969 return -ENOMEM;
2953 net->xfrm.nlsk_stash = nlsk; /* Don't set to NULL */ 2970 net->xfrm.nlsk_stash = nlsk; /* Don't set to NULL */
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 372ec6502aa..4ee6f237001 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -5763,21 +5763,21 @@ static struct nf_hook_ops selinux_ipv4_ops[] = {
5763 { 5763 {
5764 .hook = selinux_ipv4_postroute, 5764 .hook = selinux_ipv4_postroute,
5765 .owner = THIS_MODULE, 5765 .owner = THIS_MODULE,
5766 .pf = PF_INET, 5766 .pf = NFPROTO_IPV4,
5767 .hooknum = NF_INET_POST_ROUTING, 5767 .hooknum = NF_INET_POST_ROUTING,
5768 .priority = NF_IP_PRI_SELINUX_LAST, 5768 .priority = NF_IP_PRI_SELINUX_LAST,
5769 }, 5769 },
5770 { 5770 {
5771 .hook = selinux_ipv4_forward, 5771 .hook = selinux_ipv4_forward,
5772 .owner = THIS_MODULE, 5772 .owner = THIS_MODULE,
5773 .pf = PF_INET, 5773 .pf = NFPROTO_IPV4,
5774 .hooknum = NF_INET_FORWARD, 5774 .hooknum = NF_INET_FORWARD,
5775 .priority = NF_IP_PRI_SELINUX_FIRST, 5775 .priority = NF_IP_PRI_SELINUX_FIRST,
5776 }, 5776 },
5777 { 5777 {
5778 .hook = selinux_ipv4_output, 5778 .hook = selinux_ipv4_output,
5779 .owner = THIS_MODULE, 5779 .owner = THIS_MODULE,
5780 .pf = PF_INET, 5780 .pf = NFPROTO_IPV4,
5781 .hooknum = NF_INET_LOCAL_OUT, 5781 .hooknum = NF_INET_LOCAL_OUT,
5782 .priority = NF_IP_PRI_SELINUX_FIRST, 5782 .priority = NF_IP_PRI_SELINUX_FIRST,
5783 } 5783 }
@@ -5789,14 +5789,14 @@ static struct nf_hook_ops selinux_ipv6_ops[] = {
5789 { 5789 {
5790 .hook = selinux_ipv6_postroute, 5790 .hook = selinux_ipv6_postroute,
5791 .owner = THIS_MODULE, 5791 .owner = THIS_MODULE,
5792 .pf = PF_INET6, 5792 .pf = NFPROTO_IPV6,
5793 .hooknum = NF_INET_POST_ROUTING, 5793 .hooknum = NF_INET_POST_ROUTING,
5794 .priority = NF_IP6_PRI_SELINUX_LAST, 5794 .priority = NF_IP6_PRI_SELINUX_LAST,
5795 }, 5795 },
5796 { 5796 {
5797 .hook = selinux_ipv6_forward, 5797 .hook = selinux_ipv6_forward,
5798 .owner = THIS_MODULE, 5798 .owner = THIS_MODULE,
5799 .pf = PF_INET6, 5799 .pf = NFPROTO_IPV6,
5800 .hooknum = NF_INET_FORWARD, 5800 .hooknum = NF_INET_FORWARD,
5801 .priority = NF_IP6_PRI_SELINUX_FIRST, 5801 .priority = NF_IP6_PRI_SELINUX_FIRST,
5802 } 5802 }
diff --git a/security/selinux/netlink.c b/security/selinux/netlink.c
index 161e01a6c7e..8a77725423e 100644
--- a/security/selinux/netlink.c
+++ b/security/selinux/netlink.c
@@ -19,6 +19,7 @@
19#include <linux/netlink.h> 19#include <linux/netlink.h>
20#include <linux/selinux_netlink.h> 20#include <linux/selinux_netlink.h>
21#include <net/net_namespace.h> 21#include <net/net_namespace.h>
22#include <net/netlink.h>
22 23
23#include "security.h" 24#include "security.h"
24 25
@@ -47,7 +48,7 @@ static void selnl_add_payload(struct nlmsghdr *nlh, int len, int msgtype, void *
47{ 48{
48 switch (msgtype) { 49 switch (msgtype) {
49 case SELNL_MSG_SETENFORCE: { 50 case SELNL_MSG_SETENFORCE: {
50 struct selnl_msg_setenforce *msg = NLMSG_DATA(nlh); 51 struct selnl_msg_setenforce *msg = nlmsg_data(nlh);
51 52
52 memset(msg, 0, len); 53 memset(msg, 0, len);
53 msg->val = *((int *)data); 54 msg->val = *((int *)data);
@@ -55,7 +56,7 @@ static void selnl_add_payload(struct nlmsghdr *nlh, int len, int msgtype, void *
55 } 56 }
56 57
57 case SELNL_MSG_POLICYLOAD: { 58 case SELNL_MSG_POLICYLOAD: {
58 struct selnl_msg_policyload *msg = NLMSG_DATA(nlh); 59 struct selnl_msg_policyload *msg = nlmsg_data(nlh);
59 60
60 memset(msg, 0, len); 61 memset(msg, 0, len);
61 msg->seqno = *((u32 *)data); 62 msg->seqno = *((u32 *)data);
@@ -81,7 +82,9 @@ static void selnl_notify(int msgtype, void *data)
81 goto oom; 82 goto oom;
82 83
83 tmp = skb->tail; 84 tmp = skb->tail;
84 nlh = NLMSG_PUT(skb, 0, 0, msgtype, len); 85 nlh = nlmsg_put(skb, 0, 0, msgtype, len, 0);
86 if (!nlh)
87 goto out_kfree_skb;
85 selnl_add_payload(nlh, len, msgtype, data); 88 selnl_add_payload(nlh, len, msgtype, data);
86 nlh->nlmsg_len = skb->tail - tmp; 89 nlh->nlmsg_len = skb->tail - tmp;
87 NETLINK_CB(skb).dst_group = SELNLGRP_AVC; 90 NETLINK_CB(skb).dst_group = SELNLGRP_AVC;
@@ -89,7 +92,7 @@ static void selnl_notify(int msgtype, void *data)
89out: 92out:
90 return; 93 return;
91 94
92nlmsg_failure: 95out_kfree_skb:
93 kfree_skb(skb); 96 kfree_skb(skb);
94oom: 97oom:
95 printk(KERN_ERR "SELinux: OOM in %s\n", __func__); 98 printk(KERN_ERR "SELinux: OOM in %s\n", __func__);
@@ -108,8 +111,12 @@ void selnl_notify_policyload(u32 seqno)
108 111
109static int __init selnl_init(void) 112static int __init selnl_init(void)
110{ 113{
114 struct netlink_kernel_cfg cfg = {
115 .groups = SELNLGRP_MAX,
116 };
117
111 selnl = netlink_kernel_create(&init_net, NETLINK_SELINUX, 118 selnl = netlink_kernel_create(&init_net, NETLINK_SELINUX,
112 SELNLGRP_MAX, NULL, NULL, THIS_MODULE); 119 THIS_MODULE, &cfg);
113 if (selnl == NULL) 120 if (selnl == NULL)
114 panic("SELinux: Cannot create netlink socket."); 121 panic("SELinux: Cannot create netlink socket.");
115 netlink_set_nonroot(NETLINK_SELINUX, NL_NONROOT_RECV); 122 netlink_set_nonroot(NETLINK_SELINUX, NL_NONROOT_RECV);